Merge remote-tracking branch 'origin/3.0' into feat/3.0/TD-32231

commit 81b34e55d7
@@ -121,6 +121,7 @@ TAGS
 contrib/*
 !contrib/CMakeLists.txt
 !contrib/test
+!contrib/azure-cmake
 sql
 debug*/
 .env
@@ -1,13 +1,13 @@
 cmake_minimum_required(VERSION 3.0)
 
 project(
-TDengine
-VERSION 3.0
-DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)"
+TDengine
+VERSION 3.0
+DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)"
 )
 
-if (NOT DEFINED TD_SOURCE_DIR)
-set( TD_SOURCE_DIR ${PROJECT_SOURCE_DIR} )
+if(NOT DEFINED TD_SOURCE_DIR)
+set(TD_SOURCE_DIR ${PROJECT_SOURCE_DIR})
 endif()
 
 SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
@@ -15,13 +15,11 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
 set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
 set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
 
-
 include(${TD_SUPPORT_DIR}/cmake.platform)
 include(${TD_SUPPORT_DIR}/cmake.define)
 include(${TD_SUPPORT_DIR}/cmake.options)
 include(${TD_SUPPORT_DIR}/cmake.version)
 
-
 # contrib
 add_subdirectory(contrib)
 
@@ -33,8 +31,8 @@ target_include_directories(api INTERFACE "include/client")
 
 # src
 if(${BUILD_TEST})
-include(CTest)
-enable_testing()
+include(CTest)
+enable_testing()
 endif(${BUILD_TEST})
 
 add_subdirectory(source)
@@ -44,5 +42,5 @@ add_subdirectory(examples/c)
 add_subdirectory(tests)
 include(${TD_SUPPORT_DIR}/cmake.install)
 
-# docs
+# docs
 add_subdirectory(docs/doxgen)
@@ -0,0 +1,15 @@
+# azure
+ExternalProject_Add(azure
+URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
+URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
+DOWNLOAD_NO_PROGRESS 1
+DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
+SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"
+#BUILD_IN_SOURCE TRUE
+#BUILD_ALWAYS 1
+#UPDATE_COMMAND ""
+CONFIGURE_COMMAND ""
+BUILD_COMMAND ""
+INSTALL_COMMAND ""
+TEST_COMMAND ""
+)
@@ -3,7 +3,7 @@ set(CMAKE_VERBOSE_MAKEFILE FALSE)
 set(TD_BUILD_TAOSA_INTERNAL FALSE)
 set(TD_BUILD_KEEPER_INTERNAL FALSE)
 
-#set output directory
+# set output directory
 SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
 SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)
 SET(TD_TESTS_OUTPUT_DIR ${PROJECT_BINARY_DIR}/test)
@@ -13,50 +13,50 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
 MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
 MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
 
-if (NOT DEFINED TD_GRANT)
-SET(TD_GRANT FALSE)
-endif()
+IF(NOT DEFINED TD_GRANT)
+SET(TD_GRANT FALSE)
+ENDIF()
 
-IF (NOT DEFINED BUILD_WITH_RAND_ERR)
-SET(BUILD_WITH_RAND_ERR FALSE)
-ELSE ()
-SET(BUILD_WITH_RAND_ERR TRUE)
-endif()
+IF(NOT DEFINED BUILD_WITH_RAND_ERR)
+SET(BUILD_WITH_RAND_ERR FALSE)
+ELSE()
+SET(BUILD_WITH_RAND_ERR TRUE)
+ENDIF()
 
-IF ("${WEBSOCKET}" MATCHES "true")
+IF("${WEBSOCKET}" MATCHES "true")
 SET(TD_WEBSOCKET TRUE)
 MESSAGE("Enable websocket")
 ADD_DEFINITIONS(-DWEBSOCKET)
-ELSE ()
+ELSE()
 SET(TD_WEBSOCKET FALSE)
-ENDIF ()
+ENDIF()
 
-IF ("${BUILD_HTTP}" STREQUAL "")
-IF (TD_LINUX)
-IF (TD_ARM_32)
-SET(TD_BUILD_HTTP TRUE)
-ELSE ()
-SET(TD_BUILD_HTTP TRUE)
-ENDIF ()
-ELSEIF (TD_DARWIN)
-SET(TD_BUILD_HTTP TRUE)
-ELSE ()
-SET(TD_BUILD_HTTP TRUE)
-ENDIF ()
-ELSEIF (${BUILD_HTTP} MATCHES "false")
-SET(TD_BUILD_HTTP FALSE)
-ELSEIF (${BUILD_HTTP} MATCHES "true")
-SET(TD_BUILD_HTTP TRUE)
-ELSEIF (${BUILD_HTTP} MATCHES "internal")
-SET(TD_BUILD_HTTP FALSE)
-SET(TD_BUILD_TAOSA_INTERNAL TRUE)
-ELSE ()
-SET(TD_BUILD_HTTP TRUE)
-ENDIF ()
+IF("${BUILD_HTTP}" STREQUAL "")
+IF(TD_LINUX)
+IF(TD_ARM_32)
+SET(TD_BUILD_HTTP TRUE)
+ELSE()
+SET(TD_BUILD_HTTP TRUE)
+ENDIF()
+ELSEIF(TD_DARWIN)
+SET(TD_BUILD_HTTP TRUE)
+ELSE()
+SET(TD_BUILD_HTTP TRUE)
+ENDIF()
+ELSEIF(${BUILD_HTTP} MATCHES "false")
+SET(TD_BUILD_HTTP FALSE)
+ELSEIF(${BUILD_HTTP} MATCHES "true")
+SET(TD_BUILD_HTTP TRUE)
+ELSEIF(${BUILD_HTTP} MATCHES "internal")
+SET(TD_BUILD_HTTP FALSE)
+SET(TD_BUILD_TAOSA_INTERNAL TRUE)
+ELSE()
+SET(TD_BUILD_HTTP TRUE)
+ENDIF()
 
-IF (TD_BUILD_HTTP)
-ADD_DEFINITIONS(-DHTTP_EMBEDDED)
-ENDIF ()
+IF(TD_BUILD_HTTP)
+ADD_DEFINITIONS(-DHTTP_EMBEDDED)
+ENDIF()
 
 IF ("${BUILD_KEEPER}" STREQUAL "")
 SET(TD_BUILD_KEEPER FALSE)
@@ -71,125 +71,133 @@ ELSE ()
 SET(TD_BUILD_KEEPER FALSE)
 ENDIF ()
 
-IF ("${BUILD_TOOLS}" STREQUAL "")
-IF (TD_LINUX)
-IF (TD_ARM_32)
-SET(BUILD_TOOLS "false")
-ELSEIF (TD_ARM_64)
-SET(BUILD_TOOLS "false")
-ELSE ()
-SET(BUILD_TOOLS "false")
-ENDIF ()
-ELSEIF (TD_DARWIN)
-SET(BUILD_TOOLS "false")
-ELSE ()
-SET(BUILD_TOOLS "false")
-ENDIF ()
-ENDIF ()
+IF("${BUILD_TOOLS}" STREQUAL "")
+IF(TD_LINUX)
+IF(TD_ARM_32)
+SET(BUILD_TOOLS "false")
+ELSEIF(TD_ARM_64)
+SET(BUILD_TOOLS "false")
+ELSE()
+SET(BUILD_TOOLS "false")
+ENDIF()
+ELSEIF(TD_DARWIN)
+SET(BUILD_TOOLS "false")
+ELSE()
+SET(BUILD_TOOLS "false")
+ENDIF()
+ENDIF()
 
-IF ("${BUILD_TOOLS}" MATCHES "false")
+IF("${BUILD_TOOLS}" MATCHES "false")
 MESSAGE("${Yellow} Will _not_ build taos_tools! ${ColourReset}")
 SET(TD_TAOS_TOOLS FALSE)
-ELSE ()
+ELSE()
 MESSAGE("")
 MESSAGE("${Green} Will build taos_tools! ${ColourReset}")
 MESSAGE("")
 SET(TD_TAOS_TOOLS TRUE)
-ENDIF ()
+ENDIF()
 
-IF (${TD_WINDOWS})
+IF(${TD_WINDOWS})
 SET(TAOS_LIB taos_static)
-ELSE ()
+ELSE()
 SET(TAOS_LIB taos)
-ENDIF ()
+ENDIF()
 
 # build TSZ by default
-IF ("${TSZ_ENABLED}" MATCHES "false")
-set(VAR_TSZ "" CACHE INTERNAL "global variant empty" )
+IF("${TSZ_ENABLED}" MATCHES "false")
+set(VAR_TSZ "" CACHE INTERNAL "global variant empty")
 ELSE()
-# define add
-MESSAGE(STATUS "build with TSZ enabled")
-ADD_DEFINITIONS(-DTD_TSZ)
-set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
+# define add
+MESSAGE(STATUS "build with TSZ enabled")
+ADD_DEFINITIONS(-DTD_TSZ)
+set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz")
 ENDIF()
 
 # force set all platform to JEMALLOC_ENABLED = false
 SET(JEMALLOC_ENABLED OFF)
-IF (TD_WINDOWS)
-MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
-IF (${CMAKE_BUILD_TYPE} MATCHES "Release")
-MESSAGE("${Green} will build Release version! ${ColourReset}")
-SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")
-
-ELSE ()
-MESSAGE("${Green} will build Debug version! ${ColourReset}")
-SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
+IF(TD_WINDOWS)
+MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
+
+IF(${CMAKE_BUILD_TYPE} MATCHES "Release")
+MESSAGE("${Green} will build Release version! ${ColourReset}")
+SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")
+
+ELSE()
+MESSAGE("${Green} will build Debug version! ${ColourReset}")
+SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
 ENDIF()
 
 SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
 
 # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
-# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
+# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
 # ENDIF ()
-IF (CMAKE_DEPFILE_FLAGS_C)
+IF(CMAKE_DEPFILE_FLAGS_C)
 SET(CMAKE_DEPFILE_FLAGS_C "")
-ENDIF ()
-IF (CMAKE_DEPFILE_FLAGS_CXX)
+ENDIF()
+
+IF(CMAKE_DEPFILE_FLAGS_CXX)
 SET(CMAKE_DEPFILE_FLAGS_CXX "")
-ENDIF ()
-IF (CMAKE_C_FLAGS_DEBUG)
+ENDIF()
+
+IF(CMAKE_C_FLAGS_DEBUG)
 SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE)
-ENDIF ()
-IF (CMAKE_CXX_FLAGS_DEBUG)
+ENDIF()
+
+IF(CMAKE_CXX_FLAGS_DEBUG)
 SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE)
-ENDIF ()
+ENDIF()
 
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
 
-ELSE ()
-IF (${TD_DARWIN})
+ELSE()
+IF(${TD_DARWIN})
 set(CMAKE_MACOSX_RPATH 0)
-ENDIF ()
-IF (${COVER} MATCHES "true")
+ENDIF()
+
+IF(${COVER} MATCHES "true")
 MESSAGE(STATUS "Test coverage mode, add extra flags")
 SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
-SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage")
+SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage")
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
-ENDIF ()
+ENDIF()
 
 # disable all assert
-IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
+IF((${DISABLE_ASSERT} MATCHES "true") OR(${DISABLE_ASSERTS} MATCHES "true"))
 ADD_DEFINITIONS(-DDISABLE_ASSERT)
 MESSAGE(STATUS "Disable all asserts")
 ENDIF()
 
 INCLUDE(CheckCCompilerFlag)
-IF (TD_ARM_64 OR TD_ARM_32)
+
+IF(TD_ARM_64 OR TD_ARM_32)
 SET(COMPILER_SUPPORT_SSE42 false)
-ELSEIF (("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR ("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang"))
+ELSEIF(("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang"))
 SET(COMPILER_SUPPORT_SSE42 true)
 MESSAGE(STATUS "Always enable sse4.2 for Clang/AppleClang")
 ELSE()
 CHECK_C_COMPILER_FLAG("-msse4.2" COMPILER_SUPPORT_SSE42)
 ENDIF()
 
-IF (TD_ARM_64 OR TD_ARM_32)
-SET(COMPILER_SUPPORT_FMA false)
-SET(COMPILER_SUPPORT_AVX false)
-SET(COMPILER_SUPPORT_AVX2 false)
-SET(COMPILER_SUPPORT_AVX512F false)
-SET(COMPILER_SUPPORT_AVX512BMI false)
-SET(COMPILER_SUPPORT_AVX512VL false)
+IF(TD_ARM_64 OR TD_ARM_32)
+SET(COMPILER_SUPPORT_FMA false)
+SET(COMPILER_SUPPORT_AVX false)
+SET(COMPILER_SUPPORT_AVX2 false)
+SET(COMPILER_SUPPORT_AVX512F false)
+SET(COMPILER_SUPPORT_AVX512BMI false)
+SET(COMPILER_SUPPORT_AVX512VL false)
 ELSE()
-CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
-CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
-CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
-CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
+CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
+CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
+CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
+CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
 
-INCLUDE(CheckCSourceRuns)
-SET(CMAKE_REQUIRED_FLAGS "-mavx")
-check_c_source_runs("
+INCLUDE(CheckCSourceRuns)
+SET(CMAKE_REQUIRED_FLAGS "-mavx")
+check_c_source_runs("
 #include <immintrin.h>
 int main() {
 __m256d a, b, c;
@@ -199,7 +207,7 @@ ELSE ()
 c = _mm256_add_pd(a, b);
 _mm256_storeu_pd(buf, c);
 for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
-if (buf[i] != 0) {
+IF (buf[i] != 0) {
 return 1;
 }
 }
@@ -207,8 +215,8 @@ ELSE ()
 }
 " COMPILER_SUPPORT_AVX)
 
-SET(CMAKE_REQUIRED_FLAGS "-mavx2")
-check_c_source_runs("
+SET(CMAKE_REQUIRED_FLAGS "-mavx2")
+check_c_source_runs("
 #include <immintrin.h>
 int main() {
 __m256i a, b, c;
@@ -218,7 +226,7 @@ ELSE ()
 c = _mm256_and_si256(a, b);
 _mm256_storeu_si256((__m256i *)buf, c);
 for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
-if (buf[i] != 0) {
+IF (buf[i] != 0) {
 return 1;
 }
 }
@@ -227,40 +235,42 @@ ELSE ()
 " COMPILER_SUPPORT_AVX2)
 ENDIF()
 
-IF (COMPILER_SUPPORT_SSE42)
+IF(COMPILER_SUPPORT_SSE42)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
 ENDIF()
 
-IF ("${SIMD_SUPPORT}" MATCHES "true")
-IF (COMPILER_SUPPORT_FMA)
+IF("${SIMD_SUPPORT}" MATCHES "true")
+IF(COMPILER_SUPPORT_FMA)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
 MESSAGE(STATUS "FMA instructions is ACTIVATED")
 ENDIF()
-IF (COMPILER_SUPPORT_AVX)
+
+IF(COMPILER_SUPPORT_AVX)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
 MESSAGE(STATUS "AVX instructions is ACTIVATED")
 ENDIF()
-IF (COMPILER_SUPPORT_AVX2)
+
+IF(COMPILER_SUPPORT_AVX2)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
 MESSAGE(STATUS "AVX2 instructions is ACTIVATED")
 ENDIF()
 ENDIF()
 
-IF ("${SIMD_AVX512_SUPPORT}" MATCHES "true")
-IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
+IF("${SIMD_AVX512_SUPPORT}" MATCHES "true")
+IF(COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
 MESSAGE(STATUS "avx512f/avx512bmi enabled by compiler")
 ENDIF()
 
-IF (COMPILER_SUPPORT_AVX512VL)
+IF(COMPILER_SUPPORT_AVX512VL)
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
-MESSAGE(STATUS "avx512vl enabled by compiler")
+MESSAGE(STATUS "avx512vl enabled by compiler")
 ENDIF()
 ENDIF()
 
@@ -268,16 +278,17 @@ ELSE ()
 SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 
-IF (${BUILD_SANITIZER})
+IF(${BUILD_SANITIZER})
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
-MESSAGE(STATUS "Compile with Address Sanitizer!")
-ELSEIF (${BUILD_RELEASE})
+
+# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+MESSAGE(STATUS "Compile with Address Sanitizer!")
+ELSEIF(${BUILD_RELEASE})
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
-ELSE ()
+ELSE()
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-ENDIF ()
+ENDIF()
 
-ENDIF ()
+ENDIF()
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
 GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-GIT_TAG 3.0
+GIT_TAG main
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
 GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-GIT_TAG 3.0
+GIT_TAG main
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
@@ -10,39 +10,36 @@ if(${BUILD_WITH_S3})
 file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
 
 elseif(${BUILD_WITH_COS})
-set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
-configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
-if(${BUILD_WITH_COS})
-file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
-cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
-cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
-cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
-endif(${BUILD_WITH_COS})
+set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
+configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
+
+if(${BUILD_WITH_COS})
+file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
+cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
+cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
+cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
+endif(${BUILD_WITH_COS})
 
-configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
-execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
+configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
+execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
 WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
-execute_process(COMMAND "${CMAKE_COMMAND}" --build .
+execute_process(COMMAND "${CMAKE_COMMAND}" --build .
 WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
 
-set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
-configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
+set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
+configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
 
-if(${BUILD_WITH_COS})
-cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
-endif(${BUILD_WITH_COS})
+if(${BUILD_WITH_COS})
+cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
+endif(${BUILD_WITH_COS})
 
-configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
-execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
+configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
+execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
 WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
-execute_process(COMMAND "${CMAKE_COMMAND}" --build .
+execute_process(COMMAND "${CMAKE_COMMAND}" --build .
 WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
 
 endif()
 
 
 set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
 configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -59,7 +56,7 @@ endif()
 # taosadapter
 if(${BUILD_HTTP})
 MESSAGE("BUILD_HTTP is on")
-else ()
+else()
 MESSAGE("BUILD_HTTP is off, use taosAdapter")
 cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 endif()
@@ -110,19 +107,18 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 
 # xz
-#cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 
-#lzma2
+# lzma2
 cat("${TD_SUPPORT_DIR}/lzma_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 
-
-if (${BUILD_CONTRIB})
+if(${BUILD_CONTRIB})
 if(${BUILD_WITH_ROCKSDB})
 cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 add_definitions(-DUSE_ROCKSDB)
 endif()
 else()
-if (NOT ${TD_LINUX})
+if(NOT ${TD_LINUX})
 if(${BUILD_WITH_ROCKSDB})
 cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 add_definitions(-DUSE_ROCKSDB)
@@ -134,9 +130,9 @@ else()
 endif()
 endif()
 
-#cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+# cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 
-#libuv
+# libuv
 if(${BUILD_WITH_UV})
 cat("${TD_SUPPORT_DIR}/libuv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 endif(${BUILD_WITH_UV})
@@ -152,17 +148,17 @@ if(${BUILD_WITH_S3})
 cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 add_definitions(-DUSE_S3)
 
 # cos
 elseif(${BUILD_WITH_COS})
-#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+# cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+# cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+# cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+# cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 add_definitions(-DUSE_COS)
 
 endif()
 
 # crashdump
@@ -191,9 +187,9 @@ endif()
 # download dependencies
 configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
 execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
-WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
+WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
 execute_process(COMMAND "${CMAKE_COMMAND}" --build .
-WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
+WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
 
 # ================================================================================================
 # Build
@@ -206,25 +202,27 @@ if(${BUILD_TEST})
 gtest
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src>
 )
 
 if(${TD_WINDOWS})
 target_include_directories(
 gtest
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_win>
 )
 endif(${TD_WINDOWS})
+
 if(${TD_LINUX})
 target_include_directories(
 gtest
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_linux>
 )
 endif(${TD_LINUX})
+
 if(${TD_DARWIN})
 target_include_directories(
 gtest
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_darwin>
 )
 endif(${TD_DARWIN})
 
 endif(${BUILD_TEST})
 
 # cJson
@@ -236,15 +234,16 @@ option(CJSON_BUILD_SHARED_LIBS "Overrides BUILD_SHARED_LIBS if CJSON_OVERRIDE_BU
 add_subdirectory(cJson EXCLUDE_FROM_ALL)
 target_include_directories(
 cjson
+
 # see https://stackoverflow.com/questions/25676277/cmake-target-include-directories-prints-an-error-when-i-try-to-add-the-source
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cJson>
 )
 unset(CMAKE_PROJECT_INCLUDE_BEFORE)
 
 # xml2
-#if(${BUILD_WITH_S3})
-# add_subdirectory(xml2 EXCLUDE_FROM_ALL)
-#endif()
+# if(${BUILD_WITH_S3})
+# add_subdirectory(xml2 EXCLUDE_FROM_ALL)
+# endif()
 
 # lz4
 add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
@@ -255,10 +254,12 @@ target_include_directories(
 
 # zlib
 set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")
+
 if(${TD_DARWIN})
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=deprecated-non-prototype")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-non-prototype")
 endif(${TD_DARWIN})
+
 add_subdirectory(zlib EXCLUDE_FROM_ALL)
 target_include_directories(
 zlibstatic
@@ -274,9 +275,9 @@ unset(CMAKE_PROJECT_INCLUDE_BEFORE)
 
 # add_subdirectory(xz EXCLUDE_FROM_ALL)
 # target_include_directories(
-# xz
-# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz
-# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz
+# xz
+# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz
+# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz
 # )
 
 # leveldb
@@ -291,24 +292,27 @@ endif(${BUILD_WITH_LEVELDB})
 
 # rocksdb
 # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
-if (${BUILD_WITH_UV})
+if(${BUILD_WITH_UV})
 if(${TD_LINUX})
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
-IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+
+if("${CMAKE_BUILD_TYPE}" STREQUAL "")
 SET(CMAKE_BUILD_TYPE Release)
 endif()
 endif(${TD_LINUX})
-endif (${BUILD_WITH_UV})
+endif(${BUILD_WITH_UV})
 
-if (${BUILD_WITH_ROCKSDB})
-if (${BUILD_CONTRIB})
+if(${BUILD_WITH_ROCKSDB})
+if(${BUILD_CONTRIB})
 if(${TD_LINUX})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
-if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+
+if("${CMAKE_BUILD_TYPE}" STREQUAL "")
 SET(CMAKE_BUILD_TYPE Release)
 endif()
 endif(${TD_LINUX})
 
 MESSAGE(STATUS "ROCKSDB CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
 MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
@@ -316,22 +320,23 @@ if (${BUILD_WITH_ROCKSDB})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
 endif(${TD_DARWIN})
 
-if (${TD_DARWIN_ARM64})
+if(${TD_DARWIN_ARM64})
 set(HAS_ARMV8_CRC true)
 endif(${TD_DARWIN_ARM64})
 
-if (${TD_WINDOWS})
+if(${TD_WINDOWS})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
 option(WITH_JNI "" OFF)
+
 if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
-message("Rocksdb build runtime lib use /MT or /MTd")
-option(WITH_MD_LIBRARY "build with MD" OFF)
+message("Rocksdb build runtime lib use /MT or /MTd")
+option(WITH_MD_LIBRARY "build with MD" OFF)
 endif()
+
 set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
 endif(${TD_WINDOWS})
 
 
 if(${TD_DARWIN})
 option(HAVE_THREAD_LOCAL "" OFF)
 option(WITH_IOSTATS_CONTEXT "" OFF)
@@ -357,30 +362,32 @@ if (${BUILD_WITH_ROCKSDB})
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
 )
 else()
-if (NOT ${TD_LINUX})
+if(NOT ${TD_LINUX})
 MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
 MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
+
 if(${TD_DARWIN})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=maybe-uninitialized")
 endif(${TD_DARWIN})
 
-if (${TD_DARWIN_ARM64})
+if(${TD_DARWIN_ARM64})
 set(HAS_ARMV8_CRC true)
 endif(${TD_DARWIN_ARM64})
 
-if (${TD_WINDOWS})
+if(${TD_WINDOWS})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
 option(WITH_JNI "" OFF)
+
 if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
 message("Rocksdb build runtime lib use /MT or /MTd")
 option(WITH_MD_LIBRARY "build with MD" OFF)
 endif()
+
 set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
 endif(${TD_WINDOWS})
 
 
 if(${TD_DARWIN})
 option(HAVE_THREAD_LOCAL "" OFF)
 option(WITH_IOSTATS_CONTEXT "" OFF)
@@ -406,44 +413,44 @@ if (${BUILD_WITH_ROCKSDB})
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
 )
 endif()
 
 endif()
 endif()
 
 if(${BUILD_WITH_S3})
 INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.2/include)
 MESSAGE("build with s3: ${BUILD_WITH_S3}")
 
 # cos
 elseif(${BUILD_WITH_COS})
-if(${TD_LINUX})
-set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
-#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
-option(ENABLE_TEST "Enable the tests" OFF)
-INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
-#MESSAGE("$ENV{HOME}/.cos-local.1/include")
-set(CMAKE_BUILD_TYPE Release)
-set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
-set(CMAKE_PROJECT_NAME cos_c_sdk)
-add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
-target_include_directories(
-cos_c_sdk
-PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
-)
-set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
-else()
-endif(${TD_LINUX})
+if(${TD_LINUX})
+set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
+
+# ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
+option(ENABLE_TEST "Enable the tests" OFF)
+INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
+
+# MESSAGE("$ENV{HOME}/.cos-local.1/include")
+set(CMAKE_BUILD_TYPE Release)
+set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
+set(CMAKE_PROJECT_NAME cos_c_sdk)
+
+add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
+target_include_directories(
+cos_c_sdk
+PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
+)
+
+set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
+else()
+endif(${TD_LINUX})
 endif()
 
 # pthread
 if(${BUILD_PTHREAD})
-if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
-SET(CMAKE_BUILD_TYPE Release)
+if("${CMAKE_BUILD_TYPE}" STREQUAL "")
+SET(CMAKE_BUILD_TYPE Release)
 endif()
 
 add_definitions(-DPTW32_STATIC_LIB)
 add_subdirectory(pthread EXCLUDE_FROM_ALL)
 set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
@@ -451,16 +458,15 @@ if(${BUILD_PTHREAD})
 target_link_libraries(pthread INTERFACE libpthreadVC3)
 endif()
 
-
 # jemalloc
 if(${JEMALLOC_ENABLED})
 include(ExternalProject)
 ExternalProject_Add(jemalloc
-PREFIX "jemalloc"
-SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
-BUILD_IN_SOURCE 1
-CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls
-BUILD_COMMAND ${MAKE}
+PREFIX "jemalloc"
+SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
+BUILD_IN_SOURCE 1
+CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls
+BUILD_COMMAND ${MAKE}
 )
 INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
 endif()
@@ -514,12 +520,13 @@ endif(${BUILD_WCWIDTH})
 
 # LIBUV
 if(${BUILD_WITH_UV})
-if (TD_WINDOWS)
+if(TD_WINDOWS)
 # There is no GetHostNameW function on win7.
 file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT)
 string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}")
 file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}")
-endif ()
+endif()
+
 add_subdirectory(libuv EXCLUDE_FROM_ALL)
 endif(${BUILD_WITH_UV})
 
@@ -535,6 +542,7 @@ if(${BUILD_WITH_SQLITE})
 INTERFACE m
 INTERFACE pthread
 )
+
 if(NOT TD_WINDOWS)
 target_link_libraries(sqlite
 INTERFACE dl
@@ -545,36 +553,38 @@ endif(${BUILD_WITH_SQLITE})
 # addr2line
 if(${BUILD_ADDR2LINE})
 if(NOT ${TD_WINDOWS})
-check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
-check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
-check_include_file( "inttypes.h" HAVE_INTTYPES_H )
-check_include_file( "stddef.h" HAVE_STDDEF_H )
-check_include_file( "stdlib.h" HAVE_STDLIB_H )
-check_include_file( "string.h" HAVE_STRING_H )
-check_include_file( "memory.h" HAVE_MEMORY_H )
-check_include_file( "strings.h" HAVE_STRINGS_H )
-check_include_file( "stdint.h" HAVE_STDINT_H )
-check_include_file( "unistd.h" HAVE_UNISTD_H )
-check_include_file( "sgidefs.h" HAVE_SGIDEFS_H )
-check_include_file( "stdafx.h" HAVE_STDAFX_H )
-check_include_file( "elf.h" HAVE_ELF_H )
-check_include_file( "libelf.h" HAVE_LIBELF_H )
-check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
-check_include_file( "alloca.h" HAVE_ALLOCA_H )
-check_include_file( "elfaccess.h" HAVE_ELFACCESS_H)
-check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H )
-check_include_file( "sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
-check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
-check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H )
+check_include_file("sys/types.h" HAVE_SYS_TYPES_H)
+check_include_file("sys/stat.h" HAVE_SYS_STAT_H)
+check_include_file("inttypes.h" HAVE_INTTYPES_H)
+check_include_file("stddef.h" HAVE_STDDEF_H)
+check_include_file("stdlib.h" HAVE_STDLIB_H)
+check_include_file("string.h" HAVE_STRING_H)
+check_include_file("memory.h" HAVE_MEMORY_H)
+check_include_file("strings.h" HAVE_STRINGS_H)
+check_include_file("stdint.h" HAVE_STDINT_H)
+check_include_file("unistd.h" HAVE_UNISTD_H)
+check_include_file("sgidefs.h" HAVE_SGIDEFS_H)
+check_include_file("stdafx.h" HAVE_STDAFX_H)
+check_include_file("elf.h" HAVE_ELF_H)
+check_include_file("libelf.h" HAVE_LIBELF_H)
+check_include_file("libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
+check_include_file("alloca.h" HAVE_ALLOCA_H)
+check_include_file("elfaccess.h" HAVE_ELFACCESS_H)
+check_include_file("sys/elf_386.h" HAVE_SYS_ELF_386_H)
+check_include_file("sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
+check_include_file("sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
+check_include_file("sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H)
 set(VERSION 0.3.1)
 set(PACKAGE_VERSION "\"${VERSION}\"")
 configure_file(libdwarf/cmake/config.h.cmake config.h)
 file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
 add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
 set_target_properties(libdwarf PROPERTIES OUTPUT_NAME "libdwarf")
+
 if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H)
 target_link_libraries(libdwarf PUBLIC libelf)
 endif()
+
 target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR})
 file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT)
 string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
@@ -583,7 +593,7 @@ if(${BUILD_ADDR2LINE})
 file(WRITE "addr2line/addr2line.c" "${ADDR2LINE_CONTENT}")
 add_library(addr2line STATIC "addr2line/addr2line.c")
 target_link_libraries(addr2line PUBLIC libdwarf dl z)
-target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf" )
+target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf")
 endif(NOT ${TD_WINDOWS})
 endif(${BUILD_ADDR2LINE})
 
@@ -592,31 +602,41 @@ if(${BUILD_GEOS})
 if(${TD_LINUX})
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
-if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+
+if("${CMAKE_BUILD_TYPE}" STREQUAL "")
 SET(CMAKE_BUILD_TYPE Release)
 endif()
 endif(${TD_LINUX})
 
 option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
 add_subdirectory(geos EXCLUDE_FROM_ALL)
-if (${TD_WINDOWS})
+
+if(${TD_WINDOWS})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
-else ()
+else()
 unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
 endif(${TD_WINDOWS})
 
 target_include_directories(
 geos_c
 PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
 )
 endif(${BUILD_GEOS})
 
-if (${BUILD_PCRE2})
+if(${BUILD_PCRE2})
 add_subdirectory(pcre2 EXCLUDE_FROM_ALL)
 endif(${BUILD_PCRE2})
 
+if(${TD_LINUX} AND ${BUILD_WITH_S3})
+add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
+endif()
+
 # ================================================================================================
 # Build test
 # ================================================================================================
 MESSAGE("build with dependency tests: ${BUILD_DEPENDENCY_TESTS}")
 
 if(${BUILD_DEPENDENCY_TESTS})
 add_subdirectory(test EXCLUDE_FROM_ALL)
 endif(${BUILD_DEPENDENCY_TESTS})
@@ -0,0 +1,73 @@
+# lib_azure_sdk
+set(AZURE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1")
+set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk")
+
+file(GLOB AZURE_SDK_SRC
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/credentials/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
+)
+
+file(GLOB AZURE_SDK_UNIFIED_SRC
+${AZURE_SDK_SRC}
+)
+
+set(AZURE_SDK_INCLUDES
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/inc/"
+"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/inc/"
+"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/inc/"
+"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/"
+)
+
+add_library(_azure_sdk STATIC ${AZURE_SDK_UNIFIED_SRC})
+target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER)
+
+target_include_directories(
+_azure_sdk
+PUBLIC "$ENV{HOME}/.cos-local.2/include"
+)
+
+find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+
+# find_library(CURL_LIBRARY curl)
+# find_library(XML2_LIBRARY xml2)
+find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+
+# find_library(CoreFoundation_Library CoreFoundation)
+# find_library(SystemConfiguration_Library SystemConfiguration)
+target_link_libraries(
+_azure_sdk
+PRIVATE ${CURL_LIBRARY}
+PRIVATE ${SSL_LIBRARY}
+PRIVATE ${CRYPTO_LIBRARY}
+PRIVATE ${XML2_LIBRARY}
+
+# PRIVATE xml2
+PRIVATE zlib
+
+# PRIVATE ${CoreFoundation_Library}
+# PRIVATE ${SystemConfiguration_Library}
+)
+
+# Originally, on Windows azure-core is built with bcrypt and crypt32 by default
+if(TARGET OpenSSL::SSL)
+target_link_libraries(_azure_sdk PRIVATE OpenSSL::Crypto OpenSSL::SSL)
+endif()
+
+# Originally, on Windows azure-core is built with winhttp by default
+if(TARGET td_contrib::curl)
+target_link_libraries(_azure_sdk PRIVATE td_contrib::curl)
+endif()
+
+target_include_directories(_azure_sdk SYSTEM BEFORE PUBLIC ${AZURE_SDK_INCLUDES})
+add_library(td_contrib::azure_sdk ALIAS _azure_sdk)
@@ -28,5 +28,6 @@ if(${BUILD_WITH_TRAFT})
 # add_subdirectory(traft)
 endif(${BUILD_WITH_TRAFT})
 
+add_subdirectory(azure)
 add_subdirectory(tdev)
 add_subdirectory(lz4)
@@ -0,0 +1,27 @@
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD_REQUIRED True)
+
+add_executable(
+azure-test
+main.cpp
+)
+
+find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+
+# find_library(XML2_LIBRARY xml2)
+find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
+
+# find_library(CoreFoundation_Library CoreFoundation)
+# find_library(SystemConfiguration_Library SystemConfiguration)
+target_link_libraries(
+azure-test
+PRIVATE _azure_sdk
+PRIVATE ${CURL_LIBRARY}
+PRIVATE ${XML2_LIBRARY}
+PRIVATE ${SSL_LIBRARY}
+PRIVATE ${CRYPTO_LIBRARY}
+PRIVATE dl
+PRIVATE pthread
+)
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <iostream>
+
+// Include the necessary SDK headers
+#include <azure/core.hpp>
+#include <azure/storage/blobs.hpp>
+
+// Add appropriate using namespace directives
+using namespace Azure::Storage;
+using namespace Azure::Storage::Blobs;
+
+// Secrets should be stored & retrieved from secure locations such as Azure::KeyVault. For
+// convenience and brevity of samples, the secrets are retrieved from environment variables.
+
+std::string GetEndpointUrl() {
+// return std::getenv("AZURE_STORAGE_ACCOUNT_URL");
+std::string accountId = getenv("ablob_account_id");
+if (accountId.empty()) {
+return accountId;
+}
+
+return accountId + ".blob.core.windows.net";
+}
+
+std::string GetAccountName() {
+// return std::getenv("AZURE_STORAGE_ACCOUNT_NAME");
+return getenv("ablob_account_id");
+}
+
+std::string GetAccountKey() {
+// return std::getenv("AZURE_STORAGE_ACCOUNT_KEY");
+
+return getenv("ablob_account_secret");
+}
+
+int main() {
+std::string endpointUrl = GetEndpointUrl();
+std::string accountName = GetAccountName();
+std::string accountKey = GetAccountKey();
+
+try {
+auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
+
+std::string accountURL = "https://fd2d01cd892f844eeaa2273.blob.core.windows.net";
+BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
+
+std::string containerName = "myblobcontainer";
+// auto containerClient = blobServiceClient.GetBlobContainerClient("myblobcontainer");
+auto containerClient = blobServiceClient.GetBlobContainerClient("td-test");
+
+// Create the container if it does not exist
+std::cout << "Creating container: " << containerName << std::endl;
+// containerClient.CreateIfNotExists();
+
+std::string blobName = "blob.txt";
+uint8_t blobContent[] = "Hello Azure!";
+// Create the block blob client
+BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);
+
+// Upload the blob
+std::cout << "Uploading blob: " << blobName << std::endl;
+blobClient.UploadFrom(blobContent, sizeof(blobContent));
+/*
+auto blockBlobClient = BlockBlobClient(endpointUrl, sharedKeyCredential);
+
+// Create some data to upload into the blob.
+std::vector<uint8_t> data = {1, 2, 3, 4};
+Azure::Core::IO::MemoryBodyStream stream(data);
+
+Azure::Response<Models::UploadBlockBlobResult> response = blockBlobClient.Upload(stream);
+
+Models::UploadBlockBlobResult model = response.Value;
+std::cout << "Last modified date of uploaded blob: " << model.LastModified.ToString()
+<< std::endl;
+*/
+} catch (const Azure::Core::RequestFailedException& e) {
+std::cout << "Status Code: " << static_cast<int>(e.StatusCode) << ", Reason Phrase: " << e.ReasonPhrase
+<< std::endl;
+std::cout << e.what() << std::endl;
+
+return 1;
+}
+
+return 0;
+}
@@ -80,7 +80,7 @@ These pseudocolumns occur after the aggregation clause.
 `FILL` clause is used to specify how to fill when there is data missing in any window, including:
 
 1. NONE: No fill (the default fill mode)
-2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`.
+2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that only exprs in select list that contains normal cols need to specify fill value, exprs like `_wstart`, `_wend`, `_wduration`, `_wstart + 1a`, `now`, `1+1`, partition keys like tbname(when using partition by) don't need to specify fill value. But exprs like `timediff(last(ts), _wstart)` need to specify fill value.
 3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
 4. NULL: Fill with NULL, `FILL(NULL)`
 5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
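
To make the VALUE-fill rule in item 2 concrete, here is a small illustrative query; the table name `d1001`, the column `c1`, and the time range are assumptions made for the example, not taken from this change:

```sql
-- min(c1) and max(c1) both reference the normal column c1, so FILL(VALUE)
-- must supply one constant for each of them; the _wstart pseudocolumn
-- needs no fill value.
SELECT _wstart, min(c1), max(c1)
FROM d1001
WHERE ts >= '2024-10-01 00:00:00' AND ts < '2024-10-02 00:00:00'
INTERVAL(1h)
FILL(VALUE, 0, 0);
```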
@@ -19,7 +19,7 @@
 <dependency>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>3.3.3</version>
+<version>3.4.0</version>
 </dependency>
 <dependency>
 <groupId>org.locationtech.jts</groupId>
@@ -18,7 +18,7 @@
 <dependency>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>3.3.3</version>
+<version>3.4.0</version>
 </dependency>
 <!-- druid -->
 <dependency>
@@ -17,7 +17,7 @@
 <dependency>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>3.3.3</version>
+<version>3.4.0</version>
 </dependency>
 <dependency>
 <groupId>com.google.guava</groupId>
@@ -67,7 +67,7 @@
 <dependency>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>3.3.3</version>
+<version>3.4.0</version>
 <!-- <scope>system</scope>-->
 <!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
 </dependency>
@@ -22,7 +22,7 @@
 <dependency>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
-<version>3.3.3</version>
+<version>3.4.0</version>
 </dependency>
 <!-- ANCHOR_END: dep-->
 
@@ -17,8 +17,8 @@ public class SchemalessWsTest {
 private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
 
 public static void main(String[] args) throws SQLException {
-final String url = "jdbc:TAOS-RS://" + host + ":6041?user=root&password=taosdata&batchfetch=true";
-try(Connection connection = DriverManager.getConnection(url)){
+final String url = "jdbc:TAOS-WS://" + host + ":6041?user=root&password=taosdata";
+try (Connection connection = DriverManager.getConnection(url)) {
 init(connection);
 AbstractConnection conn = connection.unwrap(AbstractConnection.class);
 
@@ -12,9 +12,9 @@ public class WSConnectExample {
 public static void main(String[] args) throws Exception {
 // use
 // String jdbcUrl =
-// "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata&batchfetch=true";
+// "jdbc:TAOS-WS://localhost:6041/dbName?user=root&password=taosdata";
 // if you want to connect a specified database named "dbName".
-String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata&batchfetch=true";
+String jdbcUrl = "jdbc:TAOS-WS://localhost:6041?user=root&password=taosdata";
 Properties connProps = new Properties();
 connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
 connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
@@ -15,7 +15,7 @@ public class WSParameterBindingBasicDemo {
 
 public static void main(String[] args) throws SQLException {
 
-String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
+String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041";
 try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
 init(conn);
 
@@ -40,7 +40,7 @@ public class WSParameterBindingBasicDemo {
 pstmt.setFloat(4, random.nextFloat());
 pstmt.addBatch();
 }
-int [] exeResult = pstmt.executeBatch();
+int[] exeResult = pstmt.executeBatch();
 // you can check exeResult here
 System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
 }
@@ -60,7 +60,8 @@ public class WSParameterBindingBasicDemo {
 try (Statement stmt = conn.createStatement()) {
 stmt.execute("CREATE DATABASE IF NOT EXISTS power");
 stmt.execute("USE power");
-stmt.execute("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
+stmt.execute(
+"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
 }
 }
 }
@@ -40,7 +40,7 @@ public class WSParameterBindingFullDemo {
 
 public static void main(String[] args) throws SQLException {
 
-String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
+String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041/";
 
 try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
 
@@ -51,8 +51,10 @@ public class WSParameterBindingFullDemo {
 stmtAll(conn);
 
 } catch (SQLException ex) {
-// handle any errors, please refer to the JDBC specifications for detailed exceptions info
-System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
+// handle any errors, please refer to the JDBC specifications for detailed
+// exceptions info
+System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: "
++ ex.getMessage());
 throw ex;
 } catch (Exception ex) {
 System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage());
@@ -104,30 +106,29 @@ public class WSParameterBindingFullDemo {
 pstmt.setTagBoolean(3, true);
 pstmt.setTagString(4, "binary_value");
 pstmt.setTagNString(5, "nchar_value");
-pstmt.setTagVarbinary(6, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
-pstmt.setTagGeometry(7, new byte[]{
+pstmt.setTagVarbinary(6, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
+pstmt.setTagGeometry(7, new byte[] {
 0x01, 0x01, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x59,
 0x40, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x59, 0x40});
+0x00, 0x00, 0x00, 0x59, 0x40 });
 
 long current = System.currentTimeMillis();
 
-
 pstmt.setTimestamp(1, new Timestamp(current));
 pstmt.setInt(2, 1);
 pstmt.setDouble(3, 1.1);
 pstmt.setBoolean(4, true);
 pstmt.setString(5, "binary_value");
 pstmt.setNString(6, "nchar_value");
-pstmt.setVarbinary(7, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
-pstmt.setGeometry(8, new byte[]{
+pstmt.setVarbinary(7, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
+pstmt.setGeometry(8, new byte[] {
 0x01, 0x01, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x59,
 0x40, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x59, 0x40});
+0x00, 0x00, 0x00, 0x59, 0x40 });
 pstmt.addBatch();
 pstmt.executeBatch();
 System.out.println("Successfully inserted rows to example_all_type_stmt.ntb");
@ -111,7 +111,7 @@ TDengine 还支持直接向超级表写入数据。需要注意的是,超级
|
|||
|
||||
```sql
|
||||
insert into meters (tbname, ts, current, voltage, phase, location, group_id)
|
||||
values( "d1001v, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
|
||||
values( "d1001, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
|
||||
```
|
||||
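下面是一段通过 JDBC 执行上述超级表写入的示例草图(假设 taosAdapter 运行在本机 6041 端口,数据库 power 已创建,账号为默认的 root/taosdata):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class SuperTableInsertSketch {
    public static void main(String[] args) throws SQLException {
        // 假设 taosAdapter 运行在本机 6041 端口,使用默认账号
        String url = "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // 向超级表 meters 写入一行数据,tbname 列用于指定子表名
            int rows = stmt.executeUpdate(
                    "insert into meters (tbname, ts, current, voltage, phase, location, group_id) "
                            + "values (\"d1001\", \"2018-10-03 14:38:05\", 10.2, 220, 0.23, \"California.SanFrancisco\", 2)");
            System.out.println("inserted " + rows + " row(s)");
        }
    }
}
```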
|
||||
### 零代码写入
|
||||
|
|
|
@ -3,6 +3,6 @@
|
|||
```
|
||||
|
||||
:::note
|
||||
对于 Rust 连接器, 连接方式的不同只体现在使用的特性不同。如果启用了 "ws" 特性,那么只有 Websocket 的实现会被编译进来。
|
||||
对于 Rust 连接器, 连接方式的不同只体现在使用的特性不同。如果启用了 "ws" 特性,那么只有 WebSocket 的实现会被编译进来。
|
||||
|
||||
:::
|
||||
|
|
|
@ -28,7 +28,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
|
||||
1. 通过客户端驱动程序 taosc 直接与服务端程序 taosd 建立连接,这种连接方式下文中简称 “原生连接”。
|
||||
2. 通过 taosAdapter 组件提供的 REST API 建立与 taosd 的连接,这种连接方式下文中简称 “REST 连接”
|
||||
3. 通过 taosAdapter 组件提供的 Websocket API 建立与 taosd 的连接,这种连接方式下文中简称 “Websocket 连接”
|
||||
3. 通过 taosAdapter 组件提供的 WebSocket API 建立与 taosd 的连接,这种连接方式下文中简称 “WebSocket 连接”
|
||||
|
||||

|
||||
|
||||
|
@ -38,9 +38,9 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
关键不同点在于:
|
||||
|
||||
1. 使用 原生连接,需要保证客户端的驱动程序 taosc 和服务端的 TDengine 版本配套。
|
||||
2. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但是无法体验数据订阅和二进制数据类型等功能。另外与 原生连接 和 Websocket 连接相比,REST连接的性能最低。REST 接口是无状态的。在使用 REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。
|
||||
3. 使用 Websocket 连接,用户也无需安装客户端驱动程序 taosc。
|
||||
4. 连接云服务实例,必须使用 REST 连接 或 Websocket 连接。
|
||||
2. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但是无法体验数据订阅和二进制数据类型等功能。另外与 原生连接 和 WebSocket 连接相比,REST连接的性能最低。REST 接口是无状态的。在使用 REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。
|
||||
3. 使用 WebSocket 连接,用户也无需安装客户端驱动程序 taosc。
|
||||
4. 连接云服务实例,必须使用 REST 连接 或 WebSocket 连接。
|
||||
|
||||
**推荐使用 WebSocket 连接**
|
||||
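下面是一个使用 Java 建立 WebSocket 连接的最小示例草图(假设 taosAdapter 部署在 localhost:6041,账号为默认的 root/taosdata):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class WSConnectSketch {
    public static void main(String[] args) throws SQLException {
        // WebSocket 连接使用 jdbc:TAOS-WS 前缀,默认端口为 taosAdapter 的 6041
        String url = "jdbc:TAOS-WS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```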
|
||||
|
@ -126,7 +126,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
```bash
|
||||
pip3 install taos-ws-py
|
||||
```
|
||||
:::note 此安装包为 Websocket 连接器
|
||||
:::note 此安装包为 WebSocket 连接器
|
||||
- 同时安装 `taospy` 和 `taos-ws-py`
|
||||
```bash
|
||||
pip3 install taospy[ws]
|
||||
|
@ -182,7 +182,7 @@ taos = { version = "*"}
|
|||
```
|
||||
|
||||
:::info
|
||||
Rust 连接器通过不同的特性区分不同的连接方式。默认同时支持原生连接和 Websocket 连接,如果仅需要建立 Websocket 连接,可设置 `ws` 特性:
|
||||
Rust 连接器通过不同的特性区分不同的连接方式。默认同时支持原生连接和 WebSocket 连接,如果仅需要建立 WebSocket 连接,可设置 `ws` 特性:
|
||||
|
||||
```toml
|
||||
taos = { version = "*", default-features = false, features = ["ws"] }
|
||||
|
@ -201,7 +201,7 @@ taos = { version = "*", default-features = false, features = ["ws"] }
|
|||
```
|
||||
npm install @tdengine/websocket
|
||||
```
|
||||
:::note Node.js 目前只支持 Websocket 连接
|
||||
:::note Node.js 目前只支持 WebSocket 连接
|
||||
- **安装验证**
|
||||
- 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/nodejsChecker.js)到本地。
|
||||
- 在命令行中执行以下命令。
|
||||
|
@ -271,12 +271,10 @@ dotnet add package TDengine.Connector
|
|||
<TabItem label="Java" value="java">
|
||||
Java 连接器建立连接的参数有 URL 和 Properties。
|
||||
TDengine 的 JDBC URL 规范格式为:
|
||||
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`
|
||||
`jdbc:[TAOS|TAOS-WS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`
|
||||
|
||||
URL 和 Properties 的详细参数说明和如何使用详见 [url 规范](../../reference/connector/java/#url-规范)
|
||||
|
||||
**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。
|
||||
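作为参考,下面是一段结合 URL 与 Properties 建立连接的示例草图(假设服务端地址为 localhost:6041,所用属性常量与本文其他示例一致):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

import com.taosdata.jdbc.TSDBDriver;

public class UrlPropsSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS-WS://localhost:6041/?user=root&password=taosdata"; // 假设的地址
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        try (Connection conn = DriverManager.getConnection(url, props)) {
            System.out.println("connected via WebSocket");
        }
    }
}
```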
|
||||
</TabItem>
|
||||
<TabItem label="Python" value="python">
|
||||
Python 连接器使用 `connect()` 方法来建立连接,下面是连接参数的具体说明:
|
||||
|
@ -387,8 +385,8 @@ DSN 的详细说明和如何使用详见 [连接功能](../../reference/connecto
|
|||
- `reconnectIntervalMs`:重连间隔毫秒时间,默认为 2000。
|
||||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
**Websocket 连接**
|
||||
C/C++ 语言连接器 Websocket 连接方式使用 `ws_connect()` 函数用于建立与 TDengine 数据库的连接。其参数为 DSN 描述字符串,其基本结构如下:
|
||||
**WebSocket 连接**
|
||||
C/C++ 语言连接器 WebSocket 连接方式使用 `ws_connect()` 函数建立与 TDengine 数据库的连接,其参数为 DSN 描述字符串,基本结构如下:
|
||||
|
||||
```text
|
||||
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
|
||||
|
@ -417,8 +415,8 @@ C/C++ 语言连接器原生连接方式使用 `taos_connect()` 函数用于建
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Websocket 连接
|
||||
下面是各语言连接器建立 Websocket 连接代码样例。演示了如何使用 Websocket 连接方式连接到 TDengine 数据库,并对连接设定一些参数。整个过程主要涉及到数据库连接的建立和异常处理。
|
||||
### WebSocket 连接
|
||||
下面是各语言连接器建立 WebSocket 连接的代码样例,演示了如何使用 WebSocket 连接方式连接到 TDengine 数据库,并对连接设定一些参数。整个过程主要涉及数据库连接的建立和异常处理。
|
||||
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem label="Java" value="java">
|
||||
|
|
|
@ -33,7 +33,7 @@ REST API:直接调用 `taosadapter` 提供的 REST API 接口,进行数据
|
|||
</TabItem>
|
||||
<TabItem label="Python" value="python">
|
||||
|
||||
```python title="Websocket 连接"
|
||||
```python title="WebSocket 连接"
|
||||
{{#include docs/examples/python/create_db_ws.py}}
|
||||
```
|
||||
|
||||
|
@ -69,7 +69,7 @@ REST API:直接调用 `taosadapter` 提供的 REST API 接口,进行数据
|
|||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
```c title="Websocket 连接"
|
||||
```c title="WebSocket 连接"
|
||||
{{#include docs/examples/c-ws/create_db_demo.c:create_db_and_table}}
|
||||
```
|
||||
|
||||
|
@ -114,7 +114,7 @@ NOW 为系统内部函数,默认为客户端所在计算机当前时间。 NOW
|
|||
</TabItem>
|
||||
<TabItem label="Python" value="python">
|
||||
|
||||
```python title="Websocket 连接"
|
||||
```python title="WebSocket 连接"
|
||||
{{#include docs/examples/python/insert_ws.py}}
|
||||
```
|
||||
|
||||
|
@ -151,7 +151,7 @@ NOW 为系统内部函数,默认为客户端所在计算机当前时间。 NOW
|
|||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
```c title="Websocket 连接"
|
||||
```c title="WebSocket 连接"
|
||||
{{#include docs/examples/c-ws/insert_data_demo.c:insert_data}}
|
||||
```
|
||||
|
||||
|
@ -189,7 +189,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
|
|||
</TabItem>
|
||||
<TabItem label="Python" value="python">
|
||||
|
||||
```python title="Websocket 连接"
|
||||
```python title="WebSocket 连接"
|
||||
{{#include docs/examples/python/query_ws.py}}
|
||||
```
|
||||
|
||||
|
@ -230,7 +230,7 @@ rust 连接器还支持使用 **serde** 进行反序列化行为结构体的结
|
|||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
```c title="Websocket 连接"
|
||||
```c title="WebSocket 连接"
|
||||
{{#include docs/examples/c-ws/query_data_demo.c:query_data}}
|
||||
```
|
||||
|
||||
|
@ -273,7 +273,7 @@ reqId 可用于请求链路追踪,reqId 就像分布式系统中的 traceId
|
|||
</TabItem>
|
||||
<TabItem label="Python" value="python">
|
||||
|
||||
```python title="Websocket 连接"
|
||||
```python title="WebSocket 连接"
|
||||
{{#include docs/examples/python/reqid_ws.py}}
|
||||
```
|
||||
|
||||
|
@ -310,7 +310,7 @@ reqId 可用于请求链路追踪,reqId 就像分布式系统中的 traceId
|
|||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
```c "Websocket 连接"
|
||||
```c "WebSocket 连接"
|
||||
{{#include docs/examples/c-ws/with_reqid_demo.c:with_reqid}}
|
||||
```
|
||||
|
||||
|
|
|
@ -191,7 +191,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
|
|||
|
||||
:::
|
||||
|
||||
### Websocket 连接
|
||||
### WebSocket 连接
|
||||
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
|
|
@ -23,7 +23,7 @@ import TabItem from "@theme/TabItem";
|
|||
- 执行批量插入操作,将这些数据行插入到对应的子表中。
|
||||
3. 最后打印实际插入表中的行数。
|
||||
|
||||
## Websocket 连接
|
||||
## WebSocket 连接
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
```java
|
||||
|
|
|
@ -94,7 +94,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
|
|||
|
||||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
- Websocket 连接: 因为使用 dsn,不需要 `td.connect.ip`,`td.connect.port`,`td.connect.user` 和 `td.connect.pass` 四个配置项,其余同通用配置项。
|
||||
- WebSocket 连接: 因为使用 dsn,不需要 `td.connect.ip`,`td.connect.port`,`td.connect.user` 和 `td.connect.pass` 四个配置项,其余同通用配置项。
|
||||
- 原生连接: 同通用基础配置项。
|
||||
|
||||
</TabItem>
|
||||
|
@ -103,8 +103,8 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Websocket 连接
|
||||
介绍各语言连接器使用 Websocket 连接方式创建消费者。指定连接的服务器地址,设置自动提交,从最新消息开始消费,指定 `group.id` 和 `client.id` 等信息。有的语言的连接器还支持反序列化参数。
|
||||
### WebSocket 连接
|
||||
介绍各语言连接器使用 WebSocket 连接方式创建消费者。指定连接的服务器地址,设置自动提交,从最新消息开始消费,指定 `group.id` 和 `client.id` 等信息。有的语言的连接器还支持反序列化参数。
|
||||
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
@ -234,7 +234,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
|
|||
|
||||
## 订阅消费数据
|
||||
消费者订阅主题后,可以开始接收并处理这些主题中的消息。订阅消费数据的示例代码如下:
|
||||
### Websocket 连接
|
||||
### WebSocket 连接
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
||||
|
@ -403,7 +403,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
|
|||
## 指定订阅的 Offset
|
||||
消费者可以指定从特定 Offset 开始读取分区中的消息,这允许消费者重读消息或跳过已处理的消息。下面展示各语言连接器如何指定订阅的 Offset。
|
||||
|
||||
### Websocket 连接
|
||||
### WebSocket 连接
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
||||
|
@ -549,7 +549,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
|
|||
|
||||
**注意**:手工提交消费进度前确保消息正常处理完成,否则处理出错的消息不会被再次消费。自动提交是在本次 `poll` 消息时可能会提交上次消息的消费进度,因此请确保消息处理完毕再进行下一次 `poll` 或消息获取。
|
||||
|
||||
### Websocket 连接
|
||||
### WebSocket 连接
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
||||
|
@ -663,7 +663,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
|
|||
## 取消订阅和关闭消费
|
||||
消费者可以取消对主题的订阅,停止接收消息。当消费者不再需要时,应该关闭消费者实例,以释放资源和断开与 TDengine 服务器的连接。
|
||||
|
||||
### Websocket 连接
|
||||
### WebSocket 连接
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
||||
|
@ -766,7 +766,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
|
|||
|
||||
|
||||
## 完整示例
|
||||
### Websocket 连接
|
||||
### WebSocket 连接
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
<details>
|
||||
|
|
|
@ -30,9 +30,8 @@ toc_max_heading_level: 4
|
|||
目前只有 Java 连接器在 WebSocket 连接模式下支持双活,其配置示例如下
|
||||
|
||||
```java
|
||||
url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
|
||||
url = "jdbc:TAOS-WS://" + host + ":6041/?user=root&password=taosdata";
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_SLAVE_CLUSTER_HOST, "192.168.1.11");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_SLAVE_CLUSTER_PORT, "6041");
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
|
||||
|
@ -43,13 +42,13 @@ connection = DriverManager.getConnection(url, properties);
|
|||
|
||||
其中的配置属性及含义如下表
|
||||
|
||||
| 属性名 | 含义 |
|
||||
| ----------------- | ------------------ |
|
||||
| PROPERTY_KEY_SLAVE_CLUSTER_HOST | 第二节点的主机名或者 ip,默认空 |
|
||||
| PROPERTY_KEY_SLAVE_CLUSTER_PORT | 第二节点的端口号,默认空 |
|
||||
| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | 是否启用自动重连。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。双活场景下请设置为 true |
|
||||
| PROPERTY_KEY_RECONNECT_INTERVAL_MS | 重连的时间间隔,单位毫秒:默认 2000 毫秒,也就是 2 秒;最小值为 0, 表示立即重试;最大值不做限制 |
|
||||
| PROPERTY_KEY_RECONNECT_RETRY_COUNT | 每节点最多重试次数:默认值为 3;最小值为 0,表示不进行重试;最大值不做限制 |
|
||||
| 属性名 | 含义 |
|
||||
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| PROPERTY_KEY_SLAVE_CLUSTER_HOST | 第二节点的主机名或者 ip,默认空 |
|
||||
| PROPERTY_KEY_SLAVE_CLUSTER_PORT | 第二节点的端口号,默认空 |
|
||||
| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。双活场景下请设置为 true |
|
||||
| PROPERTY_KEY_RECONNECT_INTERVAL_MS | 重连的时间间隔,单位毫秒:默认 2000 毫秒,也就是 2 秒;最小值为 0, 表示立即重试;最大值不做限制 |
|
||||
| PROPERTY_KEY_RECONNECT_RETRY_COUNT | 每节点最多重试次数:默认值为 3;最小值为 0,表示不进行重试;最大值不做限制 |
|
||||
|
||||
### 约束条件
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
|
|||
|
||||
| 参数名称 | 参数含义 |
|
||||
|:-----------:|:----------------------------------------------------------:|
|
||||
|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:localhost:6030 |
|
||||
|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:${hostname}:6030,若无法获取 ${hostname},则赋值为 localhost |
|
||||
|secondEp | 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值 |
|
||||
|numOfRpcSessions | 一个客户端能创建的最大连接数,取值范围:10-50000000;缺省值:500000 |
|
||||
|telemetryReporting | 是否上传 telemetry,0: 不上传,1: 上传;缺省值:1 |
|
||||
|
|
|
@ -306,7 +306,7 @@ http 返回内容:
|
|||
|
||||
## taosAdapter 监控指标
|
||||
|
||||
taosAdapter 采集 REST/Websocket 相关请求的监控指标。将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。
|
||||
taosAdapter 采集 REST/WebSocket 相关请求的监控指标。将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。
|
||||
|
||||
#### adapter\_requests 表
|
||||
|
||||
|
@ -330,8 +330,8 @@ taosAdapter 采集 REST/Websocket 相关请求的监控指标。将监控指标
|
|||
| other\_fail | INT UNSIGNED | | 其他失败请求数 |
|
||||
| query\_in\_process | INT UNSIGNED | | 正在处理查询请求数 |
|
||||
| write\_in\_process | INT UNSIGNED | | 正在处理写入请求数 |
|
||||
| endpoint | VARCHAR | | 请求端点 |
|
||||
| req\_type | NCHAR | TAG | 请求类型:0 为 REST,1 为 Websocket |
|
||||
| endpoint | VARCHAR | | 请求端点 |
|
||||
| req\_type | NCHAR | TAG | 请求类型:0 为 REST,1 为 WebSocket |
|
||||
|
||||
## 结果返回条数限制
|
||||
|
||||
|
|
|
@ -4,11 +4,11 @@ sidebar_label: taos
|
|||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁最常用的方式。 使用前需要安装 TDengine Server 安装包或 TDengine Client 安装包。
|
||||
TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁、最常用的工具。使用前需要安装 TDengine Server 安装包或 TDengine Client 安装包。
|
||||
|
||||
## 启动
|
||||
|
||||
要进入 TDengine CLI,您只要在终端执行 `taos` 即可。
|
||||
要进入 TDengine CLI,您只需在终端执行 `taos` 即可。
|
||||
|
||||
```bash
|
||||
taos
|
||||
|
@ -23,6 +23,11 @@ taos>
|
|||
```
|
||||
|
||||
进入 TDengine CLI 后,你可执行各种 SQL 语句,包括插入、查询以及各种管理命令。
|
||||
退出 TDengine CLI,执行 `q`、`quit` 或 `exit` 并回车即可。
|
||||
```shell
|
||||
taos> quit
|
||||
```
|
||||
|
||||
|
||||
## 执行 SQL 脚本
|
||||
|
||||
|
@ -66,7 +71,7 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
|
|||
- -l PKTLEN: 网络测试时使用的测试包大小
|
||||
- -n NETROLE: 网络连接测试时的测试范围,默认为 `client`, 可选值为 `client`、`server`
|
||||
- -N PKTNUM: 网络测试时使用的测试包数量
|
||||
- -r: 将时间输出出无符号 64 位整数类型(即 C 语音中 uint64_t)
|
||||
- -r: 将时间列转化为无符号 64 位整数类型输出(即 C 语言中 uint64_t)
|
||||
- -R: 使用 RESTful 模式连接服务端
|
||||
- -s COMMAND: 以非交互模式执行的 SQL 命令
|
||||
- -t: 测试服务端启动状态,状态同-k
|
||||
|
@ -84,6 +89,13 @@ taos -h h1.taos.com -s "use db; show tables;"
|
|||
|
||||
也可以通过配置文件中的参数设置来控制 TDengine CLI 的行为。可用配置参数请参考[客户端配置](../../components/taosc)
|
||||
|
||||
## TDengine CLI TAB 键补全
|
||||
|
||||
- TAB 键前为空命令状态下按 TAB 键,会列出 TDengine CLI 支持的所有命令
|
||||
- TAB 键前为空格状态下按 TAB 键,会显示此位置可以出现的所有命令词的第一个,再次按 TAB 键切为下一个
|
||||
- TAB 键前为字符串,会搜索与此字符串前缀匹配的所有可出现命令词,并显示第一个,再次按 TAB 键切为下一个
|
||||
- 输入反斜杠 `\` + TAB 键, 会自动补全为列显示模式命令词 `\G;`
|
||||
|
||||
## TDengine CLI 小技巧
|
||||
|
||||
- 可以使用上下光标键查看历史输入的指令
|
||||
|
@ -91,7 +103,6 @@ taos -h h1.taos.com -s "use db; show tables;"
|
|||
- Ctrl+C 中止正在进行中的查询
|
||||
- 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存
|
||||
- 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source <file-name>` 自动执行该文件里所有的 SQL 语句
|
||||
- 输入 `q` 或 `quit` 或 `exit` 回车,可以退出 TDengine CLI
|
||||
|
||||
## TDengine CLI 导出查询结果到文件中
|
||||
|
||||
|
|
|
@ -42,8 +42,8 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符。如果用户字符串长度超出声明长度,将会报错。 |
|
||||
| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
|
||||
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
|
||||
| 17 | GEOMETRY | 自定义 | 几何类型
|
||||
| 18 | VARBINARY | 自定义 | 可变长的二进制数据|
|
||||
| 17 | GEOMETRY | 自定义 | 几何类型,3.1.0.0 版本开始支持 |
|
||||
| 18 | VARBINARY | 自定义 | 可变长的二进制数据,3.1.1.0 版本开始支持 |
|
||||
|
||||
:::note
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ window_clause: {
|
|||
FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
|
||||
|
||||
1. 不进行填充:NONE(默认填充模式)。
|
||||
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1, 若查询列表中有多列需要FILL, 则需要给每一个FILL列指定VALUE, 如`SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`。
|
||||
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1。若查询列表中有多列需要 FILL,则需要给每一个 FILL 列指定 VALUE,如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`。注意,SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE,如 `_wstart`、`_wstart+1a`、`now`、`1+1` 以及使用 partition by 时的 partition key(如 tbname)都不需要指定 VALUE,如 `timediff(last(ts), _wstart)` 则需要指定 VALUE。
|
||||
3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
|
||||
4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
|
||||
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
|
||||
|
|
|
@ -153,7 +153,7 @@ SELECT * from information_schema.`ins_streams`;
|
|||
|
||||
由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。
|
||||
|
||||
因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY最小时间是5s,如果低于5s,创建流计算时会报错。
|
||||
因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY 最小时间是 5s,如果低于 5s,创建流计算时会报错。
|
||||
|
||||
MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算。
|
||||
|
||||
|
|
|
@ -5,14 +5,14 @@ toc_max_heading_level: 4
|
|||
---
|
||||
|
||||
C/C++ 开发人员可以使用 TDengine 的客户端驱动,即 C/C++连接器 (以下都用 TDengine 客户端驱动表示),开发自己的应用来连接 TDengine 集群完成数据存储、查询以及其他功能。TDengine 客户端驱动的 API 类似于 MySQL 的 C API。应用程序使用时,需要包含 TDengine 头文件,里面列出了提供的 API 的函数原型;应用程序还要链接到所在平台上对应的动态库。
|
||||
TDengine 的客户端驱动提供了 taosws 和 taos 两个动态库,分别支持 Websocket 连接和原生连接。 Websocket 连接和原生连接的区别是 Websocket 连接方式不要求客户端和服务端版本完全匹配,而原生连接要求,在性能上 Websocket 连接方式也接近于原生连接,一般我们推荐使用 Websocket 连接方式。
|
||||
TDengine 的客户端驱动提供了 taosws 和 taos 两个动态库,分别支持 WebSocket 连接和原生连接。 WebSocket 连接和原生连接的区别是 WebSocket 连接方式不要求客户端和服务端版本完全匹配,而原生连接要求,在性能上 WebSocket 连接方式也接近于原生连接,一般我们推荐使用 WebSocket 连接方式。
|
||||
|
||||
下面我们分开介绍两种连接方式的使用方法。
|
||||
|
||||
|
||||
## Websocket 连接方式
|
||||
## WebSocket 连接方式
|
||||
|
||||
Websocket 连接方式需要使用 taosws.h 头文件和 taosws 动态库。
|
||||
WebSocket 连接方式需要使用 taosws.h 头文件和 taosws 动态库。
|
||||
|
||||
```c
|
||||
#include <taosws.h>
|
||||
|
@ -44,7 +44,7 @@ TDengine 客户端驱动的动态库位于:
|
|||
### 错误码
|
||||
|
||||
在 C 接口的设计中,错误码采用整数类型表示,每个错误码都对应一个特定的错误状态。如未特别说明,当 API 的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。
|
||||
Websocket 连接方式单独的错误码在 `taosws.h` 中,
|
||||
WebSocket 连接方式单独的错误码在 `taosws.h` 中,如下表所示:
|
||||
|
||||
|
||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||
|
@ -82,7 +82,7 @@ WebSocket 连接方式错误码只保留了原生连接错误码的后两个字
|
|||
|
||||
#### DSN
|
||||
|
||||
C/C++ Websocket 连接器通过 DSN 连接描述字符串来表示连接信息。
|
||||
C/C++ WebSocket 连接器通过 DSN 连接描述字符串来表示连接信息。
|
||||
DSN 描述字符串基本结构如下:
|
||||
|
||||
```text
|
||||
|
@ -96,16 +96,16 @@ DSN 描述字符串基本结构如下:
|
|||
- **driver**: 必须指定驱动名以便连接器选择何种方式创建连接,支持如下驱动名:
|
||||
- **taos**: 默认驱动,支持 SQL 执行,参数绑定,无模式写入。
|
||||
- **tmq**: 使用 TMQ 订阅数据。
|
||||
- **protocol**: 显示指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 Websocket 方式建立连接。
|
||||
- **http/ws**: 使用 Websocket 协议。
|
||||
- **https/wss**: 在 Websocket 连接方式下显示启用 SSL/TLS 协议。
|
||||
- **protocol**: 显式指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 WebSocket 方式建立连接。
|
||||
  - **http/ws**: 使用 WebSocket 协议。
|
||||
  - **https/wss**: 在 WebSocket 连接方式下显式启用 SSL/TLS 协议。
|
||||
|
||||
- **username/password**: 用于创建连接的用户名及密码。
|
||||
- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时 Websocket 连接默认为 `localhost:6041` 。
|
||||
- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时 WebSocket 连接默认为 `localhost:6041` 。
|
||||
- **database**: 指定默认连接的数据库名,可选参数。
|
||||
- **params**:其他可选参数。
|
||||
|
||||
一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 Websocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。
|
||||
一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 WebSocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。
|
||||
|
||||
#### 基础 API
|
||||
|
||||
|
|
|
@ -33,14 +33,15 @@ REST 连接支持所有能运行 Java 的平台。
|
|||
|
||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
||||
| 3.3.3 | 1. 解决了 Websocket statement 关闭导致的内存泄漏 | - |
|
||||
| 3.3.2 | 1. 优化 Websocket 连接下的参数绑定性能;2. 优化了对 mybatis 的支持 | - |
|
||||
| 3.3.0 | 1. 优化 Websocket 连接下的数据传输性能;2. 支持跳过 SSL 验证,默认关闭 | 3.3.2.0 及更高版本 |
|
||||
| 3.4.0 | 1. 使用 jackson 库替换 fastjson 库;2. WebSocket 采用独立协议标识;3. 优化后台拉取线程使用,避免用户误用导致超时。 | - |
|
||||
| 3.3.3 | 1. 解决了 WebSocket statement 关闭导致的内存泄漏 | - |
|
||||
| 3.3.2 | 1. 优化 WebSocket 连接下的参数绑定性能;2. 优化了对 mybatis 的支持 | - |
|
||||
| 3.3.0 | 1. 优化 WebSocket 连接下的数据传输性能;2. 支持跳过 SSL 验证,默认关闭 | 3.3.2.0 及更高版本 |
|
||||
| 3.2.11 | 解决了 Native 连接关闭结果集 bug | - |
|
||||
| 3.2.10 | 1. REST/WebSocket 连接支持传输中的数据压缩;2. Websocket 自动重连机制,默认关闭;3. Connection 类提供无模式写入的方法;4. 优化了原生连接的数据拉取性能;5. 修复了一些已知问题;6.元数据获取函数可以返回支持的函数列表。 | - |
|
||||
| 3.2.9 | 解决了 Websocket prepareStatement 关闭 bug | - |
|
||||
| 3.2.8 | 优化了自动提交, 解决了 websocket 手动提交 bug, 优化 Websocket prepareStatement 使用一个连接, 元数据支持视图 | - |
|
||||
| 3.2.7 | 支持 VARBINARY 和 GEOMETRY 类型,增加 native 连接的时区设置支持。增加 websocket 自动重连功能。 | 3.2.0.0 及更高版本 |
|
||||
| 3.2.10 | 1. REST/WebSocket 连接支持传输中的数据压缩;2. WebSocket 自动重连机制,默认关闭;3. Connection 类提供无模式写入的方法;4. 优化了原生连接的数据拉取性能;5. 修复了一些已知问题;6.元数据获取函数可以返回支持的函数列表。 | - |
|
||||
| 3.2.9 | 解决了 WebSocket prepareStatement 关闭 bug | - |
|
||||
| 3.2.8 | 优化了自动提交, 解决了 WebSocket 手动提交 bug, 优化 WebSocket prepareStatement 使用一个连接, 元数据支持视图 | - |
|
||||
| 3.2.7 | 支持 VARBINARY 和 GEOMETRY 类型,增加 native 连接的时区设置支持。增加 WebSocket 自动重连功能。 | 3.2.0.0 及更高版本 |
|
||||
| 3.2.5 | 数据订阅增加 committed()、assignment() 方法 | 3.1.0.3 及更高版本 |
|
||||
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
|
||||
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
|
||||
|
@ -195,17 +196,14 @@ WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/w
|
|||
## API 参考
|
||||
### JDBC 驱动
|
||||
|
||||
taos-jdbcdriver 实现了 JDBC 标准的 Driver 接口,提供了两个实现类:RestfulDriver 和 TSDBDriver。
|
||||
Websocket 和 REST 连接使用驱动类 `com.taosdata.jdbc.rs.RestfulDriver`。原生连接使用驱动类 `com.taosdata.jdbc.TSDBDriver`。
|
||||
|
||||
taos-jdbcdriver 实现了 JDBC 标准的 Driver 接口,提供了 3 个实现类。
|
||||
- WebSocket 连接使用驱动类 `com.taosdata.jdbc.ws.WebSocketDriver`。
|
||||
- 原生连接使用驱动类 `com.taosdata.jdbc.TSDBDriver`。
|
||||
- REST 连接使用驱动类 `com.taosdata.jdbc.rs.RestfulDriver`。
|
||||
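下面的示例草图演示了显式加载 WebSocket 驱动类并建立连接(对 JDBC 4.0 及以上版本的驱动,`Class.forName` 通常可以省略;地址与账号均为假设值):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class DriverLoadSketch {
    public static void main(String[] args) throws ClassNotFoundException, SQLException {
        // 显式加载 WebSocket 驱动类(JDBC 4.0 及以上可通过 SPI 自动加载)
        Class.forName("com.taosdata.jdbc.ws.WebSocketDriver");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-WS://localhost:6041/?user=root&password=taosdata")) {
            System.out.println("driver loaded, connection established");
        }
    }
}
```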
|
||||
#### URL 规范
|
||||
TDengine 的 JDBC URL 规范格式为:
|
||||
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`
|
||||
|
||||
对于建立连接,原生连接与 REST 连接有细微不同。 Websocket 和 REST 连接使用驱动类 `com.taosdata.jdbc.rs.RestfulDriver`。原生连接使用驱动类 `com.taosdata.jdbc.TSDBDriver`。
|
||||
|
||||
**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。
|
||||
`jdbc:[TAOS|TAOS-WS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]`
|
||||
|
||||
**原生连接**
|
||||
`jdbc:TAOS://taosdemo.com:6030/power?user=root&password=taosdata`,使用了 JDBC 原生连接的 TSDBDriver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 power 的连接。这个 URL
|
||||
|
@ -234,23 +232,38 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可
|
|||
|
||||
> **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg,Windows OS 上默认值 C:/TDengine/cfg/taos.cfg。
|
||||
|
||||
**WebSocket 连接**
|
||||
使用 JDBC WebSocket 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
|
||||
|
||||
**Websocket 和 REST 连接**
|
||||
使用 JDBC Websocket 或 REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
|
||||
1. driverClass 指定为“com.taosdata.jdbc.ws.WebSocketDriver”;
|
||||
2. jdbcUrl 以“jdbc:TAOS-WS://”开头;
|
||||
3. 使用 6041 作为连接端口。
|
||||
|
||||
对于 WebSocket 连接,url 中的配置参数如下:
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,将继续执行其后的 SQL;false:不再执行失败 SQL 之后的任何语句。默认值为:false。
|
||||
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 60000。
|
||||
- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 60000。
|
||||
- useSSL: 连接中是否使用 SSL。
|
||||
|
||||
**注意**:部分配置项(比如:locale、timezone)在 WebSocket 连接中不生效。
|
||||
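结合上述参数,下面是一个在 URL 中携带配置项建立 WebSocket 连接的示例草图(地址与参数取值均为假设,仅作演示):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class WsUrlParamsSketch {
    public static void main(String[] args) throws SQLException {
        // 在 URL 中指定用户名、密码以及连接/消息超时时间(单位 ms)
        String url = "jdbc:TAOS-WS://localhost:6041/?user=root&password=taosdata"
                + "&httpConnectTimeout=60000&messageWaitTimeout=60000";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("WebSocket connection established");
        }
    }
}
```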
|
||||
**REST 连接**
|
||||
使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
|
||||
|
||||
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
|
||||
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
|
||||
3. 使用 6041 作为连接端口。
|
||||
|
||||
对于 Websocket 和 REST 连接,url 中的配置参数如下:
|
||||
对于 REST 连接,url 中的配置参数如下:
|
||||
- user:登录 TDengine 用户名,默认值 'root'。
|
||||
- password:用户登录密码,默认值 'taosdata'。
|
||||
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST 连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
|
||||
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
|
||||
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,将继续执行其后的 SQL;false:不再执行失败 SQL 之后的任何语句。默认值为:false。
|
||||
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 60000。
|
||||
- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 60000。仅在 batchfetch 设置为 false 时生效。
|
||||
- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 60000。 仅在 batchfetch 设置为 true 时生效。
|
||||
- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 60000。
|
||||
- useSSL: 连接中是否使用 SSL。
|
||||
- httpPoolSize: REST 并发请求大小,默认 20。
|
||||
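下面是一个建立 REST 连接的示例草图(地址与参数取值均为假设;注意按上文所述,若增加 `batchfetch=true` 参数,连接会变成 WebSocket 连接):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class RestUrlSketch {
    public static void main(String[] args) throws SQLException {
        // REST 连接使用 jdbc:TAOS-RS 前缀,同样走 taosAdapter 的 6041 端口
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata&httpPoolSize=20";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("REST connection established");
        }
    }
}
```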
|
||||
|
@ -272,7 +285,7 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可
|
|||
properties 中的配置参数如下:
|
||||
- TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 'root'。
|
||||
- TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 'taosdata'。
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。因历史原因使用 REST 连接时,若设置此参数为 true 会变成 WebSocket 连接。
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,将继续执行其后的 SQL;false:不再执行失败 SQL 之后的任何语句。默认值为:false。
|
||||
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR:仅在使用 JDBC 原生连接时生效。客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
|
||||
- TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。
|
||||
|
@ -280,16 +293,16 @@ properties 中的配置参数如下:
|
|||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:仅在使用 JDBC 原生连接时生效。客户端使用的时区,默认值为系统当前时区。因为历史的原因,我们只支持 POSIX 标准的部分规范,如 UTC-8(代表中国上海)、GMT-8、Asia/Shanghai 这几种形式。
|
||||
- TSDBDriver.HTTP_CONNECT_TIMEOUT: 连接超时时间,单位 ms, 默认值为 60000。仅在 REST 连接时生效。
|
||||
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超时时间,单位 ms,默认值为 60000。仅在 REST 连接且 batchfetch 设置为 false 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 60000。 仅在 REST 连接且 batchfetch 设置为 true 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 REST 连接时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 60000。 仅 WebSocket 连接下有效。
|
||||
- TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 WebSocket/REST 连接时生效。
|
||||
- TSDBDriver.HTTP_POOL_SIZE: REST 并发请求大小,默认 20。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 REST/Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 REST/WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
> **注意**:启用自动重连仅对简单执行 SQL 语句以及无模式写入、数据订阅有效,对参数绑定无效。自动重连仅对连接建立时通过参数指定数据库有效,对后面的 `use db` 语句切换数据库无效。
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: 自动重连重试间隔,单位毫秒,默认值 2000。仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: 自动重连重试次数,默认值 3,仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: 关闭 SSL 证书验证 。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: 关闭 SSL 证书验证。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
|
||||
此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。
|
||||
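下面是一段通过 Properties 设置上述部分参数并建立连接的示例草图(地址与取值均为假设,仅演示常用配置项的用法):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

import com.taosdata.jdbc.TSDBDriver;

public class PropsConnectSketch {
    public static void main(String[] args) throws SQLException {
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
        props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
        // 启用 WebSocket 连接下的自动重连与传输压缩
        props.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        props.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION, "true");
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS-WS://localhost:6041/", props)) {
            System.out.println("connected with properties");
        }
    }
}
```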
|
||||
|
@ -1154,7 +1167,7 @@ JDBC 驱动支持标准的 ResultSet 接口,提供了用于读取结果集中
|
|||
PreparedStatement 允许使用预编译的 SQL 语句,这可以提高性能并提供参数化查询的能力,从而增加安全性。
|
||||
JDBC 驱动提供了实现 PreparedStatement 接口的两个类:
|
||||
1. 对应原生连接的 TSDBPreparedStatement
|
||||
2. 对应 Websocket 连接的 TSWSPreparedStatement
|
||||
2. 对应 WebSocket 连接的 TSWSPreparedStatement
|
||||
|
||||
因 JDBC 标准没有高性能绑定数据的接口,TSDBPreparedStatement 和 TSWSPreparedStatement 都新增了一些方法,用来扩展参数绑定能力。
|
||||
> **注意**:由于 PreparedStatement 继承了 Statement 接口,因此对于这部分重复的接口不再赘述,请参考 Statement 接口中对应描述。
|
||||
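作为参考,下面是一段使用标准 JDBC 接口进行参数绑定写入的示例草图(沿用本文前面的 power.meters 表结构,地址与数据均为假设值):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;

public class StmtBindSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS-WS://localhost:6041/?user=root&password=taosdata"; // 假设的地址
        String sql = "INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco')"
                + " VALUES (?, ?, ?, ?)";
        try (Connection conn = DriverManager.getConnection(url);
             PreparedStatement pstmt = conn.prepareStatement(sql)) {
            pstmt.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            pstmt.setFloat(2, 10.2f);   // current
            pstmt.setInt(3, 220);       // voltage
            pstmt.setFloat(4, 0.23f);   // phase
            pstmt.addBatch();
            int[] result = pstmt.executeBatch();
            System.out.println("inserted " + result.length + " row(s)");
        }
    }
}
```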
|
@ -1347,8 +1360,8 @@ JDBC 标准不支持数据订阅,因此本章所有接口都是扩展接口。
|
|||
- httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
||||
- messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
||||
- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: 自动重连重试间隔,单位毫秒,默认值 2000。仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: 自动重连重试次数,默认值 3,仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。
|
||||
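结合上述参数,下面是一个通过 WebSocket 连接创建消费者并拉取数据的示例草图(其中 topic 名称、反序列化类等均为假设,具体请以连接器实际提供的 API 为准):

```java
import java.sql.SQLException;
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TaosConsumer;

public class TmqWsSketch {
    public static void main(String[] args) throws SQLException {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "ws");                // 使用 WebSocket 连接
        config.setProperty("bootstrap.servers", "localhost:6041");  // 假设的服务地址
        config.setProperty("group.id", "group1");
        config.setProperty("client.id", "client1");
        config.setProperty("auto.offset.reset", "latest");
        config.setProperty("enable.auto.commit", "true");
        // 假设使用连接器内置的 Map 反序列化器,将每行数据反序列化为 Map
        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.MapDeserializer");

        try (TaosConsumer<Map<String, Object>> consumer = new TaosConsumer<>(config)) {
            consumer.subscribe(Collections.singletonList("topic_meters")); // 假设已创建的 topic
            ConsumerRecords<Map<String, Object>> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<Map<String, Object>> record : records) {
                System.out.println(record.value());
            }
        }
    }
}
```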
|
||||
|
|
|
@ -18,9 +18,9 @@ import RequestId from "./_request_id.mdx";
|
|||
|
||||
## 连接方式
|
||||
|
||||
`taos` 提供两种建立连接的方式。一般我们推荐使用 **Websocket 连接**。
|
||||
`taos` 提供两种建立连接的方式。一般我们推荐使用 **WebSocket 连接**。
|
||||
- **原生连接**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例。
|
||||
- **Websocket 连接**,它通过 taosAdapter 的 Websocket 接口连接 TDengine 运行实例。
|
||||
- **WebSocket 连接**,它通过 taosAdapter 的 WebSocket 接口连接 TDengine 运行实例。
|
||||
|
||||
你可以通过不同的 “特性(即 Cargo 关键字 `features`)” 来指定使用哪种连接器(默认同时支持)。
|
||||
|
||||
|
@ -29,13 +29,13 @@ import RequestId from "./_request_id.mdx";
|
|||
## 支持的平台
|
||||
|
||||
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
|
||||
Websocket 连接支持所有能运行 Rust 的平台。
|
||||
WebSocket 连接支持所有能运行 Rust 的平台。
|
||||
|
||||
## 版本历史
|
||||
|
||||
| Rust 连接器版本 | TDengine 版本 | 主要功能 |
|
||||
| :----------------: | :--------------: | :--------------------------------------------------: |
|
||||
| v0.12.3 | 3.3.0.0 or later | 优化了 Websocket 查询和插入性能,支持了 VARBINARY 和 GEOMETRY 类型 |
|
||||
| v0.12.3 | 3.3.0.0 or later | 优化了 WebSocket 查询和插入性能,支持了 VARBINARY 和 GEOMETRY 类型 |
|
||||
| v0.12.0 | 3.2.3.0 or later | WS 支持压缩。 |
|
||||
| v0.11.0 | 3.2.0.0 | TMQ 功能优化。 |
|
||||
| v0.10.0 | 3.1.0.0 | WS endpoint 变更。 |
|
||||
|
@ -115,15 +115,15 @@ DSN 描述字符串基本结构如下:
|
|||
- **driver**: 必须指定驱动名以便连接器选择何种方式创建连接,支持如下驱动名:
|
||||
- **taos**: 使用 TDengine 连接器驱动,默认是使用 taos 驱动。
|
||||
- **tmq**: 使用 TMQ 订阅数据。
|
||||
- **protocol**: 显示指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 Websocket 方式建立连接。
|
||||
- **http/ws**: 使用 Websocket 创建连接。
|
||||
- **https/wss**: 在 Websocket 连接方式下显示启用 SSL/TLS 连接。
|
||||
- **protocol**: 显式指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 WebSocket 方式建立连接。
|
||||
  - **http/ws**: 使用 WebSocket 创建连接。
|
||||
  - **https/wss**: 在 WebSocket 连接方式下显式启用 SSL/TLS 连接。
|
||||
- **username/password**: 用于创建连接的用户名及密码。
|
||||
- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时(`taos://`),原生连接默认为 `localhost:6030`,Websocket 连接默认为 `localhost:6041` 。
|
||||
- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时(`taos://`),原生连接默认为 `localhost:6030`,WebSocket 连接默认为 `localhost:6041` 。
|
||||
- **database**: 指定默认连接的数据库名,可选参数。
|
||||
- **params**:其他可选参数。
|
||||
|
||||
一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 Websocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。
|
||||
一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 WebSocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。
|
||||
|
||||
#### TaosBuilder
|
||||
TaosBuilder 结构体主要提供了根据 DSN 构建 Taos 对象的方法,还提供了检查连接,以及获取客户端版本号等功能。
|
||||
|
|
|
@ -14,10 +14,10 @@ import RequestId from "./_request_id.mdx";
|
|||
Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-python)。
|
||||
|
||||
## 连接方式
|
||||
`taospy`主要提供三种形式的连接器。一般我们推荐使用 **Websocket 连接**。
|
||||
`taospy` 主要提供三种形式的连接器。一般我们推荐使用 **WebSocket 连接**。
|
||||
- **原生连接**,对应 `taospy` 包的 `taos` 模块。通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、数据订阅、schemaless 接口和参数绑定接口等功能。
|
||||
- **REST 连接**,对应 `taospy` 包的 `taosrest` 模块。通过 taosAdapter 提供的 HTTP 接口连接 TDengine 实例,不支持 schemaless 和数据订阅等特性。
|
||||
- **Websocket 连接**,对应 `taos-ws-py` 包,可以选装。通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。
|
||||
- **WebSocket 连接**,对应 `taos-ws-py` 包,可以选装。通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。
|
||||
|
||||
连接方式的详细介绍请参考:[连接方式](../../../develop/connect/#连接方式)
|
||||
|
||||
|
@ -48,9 +48,9 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
|
|||
|2.7.9|数据订阅支持获取消费进度和重置消费进度|
|
||||
|2.7.8|新增 `execute_many`|
|
||||
|
||||
|Python Websocket Connector 版本|主要变化|
|
||||
|Python WebSocket Connector 版本|主要变化|
|
||||
|:----------------------------:|:-----:|
|
||||
|0.3.2|优化 Websocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题|
|
||||
|0.3.2|优化 WebSocket SQL 查询和插入性能,修改 readme 和文档,修复已知问题|
|
||||
|0.2.9|已知问题修复|
|
||||
|0.2.5|1. 数据订阅支持获取消费进度和重置消费进度 <br/> 2. 支持 schemaless <br/> 3. 支持 STMT|
|
||||
|0.2.4|数据订阅新增取消订阅方法|
|
||||
|
|
|
@ -14,7 +14,7 @@ Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-conne
|
|||
|
||||
## 连接方式
|
||||
|
||||
Node.js 连接器目前仅支持 Websocket 连接器, 其通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例。
|
||||
Node.js 连接器目前仅支持 WebSocket 连接,其通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例。
|
||||
|
||||
连接方式的详细介绍请参考:[连接方式](../../../develop/connect/#连接方式)
|
||||
|
||||
|
@ -48,7 +48,7 @@ Node.js 连接器目前仅支持 Websocket 连接器, 其通过 taosAdapter
|
|||
| 107 | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
|
||||
| 108 | connection has been closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
|
||||
| 109 | fetch block data parse fail | 获取到的查询数据,解析失败 |
|
||||
| 110 | websocket connection has reached its maximum limit | Websocket 连接达到上限 |
|
||||
| 110 | websocket connection has reached its maximum limit | WebSocket 连接达到上限 |
|
||||
- [TDengine Node.js Connector Error Code](https://github.com/taosdata/taos-connector-node/blob/main/nodejs/src/common/wsError.ts)
|
||||
- TDengine 其他功能模块的报错,请参考 [错误码](../../../reference/error-code)
|
||||
|
||||
|
@ -104,7 +104,7 @@ Node.js 连接器目前仅支持 Websocket 连接器, 其通过 taosAdapter
|
|||
|
||||
## API 参考
|
||||
|
||||
Node.js 连接器(`@tdengine/websocket`), 其通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例。
|
||||
Node.js 连接器(`@tdengine/websocket`)通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例。
|
||||
|
||||
### URL 规范
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ import RequestId from "./_request_id.mdx";
|
|||
|
||||
`TDengine.Connector` 提供两种形式的连接器
|
||||
* **原生连接**,通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、数据订阅、schemaless 接口和参数绑定接口等功能。
|
||||
* **Websocket 连接**,通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。(自 v3.0.1 起)
|
||||
* **WebSocket 连接**,通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。(自 v3.0.1 起)
|
||||
|
||||
连接方式的详细介绍请参考:[连接方式](../../../develop/connect/#连接方式)
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位
|
|||
|
||||
## 版本历史
|
||||
|
||||
| taos_odbc版本 | 主要变化 | TDengine 版本 |
|
||||
| taos_odbc 版本 | 主要变化 | TDengine 版本 |
|
||||
| :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- |
|
||||
| v1.1.0 | 1. 支持视图功能;<br/>2. 支持 VARBINARY/GEOMETRY 数据类型; | 3.3.3.0及更高版本 |
|
||||
| v1.0.2 | 支持 CP1252 字符编码; | 3.2.3.0及更高版本 |
|
||||
|
@ -145,7 +145,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位
|
|||
|
||||
## API 参考
|
||||
|
||||
本节按功能分类汇总了 ODBC API,关于完整的 ODBC API 参考,请访问 http://msdn.microsoft.com/en-us/library/ms714177.aspx 的ODBC程序员参考页面。
|
||||
本节按功能分类汇总了 ODBC API,关于完整的 ODBC API 参考,请访问 http://msdn.microsoft.com/en-us/library/ms714177.aspx 的 ODBC 程序员参考页面。
|
||||
|
||||
### 数据源和驱动程序管理
|
||||
|
||||
|
|
|
@ -62,7 +62,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
| **连接管理** | 支持 | 支持 | 支持 |
|
||||
| **执行 SQL** | 支持 | 支持 | 支持 |
|
||||
|
||||
### 使用 Websocket 接口
|
||||
### 使用 WebSocket 接口
|
||||
|
||||
| **功能特性** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | **C/C++** |
|
||||
| ------------------- | -------- | ---------- | ------ | ------ | ----------- | -------- | --------- |
|
||||
|
|
|
@ -32,6 +32,8 @@ extern int32_t tsS3PageCacheSize;
|
|||
extern int32_t tsS3UploadDelaySec;
|
||||
|
||||
int32_t s3Init();
|
||||
int32_t s3Begin();
|
||||
void s3End();
|
||||
int32_t s3CheckCfg();
|
||||
int32_t s3PutObjectFromFile(const char *file, const char *object);
|
||||
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp);
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _TD_AZURE_H_
|
||||
#define _TD_AZURE_H_
|
||||
|
||||
#include "os.h"
|
||||
#include "tarray.h"
|
||||
#include "tdef.h"
|
||||
#include "tlog.h"
|
||||
#include "tmsg.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
int32_t azBegin();
|
||||
void azEnd();
|
||||
int32_t azCheckCfg();
|
||||
int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size);
|
||||
int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock);
|
||||
void azDeleteObjectsByPrefix(const char *prefix);
|
||||
|
||||
int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp);
|
||||
int32_t azGetObjectsByPrefix(const char *prefix, const char *path);
|
||||
int32_t azGetObjectToFile(const char *object_name, const char *fileName);
|
||||
int32_t azDeleteObjects(const char *object_name[], int nobject);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // _TD_AZURE_H_
|
|
@ -333,6 +333,7 @@ typedef struct SFillLogicNode {
|
|||
SNode* pWStartTs;
|
||||
SNode* pValues; // SNodeListNode
|
||||
STimeWindow timeRange;
|
||||
SNodeList* pFillNullExprs;
|
||||
} SFillLogicNode;
|
||||
|
||||
typedef struct SSortLogicNode {
|
||||
|
@ -677,6 +678,7 @@ typedef struct SFillPhysiNode {
|
|||
SNode* pWStartTs; // SColumnNode
|
||||
SNode* pValues; // SNodeListNode
|
||||
STimeWindow timeRange;
|
||||
SNodeList* pFillNullExprs;
|
||||
} SFillPhysiNode;
|
||||
|
||||
typedef SFillPhysiNode SStreamFillPhysiNode;
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _TD_TCS_H_
|
||||
#define _TD_TCS_H_
|
||||
|
||||
#include "os.h"
|
||||
#include "tarray.h"
|
||||
#include "tdef.h"
|
||||
#include "tlog.h"
|
||||
#include "tmsg.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
extern int8_t tsS3Enabled;
|
||||
extern int8_t tsS3EnabledCfg;
|
||||
|
||||
extern int32_t tsS3UploadDelaySec;
|
||||
extern int32_t tsS3BlockSize;
|
||||
extern int32_t tsS3BlockCacheSize;
|
||||
extern int32_t tsS3PageCacheSize;
|
||||
|
||||
extern int8_t tsS3StreamEnabled;
|
||||
|
||||
int32_t tcsInit();
|
||||
void tcsUninit();
|
||||
|
||||
int32_t tcsCheckCfg();
|
||||
|
||||
int32_t tcsPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size);
|
||||
int32_t tcsGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock);
|
||||
|
||||
void tcsDeleteObjectsByPrefix(const char *prefix);
|
||||
|
||||
int32_t tcsPutObjectFromFile2(const char *file, const char *object, int8_t withcp);
|
||||
int32_t tcsGetObjectsByPrefix(const char *prefix, const char *path);
|
||||
int32_t tcsDeleteObjects(const char *object_name[], int nobject);
|
||||
int32_t tcsGetObjectToFile(const char *object_name, const char *fileName);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // _TD_TCS_H_
|
|
@ -453,10 +453,10 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_DEFAULT_S3_CHUNK_SIZE (256 * 1024)
|
||||
#define TSDB_MIN_S3_KEEP_LOCAL (1 * 1440) // unit minute
|
||||
#define TSDB_MAX_S3_KEEP_LOCAL (365000 * 1440)
|
||||
#define TSDB_DEFAULT_S3_KEEP_LOCAL (3650 * 1440)
|
||||
#define TSDB_DEFAULT_S3_KEEP_LOCAL (365 * 1440)
|
||||
#define TSDB_MIN_S3_COMPACT 0
|
||||
#define TSDB_MAX_S3_COMPACT 1
|
||||
#define TSDB_DEFAULT_S3_COMPACT 0
|
||||
#define TSDB_DEFAULT_S3_COMPACT 1
|
||||
|
||||
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
|
||||
#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600
|
||||
|
@ -506,7 +506,7 @@ typedef enum ELogicConditionType {
|
|||
#ifdef WINDOWS
|
||||
#define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections.
|
||||
#else
|
||||
#define TSDB_MAX_RPC_THREADS 20
|
||||
#define TSDB_MAX_RPC_THREADS 50
|
||||
#endif
|
||||
|
||||
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type
|
||||
|
|
|
@ -57,6 +57,7 @@ extern int32_t rpcDebugFlag;
|
|||
extern int32_t qDebugFlag;
|
||||
extern int32_t stDebugFlag;
|
||||
extern int32_t wDebugFlag;
|
||||
extern int32_t azDebugFlag;
|
||||
extern int32_t sDebugFlag;
|
||||
extern int32_t tsdbDebugFlag;
|
||||
extern int32_t tqDebugFlag;
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
aux_source_directory(src CLIENT_SRC)
|
||||
|
||||
IF (TD_ENTERPRISE)
|
||||
LIST(APPEND CLIENT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/clientView.c)
|
||||
ENDIF ()
|
||||
if(TD_ENTERPRISE)
|
||||
LIST(APPEND CLIENT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/clientView.c)
|
||||
endif()
|
||||
|
||||
if(TD_WINDOWS)
|
||||
add_library(taos SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in)
|
||||
|
|
|
@ -1,121 +1,122 @@
|
|||
aux_source_directory(src COMMON_SRC)
|
||||
IF (TD_ENTERPRISE)
|
||||
LIST(APPEND COMMON_SRC ${TD_ENTERPRISE_DIR}/src/plugins/common/src/tglobal.c)
|
||||
ENDIF()
|
||||
|
||||
if(TD_ENTERPRISE)
|
||||
LIST(APPEND COMMON_SRC ${TD_ENTERPRISE_DIR}/src/plugins/common/src/tglobal.c)
|
||||
endif()
|
||||
|
||||
add_library(common STATIC ${COMMON_SRC})
|
||||
|
||||
if (DEFINED GRANT_CFG_INCLUDE_DIR)
|
||||
if(DEFINED GRANT_CFG_INCLUDE_DIR)
|
||||
add_definitions(-DGRANTS_CFG)
|
||||
endif()
|
||||
|
||||
IF (TD_GRANT)
|
||||
if(TD_GRANT)
|
||||
ADD_DEFINITIONS(-D_GRANT)
|
||||
ENDIF ()
|
||||
endif()
|
||||
|
||||
IF (TD_STORAGE)
|
||||
if(TD_STORAGE)
|
||||
ADD_DEFINITIONS(-D_STORAGE)
|
||||
TARGET_LINK_LIBRARIES(common PRIVATE storage)
|
||||
ENDIF ()
|
||||
endif()
|
||||
|
||||
IF (TD_ENTERPRISE)
|
||||
IF(${BUILD_WITH_S3})
|
||||
if(TD_ENTERPRISE)
|
||||
if(${BUILD_WITH_S3})
|
||||
add_definitions(-DUSE_S3)
|
||||
ELSEIF(${BUILD_WITH_COS})
|
||||
add_definitions(-DUSE_COS)
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
target_include_directories(
|
||||
common
|
||||
PUBLIC "${TD_SOURCE_DIR}/include/common"
|
||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
||||
PRIVATE "${GRANT_CFG_INCLUDE_DIR}"
|
||||
common
|
||||
PUBLIC "${TD_SOURCE_DIR}/include/common"
|
||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
||||
PRIVATE "${GRANT_CFG_INCLUDE_DIR}"
|
||||
)
|
||||
IF(${TD_WINDOWS})
|
||||
target_include_directories(
|
||||
common
|
||||
PRIVATE "${TD_SOURCE_DIR}/contrib/pthread"
|
||||
PRIVATE "${TD_SOURCE_DIR}/contrib/msvcregex"
|
||||
)
|
||||
ENDIF ()
|
||||
|
||||
if(${TD_WINDOWS})
|
||||
target_include_directories(
|
||||
common
|
||||
PRIVATE "${TD_SOURCE_DIR}/contrib/pthread"
|
||||
PRIVATE "${TD_SOURCE_DIR}/contrib/msvcregex"
|
||||
)
|
||||
endif()
|
||||
|
||||
target_link_libraries(
|
||||
common
|
||||
PUBLIC os
|
||||
PUBLIC util
|
||||
INTERFACE api
|
||||
common
|
||||
PUBLIC os
|
||||
PUBLIC util
|
||||
INTERFACE api
|
||||
)
|
||||
|
||||
if(${BUILD_S3})
|
||||
if(${BUILD_WITH_S3})
|
||||
target_include_directories(
|
||||
common
|
||||
|
||||
if(${BUILD_WITH_S3})
|
||||
target_include_directories(
|
||||
common
|
||||
PUBLIC "$ENV{HOME}/.cos-local.2/include"
|
||||
)
|
||||
|
||||
PUBLIC "$ENV{HOME}/.cos-local.2/include"
|
||||
)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2)
|
||||
find_library(S3_LIBRARY s3)
|
||||
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(XML2_LIBRARY xml2)
|
||||
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
target_link_libraries(
|
||||
common
|
||||
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2)
|
||||
find_library(S3_LIBRARY s3)
|
||||
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(XML2_LIBRARY xml2)
|
||||
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
target_link_libraries(
|
||||
common
|
||||
# s3
|
||||
PUBLIC ${S3_LIBRARY}
|
||||
PUBLIC ${CURL_LIBRARY}
|
||||
PUBLIC ${SSL_LIBRARY}
|
||||
PUBLIC ${CRYPTO_LIBRARY}
|
||||
PUBLIC ${XML2_LIBRARY}
|
||||
)
|
||||
|
||||
# s3
|
||||
PUBLIC ${S3_LIBRARY}
|
||||
PUBLIC ${CURL_LIBRARY}
|
||||
PUBLIC ${SSL_LIBRARY}
|
||||
PUBLIC ${CRYPTO_LIBRARY}
|
||||
PUBLIC ${XML2_LIBRARY}
|
||||
)
|
||||
add_definitions(-DUSE_S3)
|
||||
endif()
|
||||
|
||||
add_definitions(-DUSE_S3)
|
||||
if(${BUILD_WITH_COS})
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
|
||||
find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
|
||||
find_library(MINIXML_LIBRARY mxml)
|
||||
find_library(CURL_LIBRARY curl)
|
||||
target_link_libraries(
|
||||
common
|
||||
|
||||
# s3
|
||||
PUBLIC cos_c_sdk_static
|
||||
PUBLIC ${APR_UTIL_LIBRARY}
|
||||
PUBLIC ${APR_LIBRARY}
|
||||
PUBLIC ${MINIXML_LIBRARY}
|
||||
PUBLIC ${CURL_LIBRARY}
|
||||
)
|
||||
|
||||
# s3
|
||||
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
|
||||
|
||||
if(APR_CONFIG_BIN)
|
||||
EXECUTE_PROCESS(
|
||||
COMMAND ${APR_CONFIG_BIN} --includedir
|
||||
OUTPUT_VARIABLE APR_INCLUDE_DIR
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
endif()
|
||||
|
||||
if(${BUILD_WITH_COS})
|
||||
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
|
||||
find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
|
||||
find_library(MINIXML_LIBRARY mxml)
|
||||
find_library(CURL_LIBRARY curl)
|
||||
target_link_libraries(
|
||||
common
|
||||
|
||||
# s3
|
||||
PUBLIC cos_c_sdk_static
|
||||
PUBLIC ${APR_UTIL_LIBRARY}
|
||||
PUBLIC ${APR_LIBRARY}
|
||||
PUBLIC ${MINIXML_LIBRARY}
|
||||
PUBLIC ${CURL_LIBRARY}
|
||||
)
|
||||
|
||||
# s3
|
||||
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
|
||||
IF (APR_CONFIG_BIN)
|
||||
EXECUTE_PROCESS(
|
||||
COMMAND ${APR_CONFIG_BIN} --includedir
|
||||
OUTPUT_VARIABLE APR_INCLUDE_DIR
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
ENDIF()
|
||||
include_directories (${APR_INCLUDE_DIR})
|
||||
target_include_directories(
|
||||
common
|
||||
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
|
||||
PUBLIC "$ENV{HOME}/.cos-local.1/include"
|
||||
)
|
||||
|
||||
add_definitions(-DUSE_COS)
|
||||
endif(${BUILD_WITH_COS})
|
||||
include_directories(${APR_INCLUDE_DIR})
|
||||
target_include_directories(
|
||||
common
|
||||
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
|
||||
PUBLIC "$ENV{HOME}/.cos-local.1/include"
|
||||
)
|
||||
|
||||
add_definitions(-DUSE_COS)
|
||||
endif(${BUILD_WITH_COS})
|
||||
endif()
|
||||
|
||||
if(${BUILD_TEST})
|
||||
ADD_SUBDIRECTORY(test)
|
||||
ADD_SUBDIRECTORY(test)
|
||||
endif(${BUILD_TEST})
|
||||
|
|
|
@ -89,20 +89,8 @@ static void s3DumpCfgByEp(int8_t epIndex) {
|
|||
|
||||
int32_t s3CheckCfg() {
|
||||
int32_t code = 0, lino = 0;
|
||||
int8_t i = 0;
|
||||
|
||||
if (!tsS3Enabled) {
|
||||
(void)fprintf(stderr, "s3 not configured.\n");
|
||||
TAOS_RETURN(code);
|
||||
}
|
||||
|
||||
code = s3Begin();
|
||||
if (code != 0) {
|
||||
(void)fprintf(stderr, "failed to initialize s3.\n");
|
||||
TAOS_RETURN(code);
|
||||
}
|
||||
|
||||
for (; i < tsS3EpNum; i++) {
|
||||
for (int8_t i = 0; i < tsS3EpNum; i++) {
|
||||
(void)fprintf(stdout, "test s3 ep (%d/%d):\n", i + 1, tsS3EpNum);
|
||||
s3DumpCfgByEp(i);
|
||||
|
||||
|
@ -192,7 +180,7 @@ int32_t s3CheckCfg() {
|
|||
(void)fprintf(stdout, "=================================================================\n");
|
||||
}
|
||||
|
||||
s3End();
|
||||
// s3End();
|
||||
|
||||
TAOS_RETURN(code);
|
||||
}
|
||||
|
@ -1529,6 +1517,8 @@ void s3EvictCache(const char *path, long object_size) {}
|
|||
#include "cos_http_io.h"
|
||||
#include "cos_log.h"
|
||||
|
||||
int32_t s3Begin() { TAOS_RETURN(TSDB_CODE_SUCCESS); }
|
||||
|
||||
int32_t s3Init() {
|
||||
if (cos_http_io_initialize(NULL, 0) != COSE_OK) {
|
||||
return -1;
|
||||
|
@ -1967,6 +1957,10 @@ long s3Size(const char *object_name) {
|
|||
#else
|
||||
|
||||
int32_t s3Init() { return 0; }
|
||||
int32_t s3Begin() { TAOS_RETURN(TSDB_CODE_SUCCESS); }
|
||||
|
||||
void s3End() {}
|
||||
int32_t s3CheckCfg() { return 0; }
|
||||
int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
|
||||
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; }
|
||||
int32_t s3PutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) { return 0; }
|
||||
|
|
|
@ -160,7 +160,11 @@ int32_t startRsync() {
|
|||
code = system(cmd);
|
||||
if (code != 0) {
|
||||
uError("[rsync] cmd:%s start server failed, code:%d," ERRNO_ERR_FORMAT, cmd, code, ERRNO_ERR_DATA);
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
if (errno == 0) {
|
||||
return 0;
|
||||
} else {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
}
|
||||
} else {
|
||||
uInfo("[rsync] cmd:%s start server successful", cmd);
|
||||
}
|
||||
|
@ -358,4 +362,4 @@ int32_t deleteRsync(const char* id) {
|
|||
|
||||
uDebug("[rsync] delete data:%s successful", id);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ int32_t tsShellActivityTimer = 3; // second
|
|||
// queue & threads
|
||||
int32_t tsNumOfRpcThreads = 1;
|
||||
int32_t tsNumOfRpcSessions = 30000;
|
||||
int32_t tsShareConnLimit = 8;
|
||||
int32_t tsShareConnLimit = 10;
|
||||
int32_t tsReadTimeout = 900;
|
||||
int32_t tsTimeToGetAvailableConn = 500000;
|
||||
int32_t tsKeepAliveIdle = 60;
|
||||
|
@ -287,7 +287,7 @@ int32_t tsTtlUnit = 86400;
|
|||
int32_t tsTtlPushIntervalSec = 10;
|
||||
int32_t tsTrimVDbIntervalSec = 60 * 60; // interval of trimming db in all vgroups
|
int32_t tsS3MigrateIntervalSec = 60 * 60;  // interval of s3migrate db in all vgroups
bool tsS3MigrateEnabled = 1;
bool tsS3MigrateEnabled = 0;
int32_t tsGrantHBInterval = 60;
int32_t tsUptimeInterval = 300;  // seconds
char tsUdfdResFuncs[512] = "";  // udfd resident funcs that teardown when udfd exits

@@ -308,6 +308,7 @@ char tsS3AppId[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {"<appid>"};
int8_t tsS3Enabled = false;
int8_t tsS3EnabledCfg = false;
int8_t tsS3Oss[TSDB_MAX_EP_NUM] = {false};
int8_t tsS3Ablob = false;
int8_t tsS3StreamEnabled = false;

int8_t tsS3Https[TSDB_MAX_EP_NUM] = {true};

@@ -436,6 +437,7 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
}
tsS3Https[i] = (strstr(tsS3Endpoint[i], "https://") != NULL);
tsS3Oss[i] = (strstr(tsS3Endpoint[i], "aliyuncs.") != NULL);
tsS3Ablob = (strstr(tsS3Endpoint[i], ".blob.core.windows.net") != NULL);
}

if (tsS3BucketName[0] != '<') {

@@ -542,6 +544,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "vDebugFlag", vDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "mDebugFlag", mDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "wDebugFlag", wDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "azDebugFlag", azDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));

@@ -1063,6 +1066,9 @@ static int32_t taosSetServerLogCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "wDebugFlag");
wDebugFlag = pItem->i32;

TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "azDebugFlag");
azDebugFlag = pItem->i32;

TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "sDebugFlag");
sDebugFlag = pItem->i32;

@@ -1987,13 +1993,14 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {

{  // 'bool/int32_t/int64_t/float/double' variables with general modification function
static OptionNameAndVar debugOptions[] = {
{"dDebugFlag", &dDebugFlag}, {"vDebugFlag", &vDebugFlag}, {"mDebugFlag", &mDebugFlag},
{"wDebugFlag", &wDebugFlag}, {"sDebugFlag", &sDebugFlag}, {"tsdbDebugFlag", &tsdbDebugFlag},
{"tqDebugFlag", &tqDebugFlag}, {"fsDebugFlag", &fsDebugFlag}, {"udfDebugFlag", &udfDebugFlag},
{"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"tdbDebugFlag", &tdbDebugFlag},
{"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag},
{"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag},
{"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, {"tqClientDebug", &tqClientDebug},
{"dDebugFlag", &dDebugFlag}, {"vDebugFlag", &vDebugFlag}, {"mDebugFlag", &mDebugFlag},
{"wDebugFlag", &wDebugFlag}, {"azDebugFlag", &azDebugFlag}, {"sDebugFlag", &sDebugFlag},
{"tsdbDebugFlag", &tsdbDebugFlag}, {"tqDebugFlag", &tqDebugFlag}, {"fsDebugFlag", &fsDebugFlag},
{"udfDebugFlag", &udfDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag},
{"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag},
{"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag},
{"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag},
{"tqClientDebug", &tqClientDebug},
};

static OptionNameAndVar options[] = {{"audit", &tsEnableAudit},

@@ -2371,6 +2378,7 @@ static int32_t taosSetAllDebugFlag(SConfig *pCfg, int32_t flag) {
taosCheckAndSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&azDebugFlag, "azDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, noNeedToSetVars);
taosCheckAndSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, noNeedToSetVars);
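The taosSetS3Cfg hunk above detects an Azure Blob endpoint with a plain substring probe, alongside the existing https and Aliyun OSS probes. As a hedged illustration only (the struct, its field names, and the helper are invented here, not part of tglobal.c), the three independent probes reduce to:

#include <cstring>

// Illustrative only: the three strstr() probes from taosSetS3Cfg gathered
// into one hypothetical helper. Field names are invented for this sketch.
struct EndpointTraits {
  bool https;  // corresponds to tsS3Https[i]
  bool oss;    // corresponds to tsS3Oss[i]
  bool ablob;  // corresponds to tsS3Ablob (new in this change)
};

static EndpointTraits probeEndpoint(const char *ep) {
  EndpointTraits t;
  t.https = std::strstr(ep, "https://") != nullptr;
  t.oss   = std::strstr(ep, "aliyuncs.") != nullptr;
  t.ablob = std::strstr(ep, ".blob.core.windows.net") != nullptr;
  return t;
}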
@@ -10,29 +10,28 @@ add_subdirectory(test)
aux_source_directory(exe EXEC_SRC)
add_executable(taosd ${EXEC_SRC})
target_include_directories(
taosd
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc"
taosd
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc"
)

IF (TD_ENTERPRISE)
IF(TD_ENTERPRISE)
IF(${BUILD_WITH_S3})
add_definitions(-DUSE_S3)
add_definitions(-DUSE_S3)
ELSEIF(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
add_definitions(-DUSE_COS)
ENDIF()
ENDIF()

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
ELSE()
SET(LINK_JEMALLOC "")
ENDIF ()

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEPENDENCIES(taosd jemalloc)
target_link_libraries(taosd dnode crypt ${LINK_JEMALLOC})
ELSE ()
target_link_libraries(taosd dnode crypt)
ENDIF ()
ENDIF()

IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEPENDENCIES(taosd jemalloc)
target_link_libraries(taosd dnode crypt ${LINK_JEMALLOC})
ELSE()
target_link_libraries(taosd dnode crypt)
ENDIF()
@@ -24,6 +24,7 @@
#include "jemalloc/jemalloc.h"
#endif
#include "dmUtil.h"
#include "tcs.h"

#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"

@@ -330,10 +331,9 @@ static int32_t dmCheckS3() {
int32_t code = 0;
SConfig *pCfg = taosGetCfg();
cfgDumpCfgS3(pCfg, 0, true);
#if defined(USE_S3)
extern int32_t s3CheckCfg();

code = s3CheckCfg();
#if defined(USE_S3)
code = tcsCheckCfg();
#endif
return code;
}
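dmCheckS3() now routes through the tcs layer instead of calling s3CheckCfg() directly. The sketch below is only a plausible shape for such a facade; the dispatch logic shown is invented for illustration, the real tcs module lives in source/libs/tcs. s3CheckCfg, azCheckCfg, and tsS3Ablob do appear elsewhere in this commit.

// Hypothetical facade: route object-storage calls to S3 or Azure Blob
// depending on the configured endpoint. The body is illustrative only.
extern int32_t s3CheckCfg();
extern int32_t azCheckCfg();
extern int8_t  tsS3Ablob;  // set when the endpoint is *.blob.core.windows.net

static int32_t tcsCheckCfgSketch() {
  return tsS3Ablob ? azCheckCfg() : s3CheckCfg();
}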
@@ -1,24 +1,25 @@
aux_source_directory(src MGMT_DNODE)
add_library(mgmt_dnode STATIC ${MGMT_DNODE})
if (DEFINED GRANT_CFG_INCLUDE_DIR)

if(DEFINED GRANT_CFG_INCLUDE_DIR)
add_definitions(-DGRANTS_CFG)
endif()

IF (NOT BUILD_DM_MODULE)
if(NOT BUILD_DM_MODULE)
MESSAGE(STATUS "NOT BUILD_DM_MODULE")
target_link_directories(
mgmt_dnode
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/dm_static"
)
ELSE()
MESSAGE(STATUS "BUILD_DM_MODULE")
ENDIF()
else()
MESSAGE(STATUS "BUILD_DM_MODULE")
endif()

target_include_directories(
mgmt_dnode
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc"
PUBLIC "${GRANT_CFG_INCLUDE_DIR}"
mgmt_dnode
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc"
PUBLIC "${GRANT_CFG_INCLUDE_DIR}"
)
target_link_libraries(
mgmt_dnode node_util dmodule
mgmt_dnode node_util dmodule
)
@@ -47,7 +47,6 @@ static void *dmStatusThreadFp(void *param) {
return NULL;
}

extern SMonVloadInfo tsVinfo;
static void *dmStatusInfoThreadFp(void *param) {
SDnodeMgmt *pMgmt = param;
int64_t lastTime = taosGetTimestampMs();

@@ -73,19 +72,6 @@ static void *dmStatusInfoThreadFp(void *param) {
}
}
}
dDebug("begin to lock status info when thread exit");
if (taosThreadMutexLock(&pMgmt->pData->statusInfolock) != 0) {
dError("failed to lock status info lock");
return NULL;
}
if (tsVinfo.pVloads != NULL) {
taosArrayDestroy(tsVinfo.pVloads);
tsVinfo.pVloads = NULL;
}
if (taosThreadMutexUnlock(&pMgmt->pData->statusInfolock) != 0) {
dError("failed to unlock status info lock");
return NULL;
}

return NULL;
}
@@ -213,13 +213,13 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)
taosQueueGetThreadId(pVnode->pApplyW.queue));
tMultiWorkerCleanup(&pVnode->pApplyW);

dInfo("vgId:%d, wait for vnode query queue:%p is empty", pVnode->vgId, pVnode->pQueryQ);
while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10);

dInfo("vgId:%d, wait for vnode fetch queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
taosQueueGetThreadId(pVnode->pFetchQ));
while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10);

dInfo("vgId:%d, wait for vnode query queue:%p is empty", pVnode->vgId, pVnode->pQueryQ);
while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10);

tqNotifyClose(pVnode->pImpl->pTq);
dInfo("vgId:%d, wait for vnode stream queue:%p is empty", pVnode->vgId, pVnode->pStreamQ);
while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10);
@@ -1,22 +1,22 @@
aux_source_directory(src IMPLEMENT_SRC)
add_library(dnode STATIC ${IMPLEMENT_SRC})
target_link_libraries(
dnode mgmt_mnode mgmt_qnode mgmt_snode mgmt_vnode mgmt_dnode monitorfw
dnode mgmt_mnode mgmt_qnode mgmt_snode mgmt_vnode mgmt_dnode monitorfw tcs
)

IF (TD_ENTERPRISE)
IF(TD_ENTERPRISE)
IF(${BUILD_WITH_S3})
add_definitions(-DUSE_S3)
add_definitions(-DUSE_S3)
ELSEIF(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
add_definitions(-DUSE_COS)
ENDIF()
ENDIF()

IF (DEFINED GRANT_CFG_INCLUDE_DIR)
IF(DEFINED GRANT_CFG_INCLUDE_DIR)
add_definitions(-DGRANTS_CFG)
ENDIF()

target_include_directories(
dnode
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
dnode
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
@@ -20,6 +20,7 @@
#include "libs/function/tudf.h"
#include "tgrant.h"
#include "tcompare.h"
#include "tcs.h"
#include "tanal.h"
// clang-format on

@@ -98,9 +99,9 @@ static bool dmDataSpaceAvailable() {
static int32_t dmCheckDiskSpace() {
// availability
int32_t code = 0;
code = osUpdate();
if(code != 0) {
code = 0;  // ignore the error, just log it
code = osUpdate();
if (code != 0) {
code = 0;  // ignore the error, just log it
dError("failed to update os info since %s", tstrerror(code));
}
if (!dmDataSpaceAvailable()) {

@@ -163,13 +164,6 @@ static int32_t dmCheckDataDirVersionWrapper() {
}
return 0;
}
#if defined(USE_S3)

extern int32_t s3Begin();
extern void s3End();
extern int8_t tsS3Enabled;

#endif

int32_t dmInit() {
dInfo("start to init dnode env");

@@ -187,7 +181,7 @@ int32_t dmInit() {
if ((code = dmInitDnode(dmInstance())) != 0) return code;
if ((code = InitRegexCache() != 0)) return code;
#if defined(USE_S3)
if ((code = s3Begin()) != 0) return code;
if ((code = tcsInit()) != 0) return code;
#endif

dInfo("dnode env is initialized");

@@ -221,7 +215,7 @@ void dmCleanup() {
DestroyRegexCache();

#if defined(USE_S3)
s3End();
tcsUninit();
#endif

dInfo("dnode env is cleaned up");
@@ -219,6 +219,7 @@ int32_t dmInitVars(SDnode *pDnode) {
return 0;
}

extern SMonVloadInfo tsVinfo;
void dmClearVars(SDnode *pDnode) {
for (EDndNodeType ntype = DNODE; ntype < NODE_END; ++ntype) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[ntype];

@@ -254,6 +255,25 @@ void dmClearVars(SDnode *pDnode) {
(void)taosThreadRwlockUnlock(&pData->lock);

(void)taosThreadRwlockDestroy(&pData->lock);

dDebug("begin to lock status info when thread exit");
if (taosThreadMutexLock(&pData->statusInfolock) != 0) {
dError("failed to lock status info lock");
return;
}
if (tsVinfo.pVloads != NULL) {
taosArrayDestroy(tsVinfo.pVloads);
tsVinfo.pVloads = NULL;
}
if (taosThreadMutexUnlock(&pData->statusInfolock) != 0) {
dError("failed to unlock status info lock");
return;
}
if (taosThreadMutexDestroy(&pData->statusInfolock) != 0) {
dError("failed to destroy status info lock");
}
memset(&pData->statusInfolock, 0, sizeof(pData->statusInfolock));

(void)taosThreadMutexDestroy(&pDnode->mutex);
memset(&pDnode->mutex, 0, sizeof(pDnode->mutex));
}
@@ -16,8 +16,8 @@
#define _DEFAULT_SOURCE
#include "dmMgmt.h"
#include "qworker.h"
#include "tversion.h"
#include "tanal.h"
#include "tversion.h"

static inline void dmSendRsp(SRpcMsg *pMsg) {
if (rpcSendResponse(pMsg) != 0) {

@@ -411,7 +411,7 @@ int32_t dmInitClient(SDnode *pDnode) {

rpcInit.noDelayFp = rpcNoDelayMsg;

int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3) / 2;
int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3);
connLimitNum = TMAX(connLimitNum, 10);
connLimitNum = TMIN(connLimitNum, 500);
@@ -1,44 +1,46 @@
aux_source_directory(src MNODE_SRC)
IF (TD_PRIVILEGE)

if(TD_PRIVILEGE)
ADD_DEFINITIONS(-D_PRIVILEGE)
ENDIF ()
IF (TD_ENTERPRISE)
endif()

if(TD_ENTERPRISE)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDb.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndVgroup.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDnode.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/mndView.c)

IF(${BUILD_WITH_S3})
if(${BUILD_WITH_S3})
add_definitions(-DUSE_S3)
ELSEIF(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
ENDIF()
endif()

IF(${BUILD_WITH_ANALYSIS})
if(${BUILD_WITH_ANALYSIS})
add_definitions(-DUSE_ANAL)
ENDIF()
ENDIF ()
endif()
endif()

add_library(mnode STATIC ${MNODE_SRC})
target_include_directories(
mnode
PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
mnode
PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser audit monitorfw
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser audit monitorfw
)

IF (DEFINED GRANT_CFG_INCLUDE_DIR)
if(DEFINED GRANT_CFG_INCLUDE_DIR)
add_definitions(-DGRANTS_CFG)
ENDIF()
endif()

IF (TD_GRANT)
if(TD_GRANT)
TARGET_LINK_LIBRARIES(mnode grant)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()
endif()

if(${BUILD_TEST})
add_subdirectory(test)
add_subdirectory(test)
endif(${BUILD_TEST})
@@ -583,7 +583,7 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
if (pCfg->s3ChunkSize <= 0) pCfg->s3ChunkSize = TSDB_DEFAULT_S3_CHUNK_SIZE;
if (pCfg->s3KeepLocal <= 0) pCfg->s3KeepLocal = TSDB_DEFAULT_S3_KEEP_LOCAL;
if (pCfg->s3Compact <= 0) pCfg->s3Compact = TSDB_DEFAULT_S3_COMPACT;
if (pCfg->s3Compact < 0) pCfg->s3Compact = TSDB_DEFAULT_S3_COMPACT;
if (pCfg->withArbitrator < 0) pCfg->withArbitrator = TSDB_DEFAULT_DB_WITH_ARBITRATOR;
if (pCfg->encryptAlgorithm < 0) pCfg->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO;
}
@@ -4076,7 +4076,7 @@ typedef struct SMDropTbDbInfo {

typedef struct SMDropTbTsmaInfo {
char tsmaResTbDbFName[TSDB_DB_FNAME_LEN];
char tsmaResTbNamePrefix[TSDB_TABLE_NAME_LEN];
char tsmaResTbNamePrefix[TSDB_TABLE_FNAME_LEN];
int32_t suid;
SMDropTbDbInfo dbInfo;  // reference to DbInfo in pDbMap
} SMDropTbTsmaInfo;

@@ -4207,6 +4207,7 @@ static int32_t mndCreateDropTbsTxnPrepare(SRpcMsg *pRsp, SMndDropTbsWithTsmaCtx
SMnode *pMnode = pRsp->info.node;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pRsp, "drop-tbs");
mndTransSetChangeless(pTrans);
mndTransSetSerial(pTrans);
if (pTrans == NULL) {
code = TSDB_CODE_MND_RETURN_VALUE_NULL;
if (terrno != 0) code = terrno;

@@ -4294,6 +4295,18 @@ static int32_t mndDropTbAdd(SMnode *pMnode, SHashObj *pVgHashMap, const SVgroupI
return 0;
}

int vgInfoCmp(const void* lp, const void* rp) {
SVgroupInfo* pLeft = (SVgroupInfo*)lp;
SVgroupInfo* pRight = (SVgroupInfo*)rp;
if (pLeft->hashBegin < pRight->hashBegin) {
return -1;
} else if (pLeft->hashBegin > pRight->hashBegin) {
return 1;
}

return 0;
}

static int32_t mndGetDbVgInfoForTsma(SMnode *pMnode, const char *dbname, SMDropTbTsmaInfo *pInfo) {
int32_t code = 0;
SDbObj *pDb = mndAcquireDb(pMnode, dbname);

@@ -4308,6 +4321,7 @@ static int32_t mndGetDbVgInfoForTsma(SMnode *pMnode, const char *dbname, SMDropT
goto _end;
}
mndBuildDBVgroupInfo(pDb, pMnode, pInfo->dbInfo.dbVgInfos);
taosArraySort(pInfo->dbInfo.dbVgInfos, vgInfoCmp);

pInfo->dbInfo.hashPrefix = pDb->cfg.hashPrefix;
pInfo->dbInfo.hashSuffix = pDb->cfg.hashSuffix;

@@ -4380,9 +4394,8 @@ static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWith
if (pInfos) {
SMDropTbTsmaInfo info = {0};
int32_t len = sprintf(buf, "%s", pSma->name);
len = taosCreateMD5Hash(buf, len);
sprintf(info.tsmaResTbDbFName, "%s", pSma->db);
snprintf(info.tsmaResTbNamePrefix, TSDB_TABLE_NAME_LEN, "%s", buf);
snprintf(info.tsmaResTbNamePrefix, TSDB_TABLE_FNAME_LEN, "%s", buf);
SMDropTbDbInfo *pDbInfo = taosHashGet(pCtx->pDbMap, pSma->db, TSDB_DB_FNAME_LEN);
info.suid = pSma->dstTbUid;
if (!pDbInfo) {

@@ -4417,14 +4430,17 @@ static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWith

SMDropTbTsmaInfos *pInfos = taosHashGet(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid));
SArray *pVgInfos = NULL;
char buf[TSDB_TABLE_FNAME_LEN];
char buf[TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN + 1];
char resTbFullName[TSDB_TABLE_FNAME_LEN + 1] = {0};
for (int32_t j = 0; j < pInfos->pTsmaInfos->size; ++j) {
SMDropTbTsmaInfo *pInfo = taosArrayGet(pInfos->pTsmaInfos, j);
int32_t len = sprintf(buf, "%s.%s_%s", pInfo->tsmaResTbDbFName, pInfo->tsmaResTbNamePrefix, pTb->name);
uint32_t hashVal =
taosGetTbHashVal(buf, len, pInfo->dbInfo.hashMethod, pInfo->dbInfo.hashPrefix, pInfo->dbInfo.hashSuffix);
int32_t len = sprintf(buf, "%s_%s", pInfo->tsmaResTbNamePrefix, pTb->name);
len = taosCreateMD5Hash(buf, len);
len = snprintf(resTbFullName, TSDB_TABLE_FNAME_LEN + 1, "%s.%s", pInfo->tsmaResTbDbFName, buf);
uint32_t hashVal = taosGetTbHashVal(resTbFullName, len, pInfo->dbInfo.hashMethod, pInfo->dbInfo.hashPrefix,
pInfo->dbInfo.hashSuffix);
const SVgroupInfo *pVgInfo = taosArraySearch(pInfo->dbInfo.dbVgInfos, &hashVal, vgHashValCmp, TD_EQ);
void *p = taosStrdup(buf + strlen(pInfo->tsmaResTbDbFName) + TSDB_NAME_DELIMITER_LEN);
void *p = taosStrdup(resTbFullName + strlen(pInfo->tsmaResTbDbFName) + TSDB_NAME_DELIMITER_LEN);
if (taosArrayPush(pCtx->pResTbNames, &p) == NULL) {
code = terrno;
goto _end;
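vgInfoCmp orders vgroups by hashBegin so that, once taosArraySort has run, taosArraySearch can binary-search for the vgroup whose hash range covers a table's hash value. A self-contained sketch of the same idea, using plain C++ with invented types rather than TDengine's SArray APIs:

#include <cstdint>
#include <cstdlib>

// Illustrative stand-in for SVgroupInfo: a vgroup owns [hashBegin, hashEnd].
struct VgRange { uint32_t hashBegin, hashEnd, vgId; };

// Same ordering as vgInfoCmp above; usable with qsort().
static int cmpByBegin(const void *l, const void *r) {
  const VgRange *a = (const VgRange *)l, *b = (const VgRange *)r;
  return (a->hashBegin < b->hashBegin) ? -1 : (a->hashBegin > b->hashBegin);
}

// After qsort(vgs, n, sizeof(VgRange), cmpByBegin), the owner of hashVal is
// the last range whose hashBegin <= hashVal; verify hashEnd before trusting it.
static const VgRange *findOwner(const VgRange *vgs, size_t n, uint32_t hashVal) {
  const VgRange *hit = nullptr;
  for (size_t lo = 0, hi = n; lo < hi;) {  // classic binary search
    size_t mid = lo + (hi - lo) / 2;
    if (vgs[mid].hashBegin <= hashVal) { hit = &vgs[mid]; lo = mid + 1; }
    else { hi = mid; }
  }
  return (hit && hashVal <= hit->hashEnd) ? hit : nullptr;
}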
@@ -4,7 +4,7 @@ aux_source_directory(. MNODE_ARBGROUP_TEST_SRC)
add_executable(arbgroupTest ${MNODE_ARBGROUP_TEST_SRC})
target_link_libraries(
arbgroupTest
PRIVATE dnode nodes planner gtest qcom
PRIVATE dnode nodes planner gtest qcom tcs
)

add_test(
@@ -1,24 +1,26 @@
# vnode
add_subdirectory(src/tqCommon)
add_library(vnode STATIC "")

if(${TD_DARWIN})
target_compile_options(vnode PRIVATE -Wno-error=single-bit-bitfield-constant-conversion)
endif(${TD_DARWIN})

set(
VNODE_SOURCE_FILES
"src/vnd/vnodeOpen.c"
"src/vnd/vnodeBufPool.c"
"src/vnd/vnodeCfg.c"
"src/vnd/vnodeCommit.c"
"src/vnd/vnodeQuery.c"
"src/vnd/vnodeModule.c"
"src/vnd/vnodeSvr.c"
"src/vnd/vnodeSync.c"
"src/vnd/vnodeSnapshot.c"
"src/vnd/vnodeRetention.c"
"src/vnd/vnodeInitApi.c"
"src/vnd/vnodeAsync.c"
"src/vnd/vnodeHash.c"
VNODE_SOURCE_FILES
"src/vnd/vnodeOpen.c"
"src/vnd/vnodeBufPool.c"
"src/vnd/vnodeCfg.c"
"src/vnd/vnodeCommit.c"
"src/vnd/vnodeQuery.c"
"src/vnd/vnodeModule.c"
"src/vnd/vnodeSvr.c"
"src/vnd/vnodeSync.c"
"src/vnd/vnodeSnapshot.c"
"src/vnd/vnodeRetention.c"
"src/vnd/vnodeInitApi.c"
"src/vnd/vnodeAsync.c"
"src/vnd/vnodeHash.c"

# meta
"src/meta/metaOpen.c"

@@ -40,23 +42,23 @@ set(
"src/sma/smaSnapshot.c"
"src/sma/smaTimeRange.c"

# # tsdb
# "src/tsdb/tsdbCommit.c"
# "src/tsdb/tsdbFile.c"
# "src/tsdb/tsdbFS.c"
# "src/tsdb/tsdbOpen.c"
# "src/tsdb/tsdbMemTable.c"
# "src/tsdb/tsdbRead.c"
# "src/tsdb/tsdbCache.c"
# "src/tsdb/tsdbWrite.c"
# "src/tsdb/tsdbReaderWriter.c"
# "src/tsdb/tsdbUtil.c"
# "src/tsdb/tsdbSnapshot.c"
# "src/tsdb/tsdbCacheRead.c"
# "src/tsdb/tsdbRetention.c"
# "src/tsdb/tsdbDiskData.c"
# "src/tsdb/tsdbMergeTree.c"
# "src/tsdb/tsdbDataIter.c"
# # tsdb
# "src/tsdb/tsdbCommit.c"
# "src/tsdb/tsdbFile.c"
# "src/tsdb/tsdbFS.c"
# "src/tsdb/tsdbOpen.c"
# "src/tsdb/tsdbMemTable.c"
# "src/tsdb/tsdbRead.c"
# "src/tsdb/tsdbCache.c"
# "src/tsdb/tsdbWrite.c"
# "src/tsdb/tsdbReaderWriter.c"
# "src/tsdb/tsdbUtil.c"
# "src/tsdb/tsdbSnapshot.c"
# "src/tsdb/tsdbCacheRead.c"
# "src/tsdb/tsdbRetention.c"
# "src/tsdb/tsdbDiskData.c"
# "src/tsdb/tsdbMergeTree.c"
# "src/tsdb/tsdbDataIter.c"

# tq
"src/tq/tq.c"

@@ -71,14 +73,13 @@ set(
"src/tq/tqSnapshot.c"
"src/tq/tqStreamStateSnap.c"
"src/tq/tqStreamTaskSnap.c"

)

aux_source_directory("src/tsdb/" TSDB_SOURCE_FILES)
list(
APPEND
VNODE_SOURCE_FILES
${TSDB_SOURCE_FILES}
APPEND
VNODE_SOURCE_FILES
${TSDB_SOURCE_FILES}
)

target_sources(

@@ -87,38 +88,38 @@ target_sources(
${VNODE_SOURCE_FILES}
)

IF (TD_VNODE_PLUGINS)
target_sources(
vnode
PRIVATE
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompact.c
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompactMonitor.c
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/vnodeCompact.c
)
ENDIF ()
if(TD_VNODE_PLUGINS)
target_sources(
vnode
PRIVATE
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompact.c
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompactMonitor.c
${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/vnodeCompact.c
)
endif()

# IF (NOT ${TD_LINUX})
# if (NOT ${TD_LINUX})
# target_include_directories(
# vnode
# PUBLIC "inc"
# PUBLIC "src/inc"
# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
# PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
# vnode
# PUBLIC "inc"
# PUBLIC "src/inc"
# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
# PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
# )
# ELSE()
# target_include_directories(
# vnode
# PUBLIC "inc"
# PUBLIC "src/inc"
# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
# vnode
# PUBLIC "inc"
# PUBLIC "src/inc"
# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
# )
#ENDIF(NOT ${TD_LINUX})

if (${BUILD_CONTRIB})
# endif(NOT ${TD_LINUX})
if(${BUILD_CONTRIB})
target_include_directories(
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/include/libs/crypt"
PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode"

@@ -129,24 +130,26 @@ else()
vnode
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/include/libs/crypt"
PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode"
)
if (${TD_LINUX})
target_include_directories(
vnode

if(${TD_LINUX})
target_include_directories(
vnode
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
)
target_link_directories(
vnode
vnode
PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
)
endif()
endif()

target_link_directories(
vnode
vnode
PUBLIC "${CMAKE_BINARY_DIR}/build/lib"
)

@@ -164,10 +167,11 @@ target_link_libraries(
PUBLIC tdb
PUBLIC audit
PUBLIC crypt
PUBLIC tcs

# PUBLIC bdb
# PUBLIC scalar
#PUBLIC zstd
# PUBLIC zstd
PUBLIC rocksdb
PUBLIC transport
PUBLIC stream

@@ -175,9 +179,9 @@ target_link_libraries(
PUBLIC tqCommon
)

IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
ENDIF ()
if(TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
endif()

target_compile_definitions(vnode PUBLIC -DMETA_REFACT)
@@ -496,6 +496,7 @@ void metaULock(SMeta *pMeta) {
static void metaCleanup(SMeta **ppMeta) {
SMeta *pMeta = *ppMeta;
if (pMeta) {
metaInfo("vgId:%d meta clean up, path:%s", TD_VID(pMeta->pVnode), pMeta->path);
if (pMeta->pEnv) metaAbort(pMeta);
if (pMeta->pCache) metaCacheClose(pMeta);
#ifdef BUILD_NO_CALL
@@ -12,8 +12,8 @@
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "cos.h"
#include "functionMgt.h"
#include "tcs.h"
#include "tsdb.h"
#include "tsdbDataFileRW.h"
#include "tsdbIter.h"

@@ -1258,7 +1258,8 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray
}

if (NULL == pLastCol || cmp_res < 0 || (cmp_res == 0 && !COL_VAL_IS_NONE(pColVal))) {
SLastCol lastColTmp = {.rowKey = *pRowKey, .colVal = *pColVal, .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_VALID};
SLastCol lastColTmp = {
    .rowKey = *pRowKey, .colVal = *pColVal, .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_VALID};
if ((code = tsdbCachePutToRocksdb(pTsdb, &idxKey->key, &lastColTmp)) != TSDB_CODE_SUCCESS) {
tsdbError("tsdb/cache: vgId:%d, put rocks failed at line %d since %s.", TD_VID(pTsdb->pVnode), lino,
          tstrerror(code));

@@ -1705,8 +1706,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA
if (pLastCol && pLastCol->cacheStatus != TSDB_LAST_CACHE_NO_CACHE) {
code = tsdbCachePutToLRU(pTsdb, &idxKey->key, pLastCol, 0);
if (code) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
          tstrerror(code));
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
taosMemoryFreeClear(pToFree);
TAOS_CHECK_EXIT(code);
}

@@ -3064,9 +3064,8 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow, bool *pI

iMax[nMax] = i;
max[nMax++] = pIter->input[i].pRow;
} else {
pIter->input[i].next = false;
}
pIter->input[i].next = false;
}
}

@@ -3520,7 +3519,7 @@ static int32_t tsdbCacheLoadBlockS3(STsdbFD *pFD, uint8_t **ppBlock) {

int64_t block_offset = (pFD->blkno - 1) * tsS3BlockSize * pFD->szPage;

TAOS_CHECK_RETURN(s3GetObjectBlock(pFD->objName, block_offset, tsS3BlockSize * pFD->szPage, 0, ppBlock));
TAOS_CHECK_RETURN(tcsGetObjectBlock(pFD->objName, block_offset, tsS3BlockSize * pFD->szPage, 0, ppBlock));

tsdbTrace("block:%p load from s3", *ppBlock);
@@ -14,7 +14,7 @@
 */

#include "tsdbFile2.h"
#include "cos.h"
#include "tcs.h"
#include "vnd.h"

// to_json

@@ -318,7 +318,7 @@ static void tsdbTFileObjRemoveLC(STFileObj *fobj, bool remove_all) {
}
*(dot + 1) = 0;

s3DeleteObjectsByPrefix(object_name_prefix);
tcsDeleteObjectsByPrefix(object_name_prefix);

// remove local last chunk file
dot = strrchr(lc_path, '.');
@@ -13,8 +13,8 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cos.h"
#include "crypt.h"
#include "tcs.h"
#include "tsdb.h"
#include "tsdbDef.h"
#include "vnd.h"

@@ -391,7 +391,7 @@ static int32_t tsdbReadFileBlock(STsdbFD *pFD, int64_t offset, int64_t size, boo

snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", chunkno);

code = s3GetObjectBlock(object_name_prefix, cOffset, nRead, check, &pBlock);
code = tcsGetObjectBlock(object_name_prefix, cOffset, nRead, check, &pBlock);
TSDB_CHECK_CODE(code, lino, _exit);

memcpy(buf + n, pBlock, nRead);
@@ -13,7 +13,7 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cos.h"
#include "tcs.h"
#include "tsdb.h"
#include "tsdbFS2.h"
#include "vnd.h"

@@ -426,35 +426,6 @@ static int32_t tsdbS3FidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int32_t s3Kee
}
}

static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) {
int32_t code = 0;
int32_t lino = 0;

char fname[TSDB_FILENAME_LEN];
TdFilePtr fdFrom = NULL;
// TdFilePtr fdTo = NULL;

tsdbTFileName(rtner->tsdb, to, fname);

fdFrom = taosOpenFile(from->fname, TD_FILE_READ);
if (fdFrom == NULL) {
TAOS_CHECK_GOTO(terrno, &lino, _exit);
}

char *object_name = taosDirEntryBaseName(fname);
TAOS_CHECK_GOTO(s3PutObjectFromFile2(from->fname, object_name, 1), &lino, _exit);

_exit:
if (code) {
tsdbError("vgId:%d %s failed at line %s:%d since %s", TD_VID(rtner->tsdb->pVnode), __func__, __FILE__, lino,
          tstrerror(code));
}
if (taosCloseFile(&fdFrom) != 0) {
tsdbTrace("vgId:%d, failed to close file", TD_VID(rtner->tsdb->pVnode));
}
return code;
}

static int32_t tsdbMigrateDataFileLCS3(SRTNer *rtner, const STFileObj *fobj, int64_t size, int64_t chunksize) {
int32_t code = 0;
int32_t lino = 0;

@@ -519,7 +490,7 @@ static int32_t tsdbMigrateDataFileLCS3(SRTNer *rtner, const STFileObj *fobj, int
snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", cn);
int64_t c_offset = chunksize * (cn - fobj->f->lcn);

TAOS_CHECK_GOTO(s3PutObjectFromFileOffset(fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
TAOS_CHECK_GOTO(tcsPutObjectFromFileOffset(fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
}

// copy last chunk

@@ -618,7 +589,7 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, int64
snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", cn);
int64_t c_offset = chunksize * (cn - 1);

TAOS_CHECK_GOTO(s3PutObjectFromFileOffset(fobj->fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
TAOS_CHECK_GOTO(tcsPutObjectFromFileOffset(fobj->fname, object_name_prefix, c_offset, chunksize), &lino, _exit);
}

// copy last chunk

@@ -741,8 +712,6 @@ _exit:
int32_t tsdbAsyncS3Migrate(STsdb *tsdb, int64_t now) {
int32_t code = 0;

extern int8_t tsS3EnabledCfg;

int32_t expired = grantCheck(TSDB_GRANT_OBJECT_STORAGE);
if (expired && tsS3Enabled) {
tsdbWarn("s3 grant expired: %d", expired);
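Both migrate paths above upload one chunk per object and derive each object's byte range from its chunk number. Isolated into a hypothetical helper (not part of tsdbRetention.c), the arithmetic at the two call sites is:

#include <cstdint>

// For chunk number `cn` and a fixed `chunksize`, bytes
// [offset, offset + chunksize) of the data file land in object "<prefix><cn>.data".
// tsdbMigrateDataFileS3 uses baseChunk == 1; tsdbMigrateDataFileLCS3 uses
// baseChunk == fobj->f->lcn (the first locally kept chunk).
static int64_t chunkOffset(int32_t cn, int32_t baseChunk, int64_t chunksize) {
  return chunksize * (int64_t)(cn - baseChunk);
}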
@@ -13,8 +13,8 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cos.h"
#include "sync.h"
#include "tcs.h"
#include "tsdb.h"
#include "vnd.h"

@@ -327,7 +327,7 @@ void vnodeDestroy(int32_t vgId, const char *path, STfs *pTfs, int32_t nodeId) {
if (nodeId > 0 && vgId > 0 /*&& nlevel > 1*/ && tsS3Enabled) {
char vnode_prefix[TSDB_FILENAME_LEN];
snprintf(vnode_prefix, TSDB_FILENAME_LEN, "%d/v%df", nodeId, vgId);
s3DeleteObjectsByPrefix(vnode_prefix);
tcsDeleteObjectsByPrefix(vnode_prefix);
}
}
@@ -22,4 +22,6 @@ add_subdirectory(stream)
add_subdirectory(planner)
add_subdirectory(qworker)
add_subdirectory(geometry)
add_subdirectory(command)
add_subdirectory(command)
add_subdirectory(azure)
add_subdirectory(tcs)
@@ -1,7 +1,8 @@
aux_source_directory(src AUDIT_SRC)
IF (TD_ENTERPRISE)

IF(TD_ENTERPRISE)
LIST(APPEND AUDIT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/audit/src/audit.c)
ENDIF ()
ENDIF()

add_library(audit STATIC ${AUDIT_SRC})
target_include_directories(
@@ -0,0 +1,33 @@
# if(${TD_LINUX})
aux_source_directory(src AZ_SRC)

add_library(az STATIC ${AZ_SRC})

if(${BUILD_S3})
add_definitions(-DUSE_S3)
target_link_libraries(
az
PUBLIC _azure_sdk
PUBLIC crypt
)
endif()

target_include_directories(
az
PUBLIC "${TD_SOURCE_DIR}/include/libs/azure"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

target_link_libraries(
az
PUBLIC cjson
PUBLIC os
PUBLIC util
PUBLIC common
)

if(${BUILD_TEST})
add_subdirectory(test)
endif(${BUILD_TEST})

# endif(${TD_LINUX})
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_AZ_INT_H_
#define _TD_AZ_INT_H_

#include "os.h"
#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"

#ifdef __cplusplus
extern "C" {
#endif

// clang-format off
#define azFatal(...) { if (azDebugFlag & DEBUG_FATAL) { taosPrintLog("AZR FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
#define azError(...) { if (azDebugFlag & DEBUG_ERROR) { taosPrintLog("AZR ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
#define azWarn(...)  { if (azDebugFlag & DEBUG_WARN)  { taosPrintLog("AZR WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
#define azInfo(...)  { if (azDebugFlag & DEBUG_INFO)  { taosPrintLog("AZR ", DEBUG_INFO, 255, __VA_ARGS__); }}
#define azDebug(...) { if (azDebugFlag & DEBUG_DEBUG) { taosPrintLog("AZR ", DEBUG_DEBUG, azDebugFlag, __VA_ARGS__); }}
#define azTrace(...) { if (azDebugFlag & DEBUG_TRACE) { taosPrintLog("AZR ", DEBUG_TRACE, azDebugFlag, __VA_ARGS__); }}
// clang-format on

#ifdef __cplusplus
}
#endif

#endif  // _TD_AZ_INT_H_
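Each macro tests the matching bit of azDebugFlag (the "azDebugFlag" option added to tglobal.c earlier in this commit) before formatting, so disabled levels cost only a branch. A representative call site might look like the following; the helper and its parameters are invented for illustration:

#include <inttypes.h>
#include "azInt.h"

extern char tsS3BucketName[];

// Illustrative only: a hypothetical helper logging through the macros above.
static void azLogGet(const char *objectName, int64_t offset, int64_t size) {
  azDebug("GET %s from bucket:%s offset:%" PRId64 " size:%" PRId64, objectName, tsS3BucketName, offset, size);
}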
@@ -0,0 +1,191 @@
#pragma once

#include "azure/storage/blobs/blob_options.hpp"

#include <azure/core/io/body_stream.hpp>

#include <map>
#include <memory>
#include <type_traits>

namespace Azure {
namespace Storage {
namespace Blobs {
namespace _detail {

enum class AvroDatumType {
  String,
  Bytes,
  Int,
  Long,
  Float,
  Double,
  Bool,
  Null,
  Record,
  Enum,
  Array,
  Map,
  Union,
  Fixed,
};

class AvroStreamReader final {
 public:
  // position of a vector that lives through vector resizing
  struct ReaderPos final {
    const std::vector<uint8_t>* BufferPtr = nullptr;
    size_t Offset = 0;
  };
  explicit AvroStreamReader(Core::IO::BodyStream& stream) : m_stream(&stream), m_pos{&m_streambuffer, 0} {}
  AvroStreamReader(const AvroStreamReader&) = delete;
  AvroStreamReader& operator=(const AvroStreamReader&) = delete;

  int64_t ParseInt(const Core::Context& context);
  void Advance(size_t n, const Core::Context& context);
  // Read at least n bytes from m_stream and append data to m_streambuffer. Return number of bytes
  // available in m_streambuffer.
  size_t Preload(size_t n, const Core::Context& context);
  size_t TryPreload(size_t n, const Core::Context& context);
  // discards data that's before m_pos
  void Discard();

 private:
  size_t AvailableBytes() const { return m_streambuffer.size() - m_pos.Offset; }

 private:
  Core::IO::BodyStream* m_stream;
  std::vector<uint8_t> m_streambuffer;
  ReaderPos m_pos;

  friend class AvroDatum;
};

class AvroSchema final {
 public:
  static const AvroSchema StringSchema;
  static const AvroSchema BytesSchema;
  static const AvroSchema IntSchema;
  static const AvroSchema LongSchema;
  static const AvroSchema FloatSchema;
  static const AvroSchema DoubleSchema;
  static const AvroSchema BoolSchema;
  static const AvroSchema NullSchema;
  static AvroSchema RecordSchema(std::string name, const std::vector<std::pair<std::string, AvroSchema>>& fieldsSchema);
  static AvroSchema ArraySchema(AvroSchema elementSchema);
  static AvroSchema MapSchema(AvroSchema elementSchema);
  static AvroSchema UnionSchema(std::vector<AvroSchema> schemas);
  static AvroSchema FixedSchema(std::string name, int64_t size);

  const std::string& Name() const { return m_name; }
  AvroDatumType Type() const { return m_type; }
  const std::vector<std::string>& FieldNames() const { return m_status->m_keys; }
  AvroSchema ItemSchema() const { return m_status->m_schemas[0]; }
  const std::vector<AvroSchema>& FieldSchemas() const { return m_status->m_schemas; }
  size_t Size() const { return static_cast<size_t>(m_status->m_size); }

 private:
  explicit AvroSchema(AvroDatumType type) : m_type(type) {}

 private:
  AvroDatumType m_type;
  std::string m_name;

  struct SharedStatus {
    std::vector<std::string> m_keys;
    std::vector<AvroSchema> m_schemas;
    int64_t m_size = 0;
  };
  std::shared_ptr<SharedStatus> m_status;
};

class AvroDatum final {
 public:
  AvroDatum() : m_schema(AvroSchema::NullSchema) {}
  explicit AvroDatum(AvroSchema schema) : m_schema(std::move(schema)) {}

  void Fill(AvroStreamReader& reader, const Core::Context& context);
  void Fill(AvroStreamReader::ReaderPos& data);

  const AvroSchema& Schema() const { return m_schema; }

  template <class T>
  T Value() const;
  struct StringView {
    const uint8_t* Data = nullptr;
    size_t Length = 0;
  };

 private:
  AvroSchema m_schema;
  AvroStreamReader::ReaderPos m_data;
};

using AvroMap = std::map<std::string, AvroDatum>;

class AvroRecord final {
 public:
  bool HasField(const std::string& key) const { return FindField(key) != m_keys->size(); }
  const AvroDatum& Field(const std::string& key) const { return m_values.at(FindField(key)); }
  AvroDatum& Field(const std::string& key) { return m_values.at(FindField(key)); }
  const AvroDatum& FieldAt(size_t i) const { return m_values.at(i); }
  AvroDatum& FieldAt(size_t i) { return m_values.at(i); }

 private:
  size_t FindField(const std::string& key) const {
    auto i = find(m_keys->begin(), m_keys->end(), key);
    return i - m_keys->begin();
  }
  const std::vector<std::string>* m_keys = nullptr;
  std::vector<AvroDatum> m_values;

  friend class AvroDatum;
};

class AvroObjectContainerReader final {
 public:
  explicit AvroObjectContainerReader(Core::IO::BodyStream& stream);

  bool End() const { return m_eof; }
  // Calling Next() will invalidate the previous AvroDatum returned by this function and all
  // AvroDatums propagated from there.
  AvroDatum Next(const Core::Context& context) { return NextImpl(m_objectSchema.get(), context); }

 private:
  AvroDatum NextImpl(const AvroSchema* schema, const Core::Context& context);

 private:
  std::unique_ptr<AvroStreamReader> m_reader;
  std::unique_ptr<AvroSchema> m_objectSchema;
  std::string m_syncMarker;
  int64_t m_remainingObjectInCurrentBlock = 0;
  bool m_eof = false;
};

class AvroStreamParser final : public Core::IO::BodyStream {
 public:
  explicit AvroStreamParser(std::unique_ptr<Azure::Core::IO::BodyStream> inner,
                            std::function<void(int64_t, int64_t)> progressCallback,
                            std::function<void(BlobQueryError)> errorCallback)
      : m_inner(std::move(inner)),
        m_parser(*m_inner),
        m_progressCallback(std::move(progressCallback)),
        m_errorCallback(std::move(errorCallback)) {}

  int64_t Length() const override { return -1; }
  void Rewind() override { this->m_inner->Rewind(); }

 private:
  size_t OnRead(uint8_t* buffer, size_t count, const Azure::Core::Context& context) override;

 private:
  std::unique_ptr<Azure::Core::IO::BodyStream> m_inner;
  AvroObjectContainerReader m_parser;
  std::function<void(int64_t, int64_t)> m_progressCallback;
  std::function<void(BlobQueryError)> m_errorCallback;
  AvroDatum::StringView m_parserBuffer;
};

}  // namespace _detail
}  // namespace Blobs
}  // namespace Storage
}  // namespace Azure
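Per the comment on Next(), each returned AvroDatum is only valid until the next call, so a consumer must finish with one datum before advancing. A hedged usage sketch against the header above (stream construction and error handling elided; only the declared API is used):

#include <azure/core/io/body_stream.hpp>

// Iterate an Avro object container, consuming each datum before advancing.
static void drain(Azure::Core::IO::BodyStream& stream, const Azure::Core::Context& context) {
  using namespace Azure::Storage::Blobs::_detail;
  AvroObjectContainerReader reader(stream);
  while (!reader.End()) {
    AvroDatum datum = reader.Next(context);  // invalidates the previous datum
    if (datum.Schema().Type() == AvroDatumType::Record) {
      // inspect datum.Value<...>() here, before calling Next() again
    }
  }
}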
@@ -0,0 +1,260 @@
#pragma once

#include "azure/storage/blobs/blob_client.hpp"

#include <cstdint>
#include <string>
#include <vector>

namespace Azure {
namespace Storage {
namespace Files {
namespace DataLake {
class FileClient;
}
}  // namespace Files
}  // namespace Storage
}  // namespace Azure

namespace Azure {
namespace Storage {
namespace Blobs {

/**
 * @brief The TDBlockBlobClient allows you to manipulate Azure Storage block blobs.
 *
 * Block blobs let you upload large blobs efficiently. Block blobs are comprised of blocks, each
 * of which is identified by a block ID. You create or modify a block blob by writing a set of
 * blocks and committing them by their block IDs. Each block can be a different size.
 *
 * When you upload a block to a blob in your storage account, it is associated with the specified
 * block blob, but it does not become part of the blob until you commit a list of blocks that
 * includes the new block's ID. New blocks remain in an uncommitted state until they are
 * specifically committed or discarded. Writing a block does not update the last modified time of
 * an existing blob.
 */
class TDBlockBlobClient final : public BlobClient {
 public:
  /**
   * @brief Initialize a new instance of TDBlockBlobClient.
   *
   * @param connectionString A connection string includes the authentication information required
   * for your application to access data in an Azure Storage account at runtime.
   * @param blobContainerName The name of the container containing this blob.
   * @param blobName The name of this blob.
   * @param options Optional client options that define the transport pipeline policies for
   * authentication, retries, etc., that are applied to every request.
   * @return A new TDBlockBlobClient instance.
   */
  static TDBlockBlobClient CreateFromConnectionString(const std::string& connectionString,
                                                      const std::string& blobContainerName, const std::string& blobName,
                                                      const BlobClientOptions& options = BlobClientOptions());

  /**
   * @brief Initialize a new instance of TDBlockBlobClient.
   *
   * @param blobUrl A URL
   * referencing the blob that includes the name of the account, the name of the container, and
   * the name of the blob.
   * @param credential The shared key credential used to sign
   * requests.
   * @param options Optional client options that define the transport pipeline
   * policies for authentication, retries, etc., that are applied to every request.
   */
  explicit TDBlockBlobClient(const std::string& blobUrl, std::shared_ptr<StorageSharedKeyCredential> credential,
                             const BlobClientOptions& options = BlobClientOptions());

  /**
   * @brief Initialize a new instance of TDBlockBlobClient.
   *
   * @param blobUrl A URL
   * referencing the blob that includes the name of the account, the name of the container, and
   * the name of the blob.
   * @param credential The token credential used to sign requests.
   * @param options Optional client options that define the transport pipeline policies for
   * authentication, retries, etc., that are applied to every request.
   */
  explicit TDBlockBlobClient(const std::string& blobUrl, std::shared_ptr<Core::Credentials::TokenCredential> credential,
                             const BlobClientOptions& options = BlobClientOptions());

  /**
   * @brief Initialize a new instance of TDBlockBlobClient.
   *
   * @param blobUrl A URL
   * referencing the blob that includes the name of the account, the name of the container, and
   * the name of the blob, and possibly also a SAS token.
   * @param options Optional client
   * options that define the transport pipeline policies for authentication, retries, etc., that
   * are applied to every request.
   */
  explicit TDBlockBlobClient(const std::string& blobUrl, const BlobClientOptions& options = BlobClientOptions());

  /**
   * @brief Initializes a new instance of the TDBlockBlobClient class with an identical URL
   * source but the specified snapshot timestamp.
   *
   * @param snapshot The snapshot
   * identifier.
   * @return A new TDBlockBlobClient instance.
   * @remarks Pass empty string to remove the snapshot returning the base blob.
   */
  TDBlockBlobClient WithSnapshot(const std::string& snapshot) const;

  /**
   * @brief Creates a clone of this instance that references a version ID rather than the base
   * blob.
   *
   * @param versionId The version ID returning a URL to the base blob.
   * @return A new TDBlockBlobClient instance.
   * @remarks Pass empty string to remove the version ID returning the base blob.
   */
  TDBlockBlobClient WithVersionId(const std::string& versionId) const;

  /**
   * @brief Creates a new block blob, or updates the content of an existing block blob. Updating
   * an existing block blob overwrites any existing metadata on the blob.
   *
   * @param content A BodyStream containing the content to upload.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A UploadBlockBlobResult describing the state of the updated block blob.
   */
  Azure::Response<Models::UploadBlockBlobResult> Upload(
      Azure::Core::IO::BodyStream& content, const UploadBlockBlobOptions& options = UploadBlockBlobOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Creates a new block blob, or updates the content of an existing block blob. Updating
   * an existing block blob overwrites any existing metadata on the blob.
   *
   * @param buffer A memory buffer containing the content to upload.
   * @param bufferSize Size of the memory buffer.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A UploadBlockBlobFromResult describing the state of the updated block blob.
   */
  Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
      const uint8_t* buffer, size_t bufferSize,
      const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Creates a new block blob, or updates the content of an existing block blob. Updating
   * an existing block blob overwrites any existing metadata on the blob.
   *
   * @param fileName A file containing the content to upload.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A UploadBlockBlobFromResult describing the state of the updated block blob.
   */
  Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
      const std::string& fileName, const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
      const std::string& fileName, int64_t offset, int64_t size,
      const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Creates a new Block Blob where the contents of the blob are read from a given URL.
   *
   * @param sourceUri Specifies the URL of the source blob.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A UploadBlockBlobFromUriResult describing the state of the updated block blob.
   */
  Azure::Response<Models::UploadBlockBlobFromUriResult> UploadFromUri(
      const std::string& sourceUri, const UploadBlockBlobFromUriOptions& options = UploadBlockBlobFromUriOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Creates a new block as part of a block blob's staging area to be eventually
   * committed via the CommitBlockList operation.
   *
   * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the
   * string must be less than or equal to 64 bytes in size.
   * @param content A BodyStream containing the content to upload.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A StageBlockResult describing the state of the updated block.
   */
  Azure::Response<Models::StageBlockResult> StageBlock(
      const std::string& blockId, Azure::Core::IO::BodyStream& content,
      const StageBlockOptions& options = StageBlockOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Creates a new block to be committed as part of a blob where the contents are read from
   * the sourceUri.
   *
   * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the
   * string must be less than or equal to 64 bytes in size.
   * @param sourceUri Specifies the uri of the source
   * blob. The value may be a uri of up to 2 KB in length that specifies a blob. The source blob
   * must either be public or must be authenticated via a shared access signature. If the source
   * blob is public, no authentication is required to perform the operation.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A StageBlockFromUriResult describing the state of the updated block blob.
   */
  Azure::Response<Models::StageBlockFromUriResult> StageBlockFromUri(
      const std::string& blockId, const std::string& sourceUri,
      const StageBlockFromUriOptions& options = StageBlockFromUriOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Writes a blob by specifying the list of block IDs that make up the blob. In order to
   * be written as part of a blob, a block must have been successfully written to the server in a
   * prior StageBlock operation. You can call CommitBlockList to update a blob by uploading only
   * those blocks that have changed, then committing the new and existing blocks together. You can
   * do this by specifying whether to commit a block from the committed block list or from the
   * uncommitted block list, or to commit the most recently uploaded version of the block,
   * whichever list it may belong to.
   *
   * @param blockIds Base64 encoded block IDs to indicate that make up the blob.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A CommitBlobBlockListResult describing the state of the updated block blob.
   */
  Azure::Response<Models::CommitBlockListResult> CommitBlockList(
      const std::vector<std::string>& blockIds, const CommitBlockListOptions& options = CommitBlockListOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Retrieves the list of blocks that have been uploaded as part of a block blob. There
   * are two block lists maintained for a blob. The Committed Block list has blocks that have been
   * successfully committed to a given blob with CommitBlockList. The Uncommitted Block list has
   * blocks that have been uploaded for a blob using StageBlock, but that have not yet been
   * committed.
   *
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A GetBlobBlockListResult describing requested block list.
   */
  Azure::Response<Models::GetBlockListResult> GetBlockList(
      const GetBlockListOptions& options = GetBlockListOptions(),
      const Azure::Core::Context& context = Azure::Core::Context()) const;

  /**
   * @brief Returns the result of a query against the blob.
   *
   * @param querySqlExpression The query expression in SQL.
   * @param options Optional parameters to execute this function.
   * @param context Context for cancelling long running operations.
   * @return A QueryBlobResult describing the query result.
   */
  Azure::Response<Models::QueryBlobResult> Query(const std::string& querySqlExpression,
                                                 const QueryBlobOptions& options = QueryBlobOptions(),
                                                 const Azure::Core::Context& context = Azure::Core::Context()) const;

  explicit TDBlockBlobClient(BlobClient blobClient);

 private:
  friend class BlobClient;
  friend class Files::DataLake::DataLakeFileClient;
};

}  // namespace Blobs
}  // namespace Storage
}  // namespace Azure
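For orientation, here is a minimal stage-then-commit sketch against the class above. The header path, container name, and blob name are placeholders, and error handling is omitted; only methods declared above and standard azure-sdk-for-cpp types are used.

#include <azure/core/base64.hpp>
#include <azure/core/io/body_stream.hpp>
#include <azure/storage/blobs.hpp>
#include "td_block_blob_client.hpp"  // assumed header name for the class above

using namespace Azure::Storage::Blobs;

static void putTwoBlocks(const std::string& conn) {
  auto blob = TDBlockBlobClient::CreateFromConnectionString(conn, "my-container", "my-blob");

  std::vector<uint8_t> part1 = {'a', 'b'}, part2 = {'c', 'd'};
  // Block IDs must be Base64; all IDs of one blob should have equal pre-encoding length.
  std::string id1 = Azure::Core::Convert::Base64Encode(std::vector<uint8_t>{'0', '1'});
  std::string id2 = Azure::Core::Convert::Base64Encode(std::vector<uint8_t>{'0', '2'});

  Azure::Core::IO::MemoryBodyStream s1(part1.data(), part1.size());
  blob.StageBlock(id1, s1);  // upload the block into the staging area
  Azure::Core::IO::MemoryBodyStream s2(part2.data(), part2.size());
  blob.StageBlock(id2, s2);

  blob.CommitBlockList({id1, id2});  // the blob now consists of both blocks, in order
}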
@ -0,0 +1,551 @@
|
|||
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define ALLOW_FORBID_FUNC

#include "az.h"
#include "azInt.h"

#include "os.h"
#include "taoserror.h"
#include "tglobal.h"

#if defined(USE_S3)

#include <azure/core.hpp>
#include <azure/storage/blobs.hpp>
#include "td_block_blob_client.hpp"

// Add appropriate using namespace directives
using namespace Azure::Storage;
using namespace Azure::Storage::Blobs;

extern char tsS3Hostname[][TSDB_FQDN_LEN];
extern char tsS3AccessKeyId[][TSDB_FQDN_LEN];
extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN];
extern char tsS3BucketName[TSDB_FQDN_LEN];

extern int8_t tsS3Enabled;
extern int8_t tsS3EpNum;

int32_t azBegin() { return TSDB_CODE_SUCCESS; }

void azEnd() {}

static void checkPrint(const char *fmt, ...) {
  va_list arg_ptr;
  va_start(arg_ptr, fmt);
  (void)vfprintf(stderr, fmt, arg_ptr);
  va_end(arg_ptr);
}

static void azDumpCfgByEp(int8_t epIndex) {
  // clang-format off
  checkPrint(
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n"
      "%-24s %s\n",
      "hostName", tsS3Hostname[epIndex],
      "bucketName", tsS3BucketName,
      "protocol", "https only",
      "uristyle", "path only",
      "accessKey", tsS3AccessKeyId[epIndex],
      "accessKeySecret", tsS3AccessKeySecret[epIndex]);
  // clang-format on
}

static int32_t azListBucket(char const *bucketname) {
  int32_t code = 0;
  const std::string delimiter = "/";
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = bucketname;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    Azure::Storage::Blobs::ListBlobsOptions options;
    options.Prefix = "s3";

    checkPrint("objects:\n");
    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
      for (const auto &blob : pageResult.Blobs) {
        checkPrint("%s\n", blob.Name.c_str());
      }
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());

    code = TAOS_SYSTEM_ERROR(EIO);
    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

int32_t azCheckCfg() {
  int32_t code = 0, lino = 0;

  azDumpCfgByEp(0);

  // test put
  char testdata[17] = "0123456789abcdef";
  const char *objectname[] = {"s3test.txt"};
  char path[PATH_MAX] = {0};
  int ds_len = strlen(TD_DIRSEP);
  int tmp_len = strlen(tsTempDir);

  (void)snprintf(path, PATH_MAX, "%s", tsTempDir);
  if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP);
    (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", objectname[0]);
  } else {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", objectname[0]);
  }

  uint8_t *pBlock = NULL;
  int c_offset = 10;
  int c_len = 6;
  char buf[7] = {0};

  TdFilePtr fp = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC);
  if (!fp) {
    checkPrint("failed to open test file: %s.\n", path);
    TAOS_CHECK_GOTO(terrno, &lino, _next);
  }
  if (taosWriteFile(fp, testdata, strlen(testdata)) < 0) {
    checkPrint("failed to write test file: %s.\n", path);
    TAOS_CHECK_GOTO(terrno, &lino, _next);
  }
  if (taosFsyncFile(fp) < 0) {
    checkPrint("failed to fsync test file: %s.\n", path);
    TAOS_CHECK_GOTO(terrno, &lino, _next);
  }
  (void)taosCloseFile(&fp);

  checkPrint("\nstart to put object: %s, file: %s content: %s\n", objectname[0], path, testdata);
  code = azPutObjectFromFileOffset(path, objectname[0], 0, 16);
  if (code != 0) {
    checkPrint("put object %s : failed.\n", objectname[0]);
    TAOS_CHECK_GOTO(code, &lino, _next);
  }
  checkPrint("put object %s: success.\n\n", objectname[0]);

  // list buckets
  checkPrint("start to list bucket %s by prefix s3.\n", tsS3BucketName);
  code = azListBucket(tsS3BucketName);
  if (code != 0) {
    checkPrint("listing bucket %s : failed.\n", tsS3BucketName);
    TAOS_CHECK_GOTO(code, &lino, _next);
  }
  checkPrint("listing bucket %s: success.\n\n", tsS3BucketName);

  // test range get
  checkPrint("start to range get object %s offset: %d len: %d.\n", objectname[0], c_offset, c_len);
  code = azGetObjectBlock(objectname[0], c_offset, c_len, true, &pBlock);
  if (code != 0) {
    checkPrint("get object %s : failed.\n", objectname[0]);
    TAOS_CHECK_GOTO(code, &lino, _next);
  }

  (void)memcpy(buf, pBlock, c_len);
  taosMemoryFree(pBlock);
  checkPrint("object content: %s\n", buf);
  checkPrint("get object %s: success.\n\n", objectname[0]);

  // delete test object
  checkPrint("start to delete object: %s.\n", objectname[0]);
  // code = azDeleteObjectsByPrefix(objectname[0]);
  azDeleteObjectsByPrefix(objectname[0]);
  /*
  if (code != 0) {
    (void)fprintf(stderr, "delete object %s : failed.\n", objectname[0]);
    TAOS_CHECK_GOTO(code, &lino, _next);
  }
  */
  checkPrint("delete object %s: success.\n\n", objectname[0]);

_next:
  if (fp) {
    (void)taosCloseFile(&fp);
  }

  if (TSDB_CODE_SUCCESS != code) {
    checkPrint("s3 check failed, code: %d, line: %d.\n", code, lino);
  }

  checkPrint("=================================================================\n");

  TAOS_RETURN(code);
}

static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *object_name, int64_t offset, int64_t size) {
  int32_t code = 0;

  std::string endpointUrl = tsS3Hostname[0];
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    std::string accountURL = tsS3Hostname[0];

    accountURL = "https://" + accountURL;
    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    std::string blobName = "blob.txt";
    uint8_t blobContent[] = "Hello Azure!";
    // Create the block blob client
    // BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);
    // TDBlockBlobClient blobClient(containerClient.GetBlobClient(blobName));
    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));

    blobClient.UploadFrom(file, offset, size);
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s: Status Code: %d, Reason Phrase: %s", __func__, static_cast<int>(e.StatusCode), e.ReasonPhrase.c_str());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {
  int32_t code = 0;

  try {
    code = azPutObjectFromFileOffsetImpl(file, object_name, offset, size);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

static int32_t azGetObjectBlockImpl(const char *object_name, int64_t offset, int64_t size, bool check,
                                    uint8_t **ppBlock) {
  int32_t code = TSDB_CODE_SUCCESS;
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  uint8_t *buf = NULL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    accountURL = "https://" + accountURL;
    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));

    Blobs::DownloadBlobToOptions options;
    options.Range = Azure::Core::Http::HttpRange();
    options.Range.Value().Offset = offset;
    options.Range.Value().Length = size;

    buf = (uint8_t *)taosMemoryCalloc(1, size);
    if (!buf) {
      return terrno;
    }

    auto res = blobClient.DownloadTo(buf, size, options);
    if (check && res.Value.ContentRange.Length.Value() != size) {
      code = TAOS_SYSTEM_ERROR(EIO);
      azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
      // release the buffer on a short read so this error path does not leak
      taosMemoryFree(buf);
      TAOS_RETURN(code);
    }

    *ppBlock = buf;
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    if (buf) {
      taosMemoryFree(buf);
    }
    *ppBlock = NULL;

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

static int32_t azGetObjectBlockRetry(const char *object_name, int64_t offset, int64_t size, bool check,
                                     uint8_t **ppBlock) {
  int32_t code = TSDB_CODE_SUCCESS;

  // May use an exponential backoff policy for retries with 503
  int retryCount = 0;
  static int maxRetryCount = 5;
  static int minRetryInterval = 1000;  // ms
  static int maxRetryInterval = 3000;  // ms

_retry:
  code = azGetObjectBlockImpl(object_name, offset, size, check, ppBlock);
  if (TSDB_CODE_SUCCESS != code && retryCount++ < maxRetryCount) {
    taosMsleep(taosRand() % (maxRetryInterval - minRetryInterval + 1) + minRetryInterval);
    uInfo("%s: 0x%x(%s) and retry get object", __func__, code, tstrerror(code));
    goto _retry;
  }

  TAOS_RETURN(code);
}
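
// A sketch (not the shipped behavior) of the exponential backoff mentioned in
// the comment above, assuming the same taosMsleep/taosRand helpers; the code
// as written instead sleeps a uniform random interval in
// [minRetryInterval, maxRetryInterval] between attempts.
//
//   int delayMs = minRetryInterval;
//   for (int attempt = 0; attempt < maxRetryCount; ++attempt) {
//     code = azGetObjectBlockImpl(object_name, offset, size, check, ppBlock);
//     if (TSDB_CODE_SUCCESS == code) break;
//     taosMsleep(delayMs + taosRand() % minRetryInterval);                      // backoff + jitter
//     delayMs = (delayMs * 2 < maxRetryInterval) ? delayMs * 2 : maxRetryInterval;  // double, capped
//   }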

int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
  int32_t code = TSDB_CODE_SUCCESS;

  try {
    code = azGetObjectBlockRetry(object_name, offset, size, check, ppBlock);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

static void azDeleteObjectsByPrefixImpl(const char *prefix) {
  const std::string delimiter = "/";
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    Azure::Storage::Blobs::ListBlobsOptions options;
    options.Prefix = prefix;

    std::set<std::string> listBlobs;
    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
      for (const auto &blob : pageResult.Blobs) {
        listBlobs.insert(blob.Name);
      }
    }

    for (auto blobName : listBlobs) {
      auto blobClient = containerClient.GetAppendBlobClient(blobName);
      blobClient.Delete();
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
  }
}

void azDeleteObjectsByPrefix(const char *prefix) {
  int32_t code = TSDB_CODE_SUCCESS;

  try {
    azDeleteObjectsByPrefixImpl(prefix);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());
  }
}

int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp) {
  int32_t code = 0, lino = 0;
  uint64_t contentLength = 0;

  if (taosStatFile(file, (int64_t *)&contentLength, NULL, NULL) < 0) {
    azError("ERROR: %s Failed to stat file %s: ", __func__, file);
    TAOS_RETURN(terrno);
  }

  code = azPutObjectFromFileOffset(file, object, 0, contentLength);
  if (code != 0) {
    azError("ERROR: %s Failed to put file %s: ", __func__, file);
    TAOS_CHECK_GOTO(code, &lino, _exit);
  }

_exit:
  if (code) {
    azError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }

  TAOS_RETURN(code);
}

static int32_t azGetObjectToFileImpl(const char *object_name, const char *fileName) {
  int32_t code = TSDB_CODE_SUCCESS;
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));

    auto res = blobClient.DownloadTo(fileName);
    if (res.Value.ContentRange.Length.Value() <= 0) {
      code = TAOS_SYSTEM_ERROR(EIO);
      azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
      TAOS_RETURN(code);
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

int32_t azGetObjectToFile(const char *object_name, const char *fileName) {
  int32_t code = 0;

  try {
    code = azGetObjectToFileImpl(object_name, fileName);
  } catch (const std::exception &e) {
    azError("%s: Reason Phrase: %s", __func__, e.what());

    code = TAOS_SYSTEM_ERROR(EIO);
    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));

    TAOS_RETURN(code);
  }

  TAOS_RETURN(code);
}

int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
  const std::string delimiter = "/";
  std::string accountName = tsS3AccessKeyId[0];
  std::string accountKey = tsS3AccessKeySecret[0];
  std::string accountURL = tsS3Hostname[0];
  accountURL = "https://" + accountURL;

  try {
    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);

    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);

    std::string containerName = tsS3BucketName;
    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);

    Azure::Storage::Blobs::ListBlobsOptions options;
    options.Prefix = prefix;

    std::set<std::string> listBlobs;
    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
      for (const auto &blob : pageResult.Blobs) {
        listBlobs.insert(blob.Name);
      }
    }

    for (auto blobName : listBlobs) {
      const char *tmp = strchr(blobName.c_str(), '/');
      tmp = (tmp == NULL) ? blobName.c_str() : tmp + 1;
      char fileName[PATH_MAX] = {0};
      if (path[strlen(path) - 1] != TD_DIRSEP_CHAR) {
        (void)snprintf(fileName, PATH_MAX, "%s%s%s", path, TD_DIRSEP, tmp);
      } else {
        (void)snprintf(fileName, PATH_MAX, "%s%s", path, tmp);
      }
      if (azGetObjectToFile(blobName.c_str(), fileName)) {
        TAOS_RETURN(TSDB_CODE_FAILED);
      }
    }
  } catch (const Azure::Core::RequestFailedException &e) {
    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
            e.ReasonPhrase.c_str());
    TAOS_RETURN(TSDB_CODE_FAILED);
  }

  return 0;
}

int32_t azDeleteObjects(const char *object_name[], int nobject) {
  for (int i = 0; i < nobject; ++i) {
    azDeleteObjectsByPrefix(object_name[i]);
  }

  return 0;
}

#else

int32_t azBegin() { return TSDB_CODE_SUCCESS; }

void azEnd() {}

int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; }

int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {
  return TSDB_CODE_SUCCESS;
}

int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
  return TSDB_CODE_SUCCESS;
}

void azDeleteObjectsByPrefix(const char *prefix) {}

int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; }

int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { return 0; }

int32_t azGetObjectToFile(const char *object_name, const char *fileName) { return 0; }

int32_t azDeleteObjects(const char *object_name[], int nobject) { return 0; }

#endif
@ -0,0 +1,531 @@
#if defined(USE_S3)
#include <td_avro_parser.h>

#include <azure/core/azure_assert.hpp>
#include <azure/core/internal/json/json.hpp>

#include <algorithm>
#include <cstring>

namespace Azure {
namespace Storage {
namespace Blobs {
namespace _detail {

namespace {
int64_t parseInt(AvroStreamReader::ReaderPos& data) {
  uint64_t r = 0;
  int nb = 0;
  while (true) {
    uint8_t c = (*data.BufferPtr)[data.Offset++];
    r = r | ((static_cast<uint64_t>(c) & 0x7f) << (nb * 7));
    if (c & 0x80) {
      ++nb;
      continue;
    }
    break;
  }
  return static_cast<int64_t>(r >> 1) ^ -static_cast<int64_t>(r & 0x01);
}
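
// For reference: parseInt() above decodes Avro's variable-length zig-zag
// integers. Each byte contributes 7 payload bits, low bits first, with the
// high bit as a continuation flag; the final (r >> 1) ^ -(r & 1) undoes the
// zig-zag mapping that encodes 0, -1, 1, -2, ... as 0, 1, 2, 3, ... For
// example, the single byte 0x03 gives r = 3, and (3 >> 1) ^ -(3 & 1)
// = 1 ^ ~0 = -2.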

AvroSchema ParseSchemaFromJsonString(const std::string& jsonSchema) {
  const static std::map<std::string, AvroSchema> BuiltinNameSchemaMap = {
      {"string", AvroSchema::StringSchema}, {"bytes", AvroSchema::BytesSchema}, {"int", AvroSchema::IntSchema},
      {"long", AvroSchema::LongSchema},     {"float", AvroSchema::FloatSchema}, {"double", AvroSchema::DoubleSchema},
      {"boolean", AvroSchema::BoolSchema},  {"null", AvroSchema::NullSchema},   {"string", AvroSchema::StringSchema},
  };
  std::map<std::string, AvroSchema> nameSchemaMap = BuiltinNameSchemaMap;

  std::function<AvroSchema(const Core::Json::_internal::json& obj)> parseSchemaFromJsonObject;
  parseSchemaFromJsonObject = [&](const Core::Json::_internal::json& obj) -> AvroSchema {
    if (obj.is_string()) {
      auto typeName = obj.get<std::string>();
      return nameSchemaMap.find(typeName)->second;
    } else if (obj.is_array()) {
      std::vector<AvroSchema> unionSchemas;
      for (const auto& s : obj) {
        unionSchemas.push_back(parseSchemaFromJsonObject(s));
      }
      return AvroSchema::UnionSchema(std::move(unionSchemas));
    } else if (obj.is_object()) {
      if (obj.count("namespace") != 0) {
        throw std::runtime_error("Namespace isn't supported yet in Avro schema.");
      }
      if (obj.count("aliases") != 0) {
        throw std::runtime_error("Alias isn't supported yet in Avro schema.");
      }
      auto typeName = obj["type"].get<std::string>();
      auto i = nameSchemaMap.find(typeName);
      if (i != nameSchemaMap.end()) {
        return i->second;
      }
      if (typeName == "record") {
        std::vector<std::pair<std::string, AvroSchema>> fieldsSchema;
        for (const auto& field : obj["fields"]) {
          fieldsSchema.push_back(
              std::make_pair(field["name"].get<std::string>(), parseSchemaFromJsonObject(field["type"])));
        }

        const std::string recordName = obj["name"].get<std::string>();
        auto recordSchema = AvroSchema::RecordSchema(recordName, std::move(fieldsSchema));
        nameSchemaMap.insert(std::make_pair(recordName, recordSchema));
        return recordSchema;
      } else if (typeName == "enum") {
        throw std::runtime_error("Enum type isn't supported yet in Avro schema.");
      } else if (typeName == "array") {
        return AvroSchema::ArraySchema(parseSchemaFromJsonObject(obj["items"]));
      } else if (typeName == "map") {
        return AvroSchema::MapSchema(parseSchemaFromJsonObject(obj["items"]));
      } else if (typeName == "fixed") {
        const std::string fixedName = obj["name"].get<std::string>();
        auto fixedSchema = AvroSchema::FixedSchema(fixedName, obj["size"].get<int64_t>());
        nameSchemaMap.insert(std::make_pair(fixedName, fixedSchema));
        return fixedSchema;
      } else {
        throw std::runtime_error("Unrecognized type " + typeName + " in Avro schema.");
      }
    }
    AZURE_UNREACHABLE_CODE();
  };

  auto jsonRoot = Core::Json::_internal::json::parse(jsonSchema.begin(), jsonSchema.end());
  return parseSchemaFromJsonObject(jsonRoot);
}
}  // namespace

int64_t AvroStreamReader::ParseInt(const Core::Context& context) {
  uint64_t r = 0;
  int nb = 0;
  while (true) {
    Preload(1, context);
    uint8_t c = m_streambuffer[m_pos.Offset++];

    r = r | ((static_cast<uint64_t>(c) & 0x7f) << (nb * 7));
    if (c & 0x80) {
      ++nb;
      continue;
    }
    break;
  }
  return static_cast<int64_t>(r >> 1) ^ -static_cast<int64_t>(r & 0x01);
}

void AvroStreamReader::Advance(size_t n, const Core::Context& context) {
  Preload(n, context);
  m_pos.Offset += n;
}

size_t AvroStreamReader::Preload(size_t n, const Core::Context& context) {
  size_t oldAvailable = AvailableBytes();
  while (true) {
    size_t newAvailable = TryPreload(n, context);
    if (newAvailable >= n) {
      return newAvailable;
    }
    if (oldAvailable == newAvailable) {
      throw std::runtime_error("Unexpected EOF of Avro stream.");
    }
    oldAvailable = newAvailable;
  }
  AZURE_UNREACHABLE_CODE();
}

size_t AvroStreamReader::TryPreload(size_t n, const Core::Context& context) {
  size_t availableBytes = AvailableBytes();
  if (availableBytes >= n) {
    return availableBytes;
  }
  const size_t MinRead = 4096;
  size_t tryReadSize = (std::max)(n, MinRead);
  size_t currSize = m_streambuffer.size();
  m_streambuffer.resize(m_streambuffer.size() + tryReadSize);
  size_t actualReadSize = m_stream->Read(m_streambuffer.data() + currSize, tryReadSize, context);
  m_streambuffer.resize(currSize + actualReadSize);
  return AvailableBytes();
}

void AvroStreamReader::Discard() {
  constexpr size_t MinimumReleaseMemory = 128 * 1024;
  if (m_pos.Offset < MinimumReleaseMemory) {
    return;
  }
  const size_t availableBytes = AvailableBytes();
  std::memmove(&m_streambuffer[0], &m_streambuffer[m_pos.Offset], availableBytes);
  m_streambuffer.resize(availableBytes);
  m_pos.Offset = 0;
}

const AvroSchema AvroSchema::StringSchema(AvroDatumType::String);
const AvroSchema AvroSchema::BytesSchema(AvroDatumType::Bytes);
const AvroSchema AvroSchema::IntSchema(AvroDatumType::Int);
const AvroSchema AvroSchema::LongSchema(AvroDatumType::Long);
const AvroSchema AvroSchema::FloatSchema(AvroDatumType::Float);
const AvroSchema AvroSchema::DoubleSchema(AvroDatumType::Double);
const AvroSchema AvroSchema::BoolSchema(AvroDatumType::Bool);
const AvroSchema AvroSchema::NullSchema(AvroDatumType::Null);

AvroSchema AvroSchema::RecordSchema(std::string name,
                                    const std::vector<std::pair<std::string, AvroSchema>>& fieldsSchema) {
  AvroSchema recordSchema(AvroDatumType::Record);
  recordSchema.m_name = std::move(name);
  recordSchema.m_status = std::make_shared<SharedStatus>();
  for (auto& i : fieldsSchema) {
    recordSchema.m_status->m_keys.push_back(i.first);
    recordSchema.m_status->m_schemas.push_back(i.second);
  }
  return recordSchema;
}

AvroSchema AvroSchema::ArraySchema(AvroSchema elementSchema) {
  AvroSchema arraySchema(AvroDatumType::Array);
  arraySchema.m_status = std::make_shared<SharedStatus>();
  arraySchema.m_status->m_schemas.push_back(std::move(elementSchema));
  return arraySchema;
}

AvroSchema AvroSchema::MapSchema(AvroSchema elementSchema) {
  AvroSchema mapSchema(AvroDatumType::Map);
  mapSchema.m_status = std::make_shared<SharedStatus>();
  mapSchema.m_status->m_schemas.push_back(std::move(elementSchema));
  return mapSchema;
}

AvroSchema AvroSchema::UnionSchema(std::vector<AvroSchema> schemas) {
  AvroSchema unionSchema(AvroDatumType::Union);
  unionSchema.m_status = std::make_shared<SharedStatus>();
  unionSchema.m_status->m_schemas = std::move(schemas);
  return unionSchema;
}

AvroSchema AvroSchema::FixedSchema(std::string name, int64_t size) {
  AvroSchema fixedSchema(AvroDatumType::Fixed);
  fixedSchema.m_name = std::move(name);
  fixedSchema.m_status = std::make_shared<SharedStatus>();
  fixedSchema.m_status->m_size = size;
  return fixedSchema;
}

void AvroDatum::Fill(AvroStreamReader& reader, const Core::Context& context) {
  m_data = reader.m_pos;
  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
    int64_t stringSize = reader.ParseInt(context);
    reader.Advance(static_cast<size_t>(stringSize), context);
  } else if (m_schema.Type() == AvroDatumType::Int || m_schema.Type() == AvroDatumType::Long ||
             m_schema.Type() == AvroDatumType::Enum) {
    reader.ParseInt(context);
  } else if (m_schema.Type() == AvroDatumType::Float) {
    reader.Advance(4, context);
  } else if (m_schema.Type() == AvroDatumType::Double) {
    reader.Advance(8, context);
  } else if (m_schema.Type() == AvroDatumType::Bool) {
    reader.Advance(1, context);
  } else if (m_schema.Type() == AvroDatumType::Null) {
    reader.Advance(0, context);
  } else if (m_schema.Type() == AvroDatumType::Record) {
    for (const auto& s : m_schema.FieldSchemas()) {
      AvroDatum(s).Fill(reader, context);
    }
  } else if (m_schema.Type() == AvroDatumType::Array) {
    while (true) {
      int64_t numElementsInBlock = reader.ParseInt(context);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = reader.ParseInt(context);
        reader.Advance(static_cast<size_t>(blockSize), context);
      } else {
        for (auto i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(m_schema.ItemSchema()).Fill(reader, context);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Map) {
    while (true) {
      int64_t numElementsInBlock = reader.ParseInt(context);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = reader.ParseInt(context);
        reader.Advance(static_cast<size_t>(blockSize), context);
      } else {
        for (int64_t i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(AvroSchema::StringSchema).Fill(reader, context);
          AvroDatum(m_schema.ItemSchema()).Fill(reader, context);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Union) {
    int64_t i = reader.ParseInt(context);
    AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]).Fill(reader, context);
  } else if (m_schema.Type() == AvroDatumType::Fixed) {
    reader.Advance(m_schema.Size(), context);
  } else {
    AZURE_UNREACHABLE_CODE();
  }
}

void AvroDatum::Fill(AvroStreamReader::ReaderPos& data) {
  m_data = data;
  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
    int64_t stringSize = parseInt(data);
    data.Offset += static_cast<size_t>(stringSize);
  } else if (m_schema.Type() == AvroDatumType::Int || m_schema.Type() == AvroDatumType::Long ||
             m_schema.Type() == AvroDatumType::Enum) {
    parseInt(data);
  } else if (m_schema.Type() == AvroDatumType::Float) {
    data.Offset += 4;
  } else if (m_schema.Type() == AvroDatumType::Double) {
    data.Offset += 8;
  } else if (m_schema.Type() == AvroDatumType::Bool) {
    data.Offset += 1;
  } else if (m_schema.Type() == AvroDatumType::Null) {
    data.Offset += 0;
  } else if (m_schema.Type() == AvroDatumType::Record) {
    for (const auto& s : m_schema.FieldSchemas()) {
      AvroDatum(s).Fill(data);
    }
  } else if (m_schema.Type() == AvroDatumType::Array) {
    while (true) {
      int64_t numElementsInBlock = parseInt(data);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = parseInt(data);
        data.Offset += static_cast<size_t>(blockSize);
      } else {
        for (auto i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(m_schema.ItemSchema()).Fill(data);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Map) {
    while (true) {
      int64_t numElementsInBlock = parseInt(data);
      if (numElementsInBlock == 0) {
        break;
      } else if (numElementsInBlock < 0) {
        int64_t blockSize = parseInt(data);
        data.Offset += static_cast<size_t>(blockSize);
      } else {
        for (int64_t i = 0; i < numElementsInBlock; ++i) {
          AvroDatum(AvroSchema::StringSchema).Fill(data);
          AvroDatum(m_schema.ItemSchema()).Fill(data);
        }
      }
    }
  } else if (m_schema.Type() == AvroDatumType::Union) {
    int64_t i = parseInt(data);
    AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]).Fill(data);
  } else if (m_schema.Type() == AvroDatumType::Fixed) {
    data.Offset += m_schema.Size();
  } else {
    AZURE_UNREACHABLE_CODE();
  }
}

template <>
AvroDatum::StringView AvroDatum::Value() const {
  auto data = m_data;
  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
    const int64_t length = parseInt(data);
    const uint8_t* start = &(*data.BufferPtr)[data.Offset];
    StringView ret{start, static_cast<size_t>(length)};
    data.Offset += static_cast<size_t>(length);
    return ret;
  }
  if (m_schema.Type() == AvroDatumType::Fixed) {
    const size_t fixedSize = m_schema.Size();
    const uint8_t* start = &(*data.BufferPtr)[data.Offset];
    StringView ret{start, fixedSize};
    data.Offset += fixedSize;
    return ret;
  }
  AZURE_UNREACHABLE_CODE();
}

template <>
std::string AvroDatum::Value() const {
  auto stringView = Value<StringView>();
  return std::string(stringView.Data, stringView.Data + stringView.Length);
}

template <>
std::vector<uint8_t> AvroDatum::Value() const {
  auto stringView = Value<StringView>();
  return std::vector<uint8_t>(stringView.Data, stringView.Data + stringView.Length);
}

template <>
int64_t AvroDatum::Value() const {
  auto data = m_data;
  return parseInt(data);
}

template <>
int32_t AvroDatum::Value() const {
  return static_cast<int32_t>(Value<int64_t>());
}

template <>
bool AvroDatum::Value() const {
  return Value<int64_t>();
}

template <>
std::nullptr_t AvroDatum::Value() const {
  return nullptr;
}

template <>
AvroRecord AvroDatum::Value() const {
  auto data = m_data;

  AvroRecord r;
  r.m_keys = &m_schema.FieldNames();
  for (const auto& schema : m_schema.FieldSchemas()) {
    auto datum = AvroDatum(schema);
    datum.Fill(data);
    r.m_values.push_back(std::move(datum));
  }

  return r;
}

template <>
AvroMap AvroDatum::Value() const {
  auto data = m_data;

  AvroMap m;
  while (true) {
    int64_t numElementsInBlock = parseInt(data);
    if (numElementsInBlock == 0) {
      break;
    }
    if (numElementsInBlock < 0) {
      numElementsInBlock = -numElementsInBlock;
      parseInt(data);
    }
    for (int64_t i = 0; i < numElementsInBlock; ++i) {
      auto keyDatum = AvroDatum(AvroSchema::StringSchema);
      keyDatum.Fill(data);
      auto valueDatum = AvroDatum(m_schema.ItemSchema());
      valueDatum.Fill(data);
      m[keyDatum.Value<std::string>()] = valueDatum;
    }
  }
  return m;
}

template <>
AvroDatum AvroDatum::Value() const {
  auto data = m_data;
  if (m_schema.Type() == AvroDatumType::Union) {
    int64_t i = parseInt(data);
    auto datum = AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]);
    datum.Fill(data);
    return datum;
  }
  AZURE_UNREACHABLE_CODE();
}

AvroObjectContainerReader::AvroObjectContainerReader(Core::IO::BodyStream& stream)
    : m_reader(std::make_unique<AvroStreamReader>(stream)) {}

AvroDatum AvroObjectContainerReader::NextImpl(const AvroSchema* schema, const Core::Context& context) {
  AZURE_ASSERT_FALSE(m_eof);
  static const auto SyncMarkerSchema = AvroSchema::FixedSchema("Sync", 16);
  if (!schema) {
    static AvroSchema FileHeaderSchema = []() {
      std::vector<std::pair<std::string, AvroSchema>> fieldsSchema;
      fieldsSchema.push_back(std::make_pair("magic", AvroSchema::FixedSchema("Magic", 4)));
      fieldsSchema.push_back(std::make_pair("meta", AvroSchema::MapSchema(AvroSchema::BytesSchema)));
      fieldsSchema.push_back(std::make_pair("sync", SyncMarkerSchema));
      return AvroSchema::RecordSchema("org.apache.avro.file.Header", std::move(fieldsSchema));
    }();
    auto fileHeaderDatum = AvroDatum(FileHeaderSchema);
    fileHeaderDatum.Fill(*m_reader, context);
    auto fileHeader = fileHeaderDatum.Value<AvroRecord>();
    if (fileHeader.Field("magic").Value<std::string>() != "Obj\01") {
      throw std::runtime_error("Invalid Avro object container magic.");
    }
    AvroMap meta = fileHeader.Field("meta").Value<AvroMap>();
    std::string objectSchemaJson = meta["avro.schema"].Value<std::string>();
    std::string codec = "null";
    if (meta.count("avro.codec") != 0) {
      codec = meta["avro.codec"].Value<std::string>();
    }
    if (codec != "null") {
      throw std::runtime_error("Unsupported Avro codec: " + codec);
    }
    m_syncMarker = fileHeader.Field("sync").Value<std::string>();
    m_objectSchema = std::make_unique<AvroSchema>(ParseSchemaFromJsonString(objectSchemaJson));
    schema = m_objectSchema.get();
  }

  if (m_remainingObjectInCurrentBlock == 0) {
    m_reader->Discard();
    m_remainingObjectInCurrentBlock = m_reader->ParseInt(context);
    int64_t ObjectsSize = m_reader->ParseInt(context);
    m_reader->Preload(static_cast<size_t>(ObjectsSize), context);
  }

  auto objectDatum = AvroDatum(*m_objectSchema);
  objectDatum.Fill(*m_reader, context);
  if (--m_remainingObjectInCurrentBlock == 0) {
    auto markerDatum = AvroDatum(SyncMarkerSchema);
    markerDatum.Fill(*m_reader, context);
    auto marker = markerDatum.Value<std::string>();
    if (marker != m_syncMarker) {
      throw std::runtime_error("Sync marker doesn't match.");
    }
    m_eof = m_reader->TryPreload(1, context) == 0;
  }
  return objectDatum;
}
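
// For reference, the container layout NextImpl() walks, per the Avro 1.x
// object container file format: a header record -- 4-byte magic "Obj\x01", a
// meta map whose "avro.schema" entry carries the object schema as JSON, and a
// 16-byte sync marker -- followed by data blocks of the form
//   <object count> <serialized byte size> <objects...> <sync marker>
// which is why the reader re-validates the sync marker at every block boundary.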

size_t AvroStreamParser::OnRead(uint8_t* buffer, size_t count, Azure::Core::Context const& context) {
  if (m_parserBuffer.Length != 0) {
    size_t bytesToCopy = (std::min)(m_parserBuffer.Length, count);
    std::memcpy(buffer, m_parserBuffer.Data, bytesToCopy);
    m_parserBuffer.Data += bytesToCopy;
    m_parserBuffer.Length -= bytesToCopy;
    return bytesToCopy;
  }
  while (!m_parser.End()) {
    auto datum = m_parser.Next(context);
    if (datum.Schema().Type() == AvroDatumType::Union) {
      datum = datum.Value<AvroDatum>();
    }
    if (datum.Schema().Type() != AvroDatumType::Record) {
      continue;
    }
    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.resultData") {
      auto record = datum.Value<AvroRecord>();
      auto dataDatum = record.Field("data");
      m_parserBuffer = dataDatum.Value<AvroDatum::StringView>();
      return OnRead(buffer, count, context);
    }
    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.progress" && m_progressCallback) {
      auto record = datum.Value<AvroRecord>();
      auto bytesScanned = record.Field("bytesScanned").Value<int64_t>();
      auto totalBytes = record.Field("totalBytes").Value<int64_t>();
      m_progressCallback(bytesScanned, totalBytes);
    }
    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.error" && m_errorCallback) {
      auto record = datum.Value<AvroRecord>();
      BlobQueryError e;
      e.Name = record.Field("name").Value<std::string>();
      e.Description = record.Field("description").Value<std::string>();
      e.IsFatal = record.Field("fatal").Value<bool>();
      e.Position = record.Field("position").Value<int64_t>();
      m_errorCallback(std::move(e));
    }
  }
  return 0;
}
}  // namespace _detail
}  // namespace Blobs
}  // namespace Storage
}  // namespace Azure

#endif
@ -0,0 +1,625 @@
#if defined(USE_S3)

#include "td_block_blob_client.hpp"

#include <azure/core/platform.hpp>

#if defined(AZ_PLATFORM_WINDOWS)
#if !defined(WIN32_LEAN_AND_MEAN)
#define WIN32_LEAN_AND_MEAN
#endif
#if !defined(NOMINMAX)
#define NOMINMAX
#endif
#include <windows.h>
#endif

#include <td_avro_parser.h>

#include <azure/core/io/body_stream.hpp>
#include <azure/storage/common/crypt.hpp>
#include <azure/storage/common/internal/concurrent_transfer.hpp>
#include <azure/storage/common/internal/constants.hpp>
#include <azure/storage/common/internal/file_io.hpp>
#include <azure/storage/common/internal/storage_switch_to_secondary_policy.hpp>
#include <azure/storage/common/storage_common.hpp>
#include <azure/storage/common/storage_exception.hpp>

namespace Azure {
namespace Storage {
namespace Blobs {

TDBlockBlobClient TDBlockBlobClient::CreateFromConnectionString(const std::string& connectionString,
                                                                const std::string& blobContainerName,
                                                                const std::string& blobName,
                                                                const BlobClientOptions& options) {
  TDBlockBlobClient newClient(
      BlobClient::CreateFromConnectionString(connectionString, blobContainerName, blobName, options));
  return newClient;
}

TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl, std::shared_ptr<StorageSharedKeyCredential> credential,
                                     const BlobClientOptions& options)
    : BlobClient(blobUrl, std::move(credential), options) {}

TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl,
                                     std::shared_ptr<Core::Credentials::TokenCredential> credential,
                                     const BlobClientOptions& options)
    : BlobClient(blobUrl, std::move(credential), options) {}

TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl, const BlobClientOptions& options)
    : BlobClient(blobUrl, options) {}

TDBlockBlobClient::TDBlockBlobClient(BlobClient blobClient) : BlobClient(std::move(blobClient)) {}

TDBlockBlobClient TDBlockBlobClient::WithSnapshot(const std::string& snapshot) const {
  TDBlockBlobClient newClient(*this);
  if (snapshot.empty()) {
    newClient.m_blobUrl.RemoveQueryParameter(_internal::HttpQuerySnapshot);
  } else {
    newClient.m_blobUrl.AppendQueryParameter(_internal::HttpQuerySnapshot,
                                             _internal::UrlEncodeQueryParameter(snapshot));
  }
  return newClient;
}

TDBlockBlobClient TDBlockBlobClient::WithVersionId(const std::string& versionId) const {
  TDBlockBlobClient newClient(*this);
  if (versionId.empty()) {
    newClient.m_blobUrl.RemoveQueryParameter(_internal::HttpQueryVersionId);
  } else {
    newClient.m_blobUrl.AppendQueryParameter(_internal::HttpQueryVersionId,
                                             _internal::UrlEncodeQueryParameter(versionId));
  }
  return newClient;
}

Azure::Response<Models::UploadBlockBlobResult> TDBlockBlobClient::Upload(Azure::Core::IO::BodyStream& content,
                                                                         const UploadBlockBlobOptions& options,
                                                                         const Azure::Core::Context& context) const {
  _detail::BlockBlobClient::UploadBlockBlobOptions protocolLayerOptions;
  if (options.TransactionalContentHash.HasValue()) {
    if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
      protocolLayerOptions.TransactionalContentMD5 = options.TransactionalContentHash.Value().Value;
    } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
      protocolLayerOptions.TransactionalContentCrc64 = options.TransactionalContentHash.Value().Value;
    }
  }
  protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType;
  protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding;
  protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage;
  protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value;
  protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition;
  protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl;
  protocolLayerOptions.Metadata = std::map<std::string, std::string>(options.Metadata.begin(), options.Metadata.end());
  protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags);
  protocolLayerOptions.Tier = options.AccessTier;
  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
  protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
  protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
  protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
  protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
  protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
  if (m_customerProvidedKey.HasValue()) {
    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
  }
  protocolLayerOptions.EncryptionScope = m_encryptionScope;
  if (options.ImmutabilityPolicy.HasValue()) {
    protocolLayerOptions.ImmutabilityPolicyExpiry = options.ImmutabilityPolicy.Value().ExpiresOn;
    protocolLayerOptions.ImmutabilityPolicyMode = options.ImmutabilityPolicy.Value().PolicyMode;
  }
  protocolLayerOptions.LegalHold = options.HasLegalHold;

  return _detail::BlockBlobClient::Upload(*m_pipeline, m_blobUrl, content, protocolLayerOptions, context);
}

Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
    const uint8_t* buffer, size_t bufferSize, const UploadBlockBlobFromOptions& options,
    const Azure::Core::Context& context) const {
  constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
  constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
  constexpr int64_t MaxBlockNumber = 50000;
  constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;

  if (static_cast<uint64_t>(options.TransferOptions.SingleUploadThreshold) > (std::numeric_limits<size_t>::max)()) {
    throw Azure::Core::RequestFailedException("Single upload threshold is too big");
  }
  if (bufferSize <= static_cast<size_t>(options.TransferOptions.SingleUploadThreshold)) {
    Azure::Core::IO::MemoryBodyStream contentStream(buffer, bufferSize);
    UploadBlockBlobOptions uploadBlockBlobOptions;
    uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
    uploadBlockBlobOptions.Metadata = options.Metadata;
    uploadBlockBlobOptions.Tags = options.Tags;
    uploadBlockBlobOptions.AccessTier = options.AccessTier;
    uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
    uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold;
    return Upload(contentStream, uploadBlockBlobOptions, context);
  }

  int64_t chunkSize;
  if (options.TransferOptions.ChunkSize.HasValue()) {
    chunkSize = options.TransferOptions.ChunkSize.Value();
  } else {
    int64_t minChunkSize = (bufferSize + MaxBlockNumber - 1) / MaxBlockNumber;
    minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize;
    chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize);
  }
  if (chunkSize > MaxStageBlockSize) {
    throw Azure::Core::RequestFailedException("Block size is too big.");
  }
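
  // A worked example of the sizing above (illustrative numbers): for a 10 GiB
  // buffer, minChunkSize = ceil(10 GiB / 50000) is about 215 KB, which the
  // BlockGrainSize rounding lifts to 1 MiB; the chunk size is then
  // max(DefaultStageBlockSize, 1 MiB) = 4 MiB, well under the 4000 MiB cap.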

  std::vector<std::string> blockIds;
  auto getBlockId = [](int64_t id) {
    constexpr size_t BlockIdLength = 64;
    std::string blockId = std::to_string(id);
    blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId;
    return Azure::Core::Convert::Base64Encode(std::vector<uint8_t>(blockId.begin(), blockId.end()));
  };
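
  // Note on getBlockId() above: the service requires every block id in a blob
  // to have the same length, so the decimal chunk id is zero-padded to a fixed
  // 64 characters and then Base64-encoded; e.g. id 7 becomes "00...007" (63
  // zeros followed by "7") before encoding.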
|
||||
|
||||
auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
|
||||
Azure::Core::IO::MemoryBodyStream contentStream(buffer + offset, static_cast<size_t>(length));
|
||||
StageBlockOptions chunkOptions;
|
||||
auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);
|
||||
if (chunkId == numChunks - 1) {
|
||||
blockIds.resize(static_cast<size_t>(numChunks));
|
||||
}
|
||||
};
|
||||
|
||||
_internal::ConcurrentTransfer(0, bufferSize, chunkSize, options.TransferOptions.Concurrency, uploadBlockFunc);
|
||||
|
||||
for (size_t i = 0; i < blockIds.size(); ++i) {
|
||||
blockIds[i] = getBlockId(static_cast<int64_t>(i));
|
||||
}
|
||||
CommitBlockListOptions commitBlockListOptions;
|
||||
commitBlockListOptions.HttpHeaders = options.HttpHeaders;
|
||||
commitBlockListOptions.Metadata = options.Metadata;
|
||||
commitBlockListOptions.Tags = options.Tags;
|
||||
commitBlockListOptions.AccessTier = options.AccessTier;
|
||||
commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
|
||||
commitBlockListOptions.HasLegalHold = options.HasLegalHold;
|
||||
auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context);
|
||||
|
||||
Models::UploadBlockBlobFromResult ret;
|
||||
ret.ETag = std::move(commitBlockListResponse.Value.ETag);
|
||||
ret.LastModified = std::move(commitBlockListResponse.Value.LastModified);
|
||||
ret.VersionId = std::move(commitBlockListResponse.Value.VersionId);
|
||||
ret.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted;
|
||||
ret.EncryptionKeySha256 = std::move(commitBlockListResponse.Value.EncryptionKeySha256);
|
||||
ret.EncryptionScope = std::move(commitBlockListResponse.Value.EncryptionScope);
|
||||
return Azure::Response<Models::UploadBlockBlobFromResult>(std::move(ret),
|
||||
std::move(commitBlockListResponse.RawResponse));
|
||||
}
|
||||
|
||||
Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
|
||||
const std::string& fileName, const UploadBlockBlobFromOptions& options, const Azure::Core::Context& context) const {
|
||||
constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
|
||||
constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
|
||||
constexpr int64_t MaxBlockNumber = 50000;
|
||||
constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;
|
||||
|
||||
{
|
||||
Azure::Core::IO::FileBodyStream contentStream(fileName);
|
||||
|
||||
if (contentStream.Length() <= options.TransferOptions.SingleUploadThreshold) {
|
||||
UploadBlockBlobOptions uploadBlockBlobOptions;
|
||||
uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
|
||||
uploadBlockBlobOptions.Metadata = options.Metadata;
|
||||
uploadBlockBlobOptions.Tags = options.Tags;
|
||||
uploadBlockBlobOptions.AccessTier = options.AccessTier;
|
||||
uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
|
||||
uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold;
|
||||
return Upload(contentStream, uploadBlockBlobOptions, context);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> blockIds;
|
||||
auto getBlockId = [](int64_t id) {
|
||||
constexpr size_t BlockIdLength = 64;
|
||||
std::string blockId = std::to_string(id);
|
||||
blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId;
|
||||
return Azure::Core::Convert::Base64Encode(std::vector<uint8_t>(blockId.begin(), blockId.end()));
|
||||
};
|
||||
|
||||
_internal::FileReader fileReader(fileName);
|
||||
|
||||
auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
|
||||
Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, length);
|
||||
StageBlockOptions chunkOptions;
|
||||
auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);
|
||||
if (chunkId == numChunks - 1) {
|
||||
blockIds.resize(static_cast<size_t>(numChunks));
|
||||
}
|
||||
};
|
||||
|
||||
int64_t chunkSize;
|
||||
if (options.TransferOptions.ChunkSize.HasValue()) {
|
||||
chunkSize = options.TransferOptions.ChunkSize.Value();
|
||||
} else {
|
||||
int64_t minChunkSize = (fileReader.GetFileSize() + MaxBlockNumber - 1) / MaxBlockNumber;
|
||||
minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize;
|
||||
chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize);
|
||||
}
|
||||
if (chunkSize > MaxStageBlockSize) {
|
||||
throw Azure::Core::RequestFailedException("Block size is too big.");
|
||||
}
|
||||
|
||||
_internal::ConcurrentTransfer(0, fileReader.GetFileSize(), chunkSize, options.TransferOptions.Concurrency,
|
||||
uploadBlockFunc);
|
||||
|
||||
for (size_t i = 0; i < blockIds.size(); ++i) {
|
||||
blockIds[i] = getBlockId(static_cast<int64_t>(i));
|
||||
}
|
||||
CommitBlockListOptions commitBlockListOptions;
|
||||
commitBlockListOptions.HttpHeaders = options.HttpHeaders;
|
||||
commitBlockListOptions.Metadata = options.Metadata;
|
||||
commitBlockListOptions.Tags = options.Tags;
|
||||
commitBlockListOptions.AccessTier = options.AccessTier;
|
||||
commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
|
||||
commitBlockListOptions.HasLegalHold = options.HasLegalHold;
|
||||
auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context);
|
||||
|
||||
Models::UploadBlockBlobFromResult result;
|
||||
result.ETag = commitBlockListResponse.Value.ETag;
|
||||
result.LastModified = commitBlockListResponse.Value.LastModified;
|
||||
result.VersionId = commitBlockListResponse.Value.VersionId;
|
||||
result.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted;
|
||||
result.EncryptionKeySha256 = commitBlockListResponse.Value.EncryptionKeySha256;
|
||||
result.EncryptionScope = commitBlockListResponse.Value.EncryptionScope;
|
||||
return Azure::Response<Models::UploadBlockBlobFromResult>(std::move(result),
|
||||
std::move(commitBlockListResponse.RawResponse));
|
||||
}
|
||||
|
||||
Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
|
||||
const std::string& fileName, int64_t offset, int64_t size, const UploadBlockBlobFromOptions& options,
|
||||
const Azure::Core::Context& context) const {
|
||||
_internal::FileReader fileReader(fileName);
|
||||
|
||||
{
|
||||
Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, size);
|
||||
|
||||
if (size <= options.TransferOptions.SingleUploadThreshold) {
|
||||
UploadBlockBlobOptions uploadBlockBlobOptions;
|
||||
uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
|
||||
uploadBlockBlobOptions.Metadata = options.Metadata;
|
||||
uploadBlockBlobOptions.Tags = options.Tags;
|
||||
uploadBlockBlobOptions.AccessTier = options.AccessTier;
|
||||
uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
|
||||
uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold;
|
||||
return Upload(contentStream, uploadBlockBlobOptions, context);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> blockIds;
|
||||
auto getBlockId = [](int64_t id) {
|
||||
constexpr size_t BlockIdLength = 64;
|
||||
std::string blockId = std::to_string(id);
|
||||
blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId;
|
||||
return Azure::Core::Convert::Base64Encode(std::vector<uint8_t>(blockId.begin(), blockId.end()));
|
||||
};
|

    auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
      Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, length);
      StageBlockOptions chunkOptions;
      auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);
      if (chunkId == numChunks - 1) {
        blockIds.resize(static_cast<size_t>(numChunks));
      }
    };

    constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
    constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
    constexpr int64_t MaxBlockNumber = 50000;
    constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;

    int64_t chunkSize;
    if (options.TransferOptions.ChunkSize.HasValue()) {
      chunkSize = options.TransferOptions.ChunkSize.Value();
    } else {
      int64_t minChunkSize = (size + MaxBlockNumber - 1) / MaxBlockNumber;
      minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize;
      chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize);
    }
    if (chunkSize > MaxStageBlockSize) {
      throw Azure::Core::RequestFailedException("Block size is too big.");
    }
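    // Worked example of the auto-sizing branch above: for a 1 TiB file,
    // ceil(size / MaxBlockNumber) is just under 21 MiB; the 1 MiB grain rounds it
    // up to exactly 21 MiB, which then wins over the 4 MiB default. The file is
    // staged as roughly 49933 blocks of 21 MiB, under both the 50000-block and
    // the 4000 MiB per-block service limits.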

    _internal::ConcurrentTransfer(offset, size, chunkSize, options.TransferOptions.Concurrency, uploadBlockFunc);

    for (size_t i = 0; i < blockIds.size(); ++i) {
      blockIds[i] = getBlockId(static_cast<int64_t>(i));
    }
    CommitBlockListOptions commitBlockListOptions;
    commitBlockListOptions.HttpHeaders = options.HttpHeaders;
    commitBlockListOptions.Metadata = options.Metadata;
    commitBlockListOptions.Tags = options.Tags;
    commitBlockListOptions.AccessTier = options.AccessTier;
    commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
    commitBlockListOptions.HasLegalHold = options.HasLegalHold;
    auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context);

    Models::UploadBlockBlobFromResult result;
    result.ETag = commitBlockListResponse.Value.ETag;
    result.LastModified = commitBlockListResponse.Value.LastModified;
    result.VersionId = commitBlockListResponse.Value.VersionId;
    result.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted;
    result.EncryptionKeySha256 = commitBlockListResponse.Value.EncryptionKeySha256;
    result.EncryptionScope = commitBlockListResponse.Value.EncryptionScope;
    return Azure::Response<Models::UploadBlockBlobFromResult>(std::move(result),
                                                              std::move(commitBlockListResponse.RawResponse));
  }

  Azure::Response<Models::UploadBlockBlobFromUriResult> TDBlockBlobClient::UploadFromUri(
      const std::string& sourceUri, const UploadBlockBlobFromUriOptions& options,
      const Azure::Core::Context& context) const {
    _detail::BlockBlobClient::UploadBlockBlobFromUriOptions protocolLayerOptions;
    protocolLayerOptions.CopySource = sourceUri;
    protocolLayerOptions.CopySourceBlobProperties = options.CopySourceBlobProperties;
    protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType;
    protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding;
    protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage;
    protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value;
    protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl;
    protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition;
    protocolLayerOptions.Metadata = std::map<std::string, std::string>(options.Metadata.begin(), options.Metadata.end());
    protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags);
    protocolLayerOptions.Tier = options.AccessTier;
    protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
    protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
    protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
    protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
    protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
    protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
    protocolLayerOptions.SourceIfMatch = options.SourceAccessConditions.IfMatch;
    protocolLayerOptions.SourceIfNoneMatch = options.SourceAccessConditions.IfNoneMatch;
    protocolLayerOptions.SourceIfModifiedSince = options.SourceAccessConditions.IfModifiedSince;
    protocolLayerOptions.SourceIfUnmodifiedSince = options.SourceAccessConditions.IfUnmodifiedSince;
    protocolLayerOptions.SourceIfTags = options.SourceAccessConditions.TagConditions;
    if (options.TransactionalContentHash.HasValue()) {
      if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
        protocolLayerOptions.SourceContentMD5 = options.TransactionalContentHash.Value().Value;
      } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
        protocolLayerOptions.SourceContentcrc64 = options.TransactionalContentHash.Value().Value;
      }
    }
    if (m_customerProvidedKey.HasValue()) {
      protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
      protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
      protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
    }
    protocolLayerOptions.EncryptionScope = m_encryptionScope;
    protocolLayerOptions.CopySourceTags = options.CopySourceTagsMode;
    if (!options.SourceAuthorization.empty()) {
      protocolLayerOptions.CopySourceAuthorization = options.SourceAuthorization;
    }

    return _detail::BlockBlobClient::UploadFromUri(*m_pipeline, m_blobUrl, protocolLayerOptions, context);
  }

  Azure::Response<Models::StageBlockResult> TDBlockBlobClient::StageBlock(const std::string& blockId,
                                                                          Azure::Core::IO::BodyStream& content,
                                                                          const StageBlockOptions& options,
                                                                          const Azure::Core::Context& context) const {
    _detail::BlockBlobClient::StageBlockBlobBlockOptions protocolLayerOptions;
    protocolLayerOptions.BlockId = blockId;
    if (options.TransactionalContentHash.HasValue()) {
      if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
        protocolLayerOptions.TransactionalContentMD5 = options.TransactionalContentHash.Value().Value;
      } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
        protocolLayerOptions.TransactionalContentCrc64 = options.TransactionalContentHash.Value().Value;
      }
    }
    protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
    if (m_customerProvidedKey.HasValue()) {
      protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
      protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
      protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
    }
    protocolLayerOptions.EncryptionScope = m_encryptionScope;
    return _detail::BlockBlobClient::StageBlock(*m_pipeline, m_blobUrl, content, protocolLayerOptions, context);
  }

  Azure::Response<Models::StageBlockFromUriResult> TDBlockBlobClient::StageBlockFromUri(
      const std::string& blockId, const std::string& sourceUri, const StageBlockFromUriOptions& options,
      const Azure::Core::Context& context) const {
    _detail::BlockBlobClient::StageBlockBlobBlockFromUriOptions protocolLayerOptions;
    protocolLayerOptions.BlockId = blockId;
    protocolLayerOptions.SourceUrl = sourceUri;
    if (options.SourceRange.HasValue()) {
      std::string rangeStr = "bytes=" + std::to_string(options.SourceRange.Value().Offset) + "-";
      if (options.SourceRange.Value().Length.HasValue()) {
        rangeStr += std::to_string(options.SourceRange.Value().Offset + options.SourceRange.Value().Length.Value() - 1);
      }
      protocolLayerOptions.SourceRange = rangeStr;
    }
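    // The string built above follows the HTTP Range header grammar,
    // "bytes=<first>-<last>", where <last> is inclusive, hence the
    // offset + length - 1; omitting <last> requests everything from <first>
    // to the end of the source blob.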
    if (options.TransactionalContentHash.HasValue()) {
      if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
        protocolLayerOptions.SourceContentMD5 = options.TransactionalContentHash.Value().Value;
      } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
        protocolLayerOptions.SourceContentcrc64 = options.TransactionalContentHash.Value().Value;
      }
    }
    protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
    protocolLayerOptions.SourceIfModifiedSince = options.SourceAccessConditions.IfModifiedSince;
    protocolLayerOptions.SourceIfUnmodifiedSince = options.SourceAccessConditions.IfUnmodifiedSince;
    protocolLayerOptions.SourceIfMatch = options.SourceAccessConditions.IfMatch;
    protocolLayerOptions.SourceIfNoneMatch = options.SourceAccessConditions.IfNoneMatch;
    if (m_customerProvidedKey.HasValue()) {
      protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
      protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
      protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
    }
    protocolLayerOptions.EncryptionScope = m_encryptionScope;
    if (!options.SourceAuthorization.empty()) {
      protocolLayerOptions.CopySourceAuthorization = options.SourceAuthorization;
    }

    return _detail::BlockBlobClient::StageBlockFromUri(*m_pipeline, m_blobUrl, protocolLayerOptions, context);
  }

  Azure::Response<Models::CommitBlockListResult> TDBlockBlobClient::CommitBlockList(
      const std::vector<std::string>& blockIds, const CommitBlockListOptions& options,
      const Azure::Core::Context& context) const {
    _detail::BlockBlobClient::CommitBlockBlobBlockListOptions protocolLayerOptions;
    protocolLayerOptions.Blocks.Latest = blockIds;
    protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType;
    protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding;
    protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage;
    protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value;
    protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition;
    protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl;
    protocolLayerOptions.Metadata = std::map<std::string, std::string>(options.Metadata.begin(), options.Metadata.end());
    protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags);
    protocolLayerOptions.Tier = options.AccessTier;
    protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
    protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
    protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
    protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
    protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
    protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
    if (m_customerProvidedKey.HasValue()) {
      protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
      protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
      protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
    }
    protocolLayerOptions.EncryptionScope = m_encryptionScope;
    if (options.ImmutabilityPolicy.HasValue()) {
      protocolLayerOptions.ImmutabilityPolicyExpiry = options.ImmutabilityPolicy.Value().ExpiresOn;
      protocolLayerOptions.ImmutabilityPolicyMode = options.ImmutabilityPolicy.Value().PolicyMode;
    }
    protocolLayerOptions.LegalHold = options.HasLegalHold;

    return _detail::BlockBlobClient::CommitBlockList(*m_pipeline, m_blobUrl, protocolLayerOptions, context);
  }

  Azure::Response<Models::GetBlockListResult> TDBlockBlobClient::GetBlockList(const GetBlockListOptions& options,
                                                                              const Azure::Core::Context& context) const {
    _detail::BlockBlobClient::GetBlockBlobBlockListOptions protocolLayerOptions;
    protocolLayerOptions.ListType = options.ListType;
    protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
    protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
    return _detail::BlockBlobClient::GetBlockList(*m_pipeline, m_blobUrl, protocolLayerOptions,
                                                  _internal::WithReplicaStatus(context));
  }
  /*
  Azure::Response<Models::QueryBlobResult> TDBlockBlobClient::Query(const std::string& querySqlExpression,
                                                                    const QueryBlobOptions& options,
                                                                    const Azure::Core::Context& context) const {
    _detail::BlobClient::QueryBlobOptions protocolLayerOptions;
    protocolLayerOptions.QueryRequest.QueryType = Models::_detail::QueryRequestQueryType::SQL;
    protocolLayerOptions.QueryRequest.Expression = querySqlExpression;
    if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Delimited) {
      Models::_detail::DelimitedTextConfiguration c;
      c.RecordSeparator = options.InputTextConfiguration.m_recordSeparator;
      c.ColumnSeparator = options.InputTextConfiguration.m_columnSeparator;
      c.FieldQuote = options.InputTextConfiguration.m_quotationCharacter;
      c.EscapeChar = options.InputTextConfiguration.m_escapeCharacter;
      c.HeadersPresent = options.InputTextConfiguration.m_hasHeaders;
      Models::_detail::QuerySerialization q;
      q.Format.Type = options.InputTextConfiguration.m_format;
      q.Format.DelimitedTextConfiguration = std::move(c);
      protocolLayerOptions.QueryRequest.InputSerialization = std::move(q);
    } else if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Json) {
      Models::_detail::JsonTextConfiguration c;
      c.RecordSeparator = options.InputTextConfiguration.m_recordSeparator;
      Models::_detail::QuerySerialization q;
      q.Format.Type = options.InputTextConfiguration.m_format;
      q.Format.JsonTextConfiguration = std::move(c);
      protocolLayerOptions.QueryRequest.InputSerialization = std::move(q);
    } else if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Parquet) {
      Models::_detail::ParquetConfiguration c;
      Models::_detail::QuerySerialization q;
      q.Format.Type = options.InputTextConfiguration.m_format;
      q.Format.ParquetTextConfiguration = std::move(c);
      protocolLayerOptions.QueryRequest.InputSerialization = std::move(q);
    } else if (options.InputTextConfiguration.m_format.ToString().empty()) {
    } else {
      AZURE_UNREACHABLE_CODE();
    }
    if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Delimited) {
      Models::_detail::DelimitedTextConfiguration c;
      c.RecordSeparator = options.OutputTextConfiguration.m_recordSeparator;
      c.ColumnSeparator = options.OutputTextConfiguration.m_columnSeparator;
      c.FieldQuote = options.OutputTextConfiguration.m_quotationCharacter;
      c.EscapeChar = options.OutputTextConfiguration.m_escapeCharacter;
      c.HeadersPresent = options.OutputTextConfiguration.m_hasHeaders;
      Models::_detail::QuerySerialization q;
      q.Format.Type = options.OutputTextConfiguration.m_format;
      q.Format.DelimitedTextConfiguration = std::move(c);
      protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
    } else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Json) {
      Models::_detail::JsonTextConfiguration c;
      c.RecordSeparator = options.OutputTextConfiguration.m_recordSeparator;
      Models::_detail::QuerySerialization q;
      q.Format.Type = options.OutputTextConfiguration.m_format;
      q.Format.JsonTextConfiguration = std::move(c);
      protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
    } else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Parquet) {
      Models::_detail::ParquetConfiguration c;
      Models::_detail::QuerySerialization q;
      q.Format.Type = options.OutputTextConfiguration.m_format;
      q.Format.ParquetTextConfiguration = std::move(c);
      protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
    } else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Arrow) {
      Models::_detail::ArrowConfiguration c;
      c.Schema = options.OutputTextConfiguration.m_schema;
      Models::_detail::QuerySerialization q;
      q.Format.Type = options.OutputTextConfiguration.m_format;
      q.Format.ArrowConfiguration = std::move(c);
      protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q);
    } else if (options.InputTextConfiguration.m_format.ToString().empty()) {
    } else {
      AZURE_UNREACHABLE_CODE();
    }

    protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
    if (m_customerProvidedKey.HasValue()) {
      protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
      protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
      protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
    }
    protocolLayerOptions.EncryptionScope = m_encryptionScope;
    protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
    protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
    protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
    protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
    protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
    auto response =
        _detail::BlobClient::Query(*m_pipeline, m_blobUrl, protocolLayerOptions, _internal::WithReplicaStatus(context));

    const auto statusCode = response.RawResponse->GetStatusCode();
    const auto reasonPhrase = response.RawResponse->GetReasonPhrase();
    const auto requestId = response.RawResponse->GetHeaders().count(_internal::HttpHeaderRequestId) != 0
                               ? response.RawResponse->GetHeaders().at(_internal::HttpHeaderRequestId)
                               : std::string();

    const auto clientRequestId = response.RawResponse->GetHeaders().count(_internal::HttpHeaderClientRequestId) != 0
                                     ? response.RawResponse->GetHeaders().at(_internal::HttpHeaderClientRequestId)
                                     : std::string();

    auto defaultErrorHandler = [statusCode, reasonPhrase, requestId, clientRequestId](BlobQueryError e) {
      if (e.IsFatal) {
        StorageException exception("Fatal " + e.Name + " at " + std::to_string(e.Position));
        exception.StatusCode = statusCode;
        exception.ReasonPhrase = reasonPhrase;
        exception.RequestId = requestId;
        exception.ClientRequestId = clientRequestId;
        exception.ErrorCode = e.Name;
        exception.Message = e.Description;

        throw exception;
      }
    };

    response.Value.BodyStream =
        std::make_unique<_detail::AvroStreamParser>(std::move(response.Value.BodyStream), options.ProgressHandler,
                                                    options.ErrorHandler ? options.ErrorHandler : defaultErrorHandler);
    return response;
  }
  */
}  // namespace Blobs
}  // namespace Storage
}  // namespace Azure

#endif
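// A minimal usage sketch of the client above (illustrative only, not part of the
// diff): `blobClient` is assumed to be an already-constructed TDBlockBlobClient,
// and the options type comes from the regular azure-storage-blobs API.
//
//   Azure::Storage::Blobs::UploadBlockBlobFromOptions opts;
//   opts.TransferOptions.Concurrency = 4;               // parallel StageBlock calls
//   opts.TransferOptions.ChunkSize = 8 * 1024 * 1024;   // force 8 MiB blocks
//   auto resp = blobClient.UploadFrom("/tmp/azut.bin", /*offset*/ 0, /*size*/ 4096,
//                                     opts, Azure::Core::Context());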

@ -0,0 +1,20 @@
if(TD_LINUX)
    aux_source_directory(. AZ_TEST_SRC)

    add_executable(azTest ${AZ_TEST_SRC})
    target_include_directories(azTest
        PUBLIC
        "${TD_SOURCE_DIR}/include/libs/azure"
        "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
    )

    target_link_libraries(azTest
        az
        gtest_main
    )
    enable_testing()
    add_test(
        NAME az_test
        COMMAND azTest
    )
endif(TD_LINUX)
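# Usage note (not part of the diff): with tests enabled at configure time, the
# case registered above can be run from the build tree as `ctest -R az_test`,
# or by executing the azTest binary directly.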

@ -0,0 +1,201 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <gtest/gtest.h>
#include <cstring>
#include <iostream>
#include <queue>

#include "az.h"

extern int8_t tsS3Enabled;

int32_t azInitEnv() {
  int32_t code = 0;

  extern int8_t tsS3EpNum;

  extern char tsS3Hostname[][TSDB_FQDN_LEN];
  extern char tsS3AccessKeyId[][TSDB_FQDN_LEN];
  extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN];
  extern char tsS3BucketName[TSDB_FQDN_LEN];

  /* TCS parameter format
  tsS3Hostname[0] = "<endpoint>/<account-name>.blob.core.windows.net";
  tsS3AccessKeyId[0] = "<access-key-id/account-name>";
  tsS3AccessKeySecret[0] = "<access-key-secret/account-key>";
  tsS3BucketName = "<bucket/container-name>";
  */

  const char *hostname = "<endpoint>/<account-name>.blob.core.windows.net";
  const char *accessKeyId = "<access-key-id/account-name>";
  const char *accessKeySecret = "<access-key-secret/account-key>";
  const char *bucketName = "<bucket/container-name>";

  if (hostname[0] != '<') {
    tstrncpy(&tsS3Hostname[0][0], hostname, TSDB_FQDN_LEN);
    tstrncpy(&tsS3AccessKeyId[0][0], accessKeyId, TSDB_FQDN_LEN);
    tstrncpy(&tsS3AccessKeySecret[0][0], accessKeySecret, TSDB_FQDN_LEN);
    tstrncpy(tsS3BucketName, bucketName, TSDB_FQDN_LEN);
  } else {
    const char *accountId = getenv("ablob_account_id");
    if (!accountId) {
      return -1;
    }

    const char *accountSecret = getenv("ablob_account_secret");
    if (!accountSecret) {
      return -1;
    }

    const char *containerName = getenv("ablob_container");
    if (!containerName) {
      return -1;
    }

    TAOS_STRCPY(&tsS3Hostname[0][0], accountId);
    TAOS_STRCAT(&tsS3Hostname[0][0], ".blob.core.windows.net");
    TAOS_STRCPY(&tsS3AccessKeyId[0][0], accountId);
    TAOS_STRCPY(&tsS3AccessKeySecret[0][0], accountSecret);
    TAOS_STRCPY(tsS3BucketName, containerName);
  }

  tstrncpy(tsTempDir, "/tmp/", PATH_MAX);

  tsS3Enabled = true;

  return code;
}
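// Note on configuration: with the placeholder literals above left unreplaced
// (they still start with '<'), azInitEnv() falls back to the environment, so the
// test is typically driven with something like (illustrative values):
//   export ablob_account_id=myaccount
//   export ablob_account_secret=<account-key>
//   export ablob_container=mycontainer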

// TEST(AzTest, DISABLED_InterfaceTest) {
TEST(AzTest, InterfaceTest) {
  int  code = 0;
  bool check = false;
  bool withcp = false;

  code = azInitEnv();
  if (code) {
    std::cout << "ablob env init failed with: " << code << std::endl;
    return;
  }

  GTEST_ASSERT_EQ(code, 0);
  GTEST_ASSERT_EQ(tsS3Enabled, 1);

  code = azBegin();
  GTEST_ASSERT_EQ(code, 0);

  code = azCheckCfg();
  GTEST_ASSERT_EQ(code, 0);
  const int size = 4096;
  char      data[size] = {0};
  for (int i = 0; i < size / 2; ++i) {
    data[i * 2 + 1] = 1;
  }

  const char object_name[] = "azut.bin";
  char       path[PATH_MAX] = {0};
  char       path_download[PATH_MAX] = {0};
  int        ds_len = strlen(TD_DIRSEP);
  int        tmp_len = strlen(tsTempDir);

  (void)snprintf(path, PATH_MAX, "%s", tsTempDir);
  if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP);
    (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", object_name);
  } else {
    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", object_name);
  }
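  // The branch above simply joins tsTempDir and object_name, appending the
  // platform directory separator only when tsTempDir does not already end with
  // one; e.g. "/tmp" becomes "/tmp/azut.bin" while "/tmp/" is used as-is.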

  tstrncpy(path_download, path, strlen(path) + 1);
  tstrncpy(path_download + strlen(path), ".download", strlen(".download") + 1);

  TdFilePtr fp = taosOpenFile(path, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_WRITE_THROUGH);
  GTEST_ASSERT_NE(fp, nullptr);

  int n = taosWriteFile(fp, data, size);
  GTEST_ASSERT_EQ(n, size);

  code = taosCloseFile(&fp);
  GTEST_ASSERT_EQ(code, 0);

  code = azPutObjectFromFileOffset(path, object_name, 0, size);
  GTEST_ASSERT_EQ(code, 0);

  uint8_t *pBlock = NULL;
  code = azGetObjectBlock(object_name, 0, size, check, &pBlock);
  GTEST_ASSERT_EQ(code, 0);

  for (int i = 0; i < size / 2; ++i) {
    GTEST_ASSERT_EQ(pBlock[i * 2], 0);
    GTEST_ASSERT_EQ(pBlock[i * 2 + 1], 1);
  }

  taosMemoryFree(pBlock);

  code = azGetObjectToFile(object_name, path_download);
  GTEST_ASSERT_EQ(code, 0);

  {
    TdFilePtr fp = taosOpenFile(path, TD_FILE_READ);
    GTEST_ASSERT_NE(fp, nullptr);

    (void)memset(data, 0, size);

    int64_t n = taosReadFile(fp, data, size);
    GTEST_ASSERT_EQ(n, size);

    code = taosCloseFile(&fp);
    GTEST_ASSERT_EQ(code, 0);

    for (int i = 0; i < size / 2; ++i) {
      GTEST_ASSERT_EQ(data[i * 2], 0);
      GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
    }
  }

  azDeleteObjectsByPrefix(object_name);
  // list object to check

  code = azPutObjectFromFile2(path, object_name, withcp);
  GTEST_ASSERT_EQ(code, 0);

  code = azGetObjectsByPrefix(object_name, tsTempDir);
  GTEST_ASSERT_EQ(code, 0);

  {
    TdFilePtr fp = taosOpenFile(path, TD_FILE_READ);
    GTEST_ASSERT_NE(fp, nullptr);

    (void)memset(data, 0, size);

    int64_t n = taosReadFile(fp, data, size);
    GTEST_ASSERT_EQ(n, size);

    code = taosCloseFile(&fp);
    GTEST_ASSERT_EQ(code, 0);

    for (int i = 0; i < size / 2; ++i) {
      GTEST_ASSERT_EQ(data[i * 2], 0);
      GTEST_ASSERT_EQ(data[i * 2 + 1], 1);
    }
  }

  const char *object_name_arr[] = {object_name};
  code = azDeleteObjects(object_name_arr, 1);
  GTEST_ASSERT_EQ(code, 0);

  azEnd();
}

@ -7,10 +7,10 @@ target_include_directories(
)

target_link_libraries(
    catalog
    PRIVATE os util transport qcom nodes
    catalog
    PRIVATE os util transport qcom nodes
)

# if(${BUILD_TEST})
# ADD_SUBDIRECTORY(test)
# ADD_SUBDIRECTORY(test)
# endif(${BUILD_TEST})

@ -12,5 +12,5 @@ target_link_libraries(
)

if(${BUILD_TEST})
    ADD_SUBDIRECTORY(test)
    ADD_SUBDIRECTORY(test)
endif(${BUILD_TEST})

@ -1,8 +1,8 @@
aux_source_directory(src CRYPT_SRC)

IF (TD_ENTERPRISE)
IF(TD_ENTERPRISE)
    LIST(APPEND CRYPT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/crypt/cryptImpl.c)
ENDIF ()
ENDIF()

add_library(crypt STATIC ${CRYPT_SRC})
target_include_directories(

@ -1,24 +1,25 @@
aux_source_directory(src EXECUTOR_SRC)

add_library(executor STATIC ${EXECUTOR_SRC})

if(${TD_DARWIN})
    target_compile_options(executor PRIVATE -Wno-error=deprecated-non-prototype)
    target_compile_options(executor PRIVATE -Wno-error=deprecated-non-prototype)
endif(${TD_DARWIN})

IF(${BUILD_WITH_ANALYSIS})
if(${BUILD_WITH_ANALYSIS})
    add_definitions(-DUSE_ANAL)
ENDIF()
endif()

target_link_libraries(executor
    PRIVATE os util common function parser planner qcom scalar nodes index wal tdb geometry
)
    PRIVATE os util common function parser planner qcom scalar nodes index wal tdb geometry
)

target_include_directories(
    executor
    PUBLIC "${TD_SOURCE_DIR}/include/libs/executor"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
    executor
    PUBLIC "${TD_SOURCE_DIR}/include/libs/executor"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

if(${BUILD_TEST})
    ADD_SUBDIRECTORY(test)
    ADD_SUBDIRECTORY(test)
endif(${BUILD_TEST})

@ -35,6 +35,7 @@ typedef struct SFillColInfo {
  SExprInfo* pExpr;
  bool       notFillCol;  // denote if this column needs fill operation
  SVariant   fillVal;
  bool       fillNull;
} SFillColInfo;

typedef struct SFillLinearInfo {

@ -125,12 +126,14 @@ void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struc
void          taosFillUpdateStartTimestampInfo(SFillInfo* pFillInfo, int64_t ts);
bool          taosFillNotStarted(const SFillInfo* pFillInfo);
SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr,
                                int32_t numOfNotFillCols, const struct SNodeListNode* val);
                                int32_t numOfNotFillCols, SExprInfo* pFillNullExpr, int32_t numOfFillNullExprs,
                                const struct SNodeListNode* val);
bool          taosFillHasMoreResults(struct SFillInfo* pFillInfo);

int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity,
                           SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId,
                           int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo);
int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t fillNullCols,
                           int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol,
                           int32_t slotId, int32_t order, const char* id, SExecTaskInfo* pTaskInfo,
                           SFillInfo** ppFillInfo);

void*   taosDestroyFillInfo(struct SFillInfo* pFillInfo);
int32_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, SSDataBlock* p, int32_t capacity);

@ -53,6 +53,7 @@ typedef struct SFillOperatorInfo {
  SExprInfo* pExprInfo;
  int32_t    numOfExpr;
  SExprSupp  noFillExprSupp;
  SExprSupp  fillNullExprSupp;
} SFillOperatorInfo;

static void destroyFillOperatorInfo(void* param);

@ -140,6 +141,15 @@ void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int
  code = projectApplyFunctions(pNoFillSupp->pExprInfo, pInfo->pRes, pBlock, pNoFillSupp->pCtx, pNoFillSupp->numOfExprs,
                               NULL);
  QUERY_CHECK_CODE(code, lino, _end);

  if (pInfo->fillNullExprSupp.pExprInfo) {
    pInfo->pRes->info.rows = 0;
    code = setInputDataBlock(&pInfo->fillNullExprSupp, pBlock, order, scanFlag, false);
    QUERY_CHECK_CODE(code, lino, _end);
    code = projectApplyFunctions(pInfo->fillNullExprSupp.pExprInfo, pInfo->pRes, pBlock, pInfo->fillNullExprSupp.pCtx,
                                 pInfo->fillNullExprSupp.numOfExprs, NULL);
  }

  pInfo->pRes->info.id.groupId = pBlock->info.id.groupId;

_end:

@ -327,6 +337,7 @@ void destroyFillOperatorInfo(void* param) {
  pInfo->pFinalRes = NULL;

  cleanupExprSupp(&pInfo->noFillExprSupp);
  cleanupExprSupp(&pInfo->fillNullExprSupp);

  taosMemoryFreeClear(pInfo->p);
  taosArrayDestroy(pInfo->matchInfo.pList);

@ -334,10 +345,11 @@ void destroyFillOperatorInfo(void* param) {
}

static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t numOfCols, SExprInfo* pNotFillExpr,
                            int32_t numOfNotFillCols, SNodeListNode* pValNode, STimeWindow win, int32_t capacity,
                            const char* id, SInterval* pInterval, int32_t fillType, int32_t order,
                            SExecTaskInfo* pTaskInfo) {
  SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode);
                            int32_t numOfNotFillCols, SExprInfo* pFillNullExpr, int32_t numOfFillNullExprs,
                            SNodeListNode* pValNode, STimeWindow win, int32_t capacity, const char* id,
                            SInterval* pInterval, int32_t fillType, int32_t order, SExecTaskInfo* pTaskInfo) {
  SFillColInfo* pColInfo =
      createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pFillNullExpr, numOfFillNullExprs, pValNode);
  if (!pColInfo) {
    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
    return terrno;

@ -348,8 +360,8 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
  // STimeWindow w = {0};
  // getInitialStartTimeWindow(pInterval, startKey, &w, order == TSDB_ORDER_ASC);
  pInfo->pFillInfo = NULL;
  int32_t code = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo,
                                    pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo);
  int32_t code = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, numOfFillNullExprs, capacity, pInterval,
                                    fillType, pColInfo, pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo);
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
    return code;

@ -455,6 +467,13 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi
  initExprSupp(pNoFillSupp, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs, &pTaskInfo->storageAPI.functionStore);
  QUERY_CHECK_CODE(code, lino, _error);

  code = createExprInfo(pPhyFillNode->pFillNullExprs, NULL, &pInfo->fillNullExprSupp.pExprInfo,
                        &pInfo->fillNullExprSupp.numOfExprs);
  QUERY_CHECK_CODE(code, lino, _error);
  code = initExprSupp(&pInfo->fillNullExprSupp, pInfo->fillNullExprSupp.pExprInfo, pInfo->fillNullExprSupp.numOfExprs,
                      &pTaskInfo->storageAPI.functionStore);
  QUERY_CHECK_CODE(code, lino, _error);

  SInterval* pInterval =
      QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType
          ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval

@ -482,7 +501,9 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi
  code = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols,
                             COL_MATCH_FROM_SLOT_ID, &pInfo->matchInfo);

  QUERY_CHECK_CODE(code, lino, _error);
  code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs,
                      pInfo->fillNullExprSupp.pExprInfo, pInfo->fillNullExprSupp.numOfExprs,
                      (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity,
                      pTaskInfo->id.str, pInterval, type, order, pTaskInfo);
  if (code != TSDB_CODE_SUCCESS) {

@ -1201,7 +1201,7 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod
  QUERY_CHECK_CODE(code, lino, _end);

  pFillSup->pAllColInfo = createFillColInfo(pFillExprInfo, pFillSup->numOfFillCols, noFillExprInfo, numOfNotFillCols,
                                            (const SNodeListNode*)(pPhyFillNode->pValues));
                                            NULL, 0, (const SNodeListNode*)(pPhyFillNode->pValues));
  if (pFillSup->pAllColInfo == NULL) {
    code = terrno;
    lino = __LINE__;

@ -39,22 +39,27 @@
static int32_t doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey);

static void setNotFillColumn(SFillInfo* pFillInfo, SColumnInfoData* pDstColInfo, int32_t rowIndex, int32_t colIdx) {
  SRowVal* p = NULL;
  if (pFillInfo->type == TSDB_FILL_NEXT) {
    p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->next : &pFillInfo->prev;
  SFillColInfo* pCol = &pFillInfo->pFillCol[colIdx];
  if (pCol->fillNull) {
    colDataSetNULL(pDstColInfo, rowIndex);
  } else {
    p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next;
  }
    SRowVal* p = NULL;
    if (pFillInfo->type == TSDB_FILL_NEXT) {
      p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->next : &pFillInfo->prev;
    } else {
      p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next;
    }

  SGroupKeys* pKey = taosArrayGet(p->pRowVal, colIdx);
  if (!pKey) {
    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
    T_LONG_JMP(pFillInfo->pTaskInfo->env, terrno);
  }
  int32_t code = doSetVal(pDstColInfo, rowIndex, pKey);
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
    T_LONG_JMP(pFillInfo->pTaskInfo->env, code);
    SGroupKeys* pKey = taosArrayGet(p->pRowVal, colIdx);
    if (!pKey) {
      qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
      T_LONG_JMP(pFillInfo->pTaskInfo->env, terrno);
    }
    int32_t code = doSetVal(pDstColInfo, rowIndex, pKey);
    if (code != TSDB_CODE_SUCCESS) {
      qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
      T_LONG_JMP(pFillInfo->pTaskInfo->env, code);
    }
  }
}
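(Net effect of the hunk above: setNotFillColumn() now checks the per-column
fillNull flag first and writes NULL via colDataSetNULL() for columns marked that
way; only columns without the flag keep the old behavior of copying the
previous/next row value.)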

@ -545,9 +550,10 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) {
  return pFillInfo->numOfRows - pFillInfo->index;
}

int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity,
                           SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t primaryTsSlotId,
                           int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo) {
int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t fillNullCols,
                           int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol,
                           int32_t primaryTsSlotId, int32_t order, const char* id, SExecTaskInfo* pTaskInfo,
                           SFillInfo** ppFillInfo) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  if (fillType == TSDB_FILL_NONE) {

@ -574,7 +580,7 @@ int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFi

  pFillInfo->type = fillType;
  pFillInfo->pFillCol = pCol;
  pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols;
  pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols + fillNullCols;
  pFillInfo->alloc = capacity;
  pFillInfo->id = id;
  pFillInfo->interval = *pInterval;

@ -761,10 +767,11 @@ _end:
int64_t getFillInfoStart(struct SFillInfo* pFillInfo) { return pFillInfo->start; }

SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr,
                                int32_t numOfNoFillExpr, const struct SNodeListNode* pValNode) {
                                int32_t numOfNoFillExpr, SExprInfo* pFillNullExpr, int32_t numOfFillNullExpr,
                                const struct SNodeListNode* pValNode) {
  int32_t       code = TSDB_CODE_SUCCESS;
  int32_t       lino = 0;
  SFillColInfo* pFillCol = taosMemoryCalloc(numOfFillExpr + numOfNoFillExpr, sizeof(SFillColInfo));
  SFillColInfo* pFillCol = taosMemoryCalloc(numOfFillExpr + numOfNoFillExpr + numOfFillNullExpr, sizeof(SFillColInfo));
  if (pFillCol == NULL) {
    return NULL;
  }

@ -797,6 +804,13 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn
    pFillCol[i + numOfFillExpr].notFillCol = true;
  }

  for (int32_t i = 0; i < numOfFillNullExpr; ++i) {
    SExprInfo* pExprInfo = &pFillNullExpr[i];
    pFillCol[i + numOfFillExpr + numOfNoFillExpr].pExpr = pExprInfo;
    pFillCol[i + numOfFillExpr + numOfNoFillExpr].notFillCol = true;
    pFillCol[i + numOfFillExpr + numOfNoFillExpr].fillNull = true;
  }

  return pFillCol;

_end:
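(Layout note: after this change the pFillCol array is partitioned as
[0, numOfFillExpr) fill columns, [numOfFillExpr, numOfFillExpr + numOfNoFillExpr)
no-fill columns, and a trailing fill-null segment whose entries carry both
notFillCol = true and fillNull = true, matching the colIdx-based lookup in
setNotFillColumn().)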

@ -1147,7 +1147,8 @@ int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN
  pInfo->fillType = convertFillType(pInterpPhyNode->fillMode);
  initResultSizeInfo(&pOperator->resultInfo, 4096);

  pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, NULL, 0, (SNodeListNode*)pInterpPhyNode->pFillValues);
  pInfo->pFillColInfo =
      createFillColInfo(pExprInfo, numOfExprs, NULL, 0, NULL, 0, (SNodeListNode*)pInterpPhyNode->pFillValues);
  QUERY_CHECK_NULL(pInfo->pFillColInfo, code, lino, _error, terrno);

  pInfo->pLinearInfo = NULL;

@ -5,115 +5,114 @@ add_library(function STATIC ${FUNCTION_SRC} ${FUNCTION_SRC_DETAIL})
target_include_directories(
    function
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/contrib/libuv/include"
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/contrib/libuv/include"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
    SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
    SET(LINK_JEMALLOC "")
ENDIF ()
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
    SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE()
    SET(LINK_JEMALLOC "")
ENDIF()

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(function jemalloc)
ENDIF ()
ENDIF()

target_link_libraries(
    function
    PRIVATE os
    PRIVATE util
    PRIVATE common
    PRIVATE nodes
    PRIVATE qcom
    PRIVATE scalar
    PRIVATE geometry
    PRIVATE transport
    PRIVATE os
    PRIVATE util
    PRIVATE common
    PRIVATE nodes
    PRIVATE qcom
    PRIVATE scalar
    PRIVATE geometry
    PRIVATE transport
    PUBLIC uv_a
)

add_executable(runUdf test/runUdf.c)
target_include_directories(
    runUdf
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/contrib/libuv/include"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
    runUdf
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/contrib/libuv/include"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(runUdf jemalloc)
ENDIF ()
ENDIF()

target_link_libraries(
    runUdf
    PUBLIC uv_a
    PRIVATE os util common nodes function ${LINK_JEMALLOC}
    runUdf
    PUBLIC uv_a
    PRIVATE os util common nodes function ${LINK_JEMALLOC}
)

add_library(udf1 STATIC MODULE test/udf1.c)
target_include_directories(
    udf1
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
    udf1
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(udf1 jemalloc)
ENDIF ()
ENDIF()

target_link_libraries(
    udf1 PUBLIC os ${LINK_JEMALLOC})


add_library(udf1_dup STATIC MODULE test/udf1_dup.c)
target_include_directories(
    udf1_dup
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
    udf1_dup
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(udf1_dup jemalloc)
ENDIF ()
ENDIF()

target_link_libraries(
    udf1_dup PUBLIC os ${LINK_JEMALLOC})

add_library(udf2 STATIC MODULE test/udf2.c)
target_include_directories(
    udf2
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
    udf2
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(udf2 jemalloc)
ENDIF ()
ENDIF()

target_link_libraries(
    udf2 PUBLIC os ${LINK_JEMALLOC}

@ -121,45 +120,44 @@ target_link_libraries(

add_library(udf2_dup STATIC MODULE test/udf2_dup.c)
target_include_directories(
    udf2_dup
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
    udf2_dup
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/client"
    "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(udf2_dup jemalloc)
ENDIF ()
ENDIF()

target_link_libraries(
    udf2_dup PUBLIC os ${LINK_JEMALLOC}
)

#SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin)
# SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin)
add_executable(udfd src/udfd.c)
target_include_directories(
    udfd
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/contrib/libuv/include"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/libs/transport"
    "${TD_SOURCE_DIR}/include/client"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
    udfd
    PUBLIC
    "${TD_SOURCE_DIR}/include/libs/function"
    "${TD_SOURCE_DIR}/contrib/libuv/include"
    "${TD_SOURCE_DIR}/include/util"
    "${TD_SOURCE_DIR}/include/common"
    "${TD_SOURCE_DIR}/include/libs/transport"
    "${TD_SOURCE_DIR}/include/client"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
IF(TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(udfd jemalloc)
ENDIF ()
ENDIF()

target_link_libraries(
    udfd
    PUBLIC uv_a
    PRIVATE os util common nodes function ${LINK_JEMALLOC}
)

)

@ -1,23 +1,22 @@
aux_source_directory(src INDEX_SRC)
add_library(index STATIC ${INDEX_SRC})
target_include_directories(
    index
    PUBLIC "${TD_SOURCE_DIR}/include/libs/index"
    PUBLIC "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"

    index
    PUBLIC "${TD_SOURCE_DIR}/include/libs/index"
    PUBLIC "${TD_SOURCE_DIR}/include/os"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
    index
    PUBLIC os
    PUBLIC util
    PUBLIC common
    PUBLIC nodes
    PUBLIC scalar
    PUBLIC function
    index
    PUBLIC os
    PUBLIC util
    PUBLIC common
    PUBLIC nodes
    PUBLIC scalar
    PUBLIC function
)

if (${BUILD_WITH_LUCENE})
if(${BUILD_WITH_LUCENE})
    target_include_directories(
        index
        PUBLIC "${TD_SOURCE_DIR}/deps/lucene/include"

@ -29,12 +28,10 @@ if (${BUILD_WITH_LUCENE})
    )
endif(${BUILD_WITH_LUCENE})

if (${BUILD_WITH_INVERTEDINDEX})
    add_definitions(-DUSE_INVERTED_INDEX)
if(${BUILD_WITH_INVERTEDINDEX})
    add_definitions(-DUSE_INVERTED_INDEX)
endif(${BUILD_WITH_INVERTEDINDEX})


if (${BUILD_TEST})
    add_subdirectory(test)
if(${BUILD_TEST})
    add_subdirectory(test)
endif(${BUILD_TEST})


@ -9,5 +9,5 @@ target_include_directories(
target_link_libraries(monitor os util common qcom transport monitorfw)

if(${BUILD_TEST})
    add_subdirectory(test)
    add_subdirectory(test)
endif(${BUILD_TEST})

@ -5,7 +5,9 @@ target_include_directories(
    PUBLIC "${TD_SOURCE_DIR}/include/libs/monitorfw"
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

if(${TD_DARWIN})
    target_compile_options(monitorfw PRIVATE -Wno-error=deprecated-pragma)
    target_compile_options(monitorfw PRIVATE -Wno-error=deprecated-pragma)
endif(${TD_DARWIN})

target_link_libraries(monitorfw os util common transport)

@ -642,6 +642,7 @@ static int32_t logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) {
  CLONE_NODE_FIELD(pWStartTs);
  CLONE_NODE_FIELD(pValues);
  COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow));
  CLONE_NODE_LIST_FIELD(pFillNullExprs);
  return TSDB_CODE_SUCCESS;
}

@ -2887,6 +2887,7 @@ static const char* jkFillPhysiPlanWStartTs = "WStartTs";
static const char* jkFillPhysiPlanValues = "Values";
static const char* jkFillPhysiPlanStartTime = "StartTime";
static const char* jkFillPhysiPlanEndTime = "EndTime";
static const char* jkFillPhysiPlanFillNullExprs = "FillNullExprs";

static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) {
  const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj;

@ -2913,6 +2914,9 @@ static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanEndTime, pNode->timeRange.ekey);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = nodeListToJson(pJson, jkFillPhysiPlanFillNullExprs, pNode->pFillNullExprs);
  }

  return code;
}

@ -2942,6 +2946,9 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBigIntValue(pJson, jkFillPhysiPlanEndTime, &pNode->timeRange.ekey);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = jsonToNodeList(pJson, jkFillPhysiPlanFillNullExprs, &pNode->pFillNullExprs);
  }

  return code;
}