diff --git a/.gitignore b/.gitignore index d155512039..b849df0d06 100644 --- a/.gitignore +++ b/.gitignore @@ -121,6 +121,7 @@ TAGS contrib/* !contrib/CMakeLists.txt !contrib/test +!contrib/azure-cmake sql debug*/ .env diff --git a/CMakeLists.txt b/CMakeLists.txt index ac368c29fe..db5b89db3d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,13 +1,13 @@ cmake_minimum_required(VERSION 3.0) project( - TDengine - VERSION 3.0 - DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)" + TDengine + VERSION 3.0 + DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)" ) -if (NOT DEFINED TD_SOURCE_DIR) - set( TD_SOURCE_DIR ${PROJECT_SOURCE_DIR} ) +if(NOT DEFINED TD_SOURCE_DIR) + set(TD_SOURCE_DIR ${PROJECT_SOURCE_DIR}) endif() SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR}) @@ -15,13 +15,11 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR}) set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake") set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib") - include(${TD_SUPPORT_DIR}/cmake.platform) include(${TD_SUPPORT_DIR}/cmake.define) include(${TD_SUPPORT_DIR}/cmake.options) include(${TD_SUPPORT_DIR}/cmake.version) - # contrib add_subdirectory(contrib) @@ -33,8 +31,8 @@ target_include_directories(api INTERFACE "include/client") # src if(${BUILD_TEST}) - include(CTest) - enable_testing() + include(CTest) + enable_testing() endif(${BUILD_TEST}) add_subdirectory(source) @@ -44,5 +42,5 @@ add_subdirectory(examples/c) add_subdirectory(tests) include(${TD_SUPPORT_DIR}/cmake.install) -# docs +# docs add_subdirectory(docs/doxgen) diff --git a/cmake/azure_CMakeLists.txt.in b/cmake/azure_CMakeLists.txt.in new file mode 100644 index 0000000000..5aa32b70e5 --- /dev/null +++ b/cmake/azure_CMakeLists.txt.in @@ -0,0 +1,15 @@ +# azure +ExternalProject_Add(azure + URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz + URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1" + #BUILD_IN_SOURCE TRUE + #BUILD_ALWAYS 1 + #UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/cmake/cmake.define b/cmake/cmake.define index 802c36efbf..8b762011a4 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE FALSE) set(TD_BUILD_TAOSA_INTERNAL FALSE) -#set output directory +# set output directory SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib) SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin) SET(TD_TESTS_OUTPUT_DIR ${PROJECT_BINARY_DIR}/test) @@ -12,170 +12,178 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR}) MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH}) MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH}) -if (NOT DEFINED TD_GRANT) - SET(TD_GRANT FALSE) -endif() +IF(NOT DEFINED TD_GRANT) + SET(TD_GRANT FALSE) +ENDIF() -IF (NOT DEFINED BUILD_WITH_RAND_ERR) - SET(BUILD_WITH_RAND_ERR FALSE) -ELSE () - SET(BUILD_WITH_RAND_ERR TRUE) -endif() +IF(NOT DEFINED BUILD_WITH_RAND_ERR) + SET(BUILD_WITH_RAND_ERR FALSE) +ELSE() + SET(BUILD_WITH_RAND_ERR TRUE) +ENDIF() -IF ("${WEBSOCKET}" MATCHES "true") +IF("${WEBSOCKET}" MATCHES "true") SET(TD_WEBSOCKET TRUE) MESSAGE("Enable websocket") 
ADD_DEFINITIONS(-DWEBSOCKET) -ELSE () +ELSE() SET(TD_WEBSOCKET FALSE) -ENDIF () +ENDIF() -IF ("${BUILD_HTTP}" STREQUAL "") - IF (TD_LINUX) - IF (TD_ARM_32) - SET(TD_BUILD_HTTP TRUE) - ELSE () - SET(TD_BUILD_HTTP TRUE) - ENDIF () - ELSEIF (TD_DARWIN) +IF("${BUILD_HTTP}" STREQUAL "") + IF(TD_LINUX) + IF(TD_ARM_32) + SET(TD_BUILD_HTTP TRUE) + ELSE() + SET(TD_BUILD_HTTP TRUE) + ENDIF() + ELSEIF(TD_DARWIN) + SET(TD_BUILD_HTTP TRUE) + ELSE() + SET(TD_BUILD_HTTP TRUE) + ENDIF() +ELSEIF(${BUILD_HTTP} MATCHES "false") + SET(TD_BUILD_HTTP FALSE) +ELSEIF(${BUILD_HTTP} MATCHES "true") SET(TD_BUILD_HTTP TRUE) - ELSE () +ELSEIF(${BUILD_HTTP} MATCHES "internal") + SET(TD_BUILD_HTTP FALSE) + SET(TD_BUILD_TAOSA_INTERNAL TRUE) +ELSE() SET(TD_BUILD_HTTP TRUE) - ENDIF () -ELSEIF (${BUILD_HTTP} MATCHES "false") - SET(TD_BUILD_HTTP FALSE) -ELSEIF (${BUILD_HTTP} MATCHES "true") - SET(TD_BUILD_HTTP TRUE) -ELSEIF (${BUILD_HTTP} MATCHES "internal") - SET(TD_BUILD_HTTP FALSE) - SET(TD_BUILD_TAOSA_INTERNAL TRUE) -ELSE () - SET(TD_BUILD_HTTP TRUE) -ENDIF () +ENDIF() -IF (TD_BUILD_HTTP) - ADD_DEFINITIONS(-DHTTP_EMBEDDED) -ENDIF () +IF(TD_BUILD_HTTP) + ADD_DEFINITIONS(-DHTTP_EMBEDDED) +ENDIF() -IF ("${BUILD_TOOLS}" STREQUAL "") - IF (TD_LINUX) - IF (TD_ARM_32) - SET(BUILD_TOOLS "false") - ELSEIF (TD_ARM_64) - SET(BUILD_TOOLS "false") - ELSE () - SET(BUILD_TOOLS "false") - ENDIF () - ELSEIF (TD_DARWIN) - SET(BUILD_TOOLS "false") - ELSE () - SET(BUILD_TOOLS "false") - ENDIF () -ENDIF () +IF("${BUILD_TOOLS}" STREQUAL "") + IF(TD_LINUX) + IF(TD_ARM_32) + SET(BUILD_TOOLS "false") + ELSEIF(TD_ARM_64) + SET(BUILD_TOOLS "false") + ELSE() + SET(BUILD_TOOLS "false") + ENDIF() + ELSEIF(TD_DARWIN) + SET(BUILD_TOOLS "false") + ELSE() + SET(BUILD_TOOLS "false") + ENDIF() +ENDIF() -IF ("${BUILD_TOOLS}" MATCHES "false") +IF("${BUILD_TOOLS}" MATCHES "false") MESSAGE("${Yellow} Will _not_ build taos_tools! ${ColourReset}") SET(TD_TAOS_TOOLS FALSE) -ELSE () +ELSE() MESSAGE("") MESSAGE("${Green} Will build taos_tools! ${ColourReset}") MESSAGE("") SET(TD_TAOS_TOOLS TRUE) -ENDIF () +ENDIF() -IF (${TD_WINDOWS}) +IF(${TD_WINDOWS}) SET(TAOS_LIB taos_static) -ELSE () +ELSE() SET(TAOS_LIB taos) -ENDIF () +ENDIF() # build TSZ by default -IF ("${TSZ_ENABLED}" MATCHES "false") - set(VAR_TSZ "" CACHE INTERNAL "global variant empty" ) +IF("${TSZ_ENABLED}" MATCHES "false") + set(VAR_TSZ "" CACHE INTERNAL "global variant empty") ELSE() - # define add - MESSAGE(STATUS "build with TSZ enabled") - ADD_DEFINITIONS(-DTD_TSZ) - set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" ) + # define add + MESSAGE(STATUS "build with TSZ enabled") + ADD_DEFINITIONS(-DTD_TSZ) + set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz") ENDIF() # force set all platform to JEMALLOC_ENABLED = false SET(JEMALLOC_ENABLED OFF) -IF (TD_WINDOWS) - MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}") - IF (${CMAKE_BUILD_TYPE} MATCHES "Release") - MESSAGE("${Green} will build Release version! ${ColourReset}") - SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD") - ELSE () - MESSAGE("${Green} will build Debug version! ${ColourReset}") - SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") +IF(TD_WINDOWS) + MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}") + + IF(${CMAKE_BUILD_TYPE} MATCHES "Release") + MESSAGE("${Green} will build Release version! ${ColourReset}") + SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD") + + ELSE() + MESSAGE("${Green} will build Debug version! 
${ColourReset}") + SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") ENDIF() SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO") + # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900)) - # SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18") # ENDIF () - IF (CMAKE_DEPFILE_FLAGS_C) + IF(CMAKE_DEPFILE_FLAGS_C) SET(CMAKE_DEPFILE_FLAGS_C "") - ENDIF () - IF (CMAKE_DEPFILE_FLAGS_CXX) + ENDIF() + + IF(CMAKE_DEPFILE_FLAGS_CXX) SET(CMAKE_DEPFILE_FLAGS_CXX "") - ENDIF () - IF (CMAKE_C_FLAGS_DEBUG) + ENDIF() + + IF(CMAKE_C_FLAGS_DEBUG) SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE) - ENDIF () - IF (CMAKE_CXX_FLAGS_DEBUG) + ENDIF() + + IF(CMAKE_CXX_FLAGS_DEBUG) SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE) - ENDIF () + ENDIF() SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}") -ELSE () - IF (${TD_DARWIN}) +ELSE() + IF(${TD_DARWIN}) set(CMAKE_MACOSX_RPATH 0) - ENDIF () - IF (${COVER} MATCHES "true") + ENDIF() + + IF(${COVER} MATCHES "true") MESSAGE(STATUS "Test coverage mode, add extra flags") SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage") - SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage") + SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage") SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}") - ENDIF () + ENDIF() # disable all assert - IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true")) + IF((${DISABLE_ASSERT} MATCHES "true") OR(${DISABLE_ASSERTS} MATCHES "true")) ADD_DEFINITIONS(-DDISABLE_ASSERT) MESSAGE(STATUS "Disable all asserts") ENDIF() INCLUDE(CheckCCompilerFlag) - IF (TD_ARM_64 OR TD_ARM_32) + + IF(TD_ARM_64 OR TD_ARM_32) SET(COMPILER_SUPPORT_SSE42 false) - ELSEIF (("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR ("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang")) + ELSEIF(("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang")) SET(COMPILER_SUPPORT_SSE42 true) MESSAGE(STATUS "Always enable sse4.2 for Clang/AppleClang") ELSE() CHECK_C_COMPILER_FLAG("-msse4.2" COMPILER_SUPPORT_SSE42) ENDIF() - IF (TD_ARM_64 OR TD_ARM_32) - SET(COMPILER_SUPPORT_FMA false) - SET(COMPILER_SUPPORT_AVX false) - SET(COMPILER_SUPPORT_AVX2 false) - SET(COMPILER_SUPPORT_AVX512F false) - SET(COMPILER_SUPPORT_AVX512BMI false) - SET(COMPILER_SUPPORT_AVX512VL false) + IF(TD_ARM_64 OR TD_ARM_32) + SET(COMPILER_SUPPORT_FMA false) + SET(COMPILER_SUPPORT_AVX false) + SET(COMPILER_SUPPORT_AVX2 false) + SET(COMPILER_SUPPORT_AVX512F false) + SET(COMPILER_SUPPORT_AVX512BMI false) + SET(COMPILER_SUPPORT_AVX512VL false) ELSE() - CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA) - CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F) - CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI) - CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL) + CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA) + CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F) + CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI) + CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL) - INCLUDE(CheckCSourceRuns) - SET(CMAKE_REQUIRED_FLAGS "-mavx") - check_c_source_runs(" + INCLUDE(CheckCSourceRuns) + SET(CMAKE_REQUIRED_FLAGS "-mavx") + check_c_source_runs(" #include <immintrin.h> int main() { __m256d a, b, c; @@ -185,7 +193,7 @@ ELSE () c = _mm256_add_pd(a, b); _mm256_storeu_pd(buf, c); for
(int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { if (buf[i] != 0) { return 1; } } @@ -193,8 +201,8 @@ ELSE () } " COMPILER_SUPPORT_AVX) - SET(CMAKE_REQUIRED_FLAGS "-mavx2") - check_c_source_runs(" + SET(CMAKE_REQUIRED_FLAGS "-mavx2") + check_c_source_runs(" #include <immintrin.h> int main() { __m256i a, b, c; @@ -204,7 +212,7 @@ ELSE () c = _mm256_and_si256(a, b); _mm256_storeu_si256((__m256i *)buf, c); for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { if (buf[i] != 0) { return 1; } } @@ -213,40 +221,42 @@ ELSE () " COMPILER_SUPPORT_AVX2) ENDIF() - IF (COMPILER_SUPPORT_SSE42) + IF(COMPILER_SUPPORT_SSE42) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2") ENDIF() - IF ("${SIMD_SUPPORT}" MATCHES "true") - IF (COMPILER_SUPPORT_FMA) + IF("${SIMD_SUPPORT}" MATCHES "true") + IF(COMPILER_SUPPORT_FMA) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma") MESSAGE(STATUS "FMA instructions is ACTIVATED") ENDIF() - IF (COMPILER_SUPPORT_AVX) + + IF(COMPILER_SUPPORT_AVX) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx") MESSAGE(STATUS "AVX instructions is ACTIVATED") ENDIF() - IF (COMPILER_SUPPORT_AVX2) + + IF(COMPILER_SUPPORT_AVX2) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2") MESSAGE(STATUS "AVX2 instructions is ACTIVATED") ENDIF() ENDIF() - IF ("${SIMD_AVX512_SUPPORT}" MATCHES "true") - IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI) + IF("${SIMD_AVX512_SUPPORT}" MATCHES "true") + IF(COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi") MESSAGE(STATUS "avx512f/avx512bmi enabled by compiler") ENDIF() - IF (COMPILER_SUPPORT_AVX512VL) + IF(COMPILER_SUPPORT_AVX512VL) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl") - MESSAGE(STATUS "avx512vl enabled by compiler") + MESSAGE(STATUS "avx512vl enabled by compiler") ENDIF() ENDIF() @@ -254,16 +264,17 @@ ELSE () SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - IF (${BUILD_SANITIZER}) + IF(${BUILD_SANITIZER}) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") - MESSAGE(STATUS "Compile with Address Sanitizer!") - ELSEIF (${BUILD_RELEASE}) + + # SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") +
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") + MESSAGE(STATUS "Compile with Address Sanitizer!") + ELSEIF(${BUILD_RELEASE}) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}") - ELSE () + ELSE() SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - ENDIF () - -ENDIF () + ENDIF() +ENDIF() diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index ef6ed4af1d..13826a1a74 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 9a6a5329ae..9bbda8309f 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 2a38bf74c7..eae697560b 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -10,39 +10,36 @@ if(${BUILD_WITH_S3}) file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/) elseif(${BUILD_WITH_COS}) + set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") + configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) -set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") -configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + endif(${BUILD_WITH_COS}) -if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) - cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) -endif(${BUILD_WITH_COS}) - -configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") -execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") + execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") -execute_process(COMMAND "${CMAKE_COMMAND}" --build . + execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") -set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") -configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) + set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") + configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) -if(${BUILD_WITH_COS}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) -endif(${BUILD_WITH_COS}) + if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) + endif(${BUILD_WITH_COS}) -configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") -execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") + execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") -execute_process(COMMAND "${CMAKE_COMMAND}" --build . + execute_process(COMMAND "${CMAKE_COMMAND}" --build . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") - endif() - set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -59,7 +56,7 @@ endif() # taosadapter if(${BUILD_HTTP}) MESSAGE("BUILD_HTTP is on") -else () +else() MESSAGE("BUILD_HTTP is off, use taosAdapter") cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() @@ -110,19 +107,18 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) # xz -#cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) -#lzma2 +# lzma2 cat("${TD_SUPPORT_DIR}/lzma_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - -if (${BUILD_CONTRIB}) +if(${BUILD_CONTRIB}) if(${BUILD_WITH_ROCKSDB}) cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_ROCKSDB) endif() else() - if (NOT ${TD_LINUX}) + if(NOT ${TD_LINUX}) if(${BUILD_WITH_ROCKSDB}) cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_ROCKSDB) @@ -134,9 +130,9 @@ else() endif() endif() -#cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +# cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) -#libuv +# libuv if(${BUILD_WITH_UV}) cat("${TD_SUPPORT_DIR}/libuv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_UV}) @@ -152,17 +148,17 @@ if(${BUILD_WITH_S3}) cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_S3) # cos elseif(${BUILD_WITH_COS}) - #cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - #cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - #cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - #cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + # cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + # cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + # cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + # cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" 
${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) - endif() # crashdump @@ -191,9 +187,9 @@ endif() # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . - WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") execute_process(COMMAND "${CMAKE_COMMAND}" --build . - WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") # ================================================================================================ # Build @@ -206,25 +202,27 @@ if(${BUILD_TEST}) gtest PUBLIC $ ) + if(${TD_WINDOWS}) target_include_directories( gtest PUBLIC $ ) endif(${TD_WINDOWS}) + if(${TD_LINUX}) target_include_directories( gtest PUBLIC $ ) endif(${TD_LINUX}) + if(${TD_DARWIN}) target_include_directories( gtest PUBLIC $ ) endif(${TD_DARWIN}) - endif(${BUILD_TEST}) # cJson @@ -236,15 +234,16 @@ option(CJSON_BUILD_SHARED_LIBS "Overrides BUILD_SHARED_LIBS if CJSON_OVERRIDE_BU add_subdirectory(cJson EXCLUDE_FROM_ALL) target_include_directories( cjson + # see https://stackoverflow.com/questions/25676277/cmake-target-include-directories-prints-an-error-when-i-try-to-add-the-source PUBLIC $ ) unset(CMAKE_PROJECT_INCLUDE_BEFORE) # xml2 -#if(${BUILD_WITH_S3}) -# add_subdirectory(xml2 EXCLUDE_FROM_ALL) -#endif() +# if(${BUILD_WITH_S3}) +# add_subdirectory(xml2 EXCLUDE_FROM_ALL) +# endif() # lz4 add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL) @@ -255,10 +254,12 @@ target_include_directories( # zlib set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in") + if(${TD_DARWIN}) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=deprecated-non-prototype") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-non-prototype") endif(${TD_DARWIN}) + add_subdirectory(zlib EXCLUDE_FROM_ALL) target_include_directories( zlibstatic @@ -274,9 +275,9 @@ unset(CMAKE_PROJECT_INCLUDE_BEFORE) # add_subdirectory(xz EXCLUDE_FROM_ALL) # target_include_directories( -# xz -# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz -# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz +# xz +# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz +# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz # ) # leveldb @@ -291,24 +292,27 @@ endif(${BUILD_WITH_LEVELDB}) # rocksdb # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev -if (${BUILD_WITH_UV}) +if(${BUILD_WITH_UV}) if(${TD_LINUX}) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}") - IF ("${CMAKE_BUILD_TYPE}" STREQUAL "") + + if("${CMAKE_BUILD_TYPE}" STREQUAL "") SET(CMAKE_BUILD_TYPE Release) endif() endif(${TD_LINUX}) -endif (${BUILD_WITH_UV}) +endif(${BUILD_WITH_UV}) -if (${BUILD_WITH_ROCKSDB}) - if (${BUILD_CONTRIB}) +if(${BUILD_WITH_ROCKSDB}) + if(${BUILD_CONTRIB}) if(${TD_LINUX}) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result") - if ("${CMAKE_BUILD_TYPE}" STREQUAL "") + + if("${CMAKE_BUILD_TYPE}" STREQUAL "") SET(CMAKE_BUILD_TYPE Release) endif() endif(${TD_LINUX}) + MESSAGE(STATUS "ROCKSDB CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS}) MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS}) @@ -316,22 +320,23 @@ if (${BUILD_WITH_ROCKSDB}) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} 
-Wno-error=maybe-uninitialized") endif(${TD_DARWIN}) - if (${TD_DARWIN_ARM64}) + if(${TD_DARWIN_ARM64}) set(HAS_ARMV8_CRC true) endif(${TD_DARWIN_ARM64}) - if (${TD_WINDOWS}) + if(${TD_WINDOWS}) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17") SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819") option(WITH_JNI "" OFF) + if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd") - message("Rocksdb build runtime lib use /MT or /MTd") - option(WITH_MD_LIBRARY "build with MD" OFF) + message("Rocksdb build runtime lib use /MT or /MTd") + option(WITH_MD_LIBRARY "build with MD" OFF) endif() + set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) endif(${TD_WINDOWS}) - if(${TD_DARWIN}) option(HAVE_THREAD_LOCAL "" OFF) option(WITH_IOSTATS_CONTEXT "" OFF) @@ -357,30 +362,32 @@ if (${BUILD_WITH_ROCKSDB}) PUBLIC $ ) else() - if (NOT ${TD_LINUX}) + if(NOT ${TD_LINUX}) MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS}) MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS}) + if(${TD_DARWIN}) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized") SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=maybe-uninitialized") endif(${TD_DARWIN}) - if (${TD_DARWIN_ARM64}) + if(${TD_DARWIN_ARM64}) set(HAS_ARMV8_CRC true) endif(${TD_DARWIN_ARM64}) - if (${TD_WINDOWS}) + if(${TD_WINDOWS}) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17") SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819") option(WITH_JNI "" OFF) + if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd") message("Rocksdb build runtime lib use /MT or /MTd") option(WITH_MD_LIBRARY "build with MD" OFF) endif() + set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) endif(${TD_WINDOWS}) - if(${TD_DARWIN}) option(HAVE_THREAD_LOCAL "" OFF) option(WITH_IOSTATS_CONTEXT "" OFF) @@ -406,44 +413,44 @@ if (${BUILD_WITH_ROCKSDB}) PUBLIC $ ) endif() - endif() endif() if(${BUILD_WITH_S3}) INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.2/include) MESSAGE("build with s3: ${BUILD_WITH_S3}") + # cos elseif(${BUILD_WITH_COS}) - if(${TD_LINUX}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) - #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) - option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include) - #MESSAGE("$ENV{HOME}/.cos-local.1/include") + if(${TD_LINUX}) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) - set(CMAKE_BUILD_TYPE Release) - set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) - set(CMAKE_PROJECT_NAME cos_c_sdk) + # ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) + option(ENABLE_TEST "Enable the tests" OFF) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include) - add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) - target_include_directories( - cos_c_sdk - PUBLIC $ - ) + # MESSAGE("$ENV{HOME}/.cos-local.1/include") + set(CMAKE_BUILD_TYPE Release) + set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) + set(CMAKE_PROJECT_NAME cos_c_sdk) - set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) - else() - - endif(${TD_LINUX}) + add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) + target_include_directories( + cos_c_sdk + PUBLIC $ + ) + set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) + else() + endif(${TD_LINUX}) endif() # pthread if(${BUILD_PTHREAD}) - if ("${CMAKE_BUILD_TYPE}" STREQUAL "") - SET(CMAKE_BUILD_TYPE Release) + if("${CMAKE_BUILD_TYPE}" STREQUAL "") + SET(CMAKE_BUILD_TYPE Release) endif() + add_definitions(-DPTW32_STATIC_LIB) add_subdirectory(pthread EXCLUDE_FROM_ALL) 
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread) @@ -451,16 +458,15 @@ if(${BUILD_PTHREAD}) target_link_libraries(pthread INTERFACE libpthreadVC3) endif() - # jemalloc if(${JEMALLOC_ENABLED}) include(ExternalProject) ExternalProject_Add(jemalloc - PREFIX "jemalloc" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc - BUILD_IN_SOURCE 1 - CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls - BUILD_COMMAND ${MAKE} + PREFIX "jemalloc" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls + BUILD_COMMAND ${MAKE} ) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) endif() @@ -514,12 +520,13 @@ endif(${BUILD_WCWIDTH}) # LIBUV if(${BUILD_WITH_UV}) - if (TD_WINDOWS) + if(TD_WINDOWS) # There is no GetHostNameW function on win7. file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT) string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}") file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}") - endif () + endif() + add_subdirectory(libuv EXCLUDE_FROM_ALL) endif(${BUILD_WITH_UV}) @@ -535,6 +542,7 @@ if(${BUILD_WITH_SQLITE}) INTERFACE m INTERFACE pthread ) + if(NOT TD_WINDOWS) target_link_libraries(sqlite INTERFACE dl @@ -545,36 +553,38 @@ endif(${BUILD_WITH_SQLITE}) # addr2line if(${BUILD_ADDR2LINE}) if(NOT ${TD_WINDOWS}) - check_include_file( "sys/types.h" HAVE_SYS_TYPES_H) - check_include_file( "sys/stat.h" HAVE_SYS_STAT_H ) - check_include_file( "inttypes.h" HAVE_INTTYPES_H ) - check_include_file( "stddef.h" HAVE_STDDEF_H ) - check_include_file( "stdlib.h" HAVE_STDLIB_H ) - check_include_file( "string.h" HAVE_STRING_H ) - check_include_file( "memory.h" HAVE_MEMORY_H ) - check_include_file( "strings.h" HAVE_STRINGS_H ) - check_include_file( "stdint.h" HAVE_STDINT_H ) - check_include_file( "unistd.h" HAVE_UNISTD_H ) - check_include_file( "sgidefs.h" HAVE_SGIDEFS_H ) - check_include_file( "stdafx.h" HAVE_STDAFX_H ) - check_include_file( "elf.h" HAVE_ELF_H ) - check_include_file( "libelf.h" HAVE_LIBELF_H ) - check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H) - check_include_file( "alloca.h" HAVE_ALLOCA_H ) - check_include_file( "elfaccess.h" HAVE_ELFACCESS_H) - check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H ) - check_include_file( "sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H) - check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H) - check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H ) + check_include_file("sys/types.h" HAVE_SYS_TYPES_H) + check_include_file("sys/stat.h" HAVE_SYS_STAT_H) + check_include_file("inttypes.h" HAVE_INTTYPES_H) + check_include_file("stddef.h" HAVE_STDDEF_H) + check_include_file("stdlib.h" HAVE_STDLIB_H) + check_include_file("string.h" HAVE_STRING_H) + check_include_file("memory.h" HAVE_MEMORY_H) + check_include_file("strings.h" HAVE_STRINGS_H) + check_include_file("stdint.h" HAVE_STDINT_H) + check_include_file("unistd.h" HAVE_UNISTD_H) + check_include_file("sgidefs.h" HAVE_SGIDEFS_H) + check_include_file("stdafx.h" HAVE_STDAFX_H) + check_include_file("elf.h" HAVE_ELF_H) + check_include_file("libelf.h" HAVE_LIBELF_H) + check_include_file("libelf/libelf.h" HAVE_LIBELF_LIBELF_H) + check_include_file("alloca.h" HAVE_ALLOCA_H) + check_include_file("elfaccess.h" HAVE_ELFACCESS_H) + check_include_file("sys/elf_386.h" 
HAVE_SYS_ELF_386_H) + check_include_file("sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H) + check_include_file("sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H) + check_include_file("sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H) set(VERSION 0.3.1) set(PACKAGE_VERSION "\"${VERSION}\"") configure_file(libdwarf/cmake/config.h.cmake config.h) file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c") add_library(libdwarf STATIC ${LIBDWARF_SOURCES}) set_target_properties(libdwarf PROPERTIES OUTPUT_NAME "libdwarf") + if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H) target_link_libraries(libdwarf PUBLIC libelf) endif() + target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR}) file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT) string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}") @@ -583,7 +593,7 @@ if(${BUILD_ADDR2LINE}) file(WRITE "addr2line/addr2line.c" "${ADDR2LINE_CONTENT}") add_library(addr2line STATIC "addr2line/addr2line.c") target_link_libraries(addr2line PUBLIC libdwarf dl z) - target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf" ) + target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf") endif(NOT ${TD_WINDOWS}) endif(${BUILD_ADDR2LINE}) @@ -592,31 +602,41 @@ if(${BUILD_GEOS}) if(${TD_LINUX}) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}") - if ("${CMAKE_BUILD_TYPE}" STREQUAL "") + + if("${CMAKE_BUILD_TYPE}" STREQUAL "") SET(CMAKE_BUILD_TYPE Release) endif() endif(${TD_LINUX}) + option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF) add_subdirectory(geos EXCLUDE_FROM_ALL) - if (${TD_WINDOWS}) + + if(${TD_WINDOWS}) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - else () + else() unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD endif(${TD_WINDOWS}) + target_include_directories( geos_c PUBLIC $ ) endif(${BUILD_GEOS}) -if (${BUILD_PCRE2}) +if(${BUILD_PCRE2}) add_subdirectory(pcre2 EXCLUDE_FROM_ALL) endif(${BUILD_PCRE2}) +if(${TD_LINUX} AND ${BUILD_WITH_S3}) + add_subdirectory(azure-cmake EXCLUDE_FROM_ALL) +endif() + # ================================================================================================ # Build test # ================================================================================================ +MESSAGE("build with dependency tests: ${BUILD_DEPENDENCY_TESTS}") + if(${BUILD_DEPENDENCY_TESTS}) add_subdirectory(test EXCLUDE_FROM_ALL) endif(${BUILD_DEPENDENCY_TESTS}) diff --git a/contrib/azure-cmake/CMakeLists.txt b/contrib/azure-cmake/CMakeLists.txt new file mode 100644 index 0000000000..aaa5617860 --- /dev/null +++ b/contrib/azure-cmake/CMakeLists.txt @@ -0,0 +1,73 @@ +# lib_azure_sdk +set(AZURE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1") +set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk") + +file(GLOB AZURE_SDK_SRC + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/credentials/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp" + 
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp" +) + +file(GLOB AZURE_SDK_UNIFIED_SRC + ${AZURE_SDK_SRC} +) + +set(AZURE_SDK_INCLUDES + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/inc/" + "${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/inc/" + "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/inc/" + "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/" +) + +add_library(_azure_sdk STATIC ${AZURE_SDK_UNIFIED_SRC}) +target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER) + +target_include_directories( + _azure_sdk + PUBLIC "$ENV{HOME}/.cos-local.2/include" +) + +find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) +find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + +# find_library(CURL_LIBRARY curl) +# find_library(XML2_LIBRARY xml2) +find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) +find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + +# find_library(CoreFoundation_Library CoreFoundation) +# find_library(SystemConfiguration_Library SystemConfiguration) +target_link_libraries( + _azure_sdk + PRIVATE ${CURL_LIBRARY} + PRIVATE ${SSL_LIBRARY} + PRIVATE ${CRYPTO_LIBRARY} + PRIVATE ${XML2_LIBRARY} + + # PRIVATE xml2 + PRIVATE zlib + + # PRIVATE ${CoreFoundation_Library} + # PRIVATE ${SystemConfiguration_Library} +) + +# Originally, on Windows azure-core is built with bcrypt and crypt32 by default +if(TARGET OpenSSL::SSL) + target_link_libraries(_azure_sdk PRIVATE OpenSSL::Crypto OpenSSL::SSL) +endif() + +# Originally, on Windows azure-core is built with winhttp by default +if(TARGET td_contrib::curl) + target_link_libraries(_azure_sdk PRIVATE td_contrib::curl) +endif() + +target_include_directories(_azure_sdk SYSTEM BEFORE PUBLIC ${AZURE_SDK_INCLUDES}) +add_library(td_contrib::azure_sdk ALIAS _azure_sdk) diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt index 1deff5a67e..f544baafde 100644 --- a/contrib/test/CMakeLists.txt +++ b/contrib/test/CMakeLists.txt @@ -28,5 +28,6 @@ if(${BUILD_WITH_TRAFT}) # add_subdirectory(traft) endif(${BUILD_WITH_TRAFT}) +add_subdirectory(azure) add_subdirectory(tdev) add_subdirectory(lz4) diff --git a/contrib/test/azure/CMakeLists.txt b/contrib/test/azure/CMakeLists.txt new file mode 100644 index 0000000000..fade8c9ef6 --- /dev/null +++ b/contrib/test/azure/CMakeLists.txt @@ -0,0 +1,27 @@ +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED True) + +add_executable( + azure-test + main.cpp +) + +find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) +find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + +# find_library(XML2_LIBRARY xml2) +find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) +find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + +# find_library(CoreFoundation_Library CoreFoundation) +# find_library(SystemConfiguration_Library SystemConfiguration) +target_link_libraries( + azure-test + PRIVATE _azure_sdk + PRIVATE ${CURL_LIBRARY} + PRIVATE ${XML2_LIBRARY} + PRIVATE ${SSL_LIBRARY} + PRIVATE ${CRYPTO_LIBRARY} + PRIVATE dl + PRIVATE pthread +) diff --git a/contrib/test/azure/main.cpp b/contrib/test/azure/main.cpp new file mode 100644 index 0000000000..78ecc8b9f5 --- /dev/null +++ 
b/contrib/test/azure/main.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <cstdlib> +#include <iostream> + +// Include the necessary SDK headers +#include <azure/core.hpp> +#include <azure/storage/blobs.hpp> + +// Add appropriate using namespace directives +using namespace Azure::Storage; +using namespace Azure::Storage::Blobs; + +// Secrets should be stored & retrieved from secure locations such as Azure::KeyVault. For +// convenience and brevity of samples, the secrets are retrieved from environment variables. +// std::getenv returns a null pointer when a variable is unset, so guard against it before +// constructing a std::string. + +std::string GetEndpointUrl() { + // return std::getenv("AZURE_STORAGE_ACCOUNT_URL"); + const char *accountId = std::getenv("ablob_account_id"); + if (accountId == nullptr) { + return std::string(); + } + + return std::string(accountId) + ".blob.core.windows.net"; +} + +std::string GetAccountName() { + // return std::getenv("AZURE_STORAGE_ACCOUNT_NAME"); + const char *accountName = std::getenv("ablob_account_id"); + return accountName == nullptr ? std::string() : std::string(accountName); +} + +std::string GetAccountKey() { + // return std::getenv("AZURE_STORAGE_ACCOUNT_KEY"); + const char *accountKey = std::getenv("ablob_account_secret"); + return accountKey == nullptr ? std::string() : std::string(accountKey); +} + +int main() { + std::string endpointUrl = GetEndpointUrl(); + std::string accountName = GetAccountName(); + std::string accountKey = GetAccountKey(); + + try { + auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey); + + std::string accountURL = "https://fd2d01cd892f844eeaa2273.blob.core.windows.net"; + BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential); + + std::string containerName = "myblobcontainer"; + // auto containerClient = blobServiceClient.GetBlobContainerClient("myblobcontainer"); + auto containerClient = blobServiceClient.GetBlobContainerClient("td-test"); + + // Create the container if it does not exist + std::cout << "Creating container: " << containerName << std::endl; + // containerClient.CreateIfNotExists(); + + std::string blobName = "blob.txt"; + uint8_t blobContent[] = "Hello Azure!"; + // Create the block blob client + BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName); + + // Upload the blob + std::cout << "Uploading blob: " << blobName << std::endl; + blobClient.UploadFrom(blobContent, sizeof(blobContent)); + /* + auto blockBlobClient = BlockBlobClient(endpointUrl, sharedKeyCredential); + + // Create some data to upload into the blob.
+ std::vector<uint8_t> data = {1, 2, 3, 4}; + Azure::Core::IO::MemoryBodyStream stream(data); + + Azure::Response<Models::UploadBlockBlobResult> response = blockBlobClient.Upload(stream); + + Models::UploadBlockBlobResult model = response.Value; + std::cout << "Last modified date of uploaded blob: " << model.LastModified.ToString() + << std::endl; + */ + } catch (const Azure::Core::RequestFailedException& e) { + std::cout << "Status Code: " << static_cast<int>(e.StatusCode) << ", Reason Phrase: " << e.ReasonPhrase + << std::endl; + std::cout << e.what() << std::endl; + + return 1; + } + + return 0; +} diff --git a/docs/en/14-reference/03-taos-sql/12-distinguished.md b/docs/en/14-reference/03-taos-sql/12-distinguished.md index bfc9ca32c0..2374b762d4 100644 --- a/docs/en/14-reference/03-taos-sql/12-distinguished.md +++ b/docs/en/14-reference/03-taos-sql/12-distinguished.md @@ -80,7 +80,7 @@ These pseudocolumns occur after the aggregation clause. `FILL` clause is used to specify how to fill when there is data missing in any window, including: 1. NONE: No fill (the default fill mode) -2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. +2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that only expressions in the select list that involve normal columns need a fill value; expressions such as `_wstart`, `_wend`, `_wduration`, `_wstart + 1a`, `now`, and `1+1`, as well as partition keys such as tbname (when using PARTITION BY), do not. However, expressions such as `timediff(last(ts), _wstart)` do need a fill value. 3. PREV: Fill with the previous non-NULL value, `FILL(PREV)` 4. NULL: Fill with NULL, `FILL(NULL)` 5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
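As a concrete illustration of the note added above, consider the following sketch; the table `meters` and column `c1` are assumed for the example and are not part of the manual's text:

```sql
-- Illustrative sketch (assumed schema: a table meters with timestamp column ts
-- and numeric column c1). min(c1) and timediff(last(ts), _wstart) each require
-- a fill value, while the pseudocolumn _wstart does not, so FILL(VALUE, ...)
-- lists exactly two values.
SELECT _wstart, min(c1), timediff(last(ts), _wstart)
FROM meters
WHERE ts >= '2018-10-03 14:38:05' AND ts < '2018-10-03 15:38:05'
INTERVAL(5m)
FILL(VALUE, 0, 0);
```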
diff --git a/docs/examples/JDBC/JDBCDemo/pom.xml b/docs/examples/JDBC/JDBCDemo/pom.xml index 315b147cce..4b3e1ab675 100644 --- a/docs/examples/JDBC/JDBCDemo/pom.xml +++ b/docs/examples/JDBC/JDBCDemo/pom.xml @@ -19,7 +19,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.3 + 3.4.0 org.locationtech.jts diff --git a/docs/examples/JDBC/connectionPools/pom.xml b/docs/examples/JDBC/connectionPools/pom.xml index 1791bfe8bc..e3ef30d2f8 100644 --- a/docs/examples/JDBC/connectionPools/pom.xml +++ b/docs/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.3 + 3.4.0 diff --git a/docs/examples/JDBC/consumer-demo/pom.xml b/docs/examples/JDBC/consumer-demo/pom.xml index dcabfc1249..709f87d9c1 100644 --- a/docs/examples/JDBC/consumer-demo/pom.xml +++ b/docs/examples/JDBC/consumer-demo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.3 + 3.4.0 com.google.guava diff --git a/docs/examples/JDBC/taosdemo/pom.xml b/docs/examples/JDBC/taosdemo/pom.xml index ffe159ea49..ab5912aa9e 100644 --- a/docs/examples/JDBC/taosdemo/pom.xml +++ b/docs/examples/JDBC/taosdemo/pom.xml @@ -67,7 +67,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.3 + 3.4.0 diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index f23b73177e..e1a9504249 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -22,7 +22,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.3.3 + 3.4.0 diff --git a/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java b/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java index 08f66c2227..0f35e38f57 100644 --- a/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java +++ b/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java @@ -17,8 +17,8 @@ public class SchemalessWsTest { private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; public static void main(String[] args) throws SQLException { - final String url = "jdbc:TAOS-RS://" + host + ":6041?user=root&password=taosdata&batchfetch=true"; - try(Connection connection = DriverManager.getConnection(url)){ + final String url = "jdbc:TAOS-WS://" + host + ":6041?user=root&password=taosdata"; + try (Connection connection = DriverManager.getConnection(url)) { init(connection); AbstractConnection conn = connection.unwrap(AbstractConnection.class); diff --git a/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java index afe74ace83..052af71a83 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java @@ -12,9 +12,9 @@ public class WSConnectExample { public static void main(String[] args) throws Exception { // use // String jdbcUrl = - // "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata&batchfetch=true"; + // "jdbc:TAOS-WS://localhost:6041/dbName?user=root&password=taosdata"; // if you want to connect a specified database named "dbName".
- String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata&batchfetch=true"; + String jdbcUrl = "jdbc:TAOS-WS://localhost:6041?user=root&password=taosdata"; Properties connProps = new Properties(); connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); diff --git a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java index eab8df06b9..1353ebbddc 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java @@ -15,7 +15,7 @@ public class WSParameterBindingBasicDemo { public static void main(String[] args) throws SQLException { - String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; + String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041"; try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { init(conn); @@ -40,7 +40,7 @@ public class WSParameterBindingBasicDemo { pstmt.setFloat(4, random.nextFloat()); pstmt.addBatch(); } - int [] exeResult = pstmt.executeBatch(); + int[] exeResult = pstmt.executeBatch(); // you can check exeResult here System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters."); } @@ -60,7 +60,8 @@ public class WSParameterBindingBasicDemo { try (Statement stmt = conn.createStatement()) { stmt.execute("CREATE DATABASE IF NOT EXISTS power"); stmt.execute("USE power"); - stmt.execute("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + stmt.execute( + "CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); } } } diff --git a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java index f23fb187f4..7eaccb3db2 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java @@ -40,7 +40,7 @@ public class WSParameterBindingFullDemo { public static void main(String[] args) throws SQLException { - String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; + String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041/"; try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { @@ -51,8 +51,10 @@ public class WSParameterBindingFullDemo { stmtAll(conn); } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + // handle any errors, please refer to the JDBC specifications for detailed + // exceptions info + System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + + ex.getMessage()); throw ex; } catch (Exception ex) { System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage()); @@ -104,30 +106,29 @@ public class WSParameterBindingFullDemo { pstmt.setTagBoolean(3, true); pstmt.setTagString(4, "binary_value"); pstmt.setTagNString(5, "nchar_value"); - pstmt.setTagVarbinary(6, new byte[]{(byte) 
0x98, (byte) 0xf4, 0x6e}); - pstmt.setTagGeometry(7, new byte[]{ + pstmt.setTagVarbinary(6, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e }); + pstmt.setTagGeometry(7, new byte[] { 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x59, 0x40}); + 0x00, 0x00, 0x00, 0x59, 0x40 }); long current = System.currentTimeMillis(); - pstmt.setTimestamp(1, new Timestamp(current)); pstmt.setInt(2, 1); pstmt.setDouble(3, 1.1); pstmt.setBoolean(4, true); pstmt.setString(5, "binary_value"); pstmt.setNString(6, "nchar_value"); - pstmt.setVarbinary(7, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e}); - pstmt.setGeometry(8, new byte[]{ + pstmt.setVarbinary(7, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e }); + pstmt.setGeometry(8, new byte[] { 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x59, 0x40}); + 0x00, 0x00, 0x00, 0x59, 0x40 }); pstmt.addBatch(); pstmt.executeBatch(); System.out.println("Successfully inserted rows to example_all_type_stmt.ntb"); diff --git a/docs/zh/05-basic/02-insert.md b/docs/zh/05-basic/02-insert.md index 0c7ffd86a4..88d131e832 100644 --- a/docs/zh/05-basic/02-insert.md +++ b/docs/zh/05-basic/02-insert.md @@ -111,7 +111,7 @@ TDengine 还支持直接向超级表写入数据。需要注意的是,超级 ```sql insert into meters (tbname, ts, current, voltage, phase, location, group_id) -values( "d1001v, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2) +values("d1001", "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2) ``` ### 零代码写入 diff --git a/docs/zh/07-develop/01-connect/_connect_rust.mdx b/docs/zh/07-develop/01-connect/_connect_rust.mdx index 0e65e8f920..d88a3335ca 100644 --- a/docs/zh/07-develop/01-connect/_connect_rust.mdx +++ b/docs/zh/07-develop/01-connect/_connect_rust.mdx @@ -3,6 +3,6 @@ ``` :::note -对于 Rust 连接器, 连接方式的不同只体现在使用的特性不同。如果启用了 "ws" 特性,那么只有 Websocket 的实现会被编译进来。 +对于 Rust 连接器, 连接方式的不同只体现在使用的特性不同。如果启用了 "ws" 特性,那么只有 WebSocket 的实现会被编译进来。 ::: diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index 1dfb95d169..bd26bea46d 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -28,7 +28,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 1. 通过客户端驱动程序 taosc 直接与服务端程序 taosd 建立连接,这种连接方式下文中简称 “原生连接”。 2. 通过 taosAdapter 组件提供的 REST API 建立与 taosd 的连接,这种连接方式下文中简称 “REST 连接” -3. 通过 taosAdapter 组件提供的 Websocket API 建立与 taosd 的连接,这种连接方式下文中简称 “Websocket 连接” +3. 通过 taosAdapter 组件提供的 WebSocket API 建立与 taosd 的连接,这种连接方式下文中简称 “WebSocket 连接” ![TDengine connection type](connection-type-zh.webp) @@ -38,9 +38,9 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 关键不同点在于: 1. 使用 原生连接,需要保证客户端的驱动程序 taosc 和服务端的 TDengine 版本配套。 -2. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但是无法体验数据订阅和二进制数据类型等功能。另外与 原生连接 和 Websocket 连接相比,REST连接的性能最低。REST 接口是无状态的。在使用 REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。 -3. 使用 Websocket 连接,用户也无需安装客户端驱动程序 taosc。 -4. 连接云服务实例,必须使用 REST 连接 或 Websocket 连接。 +2. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但是无法体验数据订阅和二进制数据类型等功能。另外与 原生连接 和 WebSocket 连接相比,REST连接的性能最低。REST 接口是无状态的。在使用 REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。 +3. 使用 WebSocket 连接,用户也无需安装客户端驱动程序 taosc。 +4. 
连接云服务实例,必须使用 REST 连接 或 WebSocket 连接。 **推荐使用 WebSocket 连接** @@ -126,7 +126,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 ```bash pip3 install taos-ws-py ``` - :::note 此安装包为 Websocket 连接器 + :::note 此安装包为 WebSocket 连接器 - 同时安装 `taospy` 和 `taos-ws-py` ```bash pip3 install taospy[ws] @@ -182,7 +182,7 @@ taos = { version = "*"} ``` :::info -Rust 连接器通过不同的特性区分不同的连接方式。默认同时支持原生连接和 Websocket 连接,如果仅需要建立 Websocket 连接,可设置 `ws` 特性: +Rust 连接器通过不同的特性区分不同的连接方式。默认同时支持原生连接和 WebSocket 连接,如果仅需要建立 WebSocket 连接,可设置 `ws` 特性: ```toml taos = { version = "*", default-features = false, features = ["ws"] } @@ -201,7 +201,7 @@ taos = { version = "*", default-features = false, features = ["ws"] } ``` npm install @tdengine/websocket ``` - :::note Node.js 目前只支持 Websocket 连接 + :::note Node.js 目前只支持 WebSocket 连接 - **安装验证** - 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/nodejsChecker.js)到本地。 - 在命令行中执行以下命令。 @@ -271,12 +271,10 @@ dotnet add package TDengine.Connector Java 连接器建立连接的参数有 URL 和 Properties。 TDengine 的 JDBC URL 规范格式为: - `jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]` + `jdbc:[TAOS|TAOS-WS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]` URL 和 Properties 的详细参数说明和如何使用详见 [url 规范](../../reference/connector/java/#url-规范) - **注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。 - Python 连接器使用 `connect()` 方法来建立连接,下面是连接参数的具体说明: @@ -387,8 +385,8 @@ DSN 的详细说明和如何使用详见 [连接功能](../../reference/connecto - `reconnectIntervalMs`:重连间隔毫秒时间,默认为 2000。 -**Websocket 连接** -C/C++ 语言连接器 Websocket 连接方式使用 `ws_connect()` 函数用于建立与 TDengine 数据库的连接。其参数为 DSN 描述字符串,其基本结构如下: +**WebSocket 连接** +C/C++ 语言连接器 WebSocket 连接方式使用 `ws_connect()` 函数用于建立与 TDengine 数据库的连接。其参数为 DSN 描述字符串,其基本结构如下: ```text [+]://[[:@]:][/][?=[&=]] @@ -417,8 +415,8 @@ C/C++ 语言连接器原生连接方式使用 `taos_connect()` 函数用于建 -### Websocket 连接 -下面是各语言连接器建立 Websocket 连接代码样例。演示了如何使用 Websocket 连接方式连接到 TDengine 数据库,并对连接设定一些参数。整个过程主要涉及到数据库连接的建立和异常处理。 +### WebSocket 连接 +下面是各语言连接器建立 WebSocket 连接代码样例。演示了如何使用 WebSocket 连接方式连接到 TDengine 数据库,并对连接设定一些参数。整个过程主要涉及到数据库连接的建立和异常处理。 diff --git a/docs/zh/07-develop/02-sql.md b/docs/zh/07-develop/02-sql.md index 5461c975dd..b4274045fc 100644 --- a/docs/zh/07-develop/02-sql.md +++ b/docs/zh/07-develop/02-sql.md @@ -33,7 +33,7 @@ REST API:直接调用 `taosadapter` 提供的 REST API 接口,进行数据 -```python title="Websocket 连接" +```python title="WebSocket 连接" {{#include docs/examples/python/create_db_ws.py}} ``` @@ -69,7 +69,7 @@ REST API:直接调用 `taosadapter` 提供的 REST API 接口,进行数据 -```c title="Websocket 连接" +```c title="WebSocket 连接" {{#include docs/examples/c-ws/create_db_demo.c:create_db_and_table}} ``` @@ -114,7 +114,7 @@ NOW 为系统内部函数,默认为客户端所在计算机当前时间。 NOW -```python title="Websocket 连接" +```python title="WebSocket 连接" {{#include docs/examples/python/insert_ws.py}} ``` @@ -151,7 +151,7 @@ NOW 为系统内部函数,默认为客户端所在计算机当前时间。 NOW -```c title="Websocket 连接" +```c title="WebSocket 连接" {{#include docs/examples/c-ws/insert_data_demo.c:insert_data}} ``` @@ -189,7 +189,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \ -```python title="Websocket 连接" +```python title="WebSocket 连接" {{#include docs/examples/python/query_ws.py}} ``` @@ -230,7 +230,7 @@ rust 连接器还支持使用 **serde** 进行反序列化行为结构体的结 -```c title="Websocket 
连接" +```c title="WebSocket 连接" {{#include docs/examples/c-ws/query_data_demo.c:query_data}} ``` @@ -273,7 +273,7 @@ reqId 可用于请求链路追踪,reqId 就像分布式系统中的 traceId -```python title="Websocket 连接" +```python title="WebSocket 连接" {{#include docs/examples/python/reqid_ws.py}} ``` @@ -310,7 +310,7 @@ reqId 可用于请求链路追踪,reqId 就像分布式系统中的 traceId -```c "Websocket 连接" +```c "WebSocket 连接" {{#include docs/examples/c-ws/with_reqid_demo.c:with_reqid}} ``` diff --git a/docs/zh/07-develop/04-schemaless.md b/docs/zh/07-develop/04-schemaless.md index a865b58b28..bf10b41736 100644 --- a/docs/zh/07-develop/04-schemaless.md +++ b/docs/zh/07-develop/04-schemaless.md @@ -191,7 +191,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 ::: -### Websocket 连接 +### WebSocket 连接 diff --git a/docs/zh/07-develop/05-stmt.md b/docs/zh/07-develop/05-stmt.md index 624600ba4d..74b44ba8e6 100644 --- a/docs/zh/07-develop/05-stmt.md +++ b/docs/zh/07-develop/05-stmt.md @@ -23,7 +23,7 @@ import TabItem from "@theme/TabItem"; - 执行批量插入操作,将这些数据行插入到对应的子表中。 3. 最后打印实际插入表中的行数。 -## Websocket 连接 +## WebSocket 连接 ```java diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md index c668203259..a91a764c67 100644 --- a/docs/zh/07-develop/07-tmq.md +++ b/docs/zh/07-develop/07-tmq.md @@ -94,7 +94,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 -- Websocket 连接: 因为使用 dsn,不需要 `td.connect.ip`,`td.connect.port`,`td.connect.user` 和 `td.connect.pass` 四个配置项,其余同通用配置项。 +- WebSocket 连接: 因为使用 dsn,不需要 `td.connect.ip`,`td.connect.port`,`td.connect.user` 和 `td.connect.pass` 四个配置项,其余同通用配置项。 - 原生连接: 同通用基础配置项。 @@ -103,8 +103,8 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 -### Websocket 连接 -介绍各语言连接器使用 Websocket 连接方式创建消费者。指定连接的服务器地址,设置自动提交,从最新消息开始消费,指定 `group.id` 和 `client.id` 等信息。有的语言的连接器还支持反序列化参数。 +### WebSocket 连接 +介绍各语言连接器使用 WebSocket 连接方式创建消费者。指定连接的服务器地址,设置自动提交,从最新消息开始消费,指定 `group.id` 和 `client.id` 等信息。有的语言的连接器还支持反序列化参数。 @@ -234,7 +234,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ## 订阅消费数据 消费者订阅主题后,可以开始接收并处理这些主题中的消息。订阅消费数据的示例代码如下: -### Websocket 连接 +### WebSocket 连接 @@ -403,7 +403,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ## 指定订阅的 Offset 消费者可以指定从特定 Offset 开始读取分区中的消息,这允许消费者重读消息或跳过已处理的消息。下面展示各语言连接器如何指定订阅的 Offset。 -### Websocket 连接 +### WebSocket 连接 @@ -549,7 +549,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 **注意**:手工提交消费进度前确保消息正常处理完成,否则处理出错的消息不会被再次消费。自动提交是在本次 `poll` 消息时可能会提交上次消息的消费进度,因此请确保消息处理完毕再进行下一次 `poll` 或消息获取。 -### Websocket 连接 +### WebSocket 连接 @@ -663,7 +663,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ## 取消订阅和关闭消费 消费者可以取消对主题的订阅,停止接收消息。当消费者不再需要时,应该关闭消费者实例,以释放资源和断开与 TDengine 服务器的连接。 -### Websocket 连接 +### WebSocket 连接 @@ -766,7 +766,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ## 完整示例 -### Websocket 连接 +### WebSocket 连接
diff --git a/docs/zh/08-operation/18-dual.md b/docs/zh/08-operation/18-dual.md index c7871a8e1e..caddb7ab3b 100644 --- a/docs/zh/08-operation/18-dual.md +++ b/docs/zh/08-operation/18-dual.md @@ -30,9 +30,8 @@ toc_max_heading_level: 4 目前只有 Java 连接器在 WebSocket 连接模式下支持双活,其配置示例如下 ```java -url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; +url = "jdbc:TAOS-WS://" + host + ":6041/?user=root&password=taosdata"; Properties properties = new Properties(); -properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true"); properties.setProperty(TSDBDriver.PROPERTY_KEY_SLAVE_CLUSTER_HOST, "192.168.1.11"); properties.setProperty(TSDBDriver.PROPERTY_KEY_SLAVE_CLUSTER_PORT, "6041"); properties.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true"); @@ -43,13 +42,13 @@ connection = DriverManager.getConnection(url, properties); 其中的配置属性及含义如下表 -| 属性名 | 含义 | -| ----------------- | ------------------ | -| PROPERTY_KEY_SLAVE_CLUSTER_HOST | 第二节点的主机名或者 ip,默认空 | -| PROPERTY_KEY_SLAVE_CLUSTER_PORT | 第二节点的端口号,默认空 | -| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | 是否启用自动重连。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。双活场景下请设置为 true | -| PROPERTY_KEY_RECONNECT_INTERVAL_MS | 重连的时间间隔,单位毫秒:默认 2000 毫秒,也就是 2 秒;最小值为 0, 表示立即重试;最大值不做限制 | -| PROPERTY_KEY_RECONNECT_RETRY_COUNT | 每节点最多重试次数:默认值为 3;最小值为 0,表示不进行重试;最大值不做限制 | +| 属性名 | 含义 | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------- | +| PROPERTY_KEY_SLAVE_CLUSTER_HOST | 第二节点的主机名或者 ip,默认空 | +| PROPERTY_KEY_SLAVE_CLUSTER_PORT | 第二节点的端口号,默认空 | +| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。双活场景下请设置为 true | +| PROPERTY_KEY_RECONNECT_INTERVAL_MS | 重连的时间间隔,单位毫秒:默认 2000 毫秒,也就是 2 秒;最小值为 0, 表示立即重试;最大值不做限制 | +| PROPERTY_KEY_RECONNECT_RETRY_COUNT | 每节点最多重试次数:默认值为 3;最小值为 0,表示不进行重试;最大值不做限制 | ### 约束条件 diff --git a/docs/zh/14-reference/01-components/02-taosc.md b/docs/zh/14-reference/01-components/02-taosc.md index 32baac9a3b..5f22ebe8d5 100755 --- a/docs/zh/14-reference/01-components/02-taosc.md +++ b/docs/zh/14-reference/01-components/02-taosc.md @@ -10,7 +10,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在 | 参数名称 | 参数含义 | |:-----------:|:----------------------------------------------------------:| -|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:localhost:6030 | +|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:${hostname}:6030,若无法获取 ${hostname},则赋值为 localhost | |secondEp | 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值 | |numOfRpcSessions | 一个客户端能创建的最大连接数,取值范围:10-50000000(单位为毫秒);缺省值:500000 | |telemetryReporting | 是否上传 telemetry,0: 不上传,1: 上传;缺省值:1 | diff --git a/docs/zh/14-reference/01-components/03-taosadapter.md b/docs/zh/14-reference/01-components/03-taosadapter.md index a8e8fc3418..b74ee77862 100644 --- a/docs/zh/14-reference/01-components/03-taosadapter.md +++ b/docs/zh/14-reference/01-components/03-taosadapter.md @@ -306,7 +306,7 @@ http 返回内容: ## taosAdapter 监控指标 -taosAdapter 采集 REST/Websocket 相关请求的监控指标。将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 +taosAdapter 采集 REST/WebSocket 相关请求的监控指标。将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 #### adapter\_requests 表 @@ -330,8 +330,8 @@ taosAdapter 采集 REST/Websocket 相关请求的监控指标。将监控指标 | other\_fail | INT UNSIGNED | | 其他失败请求数 | | query\_in\_process | INT UNSIGNED | | 正在处理查询请求数 | | write\_in\_process | INT UNSIGNED | | 正在处理写入请求数 | -| endpoint | 
VARCHAR | | 请求端点 | -| req\_type | NCHAR | TAG | 请求类型:0 为 REST,1 为 Websocket | +| endpoint | VARCHAR | | 请求端点 | +| req\_type | NCHAR | TAG | 请求类型:0 为 REST,1 为 WebSocket | ## 结果返回条数限制 diff --git a/docs/zh/14-reference/02-tools/08-taos-cli.md b/docs/zh/14-reference/02-tools/08-taos-cli.md index c388e7edda..a6f2f7ae05 100644 --- a/docs/zh/14-reference/02-tools/08-taos-cli.md +++ b/docs/zh/14-reference/02-tools/08-taos-cli.md @@ -4,11 +4,11 @@ sidebar_label: taos toc_max_heading_level: 4 --- -TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁最常用的方式。 使用前需要安装 TDengine Server 安装包或 TDengine Client 安装包。 +TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁最常用工具。 使用前需要安装 TDengine Server 安装包或 TDengine Client 安装包。 ## 启动 -要进入 TDengine CLI,您只要在终端执行 `taos` 即可。 +要进入 TDengine CLI,您在终端执行 `taos` 即可。 ```bash taos @@ -23,6 +23,11 @@ taos> ``` 进入 TDengine CLI 后,你可执行各种 SQL 语句,包括插入、查询以及各种管理命令。 +退出 TDengine CLI, 执行 `q` 或 `quit` 或 `exit` 回车即可 +```shell +taos> quit +``` + ## 执行 SQL 脚本 @@ -66,7 +71,7 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH ; - -l PKTLEN: 网络测试时使用的测试包大小 - -n NETROLE: 网络连接测试时的测试范围,默认为 `client`, 可选值为 `client`、`server` - -N PKTNUM: 网络测试时使用的测试包数量 -- -r: 将时间输出出无符号 64 位整数类型(即 C 语音中 uint64_t) +- -r: 将时间列转化为无符号 64 位整数类型输出(即 C 语言中 uint64_t) - -R: 使用 RESTful 模式连接服务端 - -s COMMAND: 以非交互模式执行的 SQL 命令 - -t: 测试服务端启动状态,状态同-k @@ -84,6 +89,13 @@ taos -h h1.taos.com -s "use db; show tables;" 也可以通过配置文件中的参数设置来控制 TDengine CLI 的行为。可用配置参数请参考[客户端配置](../../components/taosc) +## TDengine CLI TAB 键补全 + +- TAB 键前为空命令状态下按 TAB 键,会列出 TDengine CLI 支持的所有命令 +- TAB 键前为空格状态下按 TAB 键,会显示此位置可以出现的所有命令词的第一个,再次按 TAB 键切为下一个 +- TAB 键前为字符串,会搜索与此字符串前缀匹配的所有可出现命令词,并显示第一个,再次按 TAB 键切为下一个 +- 输入反斜杠 `\` + TAB 键, 会自动补全为列显示模式命令词 `\G;` + ## TDengine CLI 小技巧 - 可以使用上下光标键查看历史输入的指令 @@ -91,7 +103,6 @@ taos -h h1.taos.com -s "use db; show tables;" - Ctrl+C 中止正在进行中的查询 - 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存 - 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source ` 自动执行该文件里所有的 SQL 语句 -- 输入 `q` 或 `quit` 或 `exit` 回车,可以退出 TDengine CLI ## TDengine CLI 导出查询结果到文件中 diff --git a/docs/zh/14-reference/03-taos-sql/01-data-type.md b/docs/zh/14-reference/03-taos-sql/01-data-type.md index f33592366a..54106cc578 100644 --- a/docs/zh/14-reference/03-taos-sql/01-data-type.md +++ b/docs/zh/14-reference/03-taos-sql/01-data-type.md @@ -42,8 +42,8 @@ CREATE DATABASE db_name PRECISION 'ns'; | 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符。如果用户字符串长度超出声明长度,将会报错。 | | 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 | | 16 | VARCHAR | 自定义 | BINARY 类型的别名 | -| 17 | GEOMETRY | 自定义 | 几何类型 -| 18 | VARBINARY | 自定义 | 可变长的二进制数据| +| 17 | GEOMETRY | 自定义 | 几何类型,3.1.0.0 版本开始支持 +| 18 | VARBINARY | 自定义 | 可变长的二进制数据, 3.1.1.0 版本开始支持| :::note diff --git a/docs/zh/14-reference/03-taos-sql/12-distinguished.md b/docs/zh/14-reference/03-taos-sql/12-distinguished.md index e149c2c82e..0b834dea29 100644 --- a/docs/zh/14-reference/03-taos-sql/12-distinguished.md +++ b/docs/zh/14-reference/03-taos-sql/12-distinguished.md @@ -76,7 +76,7 @@ window_clause: { FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种: 1. 不进行填充:NONE(默认填充模式)。 -2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1, 若查询列表中有多列需要FILL, 则需要给每一个FILL列指定VALUE, 如`SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`。 +2. 
VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1。若查询列表中有多列需要 FILL,则需要给每一个 FILL 列指定 VALUE,如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`。注意,SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE:如 `_wstart`、`_wstart+1a`、`now`、`1+1` 以及使用 partition by 时的 partition key(如 tbname)均不需要指定 VALUE;而 `timediff(last(ts), _wstart)` 这类包含普通列的表达式则需要指定 VALUE。
3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
diff --git a/docs/zh/14-reference/03-taos-sql/14-stream.md b/docs/zh/14-reference/03-taos-sql/14-stream.md
index 3af8fa6921..cd5c76a4ad 100644
--- a/docs/zh/14-reference/03-taos-sql/14-stream.md
+++ b/docs/zh/14-reference/03-taos-sql/14-stream.md
@@ -153,7 +153,7 @@ SELECT * from information_schema.`ins_streams`;
由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。
-因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY最小时间是5s,如果低于5s,创建流计算时会报错。
+因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY 最小时间是 5s,如果低于 5s,创建流计算时会报错。
MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算
diff --git a/docs/zh/14-reference/05-connector/10-cpp.mdx b/docs/zh/14-reference/05-connector/10-cpp.mdx
index c618601fb9..7164baad2a 100644
--- a/docs/zh/14-reference/05-connector/10-cpp.mdx
+++ b/docs/zh/14-reference/05-connector/10-cpp.mdx
@@ -5,14 +5,14 @@ toc_max_heading_level: 4
---
C/C++ 开发人员可以使用 TDengine 的客户端驱动,即 C/C++连接器 (以下都用 TDengine 客户端驱动表示),开发自己的应用来连接 TDengine 集群完成数据存储、查询以及其他功能。TDengine 客户端驱动的 API 类似于 MySQL 的 C API。应用程序使用时,需要包含 TDengine 头文件,里面列出了提供的 API 的函数原型;应用程序还要链接到所在平台上对应的动态库。
-TDengine 的客户端驱动提供了 taosws 和 taos 两个动态库,分别支持 Websocket 连接和原生连接。 Websocket 连接和原生连接的区别是 Websocket 连接方式不要求客户端和服务端版本完全匹配,而原生连接要求,在性能上 Websocket 连接方式也接近于原生连接,一般我们推荐使用 Websocket 连接方式。
+TDengine 的客户端驱动提供了 taosws 和 taos 两个动态库,分别支持 WebSocket 连接和原生连接。WebSocket 连接和原生连接的区别是 WebSocket 连接方式不要求客户端和服务端版本完全匹配,而原生连接则有此要求;在性能上 WebSocket 连接方式也接近于原生连接,一般我们推荐使用 WebSocket 连接方式。
下面我们分开介绍两种连接方式的使用方法。
-## Websocket 连接方式
-Websocket 连接方式需要使用 taosws.h 头文件和 taosws 动态库。
+## WebSocket 连接方式
+WebSocket 连接方式需要使用 taosws.h 头文件和 taosws 动态库。
```c
#include <taosws.h>
```
@@ -44,7 +44,7 @@ TDengine 客户端驱动的动态库位于:
### 错误码
在 C 接口的设计中,错误码采用整数类型表示,每个错误码都对应一个特定的错误状态。如未特别说明,当 API 的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。
-Websocket 连接方式单独的错误码在 `taosws.h` 中,
+WebSocket 连接方式单独的错误码在 `taosws.h` 中,
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
@@ -82,7 +82,7 @@ WebSocket 连接方式错误码只保留了原生连接错误码的后两个字
#### DSN
-C/C++ Websocket 连接器通过 DSN 连接描述字符串来表示连接信息。
+C/C++ WebSocket 连接器通过 DSN 连接描述字符串来表示连接信息。
DSN 描述字符串基本结构如下:
```text
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
```
各部分意义见下表:
- **driver**: 必须指定驱动名以便连接器选择何种方式创建连接,支持如下驱动名:
  - **taos**: 默认驱动,支持 SQL 执行,参数绑定,无模式写入。
  - **tmq**: 使用 TMQ 订阅数据。
-- **protocol**: 显示指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 Websocket 方式建立连接。
 - **http/ws**: 使用 Websocket 协议。
 - **https/wss**: 在 Websocket 连接方式下显示启用 SSL/TLS 协议。
+- **protocol**: 显式指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 WebSocket 方式建立连接。
 - **http/ws**: 使用 WebSocket 协议。
 - **https/wss**: 在 WebSocket 连接方式下显式启用 SSL/TLS 协议。
- **username/password**: 用于创建连接的用户名及密码。
-- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时 Websocket 连接默认为 `localhost:6041` 。
+- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时 WebSocket 连接默认为 `localhost:6041` 。
- **database**: 指定默认连接的数据库名,可选参数。
- **params**:其他可选参数。
-一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 Websocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。
+一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 WebSocket(`ws`)方式通过 `6041` 端口连接服务器 
`localhost`,并指定默认数据库为 `test`。 #### 基础 API diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index ba4cb38afd..0a167dd5ee 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -33,14 +33,15 @@ REST 连接支持所有能运行 Java 的平台。 | taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 | | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: | -| 3.3.3 | 1. 解决了 Websocket statement 关闭导致的内存泄漏 | - | -| 3.3.2 | 1. 优化 Websocket 连接下的参数绑定性能;2. 优化了对 mybatis 的支持 | - | -| 3.3.0 | 1. 优化 Websocket 连接下的数据传输性能;2. 支持跳过 SSL 验证,默认关闭 | 3.3.2.0 及更高版本 | +| 3.4.0 | 1. 使用 jackson 库替换 fastjson 库;2. WebSocket 采用独立协议标识;3. 优化后台拉取线程使用,避免用户误用导致超时。 | - | +| 3.3.3 | 1. 解决了 WebSocket statement 关闭导致的内存泄漏 | - | +| 3.3.2 | 1. 优化 WebSocket 连接下的参数绑定性能;2. 优化了对 mybatis 的支持 | - | +| 3.3.0 | 1. 优化 WebSocket 连接下的数据传输性能;2. 支持跳过 SSL 验证,默认关闭 | 3.3.2.0 及更高版本 | | 3.2.11 | 解决了 Native 连接关闭结果集 bug | - | -| 3.2.10 | 1. REST/WebSocket 连接支持传输中的数据压缩;2. Websocket 自动重连机制,默认关闭;3. Connection 类提供无模式写入的方法;4. 优化了原生连接的数据拉取性能;5. 修复了一些已知问题;6.元数据获取函数可以返回支持的函数列表。 | - | -| 3.2.9 | 解决了 Websocket prepareStatement 关闭 bug | - | -| 3.2.8 | 优化了自动提交, 解决了 websocket 手动提交 bug, 优化 Websocket prepareStatement 使用一个连接, 元数据支持视图 | - | -| 3.2.7 | 支持 VARBINARY 和 GEOMETRY 类型,增加 native 连接的时区设置支持。增加 websocket 自动重连功能。 | 3.2.0.0 及更高版本 | +| 3.2.10 | 1. REST/WebSocket 连接支持传输中的数据压缩;2. WebSocket 自动重连机制,默认关闭;3. Connection 类提供无模式写入的方法;4. 优化了原生连接的数据拉取性能;5. 修复了一些已知问题;6.元数据获取函数可以返回支持的函数列表。 | - | +| 3.2.9 | 解决了 WebSocket prepareStatement 关闭 bug | - | +| 3.2.8 | 优化了自动提交, 解决了 WebSocket 手动提交 bug, 优化 WebSocket prepareStatement 使用一个连接, 元数据支持视图 | - | +| 3.2.7 | 支持 VARBINARY 和 GEOMETRY 类型,增加 native 连接的时区设置支持。增加 WebSocket 自动重连功能。 | 3.2.0.0 及更高版本 | | 3.2.5 | 数据订阅增加 committed()、assignment() 方法 | 3.1.0.3 及更高版本 | | 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - | | 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - | @@ -195,17 +196,14 @@ WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/w ## API 参考 ### JDBC 驱动 -taos-jdbcdriver 实现了 JDBC 标准的 Driver 接口,提供了两个实现类:RestfulDriver 和 TSDBDriver。 -Websocket 和 REST 连接使用驱动类 `com.taosdata.jdbc.rs.RestfulDriver`。原生连接使用驱动类 `com.taosdata.jdbc.TSDBDriver`。 - +taos-jdbcdriver 实现了 JDBC 标准的 Driver 接口,提供了 3 个实现类。 +- WebSocket 连接使用驱动类 `com.taosdata.jdbc.ws.WebSocketDriver`。 +- 原生连接使用驱动类 `com.taosdata.jdbc.TSDBDriver`。 +- REST 连接使用驱动类 `com.taosdata.jdbc.rs.RestfulDriver`。 #### URL 规范 TDengine 的 JDBC URL 规范格式为: -`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]` - -对于建立连接,原生连接与 REST 连接有细微不同。 Websocket 和 REST 连接使用驱动类 `com.taosdata.jdbc.rs.RestfulDriver`。原生连接使用驱动类 `com.taosdata.jdbc.TSDBDriver`。 - -**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。 +`jdbc:[TAOS|TAOS-WS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}|&batchfetch={batchfetch}]` **原生连接** `jdbc:TAOS://taosdemo.com:6030/power?user=root&password=taosdata`,使用了 JDBC 原生连接的 TSDBDriver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 power 的连接。这个 URL @@ -234,23 +232,38 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可 > **注意**:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 
/etc/taos/taos.cfg ,Windows OS 上默认值 C:/TDengine/cfg/taos.cfg。
+**WebSocket 连接**
+使用 JDBC WebSocket 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
-**Websocket 和 REST 连接**
-使用 JDBC Websocket 或 REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
+1. driverClass 指定为“com.taosdata.jdbc.ws.WebSocketDriver”;
+2. jdbcUrl 以“jdbc:TAOS-WS://”开头;
+3. 使用 6041 作为连接端口。
+
+对于 WebSocket 连接,url 中的配置参数如下:
+- user:登录 TDengine 用户名,默认值 'root'。
+- password:用户登录密码,默认值 'taosdata'。
+- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
+- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行后面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。
+- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 60000。
+- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 60000。
+- useSSL: 连接中是否使用 SSL。
+
+**注意**:部分配置项(比如:locale、timezone)在 WebSocket 连接中不生效。
+
+**REST 连接**
+使用 JDBC REST 连接,不需要依赖客户端驱动。与 JDBC 原生连接相比,仅需要:
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
-对于 Websocket 和 REST 连接,url 中的配置参数如下:
+对于 REST 连接,url 中的配置参数如下:
- user:登录 TDengine 用户名,默认值 'root'。
- password:用户登录密码,默认值 'taosdata'。
-- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST 连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行后面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 60000。
-- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 60000。仅在 batchfetch 设置为 false 时生效。
-- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 60000。 仅在 batchfetch 设置为 true 时生效。
+- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 60000。
- useSSL: 连接中是否使用 SSL。
- httpPoolSize: REST 并发请求大小,默认 20。
@@ -272,7 +285,7 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可
properties 中的配置参数如下:
- TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 'root'。
- TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 'taosdata'。
-- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
+- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。因历史原因使用 REST 连接时,若设置此参数为 true 会变成 WebSocket 连接。
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行后面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR:仅在使用 JDBC 原生连接时生效。客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
- TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。
@@ -280,16 +293,16 @@ properties 中的配置参数如下:
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:仅在使用 JDBC 原生连接时生效。 客户端使用的时区,默认值为系统当前时区。因为历史的原因,我们只支持POSIX标准的部分规范,如UTC-8(代表中国上海), GMT-8,Asia/Shanghai 这几种形式。
- TSDBDriver.HTTP_CONNECT_TIMEOUT: 连接超时时间,单位 ms, 默认值为 60000。仅在 REST 连接时生效。
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超时时间,单位 ms,默认值为 60000。仅在 REST 连接且 batchfetch 设置为 false 时生效。
-- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 60000。 仅在 REST 连接且 batchfetch 设置为 true 时生效。
-- TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 REST 连接时生效。
+- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 60000。 仅 WebSocket 连接下有效。
+- TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 WebSocket/REST 连接时生效。
- TSDBDriver.HTTP_POOL_SIZE: REST 并发请求大小,默认 20。
-- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 
REST/WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。 +- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。 > **注意**:启用自动重连仅对简单执行 SQL 语句以及 无模式写入、数据订阅有效。对于参数绑定无效。自动重连仅对连接建立时通过参数指定数据库有效,对后面的 `use db` 语句切换数据库无效。 - TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: 自动重连重试间隔,单位毫秒,默认值 2000。仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。 - TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: 自动重连重试次数,默认值 3,仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。 -- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: 关闭 SSL 证书验证 。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。 +- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: 关闭 SSL 证书验证 。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。 @@ -1154,7 +1167,7 @@ JDBC 驱动支持标准的 ResultSet 接口,提供了用于读取结果集中 PreparedStatement 允许使用预编译的 SQL 语句,这可以提高性能并提供参数化查询的能力,从而增加安全性。 JDBC 驱动提供了实现 PreparedStatement 接口的两个类: 1. 对应原生连接的 TSDBPreparedStatement -2. 对应 Websocket 连接的 TSWSPreparedStatement +2. 对应 WebSocket 连接的 TSWSPreparedStatement 因 JDBC 标准没有高性能绑定数据的接口,TSDBPreparedStatement 和 TSWSPreparedStatement 都新增了一些方法,用来扩展参数绑定能力。 > **注意**:由于 PreparedStatement 继承了 Statement 接口,因此对于这部分重复的接口不再赘述,请参考 Statement 接口中对应描述。 @@ -1347,8 +1360,8 @@ JDBC 标准不支持数据订阅,因此本章所有接口都是扩展接口。 - httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。 - messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。 - httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。 -- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。 -- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 Websocket 连接时生效。true: 启用,false: 不启用。默认为 false。 +- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: 传输过程是否启用压缩。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。 +- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: 是否启用自动重连。仅在使用 WebSocket 连接时生效。true: 启用,false: 不启用。默认为 false。 - TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: 自动重连重试间隔,单位毫秒,默认值 2000。仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。 - TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: 自动重连重试次数,默认值 3,仅在 PROPERTY_KEY_ENABLE_AUTO_RECONNECT 为 true 时生效。 diff --git a/docs/zh/14-reference/05-connector/26-rust.mdx b/docs/zh/14-reference/05-connector/26-rust.mdx index 1fcc1e3dcd..cfabed4c61 100644 --- a/docs/zh/14-reference/05-connector/26-rust.mdx +++ b/docs/zh/14-reference/05-connector/26-rust.mdx @@ -18,9 +18,9 @@ import RequestId from "./_request_id.mdx"; ## 连接方式 -`taos` 提供两种建立连接的方式。一般我们推荐使用 **Websocket 连接**。 +`taos` 提供两种建立连接的方式。一般我们推荐使用 **WebSocket 连接**。 - **原生连接**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例。 -- **Websocket 连接**,它通过 taosAdapter 的 Websocket 接口连接 TDengine 运行实例。 +- **WebSocket 连接**,它通过 taosAdapter 的 WebSocket 接口连接 TDengine 运行实例。 你可以通过不同的 “特性(即 Cargo 关键字 `features`)” 来指定使用哪种连接器(默认同时支持)。 @@ -29,13 +29,13 @@ import RequestId from "./_request_id.mdx"; ## 支持的平台 原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 -Websocket 连接支持所有能运行 Rust 的平台。 +WebSocket 连接支持所有能运行 Rust 的平台。 ## 版本历史 | Rust 连接器版本 | TDengine 版本 | 主要功能 | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.12.3 | 3.3.0.0 or later | 优化了 Websocket 查询和插入性能,支持了 VARBINARY 和 GEOMETRY 类型 | +| v0.12.3 | 3.3.0.0 or later | 优化了 WebSocket 查询和插入性能,支持了 VARBINARY 和 GEOMETRY 类型 | | v0.12.0 | 3.2.3.0 or later | WS 支持压缩。 | | v0.11.0 | 3.2.0.0 | TMQ 功能优化。 | | v0.10.0 | 3.1.0.0 | WS endpoint 变更。 | @@ -115,15 +115,15 @@ DSN 描述字符串基本结构如下: - **driver**: 必须指定驱动名以便连接器选择何种方式创建连接,支持如下驱动名: - **taos**: 使用 TDengine 连接器驱动,默认是使用 
taos 驱动。 - **tmq**: 使用 TMQ 订阅数据。 -- **protocol**: 显示指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 Websocket 方式建立连接。 - - **http/ws**: 使用 Websocket 创建连接。 - - **https/wss**: 在 Websocket 连接方式下显示启用 SSL/TLS 连接。 +- **protocol**: 显示指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 WebSocket 方式建立连接。 + - **http/ws**: 使用 WebSocket 创建连接。 + - **https/wss**: 在 WebSocket 连接方式下显示启用 SSL/TLS 连接。 - **username/password**: 用于创建连接的用户名及密码。 -- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时(`taos://`),原生连接默认为 `localhost:6030`,Websocket 连接默认为 `localhost:6041` 。 +- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时(`taos://`),原生连接默认为 `localhost:6030`,WebSocket 连接默认为 `localhost:6041` 。 - **database**: 指定默认连接的数据库名,可选参数。 - **params**:其他可选参数。 -一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 Websocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。 +一个完整的 DSN 描述字符串示例如下:`taos+ws://localhost:6041/test`, 表示使用 WebSocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。 #### TaosBuilder TaosBuilder 结构体主要提供了根据 DSN 构建 Taos 对象的方法,还提供了检查连接,以及获取客户端版本号等功能。 diff --git a/docs/zh/14-reference/05-connector/30-python.mdx b/docs/zh/14-reference/05-connector/30-python.mdx index 8e08bfc103..8436c30249 100644 --- a/docs/zh/14-reference/05-connector/30-python.mdx +++ b/docs/zh/14-reference/05-connector/30-python.mdx @@ -14,10 +14,10 @@ import RequestId from "./_request_id.mdx"; Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-python)。 ## 连接方式 -`taospy`主要提供三种形式的连接器。一般我们推荐使用 **Websocket 连接**。 +`taospy`主要提供三种形式的连接器。一般我们推荐使用 **WebSocket 连接**。 - **原生连接**,对应 `taospy` 包的 `taos` 模块。通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、数据订阅、schemaless 接口和参数绑定接口等功能。 - **REST 连接**,对应 `taospy` 包的 `taosrest` 模块。通过 taosAdapter 提供的 HTTP 接口连接 TDengine 实例,不支持 schemaless 和数据订阅等特性。 -- **Websocket 连接**,对应 `taos-ws-py` 包,可以选装。通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。 +- **WebSocket 连接**,对应 `taos-ws-py` 包,可以选装。通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。 连接方式的详细介绍请参考:[连接方式](../../../develop/connect/#连接方式) @@ -48,9 +48,9 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con |2.7.9|数据订阅支持获取消费进度和重置消费进度| |2.7.8|新增 `execute_many`| -|Python Websocket Connector 版本|主要变化| +|Python WebSocket Connector 版本|主要变化| |:----------------------------:|:-----:| -|0.3.2|优化 Websocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题| +|0.3.2|优化 WebSocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题| |0.2.9|已知问题修复| |0.2.5|1. 数据订阅支持获取消费进度和重置消费进度
<br/> 2. 支持 schemaless <br/>
3. 支持 STMT| |0.2.4|数据订阅新增取消订阅方法| diff --git a/docs/zh/14-reference/05-connector/35-node.mdx b/docs/zh/14-reference/05-connector/35-node.mdx index bd2ca537e3..6ac34d2471 100644 --- a/docs/zh/14-reference/05-connector/35-node.mdx +++ b/docs/zh/14-reference/05-connector/35-node.mdx @@ -14,7 +14,7 @@ Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-conne ## 连接方式 -Node.js 连接器目前仅支持 Websocket 连接器, 其通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例。 +Node.js 连接器目前仅支持 WebSocket 连接器, 其通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例。 连接方式的详细介绍请参考:[连接方式](../../../develop/connect/#连接方式) @@ -48,7 +48,7 @@ Node.js 连接器目前仅支持 Websocket 连接器, 其通过 taosAdapter | 107 | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 | | 108 | connection has been closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 | | 109 | fetch block data parse fail | 获取到的查询数据,解析失败 | -| 110 | websocket connection has reached its maximum limit | Websocket 连接达到上限 | +| 110 | websocket connection has reached its maximum limit | WebSocket 连接达到上限 | - [TDengine Node.js Connector Error Code](https://github.com/taosdata/taos-connector-node/blob/main/nodejs/src/common/wsError.ts) - TDengine 其他功能模块的报错,请参考 [错误码](../../../reference/error-code) @@ -104,7 +104,7 @@ Node.js 连接器目前仅支持 Websocket 连接器, 其通过 taosAdapter ## API 参考 -Node.js 连接器(`@tdengine/websocket`), 其通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例。 +Node.js 连接器(`@tdengine/websocket`), 其通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例。 ### URL 规范 diff --git a/docs/zh/14-reference/05-connector/40-csharp.mdx b/docs/zh/14-reference/05-connector/40-csharp.mdx index 93f592fdd0..e4e778eeff 100644 --- a/docs/zh/14-reference/05-connector/40-csharp.mdx +++ b/docs/zh/14-reference/05-connector/40-csharp.mdx @@ -14,7 +14,7 @@ import RequestId from "./_request_id.mdx"; `TDengine.Connector` 提供两种形式的连接器 * **原生连接**,通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、数据订阅、schemaless 接口和参数绑定接口等功能。 -* **Websocket 连接**,通过 taosAdapter 提供的 Websocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。(自 v3.0.1 起) +* **WebSocket 连接**,通过 taosAdapter 提供的 WebSocket 接口连接 TDengine 实例,WebSocket 连接实现的功能集合和原生连接有少量不同。(自 v3.0.1 起) 连接方式的详细介绍请参考:[连接方式](../../../develop/connect/#连接方式) diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index 38dd88b86d..8518d2ffd7 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -109,7 +109,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位 ## 版本历史 -| taos_odbc版本 | 主要变化 | TDengine 版本 | +| taos_odbc 版本 | 主要变化 | TDengine 版本 | | :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- | | v1.1.0 | 1. 支持视图功能;
2. 支持 VARBINARY/GEOMETRY 数据类型; | 3.3.3.0及更高版本 | | v1.0.2 | 支持 CP1252 字符编码; | 3.2.3.0及更高版本 | @@ -145,7 +145,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位 ## API 参考 -本节按功能分类汇总了 ODBC API,关于完整的 ODBC API 参考,请访问 http://msdn.microsoft.com/en-us/library/ms714177.aspx 的ODBC程序员参考页面。 +本节按功能分类汇总了 ODBC API,关于完整的 ODBC API 参考,请访问 http://msdn.microsoft.com/en-us/library/ms714177.aspx 的 ODBC 程序员参考页面。 ### 数据源和驱动程序管理 diff --git a/docs/zh/14-reference/05-connector/index.md b/docs/zh/14-reference/05-connector/index.md index 04a2ef6c1f..bd2cff6a3d 100644 --- a/docs/zh/14-reference/05-connector/index.md +++ b/docs/zh/14-reference/05-connector/index.md @@ -62,7 +62,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器 | **连接管理** | 支持 | 支持 | 支持 | | **执行 SQL** | 支持 | 支持 | 支持 | -### 使用 Websocket 接口 +### 使用 WebSocket 接口 | **功能特性** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | **C/C++** | | ------------------- | -------- | ---------- | ------ | ------ | ----------- | -------- | --------- | diff --git a/include/common/cos.h b/include/common/cos.h index b336a1e5ee..53dc161ee5 100644 --- a/include/common/cos.h +++ b/include/common/cos.h @@ -32,6 +32,8 @@ extern int32_t tsS3PageCacheSize; extern int32_t tsS3UploadDelaySec; int32_t s3Init(); +int32_t s3Begin(); +void s3End(); int32_t s3CheckCfg(); int32_t s3PutObjectFromFile(const char *file, const char *object); int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp); diff --git a/include/libs/azure/az.h b/include/libs/azure/az.h new file mode 100644 index 0000000000..55839b0727 --- /dev/null +++ b/include/libs/azure/az.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_AZURE_H_ +#define _TD_AZURE_H_ + +#include "os.h" +#include "tarray.h" +#include "tdef.h" +#include "tlog.h" +#include "tmsg.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t azBegin(); +void azEnd(); +int32_t azCheckCfg(); +int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size); +int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock); +void azDeleteObjectsByPrefix(const char *prefix); + +int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp); +int32_t azGetObjectsByPrefix(const char *prefix, const char *path); +int32_t azGetObjectToFile(const char *object_name, const char *fileName); +int32_t azDeleteObjects(const char *object_name[], int nobject); + +#ifdef __cplusplus +} +#endif + +#endif // _TD_AZURE_H_ diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 47b75c4f3f..356d8480ba 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -345,6 +345,7 @@ typedef struct SFillLogicNode { SNode* pWStartTs; SNode* pValues; // SNodeListNode STimeWindow timeRange; + SNodeList* pFillNullExprs; } SFillLogicNode; typedef struct SSortLogicNode { @@ -693,6 +694,7 @@ typedef struct SFillPhysiNode { SNode* pWStartTs; // SColumnNode SNode* pValues; // SNodeListNode STimeWindow timeRange; + SNodeList* pFillNullExprs; } SFillPhysiNode; typedef SFillPhysiNode SStreamFillPhysiNode; diff --git a/include/libs/tcs/tcs.h b/include/libs/tcs/tcs.h new file mode 100644 index 0000000000..530a23d9e9 --- /dev/null +++ b/include/libs/tcs/tcs.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_TCS_H_ +#define _TD_TCS_H_ + +#include "os.h" +#include "tarray.h" +#include "tdef.h" +#include "tlog.h" +#include "tmsg.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern int8_t tsS3Enabled; +extern int8_t tsS3EnabledCfg; + +extern int32_t tsS3UploadDelaySec; +extern int32_t tsS3BlockSize; +extern int32_t tsS3BlockCacheSize; +extern int32_t tsS3PageCacheSize; + +extern int8_t tsS3StreamEnabled; + +int32_t tcsInit(); +void tcsUninit(); + +int32_t tcsCheckCfg(); + +int32_t tcsPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size); +int32_t tcsGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock); + +void tcsDeleteObjectsByPrefix(const char *prefix); + +int32_t tcsPutObjectFromFile2(const char *file, const char *object, int8_t withcp); +int32_t tcsGetObjectsByPrefix(const char *prefix, const char *path); +int32_t tcsDeleteObjects(const char *object_name[], int nobject); +int32_t tcsGetObjectToFile(const char *object_name, const char *fileName); + +#ifdef __cplusplus +} +#endif + +#endif // _TD_TCS_H_ diff --git a/include/util/tdef.h b/include/util/tdef.h index a0bfdc83f5..b4cb1bdd1c 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -506,7 +506,7 @@ typedef enum ELogicConditionType { #ifdef WINDOWS #define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections. #else -#define TSDB_MAX_RPC_THREADS 20 +#define TSDB_MAX_RPC_THREADS 50 #endif #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type diff --git a/include/util/tlog.h b/include/util/tlog.h index 6b0270523e..09ebb35e8f 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -57,6 +57,7 @@ extern int32_t rpcDebugFlag; extern int32_t qDebugFlag; extern int32_t stDebugFlag; extern int32_t wDebugFlag; +extern int32_t azDebugFlag; extern int32_t sDebugFlag; extern int32_t tsdbDebugFlag; extern int32_t tqDebugFlag; diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index 84747860e9..bbd18892ab 100644 --- a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -1,8 +1,8 @@ aux_source_directory(src CLIENT_SRC) -IF (TD_ENTERPRISE) - LIST(APPEND CLIENT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/clientView.c) -ENDIF () +if(TD_ENTERPRISE) + LIST(APPEND CLIENT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/clientView.c) +endif() if(TD_WINDOWS) add_library(taos SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in) diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index eb3dd95e95..42a7c2c615 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -1,121 +1,122 @@ aux_source_directory(src COMMON_SRC) -IF (TD_ENTERPRISE) -LIST(APPEND COMMON_SRC ${TD_ENTERPRISE_DIR}/src/plugins/common/src/tglobal.c) -ENDIF() + +if(TD_ENTERPRISE) + LIST(APPEND COMMON_SRC ${TD_ENTERPRISE_DIR}/src/plugins/common/src/tglobal.c) +endif() add_library(common STATIC ${COMMON_SRC}) -if (DEFINED GRANT_CFG_INCLUDE_DIR) +if(DEFINED GRANT_CFG_INCLUDE_DIR) add_definitions(-DGRANTS_CFG) endif() -IF (TD_GRANT) +if(TD_GRANT) ADD_DEFINITIONS(-D_GRANT) -ENDIF () +endif() -IF (TD_STORAGE) +if(TD_STORAGE) ADD_DEFINITIONS(-D_STORAGE) TARGET_LINK_LIBRARIES(common PRIVATE storage) -ENDIF () +endif() -IF (TD_ENTERPRISE) - IF(${BUILD_WITH_S3}) +if(TD_ENTERPRISE) + if(${BUILD_WITH_S3}) add_definitions(-DUSE_S3) ELSEIF(${BUILD_WITH_COS}) add_definitions(-DUSE_COS) - ENDIF() -ENDIF() + endif() +endif() target_include_directories( - common - PUBLIC 
"${TD_SOURCE_DIR}/include/common" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" - PRIVATE "${GRANT_CFG_INCLUDE_DIR}" + common + PUBLIC "${TD_SOURCE_DIR}/include/common" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + PRIVATE "${GRANT_CFG_INCLUDE_DIR}" ) -IF(${TD_WINDOWS}) - target_include_directories( - common - PRIVATE "${TD_SOURCE_DIR}/contrib/pthread" - PRIVATE "${TD_SOURCE_DIR}/contrib/msvcregex" - ) -ENDIF () + +if(${TD_WINDOWS}) + target_include_directories( + common + PRIVATE "${TD_SOURCE_DIR}/contrib/pthread" + PRIVATE "${TD_SOURCE_DIR}/contrib/msvcregex" + ) +endif() target_link_libraries( - common - PUBLIC os - PUBLIC util - INTERFACE api + common + PUBLIC os + PUBLIC util + INTERFACE api ) if(${BUILD_S3}) + if(${BUILD_WITH_S3}) + target_include_directories( + common - if(${BUILD_WITH_S3}) - target_include_directories( - common + PUBLIC "$ENV{HOME}/.cos-local.2/include" + ) - PUBLIC "$ENV{HOME}/.cos-local.2/include" - ) + set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2) + find_library(S3_LIBRARY s3) + find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(XML2_LIBRARY xml2) + find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + target_link_libraries( + common - set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") - set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2) - find_library(S3_LIBRARY s3) - find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - find_library(XML2_LIBRARY xml2) - find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - target_link_libraries( - common + # s3 + PUBLIC ${S3_LIBRARY} + PUBLIC ${CURL_LIBRARY} + PUBLIC ${SSL_LIBRARY} + PUBLIC ${CRYPTO_LIBRARY} + PUBLIC ${XML2_LIBRARY} + ) - # s3 - PUBLIC ${S3_LIBRARY} - PUBLIC ${CURL_LIBRARY} - PUBLIC ${SSL_LIBRARY} - PUBLIC ${CRYPTO_LIBRARY} - PUBLIC ${XML2_LIBRARY} - ) + add_definitions(-DUSE_S3) + endif() - add_definitions(-DUSE_S3) + if(${BUILD_WITH_COS}) + set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") + find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) + find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) + find_library(MINIXML_LIBRARY mxml) + find_library(CURL_LIBRARY curl) + target_link_libraries( + common + + # s3 + PUBLIC cos_c_sdk_static + PUBLIC ${APR_UTIL_LIBRARY} + PUBLIC ${APR_LIBRARY} + PUBLIC ${MINIXML_LIBRARY} + PUBLIC ${CURL_LIBRARY} + ) + + # s3 + FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) + + if(APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) endif() - if(${BUILD_WITH_COS}) - - set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") - find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) - find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) - find_library(MINIXML_LIBRARY mxml) - find_library(CURL_LIBRARY curl) - target_link_libraries( - common - - # s3 - PUBLIC cos_c_sdk_static - PUBLIC ${APR_UTIL_LIBRARY} - PUBLIC ${APR_LIBRARY} - PUBLIC ${MINIXML_LIBRARY} - PUBLIC ${CURL_LIBRARY} - ) - - # s3 - FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) - IF 
(APR_CONFIG_BIN) - EXECUTE_PROCESS( - COMMAND ${APR_CONFIG_BIN} --includedir - OUTPUT_VARIABLE APR_INCLUDE_DIR - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - ENDIF() - include_directories (${APR_INCLUDE_DIR}) - target_include_directories( - common - PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/.cos-local.1/include" - ) - - add_definitions(-DUSE_COS) - endif(${BUILD_WITH_COS}) + include_directories(${APR_INCLUDE_DIR}) + target_include_directories( + common + PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + PUBLIC "$ENV{HOME}/.cos-local.1/include" + ) + add_definitions(-DUSE_COS) + endif(${BUILD_WITH_COS}) endif() if(${BUILD_TEST}) - ADD_SUBDIRECTORY(test) + ADD_SUBDIRECTORY(test) endif(${BUILD_TEST}) diff --git a/source/common/src/cos.c b/source/common/src/cos.c index 453b924f31..a7e69ddc4c 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -89,20 +89,8 @@ static void s3DumpCfgByEp(int8_t epIndex) { int32_t s3CheckCfg() { int32_t code = 0, lino = 0; - int8_t i = 0; - if (!tsS3Enabled) { - (void)fprintf(stderr, "s3 not configured.\n"); - TAOS_RETURN(code); - } - - code = s3Begin(); - if (code != 0) { - (void)fprintf(stderr, "failed to initialize s3.\n"); - TAOS_RETURN(code); - } - - for (; i < tsS3EpNum; i++) { + for (int8_t i = 0; i < tsS3EpNum; i++) { (void)fprintf(stdout, "test s3 ep (%d/%d):\n", i + 1, tsS3EpNum); s3DumpCfgByEp(i); @@ -192,7 +180,7 @@ int32_t s3CheckCfg() { (void)fprintf(stdout, "=================================================================\n"); } - s3End(); + // s3End(); TAOS_RETURN(code); } @@ -1529,6 +1517,8 @@ void s3EvictCache(const char *path, long object_size) {} #include "cos_http_io.h" #include "cos_log.h" +int32_t s3Begin() { TAOS_RETURN(TSDB_CODE_SUCCESS); } + int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; @@ -1967,6 +1957,10 @@ long s3Size(const char *object_name) { #else int32_t s3Init() { return 0; } +int32_t s3Begin() { TAOS_RETURN(TSDB_CODE_SUCCESS); } + +void s3End() {} +int32_t s3CheckCfg() { return 0; } int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; } int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; } int32_t s3PutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) { return 0; } diff --git a/source/common/src/rsync.c b/source/common/src/rsync.c index eef889429b..b7352acf25 100644 --- a/source/common/src/rsync.c +++ b/source/common/src/rsync.c @@ -160,7 +160,11 @@ int32_t startRsync() { code = system(cmd); if (code != 0) { uError("[rsync] cmd:%s start server failed, code:%d," ERRNO_ERR_FORMAT, cmd, code, ERRNO_ERR_DATA); - code = TAOS_SYSTEM_ERROR(errno); + if (errno == 0) { + return 0; + } else { + code = TAOS_SYSTEM_ERROR(errno); + } } else { uInfo("[rsync] cmd:%s start server successful", cmd); } @@ -358,4 +362,4 @@ int32_t deleteRsync(const char* id) { uDebug("[rsync] delete data:%s successful", id); return 0; -} \ No newline at end of file +} diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 7cff5de008..5ab5500fa6 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -56,7 +56,7 @@ int32_t tsShellActivityTimer = 3; // second // queue & threads int32_t tsNumOfRpcThreads = 1; int32_t tsNumOfRpcSessions = 30000; -int32_t tsShareConnLimit = 8; +int32_t tsShareConnLimit = 10; int32_t tsReadTimeout = 900; int32_t tsTimeToGetAvailableConn = 500000; int32_t tsKeepAliveIdle = 60; @@ -308,6 +308,7 @@ 
char tsS3AppId[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; int8_t tsS3Enabled = false; int8_t tsS3EnabledCfg = false; int8_t tsS3Oss[TSDB_MAX_EP_NUM] = {false}; +int8_t tsS3Ablob = false; int8_t tsS3StreamEnabled = false; int8_t tsS3Https[TSDB_MAX_EP_NUM] = {true}; @@ -436,6 +437,7 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { } tsS3Https[i] = (strstr(tsS3Endpoint[i], "https://") != NULL); tsS3Oss[i] = (strstr(tsS3Endpoint[i], "aliyuncs.") != NULL); + tsS3Ablob = (strstr(tsS3Endpoint[i], ".blob.core.windows.net") != NULL); } if (tsS3BucketName[0] != '<') { @@ -542,6 +544,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "vDebugFlag", vDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "mDebugFlag", mDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "wDebugFlag", wDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "azDebugFlag", azDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); @@ -1063,6 +1066,9 @@ static int32_t taosSetServerLogCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "wDebugFlag"); wDebugFlag = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "azDebugFlag"); + azDebugFlag = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "sDebugFlag"); sDebugFlag = pItem->i32; @@ -1987,13 +1993,14 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { { // 'bool/int32_t/int64_t/float/double' variables with general modification function static OptionNameAndVar debugOptions[] = { - {"dDebugFlag", &dDebugFlag}, {"vDebugFlag", &vDebugFlag}, {"mDebugFlag", &mDebugFlag}, - {"wDebugFlag", &wDebugFlag}, {"sDebugFlag", &sDebugFlag}, {"tsdbDebugFlag", &tsdbDebugFlag}, - {"tqDebugFlag", &tqDebugFlag}, {"fsDebugFlag", &fsDebugFlag}, {"udfDebugFlag", &udfDebugFlag}, - {"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"tdbDebugFlag", &tdbDebugFlag}, - {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, - {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, - {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, {"tqClientDebug", &tqClientDebug}, + {"dDebugFlag", &dDebugFlag}, {"vDebugFlag", &vDebugFlag}, {"mDebugFlag", &mDebugFlag}, + {"wDebugFlag", &wDebugFlag}, {"azDebugFlag", &azDebugFlag}, {"sDebugFlag", &sDebugFlag}, + {"tsdbDebugFlag", &tsdbDebugFlag}, {"tqDebugFlag", &tqDebugFlag}, {"fsDebugFlag", &fsDebugFlag}, + {"udfDebugFlag", &udfDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, + {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, + {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, + {"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, + {"tqClientDebug", &tqClientDebug}, }; static OptionNameAndVar options[] = {{"audit", &tsEnableAudit}, @@ -2371,6 +2378,7 @@ static int32_t taosSetAllDebugFlag(SConfig *pCfg, int32_t flag) { taosCheckAndSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, noNeedToSetVars); 
taosCheckAndSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, noNeedToSetVars); taosCheckAndSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&azDebugFlag, "azDebugFlag", flag, noNeedToSetVars); taosCheckAndSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, noNeedToSetVars); taosCheckAndSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, noNeedToSetVars); taosCheckAndSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, noNeedToSetVars); diff --git a/source/dnode/mgmt/CMakeLists.txt b/source/dnode/mgmt/CMakeLists.txt index d72301279e..5d356e06b1 100644 --- a/source/dnode/mgmt/CMakeLists.txt +++ b/source/dnode/mgmt/CMakeLists.txt @@ -10,29 +10,28 @@ add_subdirectory(test) aux_source_directory(exe EXEC_SRC) add_executable(taosd ${EXEC_SRC}) target_include_directories( - taosd - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc" + taosd + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc" ) -IF (TD_ENTERPRISE) +IF(TD_ENTERPRISE) IF(${BUILD_WITH_S3}) - add_definitions(-DUSE_S3) + add_definitions(-DUSE_S3) ELSEIF(${BUILD_WITH_COS}) - add_definitions(-DUSE_COS) + add_definitions(-DUSE_COS) ENDIF() ENDIF() -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") -ELSE () +ELSE() SET(LINK_JEMALLOC "") -ENDIF () - -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) - ADD_DEPENDENCIES(taosd jemalloc) - target_link_libraries(taosd dnode crypt ${LINK_JEMALLOC}) -ELSE () - target_link_libraries(taosd dnode crypt) -ENDIF () +ENDIF() +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEPENDENCIES(taosd jemalloc) + target_link_libraries(taosd dnode crypt ${LINK_JEMALLOC}) +ELSE() + target_link_libraries(taosd dnode crypt) +ENDIF() diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 040eafbcf1..f94b9e2d73 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -24,6 +24,7 @@ #include "jemalloc/jemalloc.h" #endif #include "dmUtil.h" +#include "tcs.h" #if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) #include "cus_name.h" @@ -330,10 +331,9 @@ static int32_t dmCheckS3() { int32_t code = 0; SConfig *pCfg = taosGetCfg(); cfgDumpCfgS3(pCfg, 0, true); -#if defined(USE_S3) - extern int32_t s3CheckCfg(); - code = s3CheckCfg(); +#if defined(USE_S3) + code = tcsCheckCfg(); #endif return code; } diff --git a/source/dnode/mgmt/mgmt_dnode/CMakeLists.txt b/source/dnode/mgmt/mgmt_dnode/CMakeLists.txt index f7920d3d8e..76e51ac44f 100644 --- a/source/dnode/mgmt/mgmt_dnode/CMakeLists.txt +++ b/source/dnode/mgmt/mgmt_dnode/CMakeLists.txt @@ -1,24 +1,25 @@ aux_source_directory(src MGMT_DNODE) add_library(mgmt_dnode STATIC ${MGMT_DNODE}) -if (DEFINED GRANT_CFG_INCLUDE_DIR) + +if(DEFINED GRANT_CFG_INCLUDE_DIR) add_definitions(-DGRANTS_CFG) endif() -IF (NOT BUILD_DM_MODULE) +if(NOT BUILD_DM_MODULE) MESSAGE(STATUS "NOT BUILD_DM_MODULE") target_link_directories( mgmt_dnode PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/dm_static" ) -ELSE() - MESSAGE(STATUS "BUILD_DM_MODULE") -ENDIF() +else() + MESSAGE(STATUS "BUILD_DM_MODULE") +endif() target_include_directories( - mgmt_dnode - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc" - PUBLIC "${GRANT_CFG_INCLUDE_DIR}" + mgmt_dnode + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc" + PUBLIC "${GRANT_CFG_INCLUDE_DIR}" ) target_link_libraries( - mgmt_dnode node_util dmodule + mgmt_dnode 
node_util dmodule ) \ No newline at end of file diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 3cf0382eba..20618dbdf3 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -213,13 +213,13 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) taosQueueGetThreadId(pVnode->pApplyW.queue)); tMultiWorkerCleanup(&pVnode->pApplyW); - dInfo("vgId:%d, wait for vnode query queue:%p is empty", pVnode->vgId, pVnode->pQueryQ); - while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10); - dInfo("vgId:%d, wait for vnode fetch queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ, taosQueueGetThreadId(pVnode->pFetchQ)); while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10); + dInfo("vgId:%d, wait for vnode query queue:%p is empty", pVnode->vgId, pVnode->pQueryQ); + while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10); + tqNotifyClose(pVnode->pImpl->pTq); dInfo("vgId:%d, wait for vnode stream queue:%p is empty", pVnode->vgId, pVnode->pStreamQ); while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10); diff --git a/source/dnode/mgmt/node_mgmt/CMakeLists.txt b/source/dnode/mgmt/node_mgmt/CMakeLists.txt index 82b9384d66..f5198681bc 100644 --- a/source/dnode/mgmt/node_mgmt/CMakeLists.txt +++ b/source/dnode/mgmt/node_mgmt/CMakeLists.txt @@ -1,22 +1,22 @@ aux_source_directory(src IMPLEMENT_SRC) add_library(dnode STATIC ${IMPLEMENT_SRC}) target_link_libraries( - dnode mgmt_mnode mgmt_qnode mgmt_snode mgmt_vnode mgmt_dnode monitorfw + dnode mgmt_mnode mgmt_qnode mgmt_snode mgmt_vnode mgmt_dnode monitorfw tcs ) -IF (TD_ENTERPRISE) +IF(TD_ENTERPRISE) IF(${BUILD_WITH_S3}) - add_definitions(-DUSE_S3) + add_definitions(-DUSE_S3) ELSEIF(${BUILD_WITH_COS}) - add_definitions(-DUSE_COS) + add_definitions(-DUSE_COS) ENDIF() ENDIF() -IF (DEFINED GRANT_CFG_INCLUDE_DIR) +IF(DEFINED GRANT_CFG_INCLUDE_DIR) add_definitions(-DGRANTS_CFG) ENDIF() target_include_directories( - dnode - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + dnode + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 0c2bd2bc0f..694cc52d64 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -20,6 +20,7 @@ #include "libs/function/tudf.h" #include "tgrant.h" #include "tcompare.h" +#include "tcs.h" #include "tanal.h" // clang-format on @@ -98,9 +99,9 @@ static bool dmDataSpaceAvailable() { static int32_t dmCheckDiskSpace() { // availability int32_t code = 0; - code = osUpdate(); - if(code != 0) { - code = 0; // ignore the error, just log it + code = osUpdate(); + if (code != 0) { + code = 0; // ignore the error, just log it dError("failed to update os info since %s", tstrerror(code)); } if (!dmDataSpaceAvailable()) { @@ -163,13 +164,6 @@ static int32_t dmCheckDataDirVersionWrapper() { } return 0; } -#if defined(USE_S3) - -extern int32_t s3Begin(); -extern void s3End(); -extern int8_t tsS3Enabled; - -#endif int32_t dmInit() { dInfo("start to init dnode env"); @@ -187,7 +181,7 @@ int32_t dmInit() { if ((code = dmInitDnode(dmInstance())) != 0) return code; if ((code = InitRegexCache() != 0)) return code; #if defined(USE_S3) - if ((code = s3Begin()) != 0) return code; + if ((code = tcsInit()) != 0) return code; #endif dInfo("dnode env is initialized"); @@ -221,7 +215,7 @@ void dmCleanup() { DestroyRegexCache(); #if defined(USE_S3) - s3End(); + tcsUninit(); #endif dInfo("dnode 
env is cleaned up"); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 28d6113bba..fd593e0638 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -16,8 +16,8 @@ #define _DEFAULT_SOURCE #include "dmMgmt.h" #include "qworker.h" -#include "tversion.h" #include "tanal.h" +#include "tversion.h" static inline void dmSendRsp(SRpcMsg *pMsg) { if (rpcSendResponse(pMsg) != 0) { @@ -411,7 +411,7 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.noDelayFp = rpcNoDelayMsg; - int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3) / 2; + int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3); connLimitNum = TMAX(connLimitNum, 10); connLimitNum = TMIN(connLimitNum, 500); diff --git a/source/dnode/mnode/impl/CMakeLists.txt b/source/dnode/mnode/impl/CMakeLists.txt index 1a74b6fa9f..8a390948ae 100644 --- a/source/dnode/mnode/impl/CMakeLists.txt +++ b/source/dnode/mnode/impl/CMakeLists.txt @@ -1,44 +1,46 @@ aux_source_directory(src MNODE_SRC) -IF (TD_PRIVILEGE) + +if(TD_PRIVILEGE) ADD_DEFINITIONS(-D_PRIVILEGE) -ENDIF () -IF (TD_ENTERPRISE) +endif() + +if(TD_ENTERPRISE) LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c) LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDb.c) LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndVgroup.c) LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDnode.c) LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/mndView.c) - IF(${BUILD_WITH_S3}) + if(${BUILD_WITH_S3}) add_definitions(-DUSE_S3) ELSEIF(${BUILD_WITH_COS}) add_definitions(-DUSE_COS) - ENDIF() + endif() - IF(${BUILD_WITH_ANALYSIS}) + if(${BUILD_WITH_ANALYSIS}) add_definitions(-DUSE_ANAL) - ENDIF() -ENDIF () + endif() +endif() add_library(mnode STATIC ${MNODE_SRC}) target_include_directories( - mnode - PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + mnode + PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser audit monitorfw + mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser audit monitorfw ) -IF (DEFINED GRANT_CFG_INCLUDE_DIR) +if(DEFINED GRANT_CFG_INCLUDE_DIR) add_definitions(-DGRANTS_CFG) -ENDIF() +endif() -IF (TD_GRANT) +if(TD_GRANT) TARGET_LINK_LIBRARIES(mnode grant) ADD_DEFINITIONS(-D_GRANT) -ENDIF () +endif() if(${BUILD_TEST}) - add_subdirectory(test) + add_subdirectory(test) endif(${BUILD_TEST}) diff --git a/source/dnode/mnode/impl/test/arbgroup/CMakeLists.txt b/source/dnode/mnode/impl/test/arbgroup/CMakeLists.txt index 44ac305498..0da36e1f67 100644 --- a/source/dnode/mnode/impl/test/arbgroup/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/arbgroup/CMakeLists.txt @@ -4,7 +4,7 @@ aux_source_directory(. 
MNODE_ARBGROUP_TEST_SRC) add_executable(arbgroupTest ${MNODE_ARBGROUP_TEST_SRC}) target_link_libraries( arbgroupTest - PRIVATE dnode nodes planner gtest qcom + PRIVATE dnode nodes planner gtest qcom tcs ) add_test( diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index f70a8844ba..c377e69f03 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -1,24 +1,26 @@ # vnode add_subdirectory(src/tqCommon) add_library(vnode STATIC "") + if(${TD_DARWIN}) target_compile_options(vnode PRIVATE -Wno-error=single-bit-bitfield-constant-conversion) endif(${TD_DARWIN}) + set( - VNODE_SOURCE_FILES - "src/vnd/vnodeOpen.c" - "src/vnd/vnodeBufPool.c" - "src/vnd/vnodeCfg.c" - "src/vnd/vnodeCommit.c" - "src/vnd/vnodeQuery.c" - "src/vnd/vnodeModule.c" - "src/vnd/vnodeSvr.c" - "src/vnd/vnodeSync.c" - "src/vnd/vnodeSnapshot.c" - "src/vnd/vnodeRetention.c" - "src/vnd/vnodeInitApi.c" - "src/vnd/vnodeAsync.c" - "src/vnd/vnodeHash.c" + VNODE_SOURCE_FILES + "src/vnd/vnodeOpen.c" + "src/vnd/vnodeBufPool.c" + "src/vnd/vnodeCfg.c" + "src/vnd/vnodeCommit.c" + "src/vnd/vnodeQuery.c" + "src/vnd/vnodeModule.c" + "src/vnd/vnodeSvr.c" + "src/vnd/vnodeSync.c" + "src/vnd/vnodeSnapshot.c" + "src/vnd/vnodeRetention.c" + "src/vnd/vnodeInitApi.c" + "src/vnd/vnodeAsync.c" + "src/vnd/vnodeHash.c" # meta "src/meta/metaOpen.c" @@ -40,23 +42,23 @@ set( "src/sma/smaSnapshot.c" "src/sma/smaTimeRange.c" - # # tsdb - # "src/tsdb/tsdbCommit.c" - # "src/tsdb/tsdbFile.c" - # "src/tsdb/tsdbFS.c" - # "src/tsdb/tsdbOpen.c" - # "src/tsdb/tsdbMemTable.c" - # "src/tsdb/tsdbRead.c" - # "src/tsdb/tsdbCache.c" - # "src/tsdb/tsdbWrite.c" - # "src/tsdb/tsdbReaderWriter.c" - # "src/tsdb/tsdbUtil.c" - # "src/tsdb/tsdbSnapshot.c" - # "src/tsdb/tsdbCacheRead.c" - # "src/tsdb/tsdbRetention.c" - # "src/tsdb/tsdbDiskData.c" - # "src/tsdb/tsdbMergeTree.c" - # "src/tsdb/tsdbDataIter.c" + # # tsdb + # "src/tsdb/tsdbCommit.c" + # "src/tsdb/tsdbFile.c" + # "src/tsdb/tsdbFS.c" + # "src/tsdb/tsdbOpen.c" + # "src/tsdb/tsdbMemTable.c" + # "src/tsdb/tsdbRead.c" + # "src/tsdb/tsdbCache.c" + # "src/tsdb/tsdbWrite.c" + # "src/tsdb/tsdbReaderWriter.c" + # "src/tsdb/tsdbUtil.c" + # "src/tsdb/tsdbSnapshot.c" + # "src/tsdb/tsdbCacheRead.c" + # "src/tsdb/tsdbRetention.c" + # "src/tsdb/tsdbDiskData.c" + # "src/tsdb/tsdbMergeTree.c" + # "src/tsdb/tsdbDataIter.c" # tq "src/tq/tq.c" @@ -71,14 +73,13 @@ set( "src/tq/tqSnapshot.c" "src/tq/tqStreamStateSnap.c" "src/tq/tqStreamTaskSnap.c" - ) aux_source_directory("src/tsdb/" TSDB_SOURCE_FILES) list( - APPEND - VNODE_SOURCE_FILES - ${TSDB_SOURCE_FILES} + APPEND + VNODE_SOURCE_FILES + ${TSDB_SOURCE_FILES} ) target_sources( @@ -87,38 +88,38 @@ target_sources( ${VNODE_SOURCE_FILES} ) -IF (TD_VNODE_PLUGINS) - target_sources( - vnode - PRIVATE - ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompact.c - ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompactMonitor.c - ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/vnodeCompact.c - ) -ENDIF () +if(TD_VNODE_PLUGINS) + target_sources( + vnode + PRIVATE + ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompact.c + ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/tsdbCompactMonitor.c + ${TD_ENTERPRISE_DIR}/src/plugins/vnode/src/vnodeCompact.c + ) +endif() -# IF (NOT ${TD_LINUX}) +# if (NOT ${TD_LINUX}) # target_include_directories( -# vnode -# PUBLIC "inc" -# PUBLIC "src/inc" -# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" -# PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" +# vnode +# PUBLIC "inc" +# PUBLIC "src/inc" +# PUBLIC 
"${TD_SOURCE_DIR}/include/libs/scalar" +# PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" # ) # ELSE() # target_include_directories( -# vnode -# PUBLIC "inc" -# PUBLIC "src/inc" -# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" +# vnode +# PUBLIC "inc" +# PUBLIC "src/inc" +# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" # ) -#ENDIF(NOT ${TD_LINUX}) - -if (${BUILD_CONTRIB}) +# endif(NOT ${TD_LINUX}) +if(${BUILD_CONTRIB}) target_include_directories( vnode PUBLIC "inc" PUBLIC "src/inc" + PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs" PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" PUBLIC "${TD_SOURCE_DIR}/include/libs/crypt" PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode" @@ -129,24 +130,26 @@ else() vnode PUBLIC "inc" PUBLIC "src/inc" + PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs" PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" PUBLIC "${TD_SOURCE_DIR}/include/libs/crypt" PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode" ) - if (${TD_LINUX}) - target_include_directories( - vnode + + if(${TD_LINUX}) + target_include_directories( + vnode PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" ) target_link_directories( - vnode + vnode PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" ) endif() endif() target_link_directories( - vnode + vnode PUBLIC "${CMAKE_BINARY_DIR}/build/lib" ) @@ -164,10 +167,11 @@ target_link_libraries( PUBLIC tdb PUBLIC audit PUBLIC crypt + PUBLIC tcs # PUBLIC bdb # PUBLIC scalar - #PUBLIC zstd + # PUBLIC zstd PUBLIC rocksdb PUBLIC transport PUBLIC stream @@ -175,9 +179,9 @@ target_link_libraries( PUBLIC tqCommon ) -IF (TD_GRANT) - TARGET_LINK_LIBRARIES(vnode PUBLIC grant) -ENDIF () +if(TD_GRANT) + TARGET_LINK_LIBRARIES(vnode PUBLIC grant) +endif() target_compile_definitions(vnode PUBLIC -DMETA_REFACT) diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 8f2c0b5a5e..659ba3f777 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -496,6 +496,7 @@ void metaULock(SMeta *pMeta) { static void metaCleanup(SMeta **ppMeta) { SMeta *pMeta = *ppMeta; if (pMeta) { + metaInfo("vgId:%d meta clean up, path:%s", TD_VID(pMeta->pVnode), pMeta->path); if (pMeta->pEnv) metaAbort(pMeta); if (pMeta->pCache) metaCacheClose(pMeta); #ifdef BUILD_NO_CALL diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 89a51eb0f5..5583e464ed 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -12,8 +12,8 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ -#include "cos.h" #include "functionMgt.h" +#include "tcs.h" #include "tsdb.h" #include "tsdbDataFileRW.h" #include "tsdbIter.h" @@ -1258,7 +1258,8 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray } if (NULL == pLastCol || cmp_res < 0 || (cmp_res == 0 && !COL_VAL_IS_NONE(pColVal))) { - SLastCol lastColTmp = {.rowKey = *pRowKey, .colVal = *pColVal, .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_VALID}; + SLastCol lastColTmp = { + .rowKey = *pRowKey, .colVal = *pColVal, .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_VALID}; if ((code = tsdbCachePutToRocksdb(pTsdb, &idxKey->key, &lastColTmp)) != TSDB_CODE_SUCCESS) { tsdbError("tsdb/cache: vgId:%d, put rocks failed at line %d since %s.", TD_VID(pTsdb->pVnode), lino, tstrerror(code)); @@ -1705,8 +1706,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA if (pLastCol && pLastCol->cacheStatus != TSDB_LAST_CACHE_NO_CACHE) { code = tsdbCachePutToLRU(pTsdb, &idxKey->key, pLastCol, 0); if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); taosMemoryFreeClear(pToFree); TAOS_CHECK_EXIT(code); } @@ -3064,9 +3064,8 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow, bool *pI iMax[nMax] = i; max[nMax++] = pIter->input[i].pRow; - } else { - pIter->input[i].next = false; } + pIter->input[i].next = false; } } @@ -3520,7 +3519,7 @@ static int32_t tsdbCacheLoadBlockS3(STsdbFD *pFD, uint8_t **ppBlock) { int64_t block_offset = (pFD->blkno - 1) * tsS3BlockSize * pFD->szPage; - TAOS_CHECK_RETURN(s3GetObjectBlock(pFD->objName, block_offset, tsS3BlockSize * pFD->szPage, 0, ppBlock)); + TAOS_CHECK_RETURN(tcsGetObjectBlock(pFD->objName, block_offset, tsS3BlockSize * pFD->szPage, 0, ppBlock)); tsdbTrace("block:%p load from s3", *ppBlock); diff --git a/source/dnode/vnode/src/tsdb/tsdbFile2.c b/source/dnode/vnode/src/tsdb/tsdbFile2.c index da78d67db3..ad5f02d601 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile2.c @@ -14,7 +14,7 @@ */ #include "tsdbFile2.h" -#include "cos.h" +#include "tcs.h" #include "vnd.h" // to_json @@ -318,7 +318,7 @@ static void tsdbTFileObjRemoveLC(STFileObj *fobj, bool remove_all) { } *(dot + 1) = 0; - s3DeleteObjectsByPrefix(object_name_prefix); + tcsDeleteObjectsByPrefix(object_name_prefix); // remove local last chunk file dot = strrchr(lc_path, '.'); diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index d867318e1c..53e1c57f14 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#include "cos.h" #include "crypt.h" +#include "tcs.h" #include "tsdb.h" #include "tsdbDef.h" #include "vnd.h" @@ -391,7 +391,7 @@ static int32_t tsdbReadFileBlock(STsdbFD *pFD, int64_t offset, int64_t size, boo snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", chunkno); - code = s3GetObjectBlock(object_name_prefix, cOffset, nRead, check, &pBlock); + code = tcsGetObjectBlock(object_name_prefix, cOffset, nRead, check, &pBlock); TSDB_CHECK_CODE(code, lino, _exit); memcpy(buf + n, pBlock, nRead); diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index cbe2ab4b8e..0072fd5e7f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -13,7 +13,7 @@ * along with this program. If not, see . */ -#include "cos.h" +#include "tcs.h" #include "tsdb.h" #include "tsdbFS2.h" #include "vnd.h" @@ -426,35 +426,6 @@ static int32_t tsdbS3FidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int32_t s3Kee } } -static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) { - int32_t code = 0; - int32_t lino = 0; - - char fname[TSDB_FILENAME_LEN]; - TdFilePtr fdFrom = NULL; - // TdFilePtr fdTo = NULL; - - tsdbTFileName(rtner->tsdb, to, fname); - - fdFrom = taosOpenFile(from->fname, TD_FILE_READ); - if (fdFrom == NULL) { - TAOS_CHECK_GOTO(terrno, &lino, _exit); - } - - char *object_name = taosDirEntryBaseName(fname); - TAOS_CHECK_GOTO(s3PutObjectFromFile2(from->fname, object_name, 1), &lino, _exit); - -_exit: - if (code) { - tsdbError("vgId:%d %s failed at line %s:%d since %s", TD_VID(rtner->tsdb->pVnode), __func__, __FILE__, lino, - tstrerror(code)); - } - if (taosCloseFile(&fdFrom) != 0) { - tsdbTrace("vgId:%d, failed to close file", TD_VID(rtner->tsdb->pVnode)); - } - return code; -} - static int32_t tsdbMigrateDataFileLCS3(SRTNer *rtner, const STFileObj *fobj, int64_t size, int64_t chunksize) { int32_t code = 0; int32_t lino = 0; @@ -519,7 +490,7 @@ static int32_t tsdbMigrateDataFileLCS3(SRTNer *rtner, const STFileObj *fobj, int snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", cn); int64_t c_offset = chunksize * (cn - fobj->f->lcn); - TAOS_CHECK_GOTO(s3PutObjectFromFileOffset(fname, object_name_prefix, c_offset, chunksize), &lino, _exit); + TAOS_CHECK_GOTO(tcsPutObjectFromFileOffset(fname, object_name_prefix, c_offset, chunksize), &lino, _exit); } // copy last chunk @@ -618,7 +589,7 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, int64 snprintf(dot + 1, TSDB_FQDN_LEN - (dot + 1 - object_name_prefix), "%d.data", cn); int64_t c_offset = chunksize * (cn - 1); - TAOS_CHECK_GOTO(s3PutObjectFromFileOffset(fobj->fname, object_name_prefix, c_offset, chunksize), &lino, _exit); + TAOS_CHECK_GOTO(tcsPutObjectFromFileOffset(fobj->fname, object_name_prefix, c_offset, chunksize), &lino, _exit); } // copy last chunk @@ -741,8 +712,6 @@ _exit: int32_t tsdbAsyncS3Migrate(STsdb *tsdb, int64_t now) { int32_t code = 0; - extern int8_t tsS3EnabledCfg; - int32_t expired = grantCheck(TSDB_GRANT_OBJECT_STORAGE); if (expired && tsS3Enabled) { tsdbWarn("s3 grant expired: %d", expired); diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 0d04486925..53365303b0 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#include "cos.h" #include "sync.h" +#include "tcs.h" #include "tsdb.h" #include "vnd.h" @@ -327,7 +327,7 @@ void vnodeDestroy(int32_t vgId, const char *path, STfs *pTfs, int32_t nodeId) { if (nodeId > 0 && vgId > 0 /*&& nlevel > 1*/ && tsS3Enabled) { char vnode_prefix[TSDB_FILENAME_LEN]; snprintf(vnode_prefix, TSDB_FILENAME_LEN, "%d/v%df", nodeId, vgId); - s3DeleteObjectsByPrefix(vnode_prefix); + tcsDeleteObjectsByPrefix(vnode_prefix); } } diff --git a/source/libs/CMakeLists.txt b/source/libs/CMakeLists.txt index 64209572f4..033582f2c0 100644 --- a/source/libs/CMakeLists.txt +++ b/source/libs/CMakeLists.txt @@ -22,4 +22,6 @@ add_subdirectory(stream) add_subdirectory(planner) add_subdirectory(qworker) add_subdirectory(geometry) -add_subdirectory(command) \ No newline at end of file +add_subdirectory(command) +add_subdirectory(azure) +add_subdirectory(tcs) diff --git a/source/libs/audit/CMakeLists.txt b/source/libs/audit/CMakeLists.txt index 2a04f084f1..14648cc1a2 100644 --- a/source/libs/audit/CMakeLists.txt +++ b/source/libs/audit/CMakeLists.txt @@ -1,7 +1,8 @@ aux_source_directory(src AUDIT_SRC) -IF (TD_ENTERPRISE) + +IF(TD_ENTERPRISE) LIST(APPEND AUDIT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/audit/src/audit.c) -ENDIF () +ENDIF() add_library(audit STATIC ${AUDIT_SRC}) target_include_directories( diff --git a/source/libs/azure/CMakeLists.txt b/source/libs/azure/CMakeLists.txt new file mode 100644 index 0000000000..1516a35c4d --- /dev/null +++ b/source/libs/azure/CMakeLists.txt @@ -0,0 +1,33 @@ +# if(${TD_LINUX}) +aux_source_directory(src AZ_SRC) + +add_library(az STATIC ${AZ_SRC}) + +if(${BUILD_S3}) + add_definitions(-DUSE_S3) + target_link_libraries( + az + PUBLIC _azure_sdk + PUBLIC crypt + ) +endif() + +target_include_directories( + az + PUBLIC "${TD_SOURCE_DIR}/include/libs/azure" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" +) + +target_link_libraries( + az + PUBLIC cjson + PUBLIC os + PUBLIC util + PUBLIC common +) + +if(${BUILD_TEST}) + add_subdirectory(test) +endif(${BUILD_TEST}) + +# endif(${TD_LINUX}) diff --git a/source/libs/azure/inc/azInt.h b/source/libs/azure/inc/azInt.h new file mode 100644 index 0000000000..3538e925c7 --- /dev/null +++ b/source/libs/azure/inc/azInt.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_AZ_INT_H_ +#define _TD_AZ_INT_H_ + +#include "os.h" +#include "tarray.h" +#include "tdef.h" +#include "tlog.h" +#include "tmsg.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// clang-format off +#define azFatal(...) { if (azDebugFlag & DEBUG_FATAL) { taosPrintLog("AZR FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} +#define azError(...) { if (azDebugFlag & DEBUG_ERROR) { taosPrintLog("AZR ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} +#define azWarn(...) { if (azDebugFlag & DEBUG_WARN) { taosPrintLog("AZR WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} +#define azInfo(...) { if (azDebugFlag & DEBUG_INFO) { taosPrintLog("AZR ", DEBUG_INFO, 255, __VA_ARGS__); }} +#define azDebug(...) 
{ if (azDebugFlag & DEBUG_DEBUG) { taosPrintLog("AZR ", DEBUG_DEBUG, azDebugFlag, __VA_ARGS__); }}
+#define azTrace(...) { if (azDebugFlag & DEBUG_TRACE) { taosPrintLog("AZR ", DEBUG_TRACE, azDebugFlag, __VA_ARGS__); }}
+// clang-format on
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // _TD_AZ_INT_H_
diff --git a/source/libs/azure/inc/td_avro_parser.h b/source/libs/azure/inc/td_avro_parser.h
new file mode 100644
index 0000000000..dae5a65dc7
--- /dev/null
+++ b/source/libs/azure/inc/td_avro_parser.h
@@ -0,0 +1,191 @@
+#pragma once
+
+#include "azure/storage/blobs/blob_options.hpp"
+
+#include <azure/core/io/body_stream.hpp>
+
+#include <map>
+#include <memory>
+#include <vector>
+
+namespace Azure {
+namespace Storage {
+namespace Blobs {
+namespace _detail {
+enum class AvroDatumType {
+  String,
+  Bytes,
+  Int,
+  Long,
+  Float,
+  Double,
+  Bool,
+  Null,
+  Record,
+  Enum,
+  Array,
+  Map,
+  Union,
+  Fixed,
+};
+
+class AvroStreamReader final {
+ public:
+  // position of a vector that lives through vector resizing
+  struct ReaderPos final {
+    const std::vector<uint8_t>* BufferPtr = nullptr;
+    size_t Offset = 0;
+  };
+  explicit AvroStreamReader(Core::IO::BodyStream& stream) : m_stream(&stream), m_pos{&m_streambuffer, 0} {}
+  AvroStreamReader(const AvroStreamReader&) = delete;
+  AvroStreamReader& operator=(const AvroStreamReader&) = delete;
+
+  int64_t ParseInt(const Core::Context& context);
+  void Advance(size_t n, const Core::Context& context);
+  // Read at least n bytes from m_stream and append data to m_streambuffer. Return number of bytes
+  // available in m_streambuffer;
+  size_t Preload(size_t n, const Core::Context& context);
+  size_t TryPreload(size_t n, const Core::Context& context);
+  // discards data that's before m_pos
+  void Discard();
+
+ private:
+  size_t AvailableBytes() const { return m_streambuffer.size() - m_pos.Offset; }
+
+ private:
+  Core::IO::BodyStream* m_stream;
+  std::vector<uint8_t> m_streambuffer;
+  ReaderPos m_pos;
+
+  friend class AvroDatum;
+};
+
+class AvroSchema final {
+ public:
+  static const AvroSchema StringSchema;
+  static const AvroSchema BytesSchema;
+  static const AvroSchema IntSchema;
+  static const AvroSchema LongSchema;
+  static const AvroSchema FloatSchema;
+  static const AvroSchema DoubleSchema;
+  static const AvroSchema BoolSchema;
+  static const AvroSchema NullSchema;
+  static AvroSchema RecordSchema(std::string name, const std::vector<std::pair<std::string, AvroSchema>>& fieldsSchema);
+  static AvroSchema ArraySchema(AvroSchema elementSchema);
+  static AvroSchema MapSchema(AvroSchema elementSchema);
+  static AvroSchema UnionSchema(std::vector<AvroSchema> schemas);
+  static AvroSchema FixedSchema(std::string name, int64_t size);
+
+  const std::string& Name() const { return m_name; }
+  AvroDatumType Type() const { return m_type; }
+  const std::vector<std::string>& FieldNames() const { return m_status->m_keys; }
+  AvroSchema ItemSchema() const { return m_status->m_schemas[0]; }
+  const std::vector<AvroSchema>& FieldSchemas() const { return m_status->m_schemas; }
+  size_t Size() const { return static_cast<size_t>(m_status->m_size); }
+
+ private:
+  explicit AvroSchema(AvroDatumType type) : m_type(type) {}
+
+ private:
+  AvroDatumType m_type;
+  std::string m_name;
+
+  struct SharedStatus {
+    std::vector<std::string> m_keys;
+    std::vector<AvroSchema> m_schemas;
+    int64_t m_size = 0;
+  };
+  std::shared_ptr<SharedStatus> m_status;
+};
+
+class AvroDatum final {
+ public:
+  AvroDatum() : m_schema(AvroSchema::NullSchema) {}
+  explicit AvroDatum(AvroSchema schema) : m_schema(std::move(schema)) {}
+
+  void Fill(AvroStreamReader& reader, const Core::Context& context);
+  void Fill(AvroStreamReader::ReaderPos& data);
+
+  const AvroSchema& Schema() const { return m_schema; }
+
+  template <class T>
+  T Value() const;
+  struct StringView {
+    const uint8_t* Data = nullptr;
+    size_t Length = 0;
+  };
+
+ private:
+  AvroSchema m_schema;
+  AvroStreamReader::ReaderPos m_data;
+};
+
+using AvroMap = std::map<std::string, AvroDatum>;
+
+class AvroRecord final {
+ public:
+  bool HasField(const std::string& key) const { return FindField(key) != m_keys->size(); }
+  const AvroDatum& Field(const std::string& key) const { return m_values.at(FindField(key)); }
+  AvroDatum& Field(const std::string& key) { return m_values.at(FindField(key)); }
+  const AvroDatum& FieldAt(size_t i) const { return m_values.at(i); }
+  AvroDatum& FieldAt(size_t i) { return m_values.at(i); }
+
+ private:
+  size_t FindField(const std::string& key) const {
+    auto i = find(m_keys->begin(), m_keys->end(), key);
+    return i - m_keys->begin();
+  }
+  const std::vector<std::string>* m_keys = nullptr;
+  std::vector<AvroDatum> m_values;
+
+  friend class AvroDatum;
+};
+
+class AvroObjectContainerReader final {
+ public:
+  explicit AvroObjectContainerReader(Core::IO::BodyStream& stream);
+
+  bool End() const { return m_eof; }
+  // Calling Next() will invalidate the previous AvroDatum returned by this function and all
+  // AvroDatums propagated from there.
+  AvroDatum Next(const Core::Context& context) { return NextImpl(m_objectSchema.get(), context); }
+
+ private:
+  AvroDatum NextImpl(const AvroSchema* schema, const Core::Context& context);
+
+ private:
+  std::unique_ptr<AvroStreamReader> m_reader;
+  std::unique_ptr<AvroSchema> m_objectSchema;
+  std::string m_syncMarker;
+  int64_t m_remainingObjectInCurrentBlock = 0;
+  bool m_eof = false;
+};
+
+class AvroStreamParser final : public Core::IO::BodyStream {
+ public:
+  explicit AvroStreamParser(std::unique_ptr<Core::IO::BodyStream> inner,
+                            std::function<void(int64_t, int64_t)> progressCallback,
+                            std::function<void(BlobQueryError)> errorCallback)
+      : m_inner(std::move(inner)),
+        m_parser(*m_inner),
+        m_progressCallback(std::move(progressCallback)),
+        m_errorCallback(std::move(errorCallback)) {}
+
+  int64_t Length() const override { return -1; }
+  void Rewind() override { this->m_inner->Rewind(); }
+
+ private:
+  size_t OnRead(uint8_t* buffer, size_t count, const Azure::Core::Context& context) override;
+
+ private:
+  std::unique_ptr<Core::IO::BodyStream> m_inner;
+  AvroObjectContainerReader m_parser;
+  std::function<void(int64_t, int64_t)> m_progressCallback;
+  std::function<void(BlobQueryError)> m_errorCallback;
+  AvroDatum::StringView m_parserBuffer;
+};
+
+}  // namespace _detail
+}  // namespace Blobs
+}  // namespace Storage
+}  // namespace Azure
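A note on the integer encoding used by this parser: ParseInt (and the buffer-based parseInt in the .cpp) decode Avro's zigzag varints, i.e. 7 payload bits per byte with the high bit as a continuation flag, followed by (n >> 1) ^ -(n & 1) to undo the zigzag mapping. A self-contained sketch of the same decoding, for illustration only and not part of this patch:

// Illustrative only, not part of this patch.
#include <cstddef>
#include <cstdint>

// Decode one Avro zigzag varint starting at buf[pos]; assumes well-formed input.
static int64_t decodeZigZagVarint(const uint8_t* buf, size_t& pos) {
  uint64_t r = 0;
  int nb = 0;
  while (true) {
    uint8_t c = buf[pos++];
    r |= (static_cast<uint64_t>(c) & 0x7f) << (nb * 7);  // accumulate 7 bits per byte
    if (!(c & 0x80)) break;                              // high bit clear: last byte
    ++nb;
  }
  return static_cast<int64_t>(r >> 1) ^ -static_cast<int64_t>(r & 0x01);  // undo zigzag
}
// Examples: {0x00} -> 0, {0x01} -> -1, {0x04} -> 2, {0x96, 0x01} -> 75.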
diff --git a/source/libs/azure/inc/td_block_blob_client.hpp b/source/libs/azure/inc/td_block_blob_client.hpp
new file mode 100644
index 0000000000..1b00104821
--- /dev/null
+++ b/source/libs/azure/inc/td_block_blob_client.hpp
@@ -0,0 +1,260 @@
+#pragma once
+
+#include "azure/storage/blobs/blob_client.hpp"
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace Azure {
+namespace Storage {
+namespace Files {
+namespace DataLake {
+class DataLakeFileClient;
+}
+}  // namespace Files
+}  // namespace Storage
+}  // namespace Azure
+
+namespace Azure {
+namespace Storage {
+namespace Blobs {
+
+/**
+ * @brief The TDBlockBlobClient allows you to manipulate Azure Storage block blobs.
+ *
+ * Block blobs let you upload large blobs efficiently. Block blobs are comprised of blocks, each
+ * of which is identified by a block ID. You create or modify a block blob by writing a set of
+ * blocks and committing them by their block IDs. Each block can be a different size.
+ *
+ * When you upload a block to a blob in your storage account, it is associated with the specified
+ * block blob, but it does not become part of the blob until you commit a list of blocks that
+ * includes the new block's ID. New blocks remain in an uncommitted state until they are
+ * specifically committed or discarded. Writing a block does not update the last modified time of
+ * an existing blob.
+ */
+class TDBlockBlobClient final : public BlobClient {
+ public:
+  /**
+   * @brief Initialize a new instance of TDBlockBlobClient.
+   *
+   * @param connectionString A connection string includes the authentication information required
+   * for your application to access data in an Azure Storage account at runtime.
+   * @param blobContainerName The name of the container containing this blob.
+   * @param blobName The name of this blob.
+   * @param options Optional client options that define the transport pipeline policies for
+   * authentication, retries, etc., that are applied to every request.
+   * @return A new TDBlockBlobClient instance.
+   */
+  static TDBlockBlobClient CreateFromConnectionString(const std::string& connectionString,
+                                                      const std::string& blobContainerName,
+                                                      const std::string& blobName,
+                                                      const BlobClientOptions& options = BlobClientOptions());
+
+  /**
+   * @brief Initialize a new instance of TDBlockBlobClient.
+   *
+   * @param blobUrl A URL referencing the blob that includes the name of the account, the name of
+   * the container, and the name of the blob.
+   * @param credential The shared key credential used to sign requests.
+   * @param options Optional client options that define the transport pipeline policies for
+   * authentication, retries, etc., that are applied to every request.
+   */
+  explicit TDBlockBlobClient(const std::string& blobUrl,
+                             std::shared_ptr<StorageSharedKeyCredential> credential,
+                             const BlobClientOptions& options = BlobClientOptions());
+
+  /**
+   * @brief Initialize a new instance of TDBlockBlobClient.
+   *
+   * @param blobUrl A URL referencing the blob that includes the name of the account, the name of
+   * the container, and the name of the blob.
+   * @param credential The token credential used to sign requests.
+   * @param options Optional client options that define the transport pipeline policies for
+   * authentication, retries, etc., that are applied to every request.
+   */
+  explicit TDBlockBlobClient(const std::string& blobUrl,
+                             std::shared_ptr<Core::Credentials::TokenCredential> credential,
+                             const BlobClientOptions& options = BlobClientOptions());
+
+  /**
+   * @brief Initialize a new instance of TDBlockBlobClient.
+   *
+   * @param blobUrl A URL referencing the blob that includes the name of the account, the name of
+   * the container, and the name of the blob, and possibly also a SAS token.
+   * @param options Optional client options that define the transport pipeline policies for
+   * authentication, retries, etc., that are applied to every request.
+   */
+  explicit TDBlockBlobClient(const std::string& blobUrl, const BlobClientOptions& options = BlobClientOptions());
+
+  /**
+   * @brief Initializes a new instance of the TDBlockBlobClient class with an identical URL
+   * source but the specified snapshot timestamp.
+   *
+   * @param snapshot The snapshot identifier.
+   * @return A new TDBlockBlobClient instance.
+   * @remarks Pass empty string to remove the snapshot returning the base blob.
+   */
+  TDBlockBlobClient WithSnapshot(const std::string& snapshot) const;
+
+  /**
+   * @brief Creates a clone of this instance that references a version ID rather than the base
+   * blob.
+   *
+   * @param versionId The version ID returning a URL to the base blob.
+   * @return A new TDBlockBlobClient instance.
+   * @remarks Pass empty string to remove the version ID returning the base blob.
+   */
+  TDBlockBlobClient WithVersionId(const std::string& versionId) const;
+
+  /**
+   * @brief Creates a new block blob, or updates the content of an existing block blob. Updating
+   * an existing block blob overwrites any existing metadata on the blob.
+   *
+   * @param content A BodyStream containing the content to upload.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A UploadBlockBlobResult describing the state of the updated block blob.
+   */
+  Azure::Response<Models::UploadBlockBlobResult> Upload(
+      Azure::Core::IO::BodyStream& content, const UploadBlockBlobOptions& options = UploadBlockBlobOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Creates a new block blob, or updates the content of an existing block blob. Updating
+   * an existing block blob overwrites any existing metadata on the blob.
+   *
+   * @param buffer A memory buffer containing the content to upload.
+   * @param bufferSize Size of the memory buffer.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A UploadBlockBlobFromResult describing the state of the updated block blob.
+   */
+  Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
+      const uint8_t* buffer, size_t bufferSize,
+      const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Creates a new block blob, or updates the content of an existing block blob. Updating
+   * an existing block blob overwrites any existing metadata on the blob.
+   *
+   * @param fileName A file containing the content to upload.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A UploadBlockBlobFromResult describing the state of the updated block blob.
+   */
+  Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
+      const std::string& fileName, const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Creates a new block blob whose content is read from the byte range
+   * [offset, offset + size) of the given file.
+   */
+  Azure::Response<Models::UploadBlockBlobFromResult> UploadFrom(
+      const std::string& fileName, int64_t offset, int64_t size,
+      const UploadBlockBlobFromOptions& options = UploadBlockBlobFromOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Creates a new Block Blob where the contents of the blob are read from a given URL.
+   *
+   * @param sourceUri Specifies the URL of the source blob.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A UploadBlockBlobFromUriResult describing the state of the updated block blob.
+   */
+  Azure::Response<Models::UploadBlockBlobFromUriResult> UploadFromUri(
+      const std::string& sourceUri, const UploadBlockBlobFromUriOptions& options = UploadBlockBlobFromUriOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Creates a new block as part of a block blob's staging area to be eventually
+   * committed via the CommitBlockList operation.
+   *
+   * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the
+   * string must be less than or equal to 64 bytes in size.
+   * @param content A BodyStream containing the content to upload.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A StageBlockResult describing the state of the updated block.
+   */
+  Azure::Response<Models::StageBlockResult> StageBlock(
+      const std::string& blockId, Azure::Core::IO::BodyStream& content,
+      const StageBlockOptions& options = StageBlockOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Creates a new block to be committed as part of a blob where the contents are read from
+   * the sourceUri.
+   *
+   * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the
+   * string must be less than or equal to 64 bytes in size.
+   * @param sourceUri Specifies the uri of the source blob. The value may be a uri of up to 2 KB
+   * in length that specifies a blob. The source blob must either be public or must be
+   * authenticated via a shared access signature. If the source blob is public, no authentication
+   * is required to perform the operation.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A StageBlockFromUriResult describing the state of the updated block blob.
+   */
+  Azure::Response<Models::StageBlockFromUriResult> StageBlockFromUri(
+      const std::string& blockId, const std::string& sourceUri,
+      const StageBlockFromUriOptions& options = StageBlockFromUriOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Writes a blob by specifying the list of block IDs that make up the blob. In order to
+   * be written as part of a blob, a block must have been successfully written to the server in a
+   * prior StageBlock operation. You can call CommitBlockList to update a blob by uploading only
+   * those blocks that have changed, then committing the new and existing blocks together. You can
+   * do this by specifying whether to commit a block from the committed block list or from the
+   * uncommitted block list, or to commit the most recently uploaded version of the block,
+   * whichever list it may belong to.
+   *
+   * @param blockIds Base64 encoded block IDs that make up the blob.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A CommitBlobBlockListResult describing the state of the updated block blob.
+   */
+  Azure::Response<Models::CommitBlockListResult> CommitBlockList(
+      const std::vector<std::string>& blockIds, const CommitBlockListOptions& options = CommitBlockListOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Retrieves the list of blocks that have been uploaded as part of a block blob. There
+   * are two block lists maintained for a blob. The Committed Block list has blocks that have been
+   * successfully committed to a given blob with CommitBlockList. The Uncommitted Block list has
+   * blocks that have been uploaded for a blob using StageBlock, but that have not yet been
+   * committed.
+   *
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A GetBlobBlockListResult describing requested block list.
+   */
+  Azure::Response<Models::GetBlockListResult> GetBlockList(
+      const GetBlockListOptions& options = GetBlockListOptions(),
+      const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  /**
+   * @brief Returns the result of a query against the blob.
+   *
+   * @param querySqlExpression The query expression in SQL.
+   * @param options Optional parameters to execute this function.
+   * @param context Context for cancelling long running operations.
+   * @return A QueryBlobResult describing the query result.
+   */
+  Azure::Response<Models::QueryBlobResult> Query(const std::string& querySqlExpression,
+                                                 const QueryBlobOptions& options = QueryBlobOptions(),
+                                                 const Azure::Core::Context& context = Azure::Core::Context()) const;
+
+  explicit TDBlockBlobClient(BlobClient blobClient);
+
+ private:
+  friend class BlobClient;
+  friend class Files::DataLake::DataLakeFileClient;
+};
+
+}  // namespace Blobs
+}  // namespace Storage
+}  // namespace Azure
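Taken together, the staging/committing model described in the class comment drives this API surface. A minimal usage sketch (illustrative only, not part of this patch; the connection string, container, blob, and file names are placeholders), exercising the offset/size UploadFrom overload that this header adds over the stock BlockBlobClient:

// Illustrative sketch only, not part of this patch; all names are placeholders.
#include "td_block_blob_client.hpp"

static void uploadMiddleChunkExample() {
  using Azure::Storage::Blobs::TDBlockBlobClient;
  auto blobClient = TDBlockBlobClient::CreateFromConnectionString(
      "DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=...;EndpointSuffix=core.windows.net",
      "td-test-container", "v2f1944ver47.3.data");
  // The offset/size overload: upload bytes [offset, offset + size) of the
  // local file as the blob's entire content, one chunk per blob.
  blobClient.UploadFrom("/var/lib/taos/vnode2/tsdb/v2f1944ver47.3.data",
                        /*offset=*/0, /*size=*/4 * 1024 * 1024);
}

This is the pattern tsdbMigrateDataFileLCS3/tsdbMigrateDataFileS3 rely on via tcsPutObjectFromFileOffset: each chunk of a data file becomes its own object, so no block list needs to be committed by the caller.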
diff --git a/source/libs/azure/src/az.cpp b/source/libs/azure/src/az.cpp
new file mode 100644
index 0000000000..831694356a
--- /dev/null
+++ b/source/libs/azure/src/az.cpp
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define ALLOW_FORBID_FUNC
+
+#include "az.h"
+#include "azInt.h"
+
+#include "os.h"
+#include "taoserror.h"
+#include "tglobal.h"
+
+#if defined(USE_S3)
+
+#include <azure/core.hpp>
+#include <azure/storage/blobs.hpp>
+#include "td_block_blob_client.hpp"
+
+// Add appropriate using namespace directives
+using namespace Azure::Storage;
+using namespace Azure::Storage::Blobs;
+
+extern char tsS3Hostname[][TSDB_FQDN_LEN];
+extern char tsS3AccessKeyId[][TSDB_FQDN_LEN];
+extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN];
+extern char tsS3BucketName[TSDB_FQDN_LEN];
+
+extern int8_t tsS3Enabled;
+extern int8_t tsS3EpNum;
+
+int32_t azBegin() { return TSDB_CODE_SUCCESS; }
+
+void azEnd() {}
+
+static void checkPrint(const char *fmt, ...) {
+  va_list arg_ptr;
+  va_start(arg_ptr, fmt);
+  (void)vfprintf(stderr, fmt, arg_ptr);
+  va_end(arg_ptr);
+}
+
+static void azDumpCfgByEp(int8_t epIndex) {
+  // clang-format off
+  checkPrint(
+      "%-24s %s\n"
+      "%-24s %s\n"
+      "%-24s %s\n"
+      "%-24s %s\n"
+      "%-24s %s\n"
+      "%-24s %s\n",
+      "hostName", tsS3Hostname[epIndex],
+      "bucketName", tsS3BucketName,
+      "protocol", "https only",
+      "uristyle", "path only",
+      "accessKey", tsS3AccessKeyId[epIndex],
+      "accessKeySecret", tsS3AccessKeySecret[epIndex]);
+  // clang-format on
+}
+
+static int32_t azListBucket(char const *bucketname) {
+  int32_t code = 0;
+  const std::string delimiter = "/";
+  std::string accountName = tsS3AccessKeyId[0];
+  std::string accountKey = tsS3AccessKeySecret[0];
+  std::string accountURL = tsS3Hostname[0];
+  accountURL = "https://" + accountURL;
+
+  try {
+    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
+
+    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
+
+    std::string containerName = bucketname;
+    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);
+
+    Azure::Storage::Blobs::ListBlobsOptions options;
+    options.Prefix = "s3";
+
+    checkPrint("objects:\n");
+    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
+      for (const auto &blob : pageResult.Blobs) {
+        checkPrint("%s\n", blob.Name.c_str());
+      }
+    }
+  } catch (const Azure::Core::RequestFailedException &e) {
+    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
+            e.ReasonPhrase.c_str());
+
+    code = TAOS_SYSTEM_ERROR(EIO);
+    TAOS_RETURN(code);
+  }
+
+  TAOS_RETURN(code);
+}
+
+int32_t azCheckCfg() {
+  int32_t code = 0, lino = 0;
+
+  azDumpCfgByEp(0);
+
+  // test put
+  char testdata[17] = "0123456789abcdef";
+  const char *objectname[] = {"s3test.txt"};
+  char path[PATH_MAX] = {0};
+  int ds_len = strlen(TD_DIRSEP);
+  int tmp_len = strlen(tsTempDir);
+
+  (void)snprintf(path, PATH_MAX, "%s", tsTempDir);
+  if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) {
+    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP);
+    (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", objectname[0]);
+  } else {
+    (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", objectname[0]);
+  }
+
+  uint8_t *pBlock = NULL;
+  int c_offset = 10;
+  int c_len = 6;
+  char buf[7] = {0};
+
+  TdFilePtr fp = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC);
+  if (!fp) {
+    checkPrint("failed to open test file: %s.\n", path);
+    TAOS_CHECK_GOTO(terrno, &lino, _next);
+  }
+  if (taosWriteFile(fp, testdata, strlen(testdata)) < 0) {
+    checkPrint("failed to write test file: %s.\n", path);
+    TAOS_CHECK_GOTO(terrno, &lino, _next);
+  }
+  if (taosFsyncFile(fp) < 0) {
+    checkPrint("failed to fsync test file: %s.\n", path);
+    TAOS_CHECK_GOTO(terrno, &lino, _next);
+  }
+  (void)taosCloseFile(&fp);
+
+  checkPrint("\nstart to put object: %s, file: %s content: %s\n", objectname[0], path, testdata);
+  code = azPutObjectFromFileOffset(path, objectname[0], 0, 16);
+  if (code != 0) {
+    checkPrint("put object %s : failed.\n", objectname[0]);
+    TAOS_CHECK_GOTO(code, &lino, _next);
+  }
+  checkPrint("put object %s: success.\n\n", objectname[0]);
+
+  // list buckets
+  checkPrint("start to list bucket %s by prefix s3.\n", tsS3BucketName);
+  code = azListBucket(tsS3BucketName);
+  if (code != 0) {
+    checkPrint("listing bucket %s : failed.\n", tsS3BucketName);
+    TAOS_CHECK_GOTO(code, &lino, _next);
+  }
+  checkPrint("listing bucket %s: success.\n\n", tsS3BucketName);
+
+  // test range get
+  checkPrint("start to range get object %s offset: %d len: %d.\n", objectname[0], c_offset, c_len);
+  code = azGetObjectBlock(objectname[0], c_offset, c_len, true, &pBlock);
+  if (code != 0) {
+    checkPrint("get object %s : failed.\n", objectname[0]);
+    TAOS_CHECK_GOTO(code, &lino, _next);
+  }
+
+  (void)memcpy(buf, pBlock, c_len);
+  taosMemoryFree(pBlock);
+  checkPrint("object content: %s\n", buf);
+  checkPrint("get object %s: success.\n\n", objectname[0]);
+
+  // delete test object
+  checkPrint("start to delete object: %s.\n", objectname[0]);
+  // code = azDeleteObjectsByPrefix(objectname[0]);
+  azDeleteObjectsByPrefix(objectname[0]);
+  /*
+  if (code != 0) {
+    (void)fprintf(stderr, "delete object %s : failed.\n", objectname[0]);
+    TAOS_CHECK_GOTO(code, &lino, _next);
+  }
+  */
+  checkPrint("delete object %s: success.\n\n", objectname[0]);
+
+_next:
+  if (fp) {
+    (void)taosCloseFile(&fp);
+  }
+
+  if (TSDB_CODE_SUCCESS != code) {
+    checkPrint("s3 check failed, code: %d, line: %d.\n", code, lino);
+  }
+
+  checkPrint("=================================================================\n");
+
+  TAOS_RETURN(code);
+}
+
+static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *object_name, int64_t offset, int64_t size) {
+  int32_t code = 0;
+
+  std::string endpointUrl = tsS3Hostname[0];
+  std::string accountName = tsS3AccessKeyId[0];
+  std::string accountKey = tsS3AccessKeySecret[0];
+
+  try {
+    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
+
+    std::string accountURL = tsS3Hostname[0];
+
+    accountURL = "https://" + accountURL;
+    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
+
+    std::string containerName = tsS3BucketName;
+    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);
+
+    std::string blobName = "blob.txt";
+    uint8_t blobContent[] = "Hello Azure!";
+    // Create the block blob client
+    // BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);
+    // TDBlockBlobClient blobClient(containerClient.GetBlobClient(blobName));
+    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));
+
+    blobClient.UploadFrom(file, offset, size);
+  } catch (const Azure::Core::RequestFailedException &e) {
+    azError("%s: Status Code: %d, Reason Phrase: %s", __func__, static_cast<int>(e.StatusCode),
+            e.ReasonPhrase.c_str());
+
+    code = TAOS_SYSTEM_ERROR(EIO);
+    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+
+    TAOS_RETURN(code);
+  }
+
+  TAOS_RETURN(code);
+}
+
+int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {
+  int32_t code = 0;
+
+  try {
+    code = azPutObjectFromFileOffsetImpl(file, object_name, offset, size);
+  } catch (const std::exception &e) {
+    azError("%s: Reason Phrase: %s", __func__, e.what());
+
+    code = TAOS_SYSTEM_ERROR(EIO);
+    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+
+    TAOS_RETURN(code);
+  }
+
+  TAOS_RETURN(code);
+}
+
+static int32_t azGetObjectBlockImpl(const char *object_name, int64_t offset, int64_t size, bool check,
+                                    uint8_t **ppBlock) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  std::string accountName = tsS3AccessKeyId[0];
+  std::string accountKey = tsS3AccessKeySecret[0];
+  std::string accountURL = tsS3Hostname[0];
+  uint8_t *buf = NULL;
+
+  try {
+    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
+
+    accountURL = "https://" + accountURL;
+    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
+
+    std::string containerName = tsS3BucketName;
+    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);
+
+    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));
+
+    Blobs::DownloadBlobToOptions options;
+    options.Range = Azure::Core::Http::HttpRange();
+    options.Range.Value().Offset = offset;
+    options.Range.Value().Length = size;
+
+    buf = (uint8_t *)taosMemoryCalloc(1, size);
+    if (!buf) {
+      return terrno;
+    }
+
+    auto res = blobClient.DownloadTo(buf, size, options);
+    if (check && res.Value.ContentRange.Length.Value() != size) {
+      code = TAOS_SYSTEM_ERROR(EIO);
+      azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+      taosMemoryFree(buf);
+      TAOS_RETURN(code);
+    }
+
+    *ppBlock = buf;
+  } catch (const Azure::Core::RequestFailedException &e) {
+    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
+            e.ReasonPhrase.c_str());
+    code = TAOS_SYSTEM_ERROR(EIO);
+    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+
+    if (buf) {
+      taosMemoryFree(buf);
+    }
+    *ppBlock = NULL;
+
+    TAOS_RETURN(code);
+  }
+
+  TAOS_RETURN(code);
+}
+
+static int32_t azGetObjectBlockRetry(const char *object_name, int64_t offset, int64_t size, bool check,
+                                     uint8_t **ppBlock) {
+  int32_t code = TSDB_CODE_SUCCESS;
+
+  // May use an exponential backoff policy for retries with 503
+  int retryCount = 0;
+  static int maxRetryCount = 5;
+  static int minRetryInterval = 1000;  // ms
+  static int maxRetryInterval = 3000;  // ms
+
+_retry:
+  code = azGetObjectBlockImpl(object_name, offset, size, check, ppBlock);
+  if (TSDB_CODE_SUCCESS != code && retryCount++ < maxRetryCount) {
+    taosMsleep(taosRand() % (maxRetryInterval - minRetryInterval + 1) + minRetryInterval);
+    uInfo("%s: 0x%x(%s) and retry get object", __func__, code, tstrerror(code));
+    goto _retry;
+  }
+
+  TAOS_RETURN(code);
+}
+
+int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
+  int32_t code = TSDB_CODE_SUCCESS;
+
+  try {
+    code = azGetObjectBlockRetry(object_name, offset, size, check, ppBlock);
+  } catch (const std::exception &e) {
+    azError("%s: Reason Phrase: %s", __func__, e.what());
+
+    code = TAOS_SYSTEM_ERROR(EIO);
+    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+
+    TAOS_RETURN(code);
+  }
+
+  TAOS_RETURN(code);
+}
+
+static void azDeleteObjectsByPrefixImpl(const char *prefix) {
+  const std::string delimiter = "/";
+  std::string accountName = tsS3AccessKeyId[0];
+  std::string accountKey = tsS3AccessKeySecret[0];
+  std::string accountURL = tsS3Hostname[0];
+  accountURL = "https://" + accountURL;
+
+  try {
+    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
+
+    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
+
+    std::string containerName = tsS3BucketName;
+    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);
+
+    Azure::Storage::Blobs::ListBlobsOptions options;
+    options.Prefix = prefix;
+
+    std::set<std::string> listBlobs;
+    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
+      for (const auto &blob : pageResult.Blobs) {
+        listBlobs.insert(blob.Name);
+      }
+    }
+
+    for (auto blobName : listBlobs) {
+      auto blobClient = containerClient.GetAppendBlobClient(blobName);
+      blobClient.Delete();
+    }
+  } catch (const Azure::Core::RequestFailedException &e) {
+    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
+            e.ReasonPhrase.c_str());
+  }
+}
+
+void azDeleteObjectsByPrefix(const char *prefix) {
+  int32_t code = TSDB_CODE_SUCCESS;
+
+  try {
+    azDeleteObjectsByPrefixImpl(prefix);
+  } catch (const std::exception &e) {
+    azError("%s: Reason Phrase: %s", __func__, e.what());
+  }
+}
+
+int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp) {
+  int32_t code = 0, lino = 0;
+  uint64_t contentLength = 0;
+
+  if (taosStatFile(file, (int64_t *)&contentLength, NULL, NULL) < 0) {
+    azError("ERROR: %s Failed to stat file %s: ", __func__, file);
+    TAOS_RETURN(terrno);
+  }
+
+  code = azPutObjectFromFileOffset(file, object, 0, contentLength);
+  if (code != 0) {
+    azError("ERROR: %s Failed to put file %s: ", __func__, file);
+    TAOS_CHECK_GOTO(code, &lino, _exit);
+  }
+
+_exit:
+  if (code) {
+    azError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+  }
+
+  TAOS_RETURN(code);
+}
+
+int32_t azGetObjectToFile(const char *object_name, const char *fileName) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  std::string accountName = tsS3AccessKeyId[0];
+  std::string accountKey = tsS3AccessKeySecret[0];
+  std::string accountURL = tsS3Hostname[0];
+  accountURL = "https://" + accountURL;
+
+  try {
+    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
+
+    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
+
+    std::string containerName = tsS3BucketName;
+    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);
+
+    TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name));
+
+    auto res = blobClient.DownloadTo(fileName);
+    if (res.Value.ContentRange.Length.Value() <= 0) {
+      code = TAOS_SYSTEM_ERROR(EIO);
+      azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+      TAOS_RETURN(code);
+    }
+  } catch (const Azure::Core::RequestFailedException &e) {
+    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
+            e.ReasonPhrase.c_str());
+    code = TAOS_SYSTEM_ERROR(EIO);
+    azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code));
+    TAOS_RETURN(code);
+  }
+
+  TAOS_RETURN(code);
+}
+
+int32_t azGetObjectsByPrefix(const char *prefix, const char *path) {
+  const std::string delimiter = "/";
+  std::string accountName = tsS3AccessKeyId[0];
+  std::string accountKey = tsS3AccessKeySecret[0];
+  std::string accountURL = tsS3Hostname[0];
+  accountURL = "https://" + accountURL;
+
+  try {
+    auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
+
+    BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
+
+    std::string containerName = tsS3BucketName;
+    auto containerClient = blobServiceClient.GetBlobContainerClient(containerName);
+
+    Azure::Storage::Blobs::ListBlobsOptions options;
+    options.Prefix = prefix;
+
+    std::set<std::string> listBlobs;
+    for (auto pageResult = containerClient.ListBlobs(options); pageResult.HasPage(); pageResult.MoveToNextPage()) {
+      for (const auto &blob : pageResult.Blobs) {
+        listBlobs.insert(blob.Name);
+      }
+    }
+
+    for (auto blobName : listBlobs) {
+      const char *tmp = strchr(blobName.c_str(), '/');
+      tmp = (tmp == NULL) ? blobName.c_str() : tmp + 1;
+      char fileName[PATH_MAX] = {0};
+      if (path[strlen(path) - 1] != TD_DIRSEP_CHAR) {
+        (void)snprintf(fileName, PATH_MAX, "%s%s%s", path, TD_DIRSEP, tmp);
+      } else {
+        (void)snprintf(fileName, PATH_MAX, "%s%s", path, tmp);
+      }
+      if (azGetObjectToFile(blobName.c_str(), fileName)) {
+        TAOS_RETURN(TSDB_CODE_FAILED);
+      }
+    }
+  } catch (const Azure::Core::RequestFailedException &e) {
+    azError("%s failed at line %d since %d(%s)", __func__, __LINE__, static_cast<int>(e.StatusCode),
+            e.ReasonPhrase.c_str());
+    TAOS_RETURN(TSDB_CODE_FAILED);
+  }
+
+  return 0;
+}
+
+int32_t azDeleteObjects(const char *object_name[], int nobject) {
+  for (int i = 0; i < nobject; ++i) {
+    azDeleteObjectsByPrefix(object_name[i]);
+  }
+
+  return 0;
+}
+
+#else
+
+int32_t azBegin() { return TSDB_CODE_SUCCESS; }
+
+void azEnd() {}
+
+int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; }
+
+int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) {
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t azGetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock) {
+  return TSDB_CODE_SUCCESS;
+}
+
+void azDeleteObjectsByPrefix(const char *prefix) {}
+
+int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; }
+
+int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { return 0; }
+
+int32_t azGetObjectToFile(const char *object_name, const char *fileName) { return 0; }
+
+int32_t azDeleteObjects(const char *object_name[], int nobject) { return 0; }
+
+#endif
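One detail worth noting in az.cpp above: despite the comment about exponential backoff, azGetObjectBlockRetry retries up to five times with a uniformly random delay drawn from [minRetryInterval, maxRetryInterval]. The delay expression reduces as follows (illustrative restatement only, not part of this patch):

// Illustrative only, not part of this patch.
// taosRand() % (3000 - 1000 + 1) is in [0, 2000], so the sleep is 1000..3000 ms.
static int retryWaitMs(int minRetryInterval, int maxRetryInterval) {
  return taosRand() % (maxRetryInterval - minRetryInterval + 1) + minRetryInterval;
}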
obj["type"].get(); + auto i = nameSchemaMap.find(typeName); + if (i != nameSchemaMap.end()) { + return i->second; + } + if (typeName == "record") { + std::vector> fieldsSchema; + for (const auto& field : obj["fields"]) { + fieldsSchema.push_back( + std::make_pair(field["name"].get(), parseSchemaFromJsonObject(field["type"]))); + } + + const std::string recordName = obj["name"].get(); + auto recordSchema = AvroSchema::RecordSchema(recordName, std::move(fieldsSchema)); + nameSchemaMap.insert(std::make_pair(recordName, recordSchema)); + return recordSchema; + } else if (typeName == "enum") { + throw std::runtime_error("Enum type isn't supported yet in Avro schema."); + } else if (typeName == "array") { + return AvroSchema::ArraySchema(parseSchemaFromJsonObject(obj["items"])); + } else if (typeName == "map") { + return AvroSchema::MapSchema(parseSchemaFromJsonObject(obj["items"])); + } else if (typeName == "fixed") { + const std::string fixedName = obj["name"].get(); + auto fixedSchema = AvroSchema::FixedSchema(fixedName, obj["size"].get()); + nameSchemaMap.insert(std::make_pair(fixedName, fixedSchema)); + return fixedSchema; + } else { + throw std::runtime_error("Unrecognized type " + typeName + " in Avro schema."); + } + } + AZURE_UNREACHABLE_CODE(); + }; + + auto jsonRoot = Core::Json::_internal::json::parse(jsonSchema.begin(), jsonSchema.end()); + return parseSchemaFromJsonObject(jsonRoot); +} +} // namespace + +int64_t AvroStreamReader::ParseInt(const Core::Context& context) { + uint64_t r = 0; + int nb = 0; + while (true) { + Preload(1, context); + uint8_t c = m_streambuffer[m_pos.Offset++]; + + r = r | ((static_cast(c) & 0x7f) << (nb * 7)); + if (c & 0x80) { + ++nb; + continue; + } + break; + } + return static_cast(r >> 1) ^ -static_cast(r & 0x01); +} + +void AvroStreamReader::Advance(size_t n, const Core::Context& context) { + Preload(n, context); + m_pos.Offset += n; +} + +size_t AvroStreamReader::Preload(size_t n, const Core::Context& context) { + size_t oldAvailable = AvailableBytes(); + while (true) { + size_t newAvailable = TryPreload(n, context); + if (newAvailable >= n) { + return newAvailable; + } + if (oldAvailable == newAvailable) { + throw std::runtime_error("Unexpected EOF of Avro stream."); + } + oldAvailable = newAvailable; + } + AZURE_UNREACHABLE_CODE(); +} + +size_t AvroStreamReader::TryPreload(size_t n, const Core::Context& context) { + size_t availableBytes = AvailableBytes(); + if (availableBytes >= n) { + return availableBytes; + } + const size_t MinRead = 4096; + size_t tryReadSize = (std::max)(n, MinRead); + size_t currSize = m_streambuffer.size(); + m_streambuffer.resize(m_streambuffer.size() + tryReadSize); + size_t actualReadSize = m_stream->Read(m_streambuffer.data() + currSize, tryReadSize, context); + m_streambuffer.resize(currSize + actualReadSize); + return AvailableBytes(); +} + +void AvroStreamReader::Discard() { + constexpr size_t MinimumReleaseMemory = 128 * 1024; + if (m_pos.Offset < MinimumReleaseMemory) { + return; + } + const size_t availableBytes = AvailableBytes(); + std::memmove(&m_streambuffer[0], &m_streambuffer[m_pos.Offset], availableBytes); + m_streambuffer.resize(availableBytes); + m_pos.Offset = 0; +} + +const AvroSchema AvroSchema::StringSchema(AvroDatumType::String); +const AvroSchema AvroSchema::BytesSchema(AvroDatumType::Bytes); +const AvroSchema AvroSchema::IntSchema(AvroDatumType::Int); +const AvroSchema AvroSchema::LongSchema(AvroDatumType::Long); +const AvroSchema AvroSchema::FloatSchema(AvroDatumType::Float); +const AvroSchema 
AvroSchema::DoubleSchema(AvroDatumType::Double); +const AvroSchema AvroSchema::BoolSchema(AvroDatumType::Bool); +const AvroSchema AvroSchema::NullSchema(AvroDatumType::Null); + +AvroSchema AvroSchema::RecordSchema(std::string name, + const std::vector>& fieldsSchema) { + AvroSchema recordSchema(AvroDatumType::Record); + recordSchema.m_name = std::move(name); + recordSchema.m_status = std::make_shared(); + for (auto& i : fieldsSchema) { + recordSchema.m_status->m_keys.push_back(i.first); + recordSchema.m_status->m_schemas.push_back(i.second); + } + return recordSchema; +} + +AvroSchema AvroSchema::ArraySchema(AvroSchema elementSchema) { + AvroSchema arraySchema(AvroDatumType::Array); + arraySchema.m_status = std::make_shared(); + arraySchema.m_status->m_schemas.push_back(std::move(elementSchema)); + return arraySchema; +} + +AvroSchema AvroSchema::MapSchema(AvroSchema elementSchema) { + AvroSchema mapSchema(AvroDatumType::Map); + mapSchema.m_status = std::make_shared(); + mapSchema.m_status->m_schemas.push_back(std::move(elementSchema)); + return mapSchema; +} + +AvroSchema AvroSchema::UnionSchema(std::vector schemas) { + AvroSchema unionSchema(AvroDatumType::Union); + unionSchema.m_status = std::make_shared(); + unionSchema.m_status->m_schemas = std::move(schemas); + return unionSchema; +} + +AvroSchema AvroSchema::FixedSchema(std::string name, int64_t size) { + AvroSchema fixedSchema(AvroDatumType::Fixed); + fixedSchema.m_name = std::move(name); + fixedSchema.m_status = std::make_shared(); + fixedSchema.m_status->m_size = size; + return fixedSchema; +} + +void AvroDatum::Fill(AvroStreamReader& reader, const Core::Context& context) { + m_data = reader.m_pos; + if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) { + int64_t stringSize = reader.ParseInt(context); + reader.Advance(static_cast(stringSize), context); + } else if (m_schema.Type() == AvroDatumType::Int || m_schema.Type() == AvroDatumType::Long || + m_schema.Type() == AvroDatumType::Enum) { + reader.ParseInt(context); + } else if (m_schema.Type() == AvroDatumType::Float) { + reader.Advance(4, context); + } else if (m_schema.Type() == AvroDatumType::Double) { + reader.Advance(8, context); + } else if (m_schema.Type() == AvroDatumType::Bool) { + reader.Advance(1, context); + } else if (m_schema.Type() == AvroDatumType::Null) { + reader.Advance(0, context); + } else if (m_schema.Type() == AvroDatumType::Record) { + for (const auto& s : m_schema.FieldSchemas()) { + AvroDatum(s).Fill(reader, context); + } + } else if (m_schema.Type() == AvroDatumType::Array) { + while (true) { + int64_t numElementsInBlock = reader.ParseInt(context); + if (numElementsInBlock == 0) { + break; + } else if (numElementsInBlock < 0) { + int64_t blockSize = reader.ParseInt(context); + reader.Advance(static_cast(blockSize), context); + } else { + for (auto i = 0; i < numElementsInBlock; ++i) { + AvroDatum(m_schema.ItemSchema()).Fill(reader, context); + } + } + } + } else if (m_schema.Type() == AvroDatumType::Map) { + while (true) { + int64_t numElementsInBlock = reader.ParseInt(context); + if (numElementsInBlock == 0) { + break; + } else if (numElementsInBlock < 0) { + int64_t blockSize = reader.ParseInt(context); + reader.Advance(static_cast(blockSize), context); + } else { + for (int64_t i = 0; i < numElementsInBlock; ++i) { + AvroDatum(AvroSchema::StringSchema).Fill(reader, context); + AvroDatum(m_schema.ItemSchema()).Fill(reader, context); + } + } + } + } else if (m_schema.Type() == AvroDatumType::Union) { + int64_t i = 
+
+void AvroDatum::Fill(AvroStreamReader::ReaderPos& data) {
+  m_data = data;
+  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
+    int64_t stringSize = parseInt(data);
+    data.Offset += static_cast<size_t>(stringSize);
+  } else if (m_schema.Type() == AvroDatumType::Int || m_schema.Type() == AvroDatumType::Long ||
+             m_schema.Type() == AvroDatumType::Enum) {
+    parseInt(data);
+  } else if (m_schema.Type() == AvroDatumType::Float) {
+    data.Offset += 4;
+  } else if (m_schema.Type() == AvroDatumType::Double) {
+    data.Offset += 8;
+  } else if (m_schema.Type() == AvroDatumType::Bool) {
+    data.Offset += 1;
+  } else if (m_schema.Type() == AvroDatumType::Null) {
+    data.Offset += 0;
+  } else if (m_schema.Type() == AvroDatumType::Record) {
+    for (const auto& s : m_schema.FieldSchemas()) {
+      AvroDatum(s).Fill(data);
+    }
+  } else if (m_schema.Type() == AvroDatumType::Array) {
+    while (true) {
+      int64_t numElementsInBlock = parseInt(data);
+      if (numElementsInBlock == 0) {
+        break;
+      } else if (numElementsInBlock < 0) {
+        int64_t blockSize = parseInt(data);
+        data.Offset += static_cast<size_t>(blockSize);
+      } else {
+        for (auto i = 0; i < numElementsInBlock; ++i) {
+          AvroDatum(m_schema.ItemSchema()).Fill(data);
+        }
+      }
+    }
+  } else if (m_schema.Type() == AvroDatumType::Map) {
+    while (true) {
+      int64_t numElementsInBlock = parseInt(data);
+      if (numElementsInBlock == 0) {
+        break;
+      } else if (numElementsInBlock < 0) {
+        int64_t blockSize = parseInt(data);
+        data.Offset += static_cast<size_t>(blockSize);
+      } else {
+        for (int64_t i = 0; i < numElementsInBlock; ++i) {
+          AvroDatum(AvroSchema::StringSchema).Fill(data);
+          AvroDatum(m_schema.ItemSchema()).Fill(data);
+        }
+      }
+    }
+  } else if (m_schema.Type() == AvroDatumType::Union) {
+    int64_t i = parseInt(data);
+    AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]).Fill(data);
+  } else if (m_schema.Type() == AvroDatumType::Fixed) {
+    data.Offset += m_schema.Size();
+  } else {
+    AZURE_UNREACHABLE_CODE();
+  }
+}
+
+template <>
+AvroDatum::StringView AvroDatum::Value() const {
+  auto data = m_data;
+  if (m_schema.Type() == AvroDatumType::String || m_schema.Type() == AvroDatumType::Bytes) {
+    const int64_t length = parseInt(data);
+    const uint8_t* start = &(*data.BufferPtr)[data.Offset];
+    StringView ret{start, static_cast<size_t>(length)};
+    data.Offset += static_cast<size_t>(length);
+    return ret;
+  }
+  if (m_schema.Type() == AvroDatumType::Fixed) {
+    const size_t fixedSize = m_schema.Size();
+    const uint8_t* start = &(*data.BufferPtr)[data.Offset];
+    StringView ret{start, fixedSize};
+    data.Offset += fixedSize;
+    return ret;
+  }
+  AZURE_UNREACHABLE_CODE();
+}
+
+template <>
+std::string AvroDatum::Value() const {
+  auto stringView = Value<StringView>();
+  return std::string(stringView.Data, stringView.Data + stringView.Length);
+}
+
+template <>
+std::vector<uint8_t> AvroDatum::Value() const {
+  auto stringView = Value<StringView>();
+  return std::vector<uint8_t>(stringView.Data, stringView.Data + stringView.Length);
+}
+
+template <>
+int64_t AvroDatum::Value() const {
+  auto data = m_data;
+  return parseInt(data);
+}
+
+template <>
+int32_t AvroDatum::Value() const {
+  return static_cast<int32_t>(Value<int64_t>());
+}
+
+template <>
+bool AvroDatum::Value() const {
+  return Value<int64_t>();
+}
+
+template <>
+std::nullptr_t AvroDatum::Value() const {
+  return nullptr;
+}
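+
+// parseInt()/ParseInt() decode Avro's zig-zag varint. A minimal sketch of that
+// decoding, for orientation only (the vendored implementation lives elsewhere in
+// this file and may additionally validate buffer bounds):
+//
+//   uint64_t raw = 0;
+//   int shift = 0;
+//   uint8_t b;
+//   do {
+//     b = (*data.BufferPtr)[data.Offset++];
+//     raw |= static_cast<uint64_t>(b & 0x7f) << shift;
+//     shift += 7;
+//   } while (b & 0x80);
+//   int64_t value = static_cast<int64_t>(raw >> 1) ^ -static_cast<int64_t>(raw & 1);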
+
+template <>
+AvroRecord AvroDatum::Value() const {
+  auto data = m_data;
+
+  AvroRecord r;
+  r.m_keys = &m_schema.FieldNames();
+  for (const auto& schema : m_schema.FieldSchemas()) {
+    auto datum = AvroDatum(schema);
+    datum.Fill(data);
+    r.m_values.push_back(std::move(datum));
+  }
+
+  return r;
+}
+
+template <>
+AvroMap AvroDatum::Value() const {
+  auto data = m_data;
+
+  AvroMap m;
+  while (true) {
+    int64_t numElementsInBlock = parseInt(data);
+    if (numElementsInBlock == 0) {
+      break;
+    }
+    if (numElementsInBlock < 0) {
+      numElementsInBlock = -numElementsInBlock;
+      parseInt(data);
+    }
+    for (int64_t i = 0; i < numElementsInBlock; ++i) {
+      auto keyDatum = AvroDatum(AvroSchema::StringSchema);
+      keyDatum.Fill(data);
+      auto valueDatum = AvroDatum(m_schema.ItemSchema());
+      valueDatum.Fill(data);
+      m[keyDatum.Value<std::string>()] = valueDatum;
+    }
+  }
+  return m;
+}
+
+template <>
+AvroDatum AvroDatum::Value() const {
+  auto data = m_data;
+  if (m_schema.Type() == AvroDatumType::Union) {
+    int64_t i = parseInt(data);
+    auto datum = AvroDatum(m_schema.FieldSchemas()[static_cast<size_t>(i)]);
+    datum.Fill(data);
+    return datum;
+  }
+  AZURE_UNREACHABLE_CODE();
+}
+
+AvroObjectContainerReader::AvroObjectContainerReader(Core::IO::BodyStream& stream)
+    : m_reader(std::make_unique<AvroStreamReader>(stream)) {}
+
+AvroDatum AvroObjectContainerReader::NextImpl(const AvroSchema* schema, const Core::Context& context) {
+  AZURE_ASSERT_FALSE(m_eof);
+  static const auto SyncMarkerSchema = AvroSchema::FixedSchema("Sync", 16);
+  if (!schema) {
+    static AvroSchema FileHeaderSchema = []() {
+      std::vector<std::pair<std::string, AvroSchema>> fieldsSchema;
+      fieldsSchema.push_back(std::make_pair("magic", AvroSchema::FixedSchema("Magic", 4)));
+      fieldsSchema.push_back(std::make_pair("meta", AvroSchema::MapSchema(AvroSchema::BytesSchema)));
+      fieldsSchema.push_back(std::make_pair("sync", SyncMarkerSchema));
+      return AvroSchema::RecordSchema("org.apache.avro.file.Header", std::move(fieldsSchema));
+    }();
+    auto fileHeaderDatum = AvroDatum(FileHeaderSchema);
+    fileHeaderDatum.Fill(*m_reader, context);
+    auto fileHeader = fileHeaderDatum.Value<AvroRecord>();
+    if (fileHeader.Field("magic").Value<std::string>() != "Obj\01") {
+      throw std::runtime_error("Invalid Avro object container magic.");
+    }
+    AvroMap meta = fileHeader.Field("meta").Value<AvroMap>();
+    std::string objectSchemaJson = meta["avro.schema"].Value<std::string>();
+    std::string codec = "null";
+    if (meta.count("avro.codec") != 0) {
+      codec = meta["avro.codec"].Value<std::string>();
+    }
+    if (codec != "null") {
+      throw std::runtime_error("Unsupported Avro codec: " + codec);
+    }
+    m_syncMarker = fileHeader.Field("sync").Value<std::string>();
+    m_objectSchema = std::make_unique<AvroSchema>(ParseSchemaFromJsonString(objectSchemaJson));
+    schema = m_objectSchema.get();
+  }
+
+  if (m_remainingObjectInCurrentBlock == 0) {
+    m_reader->Discard();
+    m_remainingObjectInCurrentBlock = m_reader->ParseInt(context);
+    int64_t ObjectsSize = m_reader->ParseInt(context);
+    m_reader->Preload(static_cast<size_t>(ObjectsSize), context);
+  }
+
+  auto objectDatum = AvroDatum(*m_objectSchema);
+  objectDatum.Fill(*m_reader, context);
+  if (--m_remainingObjectInCurrentBlock == 0) {
+    auto markerDatum = AvroDatum(SyncMarkerSchema);
+    markerDatum.Fill(*m_reader, context);
+    auto marker = markerDatum.Value<std::string>();
+    if (marker != m_syncMarker) {
+      throw std::runtime_error("Sync marker doesn't match.");
+    }
+    m_eof = m_reader->TryPreload(1, context) == 0;
+  }
+  return objectDatum;
+}
+
+size_t AvroStreamParser::OnRead(uint8_t* buffer, size_t count, Azure::Core::Context const& context) {
+  if (m_parserBuffer.Length != 0) {
+    size_t bytesToCopy = (std::min)(m_parserBuffer.Length, count);
+    std::memcpy(buffer, m_parserBuffer.Data, bytesToCopy);
+    m_parserBuffer.Data += bytesToCopy;
+    m_parserBuffer.Length -= bytesToCopy;
+    return bytesToCopy;
+  }
+  while (!m_parser.End()) {
+    auto datum = m_parser.Next(context);
+    if (datum.Schema().Type() == AvroDatumType::Union) {
+      datum = datum.Value<AvroDatum>();
+    }
+    if (datum.Schema().Type() != AvroDatumType::Record) {
+      continue;
+    }
+    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.resultData") {
+      auto record = datum.Value<AvroRecord>();
+      auto dataDatum = record.Field("data");
+      m_parserBuffer = dataDatum.Value<StringView>();
+      return OnRead(buffer, count, context);
+    }
+    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.progress" && m_progressCallback) {
+      auto record = datum.Value<AvroRecord>();
+      auto bytesScanned = record.Field("bytesScanned").Value<int64_t>();
+      auto totalBytes = record.Field("totalBytes").Value<int64_t>();
+      m_progressCallback(bytesScanned, totalBytes);
+    }
+    if (datum.Schema().Name() == "com.microsoft.azure.storage.queryBlobContents.error" && m_errorCallback) {
+      auto record = datum.Value<AvroRecord>();
+      BlobQueryError e;
+      e.Name = record.Field("name").Value<std::string>();
+      e.Description = record.Field("description").Value<std::string>();
+      e.IsFatal = record.Field("fatal").Value<bool>();
+      e.Position = record.Field("position").Value<int64_t>();
+      m_errorCallback(std::move(e));
+    }
+  }
+  return 0;
+}
+} // namespace _detail
+} // namespace Blobs
+} // namespace Storage
+} // namespace Azure
+
+#endif
diff --git a/source/libs/azure/src/td_block_blob_client.cpp b/source/libs/azure/src/td_block_blob_client.cpp
new file mode 100644
index 0000000000..33ac774d0c
--- /dev/null
+++ b/source/libs/azure/src/td_block_blob_client.cpp
@@ -0,0 +1,625 @@
+#if defined(USE_S3)
+
+#include "td_block_blob_client.hpp"
+
+#include <azure/core/platform.hpp>
+
+#if defined(AZ_PLATFORM_WINDOWS)
+#if !defined(WIN32_LEAN_AND_MEAN)
+#define WIN32_LEAN_AND_MEAN
+#endif
+#if !defined(NOMINMAX)
+#define NOMINMAX
+#endif
+#include <windows.h>
+#endif
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace Azure {
+namespace Storage {
+namespace Blobs {
+
+TDBlockBlobClient TDBlockBlobClient::CreateFromConnectionString(const std::string& connectionString,
+                                                                const std::string& blobContainerName,
+                                                                const std::string& blobName,
+                                                                const BlobClientOptions& options) {
+  TDBlockBlobClient newClient(
+      BlobClient::CreateFromConnectionString(connectionString, blobContainerName, blobName, options));
+  return newClient;
+}
+
+TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl,
+                                     std::shared_ptr<StorageSharedKeyCredential> credential,
+                                     const BlobClientOptions& options)
+    : BlobClient(blobUrl, std::move(credential), options) {}
+
+TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl,
+                                     std::shared_ptr<Core::Credentials::TokenCredential> credential,
+                                     const BlobClientOptions& options)
+    : BlobClient(blobUrl, std::move(credential), options) {}
+
+TDBlockBlobClient::TDBlockBlobClient(const std::string& blobUrl, const BlobClientOptions& options)
+    : BlobClient(blobUrl, options) {}
+
+TDBlockBlobClient::TDBlockBlobClient(BlobClient blobClient) : BlobClient(std::move(blobClient)) {}
+
+TDBlockBlobClient TDBlockBlobClient::WithSnapshot(const std::string& snapshot) const {
+  TDBlockBlobClient newClient(*this);
+  if (snapshot.empty()) {
+    newClient.m_blobUrl.RemoveQueryParameter(_internal::HttpQuerySnapshot);
+  } else {
+    newClient.m_blobUrl.AppendQueryParameter(_internal::HttpQuerySnapshot,
+                                             _internal::UrlEncodeQueryParameter(snapshot));
+  }
+  return newClient;
+}
+
+TDBlockBlobClient TDBlockBlobClient::WithVersionId(const std::string& versionId) const {
+  TDBlockBlobClient newClient(*this);
+  if (versionId.empty()) {
+    newClient.m_blobUrl.RemoveQueryParameter(_internal::HttpQueryVersionId);
+  } else {
+    newClient.m_blobUrl.AppendQueryParameter(_internal::HttpQueryVersionId,
+                                             _internal::UrlEncodeQueryParameter(versionId));
+  }
+  return newClient;
+}
+
+Azure::Response<Models::UploadBlockBlobResult> TDBlockBlobClient::Upload(Azure::Core::IO::BodyStream& content,
+                                                                         const UploadBlockBlobOptions& options,
+                                                                         const Azure::Core::Context& context) const {
+  _detail::BlockBlobClient::UploadBlockBlobOptions protocolLayerOptions;
+  if (options.TransactionalContentHash.HasValue()) {
+    if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) {
+      protocolLayerOptions.TransactionalContentMD5 = options.TransactionalContentHash.Value().Value;
+    } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) {
+      protocolLayerOptions.TransactionalContentCrc64 = options.TransactionalContentHash.Value().Value;
+    }
+  }
+  protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType;
+  protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding;
+  protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage;
+  protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value;
+  protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition;
+  protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl;
+  protocolLayerOptions.Metadata = std::map<std::string, std::string>(options.Metadata.begin(), options.Metadata.end());
+  protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags);
+  protocolLayerOptions.Tier = options.AccessTier;
+  protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId;
+  protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince;
+  protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince;
+  protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch;
+  protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch;
+  protocolLayerOptions.IfTags = options.AccessConditions.TagConditions;
+  if (m_customerProvidedKey.HasValue()) {
+    protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key;
+    protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash;
+    protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString();
+  }
+  protocolLayerOptions.EncryptionScope = m_encryptionScope;
+  if (options.ImmutabilityPolicy.HasValue()) {
+    protocolLayerOptions.ImmutabilityPolicyExpiry = options.ImmutabilityPolicy.Value().ExpiresOn;
+    protocolLayerOptions.ImmutabilityPolicyMode = options.ImmutabilityPolicy.Value().PolicyMode;
+  }
+  protocolLayerOptions.LegalHold = options.HasLegalHold;
+
+  return _detail::BlockBlobClient::Upload(*m_pipeline, m_blobUrl, content, protocolLayerOptions, context);
+}
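+
+// Caller-side sketch of the simple upload path (illustrative only; the connection
+// string, container, and blob names are placeholders, and error handling is omitted):
+//
+//   auto client = TDBlockBlobClient::CreateFromConnectionString(
+//       connectionString, "my-container", "my-blob", BlobClientOptions{});
+//   Azure::Core::IO::MemoryBodyStream body(data.data(), data.size());
+//   auto response = client.Upload(body, UploadBlockBlobOptions{}, Azure::Core::Context{});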
+
+Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
+    const uint8_t* buffer, size_t bufferSize, const UploadBlockBlobFromOptions& options,
+    const Azure::Core::Context& context) const {
+  constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
+  constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
+  constexpr int64_t MaxBlockNumber = 50000;
+  constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;
+
+  if (static_cast<uint64_t>(options.TransferOptions.SingleUploadThreshold) > (std::numeric_limits<size_t>::max)()) {
+    throw Azure::Core::RequestFailedException("Single upload threshold is too big");
+  }
+  if (bufferSize <= static_cast<size_t>(options.TransferOptions.SingleUploadThreshold)) {
+    Azure::Core::IO::MemoryBodyStream contentStream(buffer, bufferSize);
+    UploadBlockBlobOptions uploadBlockBlobOptions;
+    uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders;
+    uploadBlockBlobOptions.Metadata = options.Metadata;
+    uploadBlockBlobOptions.Tags = options.Tags;
+    uploadBlockBlobOptions.AccessTier = options.AccessTier;
+    uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
+    uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold;
+    return Upload(contentStream, uploadBlockBlobOptions, context);
+  }
+
+  int64_t chunkSize;
+  if (options.TransferOptions.ChunkSize.HasValue()) {
+    chunkSize = options.TransferOptions.ChunkSize.Value();
+  } else {
+    int64_t minChunkSize = (bufferSize + MaxBlockNumber - 1) / MaxBlockNumber;
+    minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize;
+    chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize);
+  }
+  if (chunkSize > MaxStageBlockSize) {
+    throw Azure::Core::RequestFailedException("Block size is too big.");
+  }
+
+  std::vector<std::string> blockIds;
+  auto getBlockId = [](int64_t id) {
+    constexpr size_t BlockIdLength = 64;
+    std::string blockId = std::to_string(id);
+    blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId;
+    return Azure::Core::Convert::Base64Encode(std::vector<uint8_t>(blockId.begin(), blockId.end()));
+  };
+
+  auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) {
+    Azure::Core::IO::MemoryBodyStream contentStream(buffer + offset, static_cast<size_t>(length));
+    StageBlockOptions chunkOptions;
+    auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context);
+    if (chunkId == numChunks - 1) {
+      blockIds.resize(static_cast<size_t>(numChunks));
+    }
+  };
+
+  _internal::ConcurrentTransfer(0, bufferSize, chunkSize, options.TransferOptions.Concurrency, uploadBlockFunc);
+
+  for (size_t i = 0; i < blockIds.size(); ++i) {
+    blockIds[i] = getBlockId(static_cast<int64_t>(i));
+  }
+  CommitBlockListOptions commitBlockListOptions;
+  commitBlockListOptions.HttpHeaders = options.HttpHeaders;
+  commitBlockListOptions.Metadata = options.Metadata;
+  commitBlockListOptions.Tags = options.Tags;
+  commitBlockListOptions.AccessTier = options.AccessTier;
+  commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy;
+  commitBlockListOptions.HasLegalHold = options.HasLegalHold;
+  auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context);
+
+  Models::UploadBlockBlobFromResult ret;
+  ret.ETag = std::move(commitBlockListResponse.Value.ETag);
+  ret.LastModified = std::move(commitBlockListResponse.Value.LastModified);
+  ret.VersionId = std::move(commitBlockListResponse.Value.VersionId);
+  ret.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted;
+  ret.EncryptionKeySha256 = std::move(commitBlockListResponse.Value.EncryptionKeySha256);
+  ret.EncryptionScope = std::move(commitBlockListResponse.Value.EncryptionScope);
+  return Azure::Response<Models::UploadBlockBlobFromResult>(std::move(ret),
+                                                            std::move(commitBlockListResponse.RawResponse));
+}
+
+Azure::Response<Models::UploadBlockBlobFromResult> TDBlockBlobClient::UploadFrom(
+    const std::string& fileName, const UploadBlockBlobFromOptions& options, const Azure::Core::Context& context) const {
+  constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL;
+  constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL;
+  constexpr int64_t MaxBlockNumber = 50000;
+  constexpr int64_t BlockGrainSize = 1 * 1024 * 1024;
+
+  {
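+    // The stream is scoped to this block so the file handle is released as soon as
+    // the single-shot path returns or falls through; the chunked path below reopens
+    // the file through _internal::FileReader.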
Azure::Core::IO::FileBodyStream contentStream(fileName); + + if (contentStream.Length() <= options.TransferOptions.SingleUploadThreshold) { + UploadBlockBlobOptions uploadBlockBlobOptions; + uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders; + uploadBlockBlobOptions.Metadata = options.Metadata; + uploadBlockBlobOptions.Tags = options.Tags; + uploadBlockBlobOptions.AccessTier = options.AccessTier; + uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy; + uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold; + return Upload(contentStream, uploadBlockBlobOptions, context); + } + } + + std::vector blockIds; + auto getBlockId = [](int64_t id) { + constexpr size_t BlockIdLength = 64; + std::string blockId = std::to_string(id); + blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId; + return Azure::Core::Convert::Base64Encode(std::vector(blockId.begin(), blockId.end())); + }; + + _internal::FileReader fileReader(fileName); + + auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) { + Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, length); + StageBlockOptions chunkOptions; + auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context); + if (chunkId == numChunks - 1) { + blockIds.resize(static_cast(numChunks)); + } + }; + + int64_t chunkSize; + if (options.TransferOptions.ChunkSize.HasValue()) { + chunkSize = options.TransferOptions.ChunkSize.Value(); + } else { + int64_t minChunkSize = (fileReader.GetFileSize() + MaxBlockNumber - 1) / MaxBlockNumber; + minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize; + chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize); + } + if (chunkSize > MaxStageBlockSize) { + throw Azure::Core::RequestFailedException("Block size is too big."); + } + + _internal::ConcurrentTransfer(0, fileReader.GetFileSize(), chunkSize, options.TransferOptions.Concurrency, + uploadBlockFunc); + + for (size_t i = 0; i < blockIds.size(); ++i) { + blockIds[i] = getBlockId(static_cast(i)); + } + CommitBlockListOptions commitBlockListOptions; + commitBlockListOptions.HttpHeaders = options.HttpHeaders; + commitBlockListOptions.Metadata = options.Metadata; + commitBlockListOptions.Tags = options.Tags; + commitBlockListOptions.AccessTier = options.AccessTier; + commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy; + commitBlockListOptions.HasLegalHold = options.HasLegalHold; + auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context); + + Models::UploadBlockBlobFromResult result; + result.ETag = commitBlockListResponse.Value.ETag; + result.LastModified = commitBlockListResponse.Value.LastModified; + result.VersionId = commitBlockListResponse.Value.VersionId; + result.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted; + result.EncryptionKeySha256 = commitBlockListResponse.Value.EncryptionKeySha256; + result.EncryptionScope = commitBlockListResponse.Value.EncryptionScope; + return Azure::Response(std::move(result), + std::move(commitBlockListResponse.RawResponse)); +} + +Azure::Response TDBlockBlobClient::UploadFrom( + const std::string& fileName, int64_t offset, int64_t size, const UploadBlockBlobFromOptions& options, + const Azure::Core::Context& context) const { + _internal::FileReader fileReader(fileName); + + { + Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, 
size); + + if (size <= options.TransferOptions.SingleUploadThreshold) { + UploadBlockBlobOptions uploadBlockBlobOptions; + uploadBlockBlobOptions.HttpHeaders = options.HttpHeaders; + uploadBlockBlobOptions.Metadata = options.Metadata; + uploadBlockBlobOptions.Tags = options.Tags; + uploadBlockBlobOptions.AccessTier = options.AccessTier; + uploadBlockBlobOptions.ImmutabilityPolicy = options.ImmutabilityPolicy; + uploadBlockBlobOptions.HasLegalHold = options.HasLegalHold; + return Upload(contentStream, uploadBlockBlobOptions, context); + } + } + + std::vector blockIds; + auto getBlockId = [](int64_t id) { + constexpr size_t BlockIdLength = 64; + std::string blockId = std::to_string(id); + blockId = std::string(BlockIdLength - blockId.length(), '0') + blockId; + return Azure::Core::Convert::Base64Encode(std::vector(blockId.begin(), blockId.end())); + }; + + auto uploadBlockFunc = [&](int64_t offset, int64_t length, int64_t chunkId, int64_t numChunks) { + Azure::Core::IO::_internal::RandomAccessFileBodyStream contentStream(fileReader.GetHandle(), offset, length); + StageBlockOptions chunkOptions; + auto blockInfo = StageBlock(getBlockId(chunkId), contentStream, chunkOptions, context); + if (chunkId == numChunks - 1) { + blockIds.resize(static_cast(numChunks)); + } + }; + + constexpr int64_t DefaultStageBlockSize = 4 * 1024 * 1024ULL; + constexpr int64_t MaxStageBlockSize = 4000 * 1024 * 1024ULL; + constexpr int64_t MaxBlockNumber = 50000; + constexpr int64_t BlockGrainSize = 1 * 1024 * 1024; + + int64_t chunkSize; + if (options.TransferOptions.ChunkSize.HasValue()) { + chunkSize = options.TransferOptions.ChunkSize.Value(); + } else { + int64_t minChunkSize = (size + MaxBlockNumber - 1) / MaxBlockNumber; + minChunkSize = (minChunkSize + BlockGrainSize - 1) / BlockGrainSize * BlockGrainSize; + chunkSize = (std::max)(DefaultStageBlockSize, minChunkSize); + } + if (chunkSize > MaxStageBlockSize) { + throw Azure::Core::RequestFailedException("Block size is too big."); + } + + _internal::ConcurrentTransfer(offset, size, chunkSize, options.TransferOptions.Concurrency, uploadBlockFunc); + + for (size_t i = 0; i < blockIds.size(); ++i) { + blockIds[i] = getBlockId(static_cast(i)); + } + CommitBlockListOptions commitBlockListOptions; + commitBlockListOptions.HttpHeaders = options.HttpHeaders; + commitBlockListOptions.Metadata = options.Metadata; + commitBlockListOptions.Tags = options.Tags; + commitBlockListOptions.AccessTier = options.AccessTier; + commitBlockListOptions.ImmutabilityPolicy = options.ImmutabilityPolicy; + commitBlockListOptions.HasLegalHold = options.HasLegalHold; + auto commitBlockListResponse = CommitBlockList(blockIds, commitBlockListOptions, context); + + Models::UploadBlockBlobFromResult result; + result.ETag = commitBlockListResponse.Value.ETag; + result.LastModified = commitBlockListResponse.Value.LastModified; + result.VersionId = commitBlockListResponse.Value.VersionId; + result.IsServerEncrypted = commitBlockListResponse.Value.IsServerEncrypted; + result.EncryptionKeySha256 = commitBlockListResponse.Value.EncryptionKeySha256; + result.EncryptionScope = commitBlockListResponse.Value.EncryptionScope; + return Azure::Response(std::move(result), + std::move(commitBlockListResponse.RawResponse)); +} + +Azure::Response TDBlockBlobClient::UploadFromUri( + const std::string& sourceUri, const UploadBlockBlobFromUriOptions& options, + const Azure::Core::Context& context) const { + _detail::BlockBlobClient::UploadBlockBlobFromUriOptions protocolLayerOptions; + 
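+  // Server-side copy: every option below is mapped one-to-one onto the protocol-layer
+  // request (a "Put Blob From URL" operation), so the blob content itself never
+  // passes through this client.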
protocolLayerOptions.CopySource = sourceUri; + protocolLayerOptions.CopySourceBlobProperties = options.CopySourceBlobProperties; + protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType; + protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding; + protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage; + protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value; + protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl; + protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition; + protocolLayerOptions.Metadata = std::map(options.Metadata.begin(), options.Metadata.end()); + protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags); + protocolLayerOptions.Tier = options.AccessTier; + protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId; + protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch; + protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch; + protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince; + protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince; + protocolLayerOptions.IfTags = options.AccessConditions.TagConditions; + protocolLayerOptions.SourceIfMatch = options.SourceAccessConditions.IfMatch; + protocolLayerOptions.SourceIfNoneMatch = options.SourceAccessConditions.IfNoneMatch; + protocolLayerOptions.SourceIfModifiedSince = options.SourceAccessConditions.IfModifiedSince; + protocolLayerOptions.SourceIfUnmodifiedSince = options.SourceAccessConditions.IfUnmodifiedSince; + protocolLayerOptions.SourceIfTags = options.SourceAccessConditions.TagConditions; + if (options.TransactionalContentHash.HasValue()) { + if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) { + protocolLayerOptions.SourceContentMD5 = options.TransactionalContentHash.Value().Value; + } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) { + protocolLayerOptions.SourceContentcrc64 = options.TransactionalContentHash.Value().Value; + } + } + if (m_customerProvidedKey.HasValue()) { + protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key; + protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash; + protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString(); + } + protocolLayerOptions.EncryptionScope = m_encryptionScope; + protocolLayerOptions.CopySourceTags = options.CopySourceTagsMode; + if (!options.SourceAuthorization.empty()) { + protocolLayerOptions.CopySourceAuthorization = options.SourceAuthorization; + } + + return _detail::BlockBlobClient::UploadFromUri(*m_pipeline, m_blobUrl, protocolLayerOptions, context); +} + +Azure::Response TDBlockBlobClient::StageBlock(const std::string& blockId, + Azure::Core::IO::BodyStream& content, + const StageBlockOptions& options, + const Azure::Core::Context& context) const { + _detail::BlockBlobClient::StageBlockBlobBlockOptions protocolLayerOptions; + protocolLayerOptions.BlockId = blockId; + if (options.TransactionalContentHash.HasValue()) { + if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) { + protocolLayerOptions.TransactionalContentMD5 = options.TransactionalContentHash.Value().Value; + } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) { + protocolLayerOptions.TransactionalContentCrc64 = options.TransactionalContentHash.Value().Value; + } 
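+      // Whichever algorithm the caller supplied, the hash accompanies this block so
+      // the service can verify the staged payload in transit.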
+ } + protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId; + if (m_customerProvidedKey.HasValue()) { + protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key; + protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash; + protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString(); + } + protocolLayerOptions.EncryptionScope = m_encryptionScope; + return _detail::BlockBlobClient::StageBlock(*m_pipeline, m_blobUrl, content, protocolLayerOptions, context); +} + +Azure::Response TDBlockBlobClient::StageBlockFromUri( + const std::string& blockId, const std::string& sourceUri, const StageBlockFromUriOptions& options, + const Azure::Core::Context& context) const { + _detail::BlockBlobClient::StageBlockBlobBlockFromUriOptions protocolLayerOptions; + protocolLayerOptions.BlockId = blockId; + protocolLayerOptions.SourceUrl = sourceUri; + if (options.SourceRange.HasValue()) { + std::string rangeStr = "bytes=" + std::to_string(options.SourceRange.Value().Offset) + "-"; + if (options.SourceRange.Value().Length.HasValue()) { + rangeStr += std::to_string(options.SourceRange.Value().Offset + options.SourceRange.Value().Length.Value() - 1); + } + protocolLayerOptions.SourceRange = rangeStr; + } + if (options.TransactionalContentHash.HasValue()) { + if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Md5) { + protocolLayerOptions.SourceContentMD5 = options.TransactionalContentHash.Value().Value; + } else if (options.TransactionalContentHash.Value().Algorithm == HashAlgorithm::Crc64) { + protocolLayerOptions.SourceContentcrc64 = options.TransactionalContentHash.Value().Value; + } + } + protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId; + protocolLayerOptions.SourceIfModifiedSince = options.SourceAccessConditions.IfModifiedSince; + protocolLayerOptions.SourceIfUnmodifiedSince = options.SourceAccessConditions.IfUnmodifiedSince; + protocolLayerOptions.SourceIfMatch = options.SourceAccessConditions.IfMatch; + protocolLayerOptions.SourceIfNoneMatch = options.SourceAccessConditions.IfNoneMatch; + if (m_customerProvidedKey.HasValue()) { + protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key; + protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash; + protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString(); + } + protocolLayerOptions.EncryptionScope = m_encryptionScope; + if (!options.SourceAuthorization.empty()) { + protocolLayerOptions.CopySourceAuthorization = options.SourceAuthorization; + } + + return _detail::BlockBlobClient::StageBlockFromUri(*m_pipeline, m_blobUrl, protocolLayerOptions, context); +} + +Azure::Response TDBlockBlobClient::CommitBlockList( + const std::vector& blockIds, const CommitBlockListOptions& options, + const Azure::Core::Context& context) const { + _detail::BlockBlobClient::CommitBlockBlobBlockListOptions protocolLayerOptions; + protocolLayerOptions.Blocks.Latest = blockIds; + protocolLayerOptions.BlobContentType = options.HttpHeaders.ContentType; + protocolLayerOptions.BlobContentEncoding = options.HttpHeaders.ContentEncoding; + protocolLayerOptions.BlobContentLanguage = options.HttpHeaders.ContentLanguage; + protocolLayerOptions.BlobContentMD5 = options.HttpHeaders.ContentHash.Value; + protocolLayerOptions.BlobContentDisposition = options.HttpHeaders.ContentDisposition; + protocolLayerOptions.BlobCacheControl = options.HttpHeaders.CacheControl; + protocolLayerOptions.Metadata = 
std::map(options.Metadata.begin(), options.Metadata.end()); + protocolLayerOptions.BlobTagsString = _detail::TagsToString(options.Tags); + protocolLayerOptions.Tier = options.AccessTier; + protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId; + protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince; + protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince; + protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch; + protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch; + protocolLayerOptions.IfTags = options.AccessConditions.TagConditions; + if (m_customerProvidedKey.HasValue()) { + protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key; + protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash; + protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString(); + } + protocolLayerOptions.EncryptionScope = m_encryptionScope; + if (options.ImmutabilityPolicy.HasValue()) { + protocolLayerOptions.ImmutabilityPolicyExpiry = options.ImmutabilityPolicy.Value().ExpiresOn; + protocolLayerOptions.ImmutabilityPolicyMode = options.ImmutabilityPolicy.Value().PolicyMode; + } + protocolLayerOptions.LegalHold = options.HasLegalHold; + + return _detail::BlockBlobClient::CommitBlockList(*m_pipeline, m_blobUrl, protocolLayerOptions, context); +} + +Azure::Response TDBlockBlobClient::GetBlockList(const GetBlockListOptions& options, + const Azure::Core::Context& context) const { + _detail::BlockBlobClient::GetBlockBlobBlockListOptions protocolLayerOptions; + protocolLayerOptions.ListType = options.ListType; + protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId; + protocolLayerOptions.IfTags = options.AccessConditions.TagConditions; + return _detail::BlockBlobClient::GetBlockList(*m_pipeline, m_blobUrl, protocolLayerOptions, + _internal::WithReplicaStatus(context)); +} +/* +Azure::Response TDBlockBlobClient::Query(const std::string& querySqlExpression, + const QueryBlobOptions& options, + const Azure::Core::Context& context) const { +_detail::BlobClient::QueryBlobOptions protocolLayerOptions; +protocolLayerOptions.QueryRequest.QueryType = Models::_detail::QueryRequestQueryType::SQL; +protocolLayerOptions.QueryRequest.Expression = querySqlExpression; +if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Delimited) { + Models::_detail::DelimitedTextConfiguration c; + c.RecordSeparator = options.InputTextConfiguration.m_recordSeparator; + c.ColumnSeparator = options.InputTextConfiguration.m_columnSeparator; + c.FieldQuote = options.InputTextConfiguration.m_quotationCharacter; + c.EscapeChar = options.InputTextConfiguration.m_escapeCharacter; + c.HeadersPresent = options.InputTextConfiguration.m_hasHeaders; + Models::_detail::QuerySerialization q; + q.Format.Type = options.InputTextConfiguration.m_format; + q.Format.DelimitedTextConfiguration = std::move(c); + protocolLayerOptions.QueryRequest.InputSerialization = std::move(q); +} else if (options.InputTextConfiguration.m_format == Models::_detail::QueryFormatType::Json) { + Models::_detail::JsonTextConfiguration c; + c.RecordSeparator = options.InputTextConfiguration.m_recordSeparator; + Models::_detail::QuerySerialization q; + q.Format.Type = options.InputTextConfiguration.m_format; + q.Format.JsonTextConfiguration = std::move(c); + protocolLayerOptions.QueryRequest.InputSerialization = std::move(q); +} else if (options.InputTextConfiguration.m_format == 
Models::_detail::QueryFormatType::Parquet) { + Models::_detail::ParquetConfiguration c; + Models::_detail::QuerySerialization q; + q.Format.Type = options.InputTextConfiguration.m_format; + q.Format.ParquetTextConfiguration = std::move(c); + protocolLayerOptions.QueryRequest.InputSerialization = std::move(q); +} else if (options.InputTextConfiguration.m_format.ToString().empty()) { +} else { + AZURE_UNREACHABLE_CODE(); +} +if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Delimited) { + Models::_detail::DelimitedTextConfiguration c; + c.RecordSeparator = options.OutputTextConfiguration.m_recordSeparator; + c.ColumnSeparator = options.OutputTextConfiguration.m_columnSeparator; + c.FieldQuote = options.OutputTextConfiguration.m_quotationCharacter; + c.EscapeChar = options.OutputTextConfiguration.m_escapeCharacter; + c.HeadersPresent = options.OutputTextConfiguration.m_hasHeaders; + Models::_detail::QuerySerialization q; + q.Format.Type = options.OutputTextConfiguration.m_format; + q.Format.DelimitedTextConfiguration = std::move(c); + protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q); +} else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Json) { + Models::_detail::JsonTextConfiguration c; + c.RecordSeparator = options.OutputTextConfiguration.m_recordSeparator; + Models::_detail::QuerySerialization q; + q.Format.Type = options.OutputTextConfiguration.m_format; + q.Format.JsonTextConfiguration = std::move(c); + protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q); +} else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Parquet) { + Models::_detail::ParquetConfiguration c; + Models::_detail::QuerySerialization q; + q.Format.Type = options.OutputTextConfiguration.m_format; + q.Format.ParquetTextConfiguration = std::move(c); + protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q); +} else if (options.OutputTextConfiguration.m_format == Models::_detail::QueryFormatType::Arrow) { + Models::_detail::ArrowConfiguration c; + c.Schema = options.OutputTextConfiguration.m_schema; + Models::_detail::QuerySerialization q; + q.Format.Type = options.OutputTextConfiguration.m_format; + q.Format.ArrowConfiguration = std::move(c); + protocolLayerOptions.QueryRequest.OutputSerialization = std::move(q); +} else if (options.InputTextConfiguration.m_format.ToString().empty()) { +} else { + AZURE_UNREACHABLE_CODE(); +} + +protocolLayerOptions.LeaseId = options.AccessConditions.LeaseId; +if (m_customerProvidedKey.HasValue()) { + protocolLayerOptions.EncryptionKey = m_customerProvidedKey.Value().Key; + protocolLayerOptions.EncryptionKeySha256 = m_customerProvidedKey.Value().KeyHash; + protocolLayerOptions.EncryptionAlgorithm = m_customerProvidedKey.Value().Algorithm.ToString(); +} +protocolLayerOptions.EncryptionScope = m_encryptionScope; +protocolLayerOptions.IfModifiedSince = options.AccessConditions.IfModifiedSince; +protocolLayerOptions.IfUnmodifiedSince = options.AccessConditions.IfUnmodifiedSince; +protocolLayerOptions.IfMatch = options.AccessConditions.IfMatch; +protocolLayerOptions.IfNoneMatch = options.AccessConditions.IfNoneMatch; +protocolLayerOptions.IfTags = options.AccessConditions.TagConditions; +auto response = + _detail::BlobClient::Query(*m_pipeline, m_blobUrl, protocolLayerOptions, _internal::WithReplicaStatus(context)); + +const auto statusCode = response.RawResponse->GetStatusCode(); +const auto reasonPhrase = 
response.RawResponse->GetReasonPhrase(); +const auto requestId = response.RawResponse->GetHeaders().count(_internal::HttpHeaderRequestId) != 0 + ? response.RawResponse->GetHeaders().at(_internal::HttpHeaderRequestId) + : std::string(); + +const auto clientRequestId = response.RawResponse->GetHeaders().count(_internal::HttpHeaderClientRequestId) != 0 + ? response.RawResponse->GetHeaders().at(_internal::HttpHeaderClientRequestId) + : std::string(); + +auto defaultErrorHandler = [statusCode, reasonPhrase, requestId, clientRequestId](BlobQueryError e) { + if (e.IsFatal) { + StorageException exception("Fatal " + e.Name + " at " + std::to_string(e.Position)); + exception.StatusCode = statusCode; + exception.ReasonPhrase = reasonPhrase; + exception.RequestId = requestId; + exception.ClientRequestId = clientRequestId; + exception.ErrorCode = e.Name; + exception.Message = e.Description; + + throw exception; + } +}; + +response.Value.BodyStream = + std::make_unique<_detail::AvroStreamParser>(std::move(response.Value.BodyStream), options.ProgressHandler, + options.ErrorHandler ? options.ErrorHandler : defaultErrorHandler); +return response; +} +*/ +} // namespace Blobs +} // namespace Storage +} // namespace Azure + +#endif diff --git a/source/libs/azure/test/CMakeLists.txt b/source/libs/azure/test/CMakeLists.txt new file mode 100644 index 0000000000..f00257f228 --- /dev/null +++ b/source/libs/azure/test/CMakeLists.txt @@ -0,0 +1,20 @@ +if(TD_LINUX) + aux_source_directory(. AZ_TEST_SRC) + + add_executable(azTest ${AZ_TEST_SRC}) + target_include_directories(azTest + PUBLIC + "${TD_SOURCE_DIR}/include/libs/azure" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) + + target_link_libraries(azTest + az + gtest_main + ) + enable_testing() + add_test( + NAME az_test + COMMAND azTest + ) +endif(TD_LINUX) diff --git a/source/libs/azure/test/azTest.cpp b/source/libs/azure/test/azTest.cpp new file mode 100644 index 0000000000..0459cb5f6a --- /dev/null +++ b/source/libs/azure/test/azTest.cpp @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include "az.h" + +extern int8_t tsS3Enabled; + +int32_t azInitEnv() { + int32_t code = 0; + + extern int8_t tsS3EpNum; + + extern char tsS3Hostname[][TSDB_FQDN_LEN]; + extern char tsS3AccessKeyId[][TSDB_FQDN_LEN]; + extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN]; + extern char tsS3BucketName[TSDB_FQDN_LEN]; + + /* TCS parameter format + tsS3Hostname[0] = "/.blob.core.windows.net"; + tsS3AccessKeyId[0] = ""; + tsS3AccessKeySecret[0] = ""; + tsS3BucketName = ""; + */ + + const char *hostname = "/.blob.core.windows.net"; + const char *accessKeyId = ""; + const char *accessKeySecret = ""; + const char *bucketName = ""; + + if (hostname[0] != '<') { + tstrncpy(&tsS3Hostname[0][0], hostname, TSDB_FQDN_LEN); + tstrncpy(&tsS3AccessKeyId[0][0], accessKeyId, TSDB_FQDN_LEN); + tstrncpy(&tsS3AccessKeySecret[0][0], accessKeySecret, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, bucketName, TSDB_FQDN_LEN); + } else { + const char *accountId = getenv("ablob_account_id"); + if (!accountId) { + return -1; + } + + const char *accountSecret = getenv("ablob_account_secret"); + if (!accountSecret) { + return -1; + } + + const char *containerName = getenv("ablob_container"); + if (!containerName) { + return -1; + } + + TAOS_STRCPY(&tsS3Hostname[0][0], accountId); + TAOS_STRCAT(&tsS3Hostname[0][0], ".blob.core.windows.net"); + TAOS_STRCPY(&tsS3AccessKeyId[0][0], accountId); + TAOS_STRCPY(&tsS3AccessKeySecret[0][0], accountSecret); + TAOS_STRCPY(tsS3BucketName, containerName); + } + + tstrncpy(tsTempDir, "/tmp/", PATH_MAX); + + tsS3Enabled = true; + + return code; +} + +// TEST(AzTest, DISABLED_InterfaceTest) { +TEST(AzTest, InterfaceTest) { + int code = 0; + bool check = false; + bool withcp = false; + + code = azInitEnv(); + if (code) { + std::cout << "ablob env init failed with: " << code << std::endl; + return; + } + + GTEST_ASSERT_EQ(code, 0); + GTEST_ASSERT_EQ(tsS3Enabled, 1); + + code = azBegin(); + GTEST_ASSERT_EQ(code, 0); + + code = azCheckCfg(); + GTEST_ASSERT_EQ(code, 0); + const int size = 4096; + char data[size] = {0}; + for (int i = 0; i < size / 2; ++i) { + data[i * 2 + 1] = 1; + } + + const char object_name[] = "azut.bin"; + char path[PATH_MAX] = {0}; + char path_download[PATH_MAX] = {0}; + int ds_len = strlen(TD_DIRSEP); + int tmp_len = strlen(tsTempDir); + + (void)snprintf(path, PATH_MAX, "%s", tsTempDir); + if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) { + (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP); + (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", object_name); + } else { + (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", object_name); + } + + tstrncpy(path_download, path, strlen(path) + 1); + tstrncpy(path_download + strlen(path), ".download", strlen(".download") + 1); + + TdFilePtr fp = taosOpenFile(path, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_WRITE_THROUGH); + GTEST_ASSERT_NE(fp, nullptr); + + int n = taosWriteFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + code = azPutObjectFromFileOffset(path, object_name, 0, size); + GTEST_ASSERT_EQ(code, 0); + + uint8_t *pBlock = NULL; + code = azGetObjectBlock(object_name, 0, size, check, &pBlock); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(pBlock[i * 2], 0); + GTEST_ASSERT_EQ(pBlock[i * 2 + 1], 1); + } + + taosMemoryFree(pBlock); + + code = azGetObjectToFile(object_name, path_download); + GTEST_ASSERT_EQ(code, 0); 
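+  // Round-trip check: the upload wrote alternating 0x00/0x01 bytes, so the
+  // re-read buffer below must reproduce exactly that pattern.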
+ + { + TdFilePtr fp = taosOpenFile(path, TD_FILE_READ); + GTEST_ASSERT_NE(fp, nullptr); + + (void)memset(data, 0, size); + + int64_t n = taosReadFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(data[i * 2], 0); + GTEST_ASSERT_EQ(data[i * 2 + 1], 1); + } + } + + azDeleteObjectsByPrefix(object_name); + // list object to check + + code = azPutObjectFromFile2(path, object_name, withcp); + GTEST_ASSERT_EQ(code, 0); + + code = azGetObjectsByPrefix(object_name, tsTempDir); + GTEST_ASSERT_EQ(code, 0); + + { + TdFilePtr fp = taosOpenFile(path, TD_FILE_READ); + GTEST_ASSERT_NE(fp, nullptr); + + (void)memset(data, 0, size); + + int64_t n = taosReadFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(data[i * 2], 0); + GTEST_ASSERT_EQ(data[i * 2 + 1], 1); + } + } + + const char *object_name_arr[] = {object_name}; + code = azDeleteObjects(object_name_arr, 1); + GTEST_ASSERT_EQ(code, 0); + + azEnd(); +} diff --git a/source/libs/catalog/CMakeLists.txt b/source/libs/catalog/CMakeLists.txt index 6f09af8a8f..179781c2c9 100644 --- a/source/libs/catalog/CMakeLists.txt +++ b/source/libs/catalog/CMakeLists.txt @@ -7,10 +7,10 @@ target_include_directories( ) target_link_libraries( - catalog - PRIVATE os util transport qcom nodes + catalog + PRIVATE os util transport qcom nodes ) # if(${BUILD_TEST}) -# ADD_SUBDIRECTORY(test) +# ADD_SUBDIRECTORY(test) # endif(${BUILD_TEST}) diff --git a/source/libs/command/CMakeLists.txt b/source/libs/command/CMakeLists.txt index a890972d14..308f652861 100644 --- a/source/libs/command/CMakeLists.txt +++ b/source/libs/command/CMakeLists.txt @@ -12,5 +12,5 @@ target_link_libraries( ) if(${BUILD_TEST}) - ADD_SUBDIRECTORY(test) + ADD_SUBDIRECTORY(test) endif(${BUILD_TEST}) diff --git a/source/libs/crypt/CMakeLists.txt b/source/libs/crypt/CMakeLists.txt index e6d73b1882..c29c9a4a29 100644 --- a/source/libs/crypt/CMakeLists.txt +++ b/source/libs/crypt/CMakeLists.txt @@ -1,8 +1,8 @@ aux_source_directory(src CRYPT_SRC) -IF (TD_ENTERPRISE) +IF(TD_ENTERPRISE) LIST(APPEND CRYPT_SRC ${TD_ENTERPRISE_DIR}/src/plugins/crypt/cryptImpl.c) -ENDIF () +ENDIF() add_library(crypt STATIC ${CRYPT_SRC}) target_include_directories( diff --git a/source/libs/executor/CMakeLists.txt b/source/libs/executor/CMakeLists.txt index af2c3986aa..014b538375 100644 --- a/source/libs/executor/CMakeLists.txt +++ b/source/libs/executor/CMakeLists.txt @@ -1,24 +1,25 @@ aux_source_directory(src EXECUTOR_SRC) add_library(executor STATIC ${EXECUTOR_SRC}) + if(${TD_DARWIN}) - target_compile_options(executor PRIVATE -Wno-error=deprecated-non-prototype) + target_compile_options(executor PRIVATE -Wno-error=deprecated-non-prototype) endif(${TD_DARWIN}) -IF(${BUILD_WITH_ANALYSIS}) +if(${BUILD_WITH_ANALYSIS}) add_definitions(-DUSE_ANAL) -ENDIF() +endif() target_link_libraries(executor - PRIVATE os util common function parser planner qcom scalar nodes index wal tdb geometry - ) + PRIVATE os util common function parser planner qcom scalar nodes index wal tdb geometry +) target_include_directories( - executor - PUBLIC "${TD_SOURCE_DIR}/include/libs/executor" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + executor + PUBLIC "${TD_SOURCE_DIR}/include/libs/executor" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) if(${BUILD_TEST}) - ADD_SUBDIRECTORY(test) + ADD_SUBDIRECTORY(test) endif(${BUILD_TEST}) diff --git 
a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index 2c1f574419..c58a2a5d5b 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -35,6 +35,7 @@ typedef struct SFillColInfo { SExprInfo* pExpr; bool notFillCol; // denote if this column needs fill operation SVariant fillVal; + bool fillNull; } SFillColInfo; typedef struct SFillLinearInfo { @@ -128,12 +129,14 @@ void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struc void taosFillUpdateStartTimestampInfo(SFillInfo* pFillInfo, int64_t ts); bool taosFillNotStarted(const SFillInfo* pFillInfo); SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, - int32_t numOfNotFillCols, const struct SNodeListNode* val); + int32_t numOfNotFillCols, SExprInfo* pFillNullExpr, int32_t numOfFillNullExprs, + const struct SNodeListNode* val); bool taosFillHasMoreResults(struct SFillInfo* pFillInfo); -int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, - SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId, - int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo); +int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t fillNullCols, + int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, + int32_t slotId, int32_t order, const char* id, SExecTaskInfo* pTaskInfo, + SFillInfo** ppFillInfo); void* taosDestroyFillInfo(struct SFillInfo* pFillInfo); int32_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, SSDataBlock* p, int32_t capacity); diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index d530382f7c..1595c90419 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -53,6 +53,7 @@ typedef struct SFillOperatorInfo { SExprInfo* pExprInfo; int32_t numOfExpr; SExprSupp noFillExprSupp; + SExprSupp fillNullExprSupp; } SFillOperatorInfo; static void destroyFillOperatorInfo(void* param); @@ -140,6 +141,15 @@ void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int code = projectApplyFunctions(pNoFillSupp->pExprInfo, pInfo->pRes, pBlock, pNoFillSupp->pCtx, pNoFillSupp->numOfExprs, NULL); QUERY_CHECK_CODE(code, lino, _end); + + if (pInfo->fillNullExprSupp.pExprInfo) { + pInfo->pRes->info.rows = 0; + code = setInputDataBlock(&pInfo->fillNullExprSupp, pBlock, order, scanFlag, false); + QUERY_CHECK_CODE(code, lino, _end); + code = projectApplyFunctions(pInfo->fillNullExprSupp.pExprInfo, pInfo->pRes, pBlock, pInfo->fillNullExprSupp.pCtx, + pInfo->fillNullExprSupp.numOfExprs, NULL); + } + pInfo->pRes->info.id.groupId = pBlock->info.id.groupId; _end: @@ -327,6 +337,7 @@ void destroyFillOperatorInfo(void* param) { pInfo->pFinalRes = NULL; cleanupExprSupp(&pInfo->noFillExprSupp); + cleanupExprSupp(&pInfo->fillNullExprSupp); taosMemoryFreeClear(pInfo->p); taosArrayDestroy(pInfo->matchInfo.pList); @@ -334,10 +345,11 @@ void destroyFillOperatorInfo(void* param) { } static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t numOfCols, SExprInfo* pNotFillExpr, - int32_t numOfNotFillCols, SNodeListNode* pValNode, STimeWindow win, int32_t capacity, - const char* id, SInterval* pInterval, int32_t fillType, int32_t order, - SExecTaskInfo* pTaskInfo) { - SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode); + 
int32_t numOfNotFillCols, SExprInfo* pFillNullExpr, int32_t numOfFillNullExprs, + SNodeListNode* pValNode, STimeWindow win, int32_t capacity, const char* id, + SInterval* pInterval, int32_t fillType, int32_t order, SExecTaskInfo* pTaskInfo) { + SFillColInfo* pColInfo = + createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pFillNullExpr, numOfFillNullExprs, pValNode); if (!pColInfo) { qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); return terrno; @@ -348,8 +360,8 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t // STimeWindow w = {0}; // getInitialStartTimeWindow(pInterval, startKey, &w, order == TSDB_ORDER_ASC); pInfo->pFillInfo = NULL; - int32_t code = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, - pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo); + int32_t code = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, numOfFillNullExprs, capacity, pInterval, + fillType, pColInfo, pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo); if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); return code; @@ -455,6 +467,13 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi initExprSupp(pNoFillSupp, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); + code = createExprInfo(pPhyFillNode->pFillNullExprs, NULL, &pInfo->fillNullExprSupp.pExprInfo, + &pInfo->fillNullExprSupp.numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->fillNullExprSupp, pInfo->fillNullExprSupp.pExprInfo, pInfo->fillNullExprSupp.numOfExprs, + &pTaskInfo->storageAPI.functionStore); + QUERY_CHECK_CODE(code, lino, _error); + SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType ? 
&((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval @@ -482,7 +501,9 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi code = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID, &pInfo->matchInfo); + QUERY_CHECK_CODE(code, lino, _error); code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs, + pInfo->fillNullExprSupp.pExprInfo, pInfo->fillNullExprSupp.numOfExprs, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order, pTaskInfo); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index 81832cac8f..08be9a4a64 100644 --- a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -1436,7 +1436,7 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod QUERY_CHECK_CODE(code, lino, _end); pFillSup->pAllColInfo = createFillColInfo(pFillExprInfo, pFillSup->numOfFillCols, noFillExprInfo, numOfNotFillCols, - (const SNodeListNode*)(pPhyFillNode->pValues)); + NULL, 0, (const SNodeListNode*)(pPhyFillNode->pValues)); if (pFillSup->pAllColInfo == NULL) { code = terrno; lino = __LINE__; diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index cdfbd7a850..190b327522 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -39,22 +39,27 @@ static int32_t doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); static void setNotFillColumn(SFillInfo* pFillInfo, SColumnInfoData* pDstColInfo, int32_t rowIndex, int32_t colIdx) { - SRowVal* p = NULL; - if (pFillInfo->type == TSDB_FILL_NEXT) { - p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->next : &pFillInfo->prev; + SFillColInfo* pCol = &pFillInfo->pFillCol[colIdx]; + if (pCol->fillNull) { + colDataSetNULL(pDstColInfo, rowIndex); } else { - p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next; - } + SRowVal* p = NULL; + if (pFillInfo->type == TSDB_FILL_NEXT) { + p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->next : &pFillInfo->prev; + } else { + p = FILL_IS_ASC_FILL(pFillInfo) ? 
&pFillInfo->prev : &pFillInfo->next; + } - SGroupKeys* pKey = taosArrayGet(p->pRowVal, colIdx); - if (!pKey) { - qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); - T_LONG_JMP(pFillInfo->pTaskInfo->env, terrno); - } - int32_t code = doSetVal(pDstColInfo, rowIndex, pKey); - if (code != TSDB_CODE_SUCCESS) { - qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); - T_LONG_JMP(pFillInfo->pTaskInfo->env, code); + SGroupKeys* pKey = taosArrayGet(p->pRowVal, colIdx); + if (!pKey) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); + T_LONG_JMP(pFillInfo->pTaskInfo->env, terrno); + } + int32_t code = doSetVal(pDstColInfo, rowIndex, pKey); + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + T_LONG_JMP(pFillInfo->pTaskInfo->env, code); + } } } @@ -545,9 +550,10 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { return pFillInfo->numOfRows - pFillInfo->index; } -int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, - SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t primaryTsSlotId, - int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo) { +int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t fillNullCols, + int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, + int32_t primaryTsSlotId, int32_t order, const char* id, SExecTaskInfo* pTaskInfo, + SFillInfo** ppFillInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; if (fillType == TSDB_FILL_NONE) { @@ -574,7 +580,7 @@ int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFi pFillInfo->type = fillType; pFillInfo->pFillCol = pCol; - pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols; + pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols + fillNullCols; pFillInfo->alloc = capacity; pFillInfo->id = id; pFillInfo->interval = *pInterval; @@ -761,10 +767,11 @@ _end: int64_t getFillInfoStart(struct SFillInfo* pFillInfo) { return pFillInfo->start; } SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, - int32_t numOfNoFillExpr, const struct SNodeListNode* pValNode) { + int32_t numOfNoFillExpr, SExprInfo* pFillNullExpr, int32_t numOfFillNullExpr, + const struct SNodeListNode* pValNode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; - SFillColInfo* pFillCol = taosMemoryCalloc(numOfFillExpr + numOfNoFillExpr, sizeof(SFillColInfo)); + SFillColInfo* pFillCol = taosMemoryCalloc(numOfFillExpr + numOfNoFillExpr + numOfFillNullExpr, sizeof(SFillColInfo)); if (pFillCol == NULL) { return NULL; } @@ -797,6 +804,13 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn pFillCol[i + numOfFillExpr].notFillCol = true; } + for (int32_t i = 0; i < numOfFillNullExpr; ++i) { + SExprInfo* pExprInfo = &pFillNullExpr[i]; + pFillCol[i + numOfFillExpr + numOfNoFillExpr].pExpr = pExprInfo; + pFillCol[i + numOfFillExpr + numOfNoFillExpr].notFillCol = true; + pFillCol[i + numOfFillExpr + numOfNoFillExpr].fillNull = true; + } + return pFillCol; _end: diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index b86d7725fa..5dcdfd2791 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -1147,7 +1147,8 @@ int32_t 
createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN pInfo->fillType = convertFillType(pInterpPhyNode->fillMode); initResultSizeInfo(&pOperator->resultInfo, 4096); - pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, NULL, 0, (SNodeListNode*)pInterpPhyNode->pFillValues); + pInfo->pFillColInfo = + createFillColInfo(pExprInfo, numOfExprs, NULL, 0, NULL, 0, (SNodeListNode*)pInterpPhyNode->pFillValues); QUERY_CHECK_NULL(pInfo->pFillColInfo, code, lino, _error, terrno); pInfo->pLinearInfo = NULL; diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 3a68648d49..4164852111 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -5,115 +5,114 @@ add_library(function STATIC ${FUNCTION_SRC} ${FUNCTION_SRC_DETAIL}) target_include_directories( function PUBLIC - "${TD_SOURCE_DIR}/include/libs/function" - "${TD_SOURCE_DIR}/include/util" - "${TD_SOURCE_DIR}/include/common" - "${TD_SOURCE_DIR}/include/client" - "${TD_SOURCE_DIR}/contrib/libuv/include" + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/client" + "${TD_SOURCE_DIR}/contrib/libuv/include" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) - ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) - SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") -ELSE () - SET(LINK_JEMALLOC "") -ENDIF () +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) + SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") +ELSE() + SET(LINK_JEMALLOC "") +ENDIF() -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEPENDENCIES(function jemalloc) -ENDIF () +ENDIF() target_link_libraries( function - PRIVATE os - PRIVATE util - PRIVATE common - PRIVATE nodes - PRIVATE qcom - PRIVATE scalar - PRIVATE geometry - PRIVATE transport + PRIVATE os + PRIVATE util + PRIVATE common + PRIVATE nodes + PRIVATE qcom + PRIVATE scalar + PRIVATE geometry + PRIVATE transport PUBLIC uv_a ) add_executable(runUdf test/runUdf.c) target_include_directories( - runUdf - PUBLIC - "${TD_SOURCE_DIR}/include/libs/function" - "${TD_SOURCE_DIR}/contrib/libuv/include" - "${TD_SOURCE_DIR}/include/util" - "${TD_SOURCE_DIR}/include/common" - "${TD_SOURCE_DIR}/include/client" - "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + runUdf + PUBLIC + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/contrib/libuv/include" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/client" + "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEPENDENCIES(runUdf jemalloc) -ENDIF () +ENDIF() target_link_libraries( - runUdf - PUBLIC uv_a - PRIVATE os util common nodes function ${LINK_JEMALLOC} + runUdf + PUBLIC uv_a + PRIVATE os util common nodes function ${LINK_JEMALLOC} ) add_library(udf1 STATIC MODULE test/udf1.c) target_include_directories( - udf1 - PUBLIC - "${TD_SOURCE_DIR}/include/libs/function" - "${TD_SOURCE_DIR}/include/util" - "${TD_SOURCE_DIR}/include/common" - "${TD_SOURCE_DIR}/include/client" - 
"${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + udf1 + PUBLIC + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/client" + "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEPENDENCIES(udf1 jemalloc) -ENDIF () +ENDIF() target_link_libraries( udf1 PUBLIC os ${LINK_JEMALLOC}) - add_library(udf1_dup STATIC MODULE test/udf1_dup.c) target_include_directories( - udf1_dup - PUBLIC - "${TD_SOURCE_DIR}/include/libs/function" - "${TD_SOURCE_DIR}/include/util" - "${TD_SOURCE_DIR}/include/common" - "${TD_SOURCE_DIR}/include/client" - "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + udf1_dup + PUBLIC + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/client" + "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEPENDENCIES(udf1_dup jemalloc) -ENDIF () +ENDIF() target_link_libraries( udf1_dup PUBLIC os ${LINK_JEMALLOC}) add_library(udf2 STATIC MODULE test/udf2.c) target_include_directories( - udf2 - PUBLIC - "${TD_SOURCE_DIR}/include/libs/function" - "${TD_SOURCE_DIR}/include/util" - "${TD_SOURCE_DIR}/include/common" - "${TD_SOURCE_DIR}/include/client" - "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + udf2 + PUBLIC + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/client" + "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEPENDENCIES(udf2 jemalloc) -ENDIF () +ENDIF() target_link_libraries( udf2 PUBLIC os ${LINK_JEMALLOC} @@ -121,45 +120,44 @@ target_link_libraries( add_library(udf2_dup STATIC MODULE test/udf2_dup.c) target_include_directories( - udf2_dup - PUBLIC - "${TD_SOURCE_DIR}/include/libs/function" - "${TD_SOURCE_DIR}/include/util" - "${TD_SOURCE_DIR}/include/common" - "${TD_SOURCE_DIR}/include/client" - "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + udf2_dup + PUBLIC + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/client" + "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEPENDENCIES(udf2_dup jemalloc) -ENDIF () +ENDIF() target_link_libraries( udf2_dup PUBLIC os ${LINK_JEMALLOC} ) -#SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin) +# SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin) add_executable(udfd src/udfd.c) target_include_directories( - udfd - PUBLIC - "${TD_SOURCE_DIR}/include/libs/function" - "${TD_SOURCE_DIR}/contrib/libuv/include" - "${TD_SOURCE_DIR}/include/util" - "${TD_SOURCE_DIR}/include/common" - "${TD_SOURCE_DIR}/include/libs/transport" - "${TD_SOURCE_DIR}/include/client" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + udfd + PUBLIC + "${TD_SOURCE_DIR}/include/libs/function" + "${TD_SOURCE_DIR}/contrib/libuv/include" + "${TD_SOURCE_DIR}/include/util" + "${TD_SOURCE_DIR}/include/common" + "${TD_SOURCE_DIR}/include/libs/transport" + "${TD_SOURCE_DIR}/include/client" + PRIVATE 
"${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEPENDENCIES(udfd jemalloc) -ENDIF () +ENDIF() target_link_libraries( udfd PUBLIC uv_a PRIVATE os util common nodes function ${LINK_JEMALLOC} - ) - +) diff --git a/source/libs/index/CMakeLists.txt b/source/libs/index/CMakeLists.txt index 6f3f48610c..246708926a 100644 --- a/source/libs/index/CMakeLists.txt +++ b/source/libs/index/CMakeLists.txt @@ -1,23 +1,22 @@ aux_source_directory(src INDEX_SRC) add_library(index STATIC ${INDEX_SRC}) target_include_directories( - index - PUBLIC "${TD_SOURCE_DIR}/include/libs/index" - PUBLIC "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" - + index + PUBLIC "${TD_SOURCE_DIR}/include/libs/index" + PUBLIC "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - index - PUBLIC os - PUBLIC util - PUBLIC common - PUBLIC nodes - PUBLIC scalar - PUBLIC function + index + PUBLIC os + PUBLIC util + PUBLIC common + PUBLIC nodes + PUBLIC scalar + PUBLIC function ) -if (${BUILD_WITH_LUCENE}) +if(${BUILD_WITH_LUCENE}) target_include_directories( index PUBLIC "${TD_SOURCE_DIR}/deps/lucene/include" @@ -29,12 +28,10 @@ if (${BUILD_WITH_LUCENE}) ) endif(${BUILD_WITH_LUCENE}) -if (${BUILD_WITH_INVERTEDINDEX}) - add_definitions(-DUSE_INVERTED_INDEX) +if(${BUILD_WITH_INVERTEDINDEX}) + add_definitions(-DUSE_INVERTED_INDEX) endif(${BUILD_WITH_INVERTEDINDEX}) - -if (${BUILD_TEST}) - add_subdirectory(test) +if(${BUILD_TEST}) + add_subdirectory(test) endif(${BUILD_TEST}) - diff --git a/source/libs/monitor/CMakeLists.txt b/source/libs/monitor/CMakeLists.txt index cc8f40fa4c..23597718bf 100644 --- a/source/libs/monitor/CMakeLists.txt +++ b/source/libs/monitor/CMakeLists.txt @@ -9,5 +9,5 @@ target_include_directories( target_link_libraries(monitor os util common qcom transport monitorfw) if(${BUILD_TEST}) - add_subdirectory(test) + add_subdirectory(test) endif(${BUILD_TEST}) \ No newline at end of file diff --git a/source/libs/monitorfw/CMakeLists.txt b/source/libs/monitorfw/CMakeLists.txt index f08b2d6c2b..339a97fb94 100644 --- a/source/libs/monitorfw/CMakeLists.txt +++ b/source/libs/monitorfw/CMakeLists.txt @@ -5,7 +5,9 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/include/libs/monitorfw" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) + if(${TD_DARWIN}) - target_compile_options(monitorfw PRIVATE -Wno-error=deprecated-pragma) + target_compile_options(monitorfw PRIVATE -Wno-error=deprecated-pragma) endif(${TD_DARWIN}) + target_link_libraries(monitorfw os util common transport) diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index fbb6943f83..b2c4a3ad2a 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -642,6 +642,7 @@ static int32_t logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) { CLONE_NODE_FIELD(pWStartTs); CLONE_NODE_FIELD(pValues); COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow)); + CLONE_NODE_LIST_FIELD(pFillNullExprs); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 878dd10e5a..3275cfd838 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2982,6 +2982,7 @@ static const char* jkFillPhysiPlanWStartTs = "WStartTs"; static const char* jkFillPhysiPlanValues = "Values"; static const char* jkFillPhysiPlanStartTime = "StartTime"; static const char* 
jkFillPhysiPlanEndTime = "EndTime"; +static const char* jkFillPhysiPlanFillNullExprs = "FillNullExprs"; static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) { const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj; @@ -3008,6 +3009,9 @@ static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanEndTime, pNode->timeRange.ekey); } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkFillPhysiPlanFillNullExprs, pNode->pFillNullExprs); + } return code; } @@ -3037,6 +3041,9 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBigIntValue(pJson, jkFillPhysiPlanEndTime, &pNode->timeRange.ekey); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkFillPhysiPlanFillNullExprs, &pNode->pFillNullExprs); + } return code; } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index de33678ed2..28d0b9fbd4 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -3326,7 +3326,8 @@ enum { PHY_FILL_CODE_WSTART, PHY_FILL_CODE_VALUES, PHY_FILL_CODE_TIME_RANGE, - PHY_FILL_CODE_INPUT_TS_ORDER + PHY_FILL_CODE_INPUT_TS_ORDER, + PHY_FILL_CODE_FILL_NULL_EXPRS, }; static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -3351,6 +3352,9 @@ static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_FILL_NULL_EXPRS, nodeListToMsg, pNode->pFillNullExprs); + } return code; } @@ -3383,6 +3387,9 @@ static int32_t msgToPhysiFillNode(STlvDecoder* pDecoder, void* pObj) { case PHY_FILL_CODE_TIME_RANGE: code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, (void**)&pNode->timeRange); break; + case PHY_FILL_CODE_FILL_NULL_EXPRS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFillNullExprs); + break; default: break; } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 956e824dd4..85ceb63954 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -1497,6 +1497,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pLogicNode->pValues); nodesDestroyList(pLogicNode->pFillExprs); nodesDestroyList(pLogicNode->pNotFillExprs); + nodesDestroyList(pLogicNode->pFillNullExprs); break; } case QUERY_NODE_LOGIC_PLAN_SORT: { @@ -1668,6 +1669,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyList(pPhyNode->pNotFillExprs); nodesDestroyNode(pPhyNode->pWStartTs); nodesDestroyNode(pPhyNode->pValues); + nodesDestroyList(pPhyNode->pFillNullExprs); break; } case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: diff --git a/source/libs/parser/CMakeLists.txt b/source/libs/parser/CMakeLists.txt index c5ee1a00c4..f1b801c563 100644 --- a/source/libs/parser/CMakeLists.txt +++ b/source/libs/parser/CMakeLists.txt @@ -1,8 +1,8 @@ aux_source_directory(src PARSER_SRC) -IF (TD_ENTERPRISE) - LIST(APPEND PARSER_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/parserView.c) -ENDIF () +IF(TD_ENTERPRISE) + LIST(APPEND PARSER_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/parserView.c) +ENDIF() add_library(parser STATIC ${PARSER_SRC}) target_include_directories( @@ -17,5 +17,5 @@ target_link_libraries( ) if(${BUILD_TEST}) - ADD_SUBDIRECTORY(test) + 
ADD_SUBDIRECTORY(test) endif(${BUILD_TEST}) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index babf4c79c7..34c83acee8 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1294,71 +1294,139 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele return TSDB_CODE_FAILED; } +typedef struct SCollectFillExprsCtx { + SHashObj* pPseudoCols; + SNodeList* pFillExprs; + SNodeList* pNotFillExprs; + bool collectAggFuncs; + SNodeList* pAggFuncCols; +} SCollectFillExprsCtx; + +typedef struct SWalkFillSubExprCtx { + bool hasFillCol; + bool hasPseudoWinCol; + bool hasGroupKeyCol; + SCollectFillExprsCtx* pCollectFillCtx; + int32_t code; +} SWalkFillSubExprCtx; + +static bool nodeAlreadyContained(SNodeList* pList, SNode* pNode) { + SNode* pExpr = NULL; + FOREACH(pExpr, pList) { + if (nodesEqualNode(pExpr, pNode)) { + return true; + } + } + return false; +} + static EDealRes needFillValueImpl(SNode* pNode, void* pContext) { + SWalkFillSubExprCtx *pCtx = pContext; + EDealRes res = DEAL_RES_CONTINUE; if (QUERY_NODE_COLUMN == nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; - if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType && - COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) { - *(bool*)pContext = true; - return DEAL_RES_END; + if (COLUMN_TYPE_WINDOW_START == pCol->colType || COLUMN_TYPE_WINDOW_END == pCol->colType || + COLUMN_TYPE_WINDOW_DURATION == pCol->colType) { + pCtx->hasPseudoWinCol = true; + pCtx->code = + taosHashPut(pCtx->pCollectFillCtx->pPseudoCols, pCol->colName, TSDB_COL_NAME_LEN, &pNode, POINTER_BYTES); + } else if (COLUMN_TYPE_GROUP_KEY == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType || + COLUMN_TYPE_TAG == pCol->colType) { + pCtx->hasGroupKeyCol = true; + pCtx->code = + taosHashPut(pCtx->pCollectFillCtx->pPseudoCols, pCol->colName, TSDB_COL_NAME_LEN, &pNode, POINTER_BYTES); + } else { + pCtx->hasFillCol = true; + if (pCtx->pCollectFillCtx->collectAggFuncs) { + // Agg funcs have already been rewritten to columns by Interval + // Here, we return DEAL_RES_CONTINUE because we need to collect all agg funcs + if (!nodeAlreadyContained(pCtx->pCollectFillCtx->pFillExprs, pNode) && + !nodeAlreadyContained(pCtx->pCollectFillCtx->pAggFuncCols, pNode)) + pCtx->code = nodesListMakeStrictAppend(&pCtx->pCollectFillCtx->pAggFuncCols, pNode); + } else { + res = DEAL_RES_END; + } } } - return DEAL_RES_CONTINUE; + if (pCtx->code != TSDB_CODE_SUCCESS) res = DEAL_RES_ERROR; + return res; } -static bool needFillValue(SNode* pNode) { - bool hasFillCol = false; - nodesWalkExpr(pNode, needFillValueImpl, &hasFillCol); - return hasFillCol; +static void needFillValue(SNode* pNode, SWalkFillSubExprCtx* pCtx) { + nodesWalkExpr(pNode, needFillValueImpl, pCtx); } -static int32_t partFillExprs(SSelectStmt* pSelect, SNodeList** pFillExprs, SNodeList** pNotFillExprs) { - int32_t code = TSDB_CODE_SUCCESS; - SNode* pProject = NULL; - FOREACH(pProject, pSelect->pProjectionList) { - if (needFillValue(pProject)) { - SNode* pNew = NULL; - code = nodesCloneNode(pProject, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pFillExprs, pNew); - } - } else if (QUERY_NODE_VALUE != nodeType(pProject)) { - SNode* pNew = NULL; - code = nodesCloneNode(pProject, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pNotFillExprs, pNew); - } +static int32_t 
collectFillExpr(SNode* pNode, SCollectFillExprsCtx* pCollectFillCtx) { + SNode* pNew = NULL; + SWalkFillSubExprCtx collectFillSubExprCtx = { + .hasFillCol = false, .hasPseudoWinCol = false, .hasGroupKeyCol = false, .pCollectFillCtx = pCollectFillCtx}; + needFillValue(pNode, &collectFillSubExprCtx); + if (collectFillSubExprCtx.code != TSDB_CODE_SUCCESS) { + return collectFillSubExprCtx.code; + } + + if (collectFillSubExprCtx.hasFillCol && !pCollectFillCtx->collectAggFuncs) { + if (nodeType(pNode) == QUERY_NODE_ORDER_BY_EXPR) { + collectFillSubExprCtx.code = nodesCloneNode(((SOrderByExprNode*)pNode)->pExpr, &pNew); + } else { + collectFillSubExprCtx.code = nodesCloneNode(pNode, &pNew); } - if (TSDB_CODE_SUCCESS != code) { - NODES_DESTORY_LIST(*pFillExprs); - NODES_DESTORY_LIST(*pNotFillExprs); - break; + if (collectFillSubExprCtx.code == TSDB_CODE_SUCCESS) { + collectFillSubExprCtx.code = nodesListMakeStrictAppend(&pCollectFillCtx->pFillExprs, pNew); } } - if (!pSelect->isDistinct) { - SNode* pOrderExpr = NULL; - FOREACH(pOrderExpr, pSelect->pOrderByList) { - SNode* pExpr = ((SOrderByExprNode*)pOrderExpr)->pExpr; - if (needFillValue(pExpr)) { - SNode* pNew = NULL; - code = nodesCloneNode(pExpr, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pFillExprs, pNew); - } - } else if (QUERY_NODE_VALUE != nodeType(pExpr)) { - SNode* pNew = NULL; - code = nodesCloneNode(pExpr, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pNotFillExprs, pNew); - } + return collectFillSubExprCtx.code; +} + +static int32_t collectFillExprs(SSelectStmt* pSelect, SNodeList** pFillExprs, SNodeList** pNotFillExprs, + SNodeList** pPossibleFillNullCols) { + int32_t code = TSDB_CODE_SUCCESS; + SCollectFillExprsCtx collectFillCtx = {0}; + SNode* pNode = NULL; + collectFillCtx.pPseudoCols = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (!collectFillCtx.pPseudoCols) return terrno; + + FOREACH(pNode, pSelect->pProjectionList) { + code = collectFillExpr(pNode, &collectFillCtx); + if (code != TSDB_CODE_SUCCESS) break; + } + collectFillCtx.collectAggFuncs = true; + if (code == TSDB_CODE_SUCCESS) { + code = collectFillExpr(pSelect->pHaving, &collectFillCtx); + } + if (code == TSDB_CODE_SUCCESS) { + FOREACH(pNode, pSelect->pOrderByList) { + code = collectFillExpr(pNode, &collectFillCtx); + if (code != TSDB_CODE_SUCCESS) break; + } + } + if (code == TSDB_CODE_SUCCESS) { + void* pIter = taosHashIterate(collectFillCtx.pPseudoCols, 0); + while (pIter) { + SNode* pNode = *(SNode**)pIter, *pNew = NULL; + code = nodesCloneNode(pNode, &pNew); + if (code == TSDB_CODE_SUCCESS) { + code = nodesListMakeStrictAppend(&collectFillCtx.pNotFillExprs, pNew); } - if (TSDB_CODE_SUCCESS != code) { - NODES_DESTORY_LIST(*pFillExprs); - NODES_DESTORY_LIST(*pNotFillExprs); + if (code == TSDB_CODE_SUCCESS) { + pIter = taosHashIterate(collectFillCtx.pPseudoCols, pIter); + } else { + taosHashCancelIterate(collectFillCtx.pPseudoCols, pIter); break; } } + if (code == TSDB_CODE_SUCCESS) { + TSWAP(*pFillExprs, collectFillCtx.pFillExprs); + TSWAP(*pNotFillExprs, collectFillCtx.pNotFillExprs); + TSWAP(*pPossibleFillNullCols, collectFillCtx.pAggFuncCols); + } } + if (code != TSDB_CODE_SUCCESS) { + if (collectFillCtx.pFillExprs) nodesDestroyList(collectFillCtx.pFillExprs); + if (collectFillCtx.pNotFillExprs) nodesDestroyList(collectFillCtx.pNotFillExprs); + if (collectFillCtx.pAggFuncCols) nodesDestroyList(collectFillCtx.pAggFuncCols); + } + 
taosHashCleanup(collectFillCtx.pPseudoCols); return code; } @@ -1384,13 +1452,16 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pFill->node.resultDataOrder = pFill->node.requireDataOrder; pFill->node.inputTsOrder = TSDB_ORDER_ASC; - code = partFillExprs(pSelect, &pFill->pFillExprs, &pFill->pNotFillExprs); + code = collectFillExprs(pSelect, &pFill->pFillExprs, &pFill->pNotFillExprs, &pFill->pFillNullExprs); if (TSDB_CODE_SUCCESS == code) { code = rewriteExprsForSelect(pFill->pFillExprs, pSelect, SQL_CLAUSE_FILL, NULL); } if (TSDB_CODE_SUCCESS == code) { code = rewriteExprsForSelect(pFill->pNotFillExprs, pSelect, SQL_CLAUSE_FILL, NULL); } + if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pFill->pFillNullExprs) > 0) { + code = createColumnByRewriteExprs(pFill->pFillNullExprs, &pFill->node.pTargets); + } if (TSDB_CODE_SUCCESS == code) { code = createColumnByRewriteExprs(pFill->pFillExprs, &pFill->node.pTargets); } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index a07e69c376..17e950e9e2 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -2612,6 +2612,12 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren if (TSDB_CODE_SUCCESS == code) { code = addDataBlockSlots(pCxt, pFill->pNotFillExprs, pFill->node.pOutputDataBlockDesc); } + if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pFillNode->pFillNullExprs) > 0) { + code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pFillNullExprs, &pFill->pFillNullExprs); + if (TSDB_CODE_SUCCESS == code) { + code = addDataBlockSlots(pCxt, pFill->pFillNullExprs, pFill->node.pOutputDataBlockDesc); + } + } if (TSDB_CODE_SUCCESS == code) { code = setNodeSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pWStartTs, &pFill->pWStartTs); diff --git a/source/libs/qworker/CMakeLists.txt b/source/libs/qworker/CMakeLists.txt index 7a984cd000..5c5eafdbc5 100644 --- a/source/libs/qworker/CMakeLists.txt +++ b/source/libs/qworker/CMakeLists.txt @@ -8,8 +8,8 @@ target_include_directories( ) TARGET_LINK_LIBRARIES(qworker - PRIVATE os util transport nodes planner qcom executor index - ) + PRIVATE os util transport nodes planner qcom executor index +) if(${BUILD_TEST}) ADD_SUBDIRECTORY(test) diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 0011d1c70c..69014d5b1c 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -502,6 +502,10 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int } int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { + if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { + QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + int32_t code = 0; int8_t status = 0; bool queryDone = false; diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 7180c58404..ddc4812b55 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -329,7 +329,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, if (len < 0) { QW_TASK_ELOG("invalid length from dsGetDataLength, length:%" PRId64 "", len); - QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + QW_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } if (len == 0) { @@ -337,18 +337,18 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, code = dsGetDataBlock(ctx->sinkHandle, &output); if (code) { 
QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code)); - QW_ERR_RET(code); + QW_ERR_JRET(code); } QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %" PRId64, pOutput->numOfBlocks, pOutput->numOfRows); if (!ctx->dynamicTask) { - QW_ERR_RET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC, ctx->dynamicTask)); + QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC, ctx->dynamicTask)); } if (NULL == pRsp) { - QW_ERR_RET(qwMallocFetchRsp(!ctx->localExec, len, &pRsp)); + QW_ERR_JRET(qwMallocFetchRsp(!ctx->localExec, len, &pRsp)); *pOutput = output; } else { pOutput->queryEnd = output.queryEnd; @@ -368,7 +368,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, *dataLen += len + PAYLOAD_PREFIX_LEN; *pRawDataLen += rawLen + PAYLOAD_PREFIX_LEN; - QW_ERR_RET(qwMallocFetchRsp(!ctx->localExec, *dataLen, &pRsp)); + QW_ERR_JRET(qwMallocFetchRsp(!ctx->localExec, *dataLen, &pRsp)); // set the serialize start position output.pData = pRsp->data + *dataLen - (len + PAYLOAD_PREFIX_LEN); @@ -380,7 +380,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, code = dsGetDataBlock(ctx->sinkHandle, &output); if (code) { QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code)); - QW_ERR_RET(code); + QW_ERR_JRET(code); } pOutput->queryEnd = output.queryEnd; @@ -399,7 +399,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, if (DS_BUF_EMPTY == pOutput->bufStatus && pOutput->queryEnd) { QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %" PRId64, pOutput->numOfBlocks, pOutput->numOfRows); - QW_ERR_RET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC, ctx->dynamicTask)); + QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC, ctx->dynamicTask)); break; } @@ -416,8 +416,11 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, } } +_return: + *rspMsg = pRsp; - return TSDB_CODE_SUCCESS; + + return code; } int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes) { @@ -472,6 +475,12 @@ int32_t qwQuickRspFetchReq(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SQWMsg *qwMsg, int32 code = qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rawLen, &rsp, &sOutput); } + if (code) { + qwFreeFetchRsp(rsp); + rsp = NULL; + dataLen = 0; + } + if (NULL == rsp && TSDB_CODE_SUCCESS == code) { return TSDB_CODE_SUCCESS; } @@ -877,10 +886,11 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { break; } + qwFreeFetchRsp(rsp); + rsp = NULL; + if (code && QW_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) { QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_FETCH); - qwFreeFetchRsp(rsp); - rsp = NULL; qwMsg->connInfo = ctx->dataConnInfo; code = qwBuildAndSendFetchRsp(ctx->fetchMsgType + 1, &qwMsg->connInfo, NULL, 0, code); @@ -1432,6 +1442,8 @@ void qWorkerDestroy(void **qWorkerMgmt) { while (0 == destroyed) { taosMsleep(2); } + + *qWorkerMgmt = NULL; } int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat) { diff --git a/source/libs/scheduler/CMakeLists.txt b/source/libs/scheduler/CMakeLists.txt index fafc2a27e0..c07d267f97 100644 --- a/source/libs/scheduler/CMakeLists.txt +++ b/source/libs/scheduler/CMakeLists.txt @@ -13,5 +13,5 @@ target_link_libraries( ) if(${BUILD_TEST}) - ADD_SUBDIRECTORY(test) + ADD_SUBDIRECTORY(test) endif(${BUILD_TEST}) diff --git a/source/libs/stream/CMakeLists.txt b/source/libs/stream/CMakeLists.txt index b63a8b3900..bb2a23b106 100644 --- 
a/source/libs/stream/CMakeLists.txt +++ b/source/libs/stream/CMakeLists.txt @@ -1,61 +1,59 @@ aux_source_directory(src STREAM_SRC) add_library(stream STATIC ${STREAM_SRC}) target_include_directories( - stream - PUBLIC "${TD_SOURCE_DIR}/include/libs/stream" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + stream + PUBLIC "${TD_SOURCE_DIR}/include/libs/stream" + PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) - if(${BUILD_WITH_ROCKSDB}) - if (${BUILD_CONTRIB}) + if(${BUILD_CONTRIB}) target_link_libraries( stream - PUBLIC rocksdb tdb - PRIVATE os util transport qcom executor wal index + PUBLIC rocksdb tdb tcs + PRIVATE os util transport qcom executor wal index ) target_include_directories( stream PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" ) else() - if (TD_LINUX) + if(TD_LINUX) target_include_directories( - stream - PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" - ) - target_link_directories( - stream - PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" - ) - target_link_libraries( - stream - PUBLIC rocksdb tdb - PRIVATE os util transport qcom executor wal index - ) - else() + stream + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) + target_link_directories( + stream + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) target_link_libraries( stream - PUBLIC rocksdb tdb - PRIVATE os util transport qcom executor wal index + PUBLIC rocksdb tdb tcs + PRIVATE os util transport qcom executor wal index + ) + else() + target_link_libraries( + stream + PUBLIC rocksdb tdb tcs + PRIVATE os util transport qcom executor wal index ) target_include_directories( stream PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" ) endif() - endif() + endif() + add_definitions(-DUSE_ROCKSDB) endif(${BUILD_WITH_ROCKSDB}) - -#target_link_libraries( -# stream -# PUBLIC tdb -# PRIVATE os util transport qcom executor -#) - +# target_link_libraries( +# stream +# PUBLIC tdb +# PRIVATE os util transport qcom executor +# ) if(${BUILD_TEST}) - ADD_SUBDIRECTORY(test) + ADD_SUBDIRECTORY(test) endif(${BUILD_TEST}) - diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index be914d9746..302090bb37 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -13,10 +13,10 @@ * along with this program. If not, see . */ -#include "cos.h" #include "rsync.h" #include "streamBackendRocksdb.h" #include "streamInt.h" +#include "tcs.h" static int32_t downloadCheckpointDataByName(const char* id, const char* fname, const char* dstName); static int32_t deleteCheckpointFile(const char* id, const char* name); @@ -343,7 +343,7 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock // And if we don't start a new timer, and the lost of checkpoint-trigger message may cause the whole checkpoint // procedure to be stucked. 
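// The hunk below guards timer creation with an 8-bit compare-and-swap: only
// the caller that flips isActive from 0 to 1 may arm the checkpoint-trigger
// monitor, so a re-delivered trigger block can never start a second timer.
// A minimal, self-contained sketch of the same guard pattern using C11
// atomics (TmrGuard and tmrGuardTryArm are illustrative names, not TDengine
// APIs):
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
  atomic_schar isActive;  // 0: no monitor timer armed, 1: timer armed
} TmrGuard;

// Returns true only for the single thread that wins the 0 -> 1 race.
static bool tmrGuardTryArm(TmrGuard* g) {
  signed char expected = 0;
  return atomic_compare_exchange_strong(&g->isActive, &expected, 1);
}
// A loser falls through to an "already launched, do nothing" branch, exactly
// as the stError path below does.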
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; - int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1); + int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1); if (old == 0) { int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref); @@ -352,7 +352,7 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); pTmrInfo->launchChkptId = pActiveInfo->activeId; - } else { // already launched, do nothing + } else { // already launched, do nothing stError("s-task:%s previous checkpoint-trigger monitor tmr is set, not start new one", pTask->id.idStr); } } @@ -373,10 +373,10 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock if (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH) { stDebug("s-task:%s set childIdx:%d, and add checkpoint-trigger block into outputQ", id, pTask->info.selfChildId); - code = continueDispatchCheckpointTriggerBlock(pBlock, pTask); // todo handle this failure + code = continueDispatchCheckpointTriggerBlock(pBlock, pTask); // todo handle this failure } else { // only one task exists, no need to dispatch downstream info - code = appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT, pActiveInfo->activeId, pActiveInfo->transId, - -1); + code = + appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT, pActiveInfo->activeId, pActiveInfo->transId, -1); streamFreeQitem((SStreamQueueItem*)pBlock); } } else if (taskLevel == TASK_LEVEL__SINK || taskLevel == TASK_LEVEL__AGG) { @@ -399,8 +399,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock if (taskLevel == TASK_LEVEL__SINK) { stDebug("s-task:%s process checkpoint-trigger block, all %d upstreams sent, send ready msg to upstream", id, num); streamFreeQitem((SStreamQueueItem*)pBlock); - code = streamTaskBuildCheckpoint(pTask); // todo: not handle error yet - } else { // source & agg tasks need to forward the checkpoint msg downwards + code = streamTaskBuildCheckpoint(pTask); // todo: not handle error yet + } else { // source & agg tasks need to forward the checkpoint msg downwards stDebug("s-task:%s process checkpoint-trigger block, all %d upstreams sent, forwards to downstream", id, num); code = flushStateDataInExecutor(pTask, (SStreamQueueItem*)pBlock); if (code) { @@ -445,7 +445,7 @@ static int32_t processCheckpointReadyHelp(SActiveCheckpointInfo* pInfo, int32_t .transId = pInfo->transId, .streamId = streamId, .downstreamNodeId = downstreamNodeId}; - void* p = taosArrayPush(pInfo->pCheckpointReadyRecvList, &info); + void* p = taosArrayPush(pInfo->pCheckpointReadyRecvList, &info); if (p == NULL) { stError("s-task:%s failed to set checkpoint ready recv msg, code:%s", id, tstrerror(terrno)); return terrno; @@ -560,8 +560,8 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) { } streamMutexUnlock(&pInfo->lock); - stDebug("s-task:%s clear active checkpointInfo, failed checkpointId:%"PRId64", current checkpointId:%"PRId64, - pTask->id.idStr, pInfo->failedId, pTask->chkInfo.checkpointId); + stDebug("s-task:%s clear active checkpointInfo, failed checkpointId:%" PRId64 ", current checkpointId:%" PRId64, + pTask->id.idStr, pInfo->failedId, pTask->chkInfo.checkpointId); } int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* 
pTask, bool restored, SVUpdateCheckpointInfoReq* pReq) { @@ -575,8 +575,7 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV if (pReq->checkpointId <= pInfo->checkpointId) { stDebug("s-task:%s vgId:%d latest checkpointId:%" PRId64 " Ver:%" PRId64 - " no need to update checkpoint info, updated checkpointId:%" PRId64 " Ver:%" PRId64 - " transId:%d ignored", + " no need to update checkpoint info, updated checkpointId:%" PRId64 " Ver:%" PRId64 " transId:%d ignored", id, vgId, pInfo->checkpointId, pInfo->checkpointVer, pReq->checkpointId, pReq->checkpointVer, pReq->transId); streamMutexUnlock(&pTask->lock); @@ -623,7 +622,7 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV } bool valid = (pInfo->checkpointId <= pReq->checkpointId && pInfo->checkpointVer <= pReq->checkpointVer && - pInfo->processedVer <= pReq->checkpointVer); + pInfo->processedVer <= pReq->checkpointVer); if (!valid) { stFatal("invalid checkpoint id check, current checkpointId:%" PRId64 " checkpointVer:%" PRId64 @@ -908,7 +907,7 @@ static int32_t doChkptStatusCheck(SStreamTask* pTask) { if (pTmrInfo->launchChkptId != pActiveInfo->activeId) { int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64 - ", quit, ref:%d", + ", quit, ref:%d", id, vgId, pTmrInfo->launchChkptId, ref); return -1; } @@ -1005,7 +1004,7 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { int32_t numOfNotSend = 0; SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; - SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; + SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); @@ -1023,7 +1022,8 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { } if (++pTmrInfo->activeCounter < 50) { - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); + streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + "trigger-recv-monitor"); return; } @@ -1201,8 +1201,8 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) { STaskDispatcherFixed* pDispatch = &pTask->outputInfo.fixedDispatcher; STaskTriggerSendInfo p = {.sendTs = now, .recved = false, .nodeId = pDispatch->nodeId, .taskId = pDispatch->taskId}; - void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p); - if (px == NULL) { // pause the stream task, if memory not enough + void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p); + if (px == NULL) { // pause the stream task, if memory not enough code = terrno; } } else { @@ -1213,8 +1213,8 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) { } STaskTriggerSendInfo p = {.sendTs = now, .recved = false, .nodeId = pVgInfo->vgId, .taskId = pVgInfo->taskId}; - void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p); - if (px == NULL) { // pause the stream task, if memory not enough + void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p); + if (px == NULL) { // pause the stream task, if memory not enough code = terrno; break; } @@ -1288,11 +1288,11 @@ void streamTaskSetTriggerDispatchConfirmed(SStreamTask* pTask, int32_t vgId) { static int32_t uploadCheckpointToS3(const char* id, const char* path) { int32_t code = 0; int32_t nBytes = 0; - + /* if (s3Init() != 0) { return TSDB_CODE_THIRDPARTY_ERROR; } - + */ 
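// From here down, the checkpoint upload path goes through the tcs* wrappers
// rather than calling the S3 client directly; the concrete backend (S3 or
// Azure blob) is chosen once at init time via a struct of function pointers
// (see STcs and tcsInit() later in this patch). A minimal sketch of that
// dispatch idea, with hypothetical stub names standing in for the real
// backends:
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int32_t (*putFile)(const char* file, const char* object);
  int32_t (*getFile)(const char* object, const char* file);
} ObjStoreVtbl;

static int32_t s3PutStub(const char* f, const char* o) { (void)f; (void)o; return 0; }
static int32_t s3GetStub(const char* o, const char* f) { (void)o; (void)f; return 0; }
static int32_t blobPutStub(const char* f, const char* o) { (void)f; (void)o; return 0; }
static int32_t blobGetStub(const char* o, const char* f) { (void)o; (void)f; return 0; }

static ObjStoreVtbl gStore;

// Pick the backend once; callers then use gStore.putFile()/getFile() and
// never branch on the storage type again.
static void objStoreInit(bool useBlob) {
  gStore = useBlob ? (ObjStoreVtbl){blobPutStub, blobGetStub}
                   : (ObjStoreVtbl){s3PutStub, s3GetStub};
}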
TdDirPtr pDir = taosOpenDir(path); if (pDir == NULL) { return terrno; @@ -1325,11 +1325,11 @@ static int32_t uploadCheckpointToS3(const char* id, const char* path) { break; } - code = s3PutObjectFromFile2(filename, object, 0); + code = tcsPutObjectFromFile2(filename, object, 0); if (code != 0) { - stError("[s3] failed to upload checkpoint:%s, reason:%s", filename, tstrerror(code)); + stError("[tcs] failed to upload checkpoint:%s, reason:%s", filename, tstrerror(code)); } else { - stDebug("[s3] upload checkpoint:%s", filename); + stDebug("[tcs] upload checkpoint:%s", filename); } } @@ -1355,7 +1355,7 @@ int32_t downloadCheckpointByNameS3(const char* id, const char* fname, const char taosMemoryFree(buf); return TSDB_CODE_OUT_OF_RANGE; } - int32_t code = s3GetObjectToFile(buf, dstName); + int32_t code = tcsGetObjectToFile(buf, dstName); if (code != 0) { taosMemoryFree(buf); return TAOS_SYSTEM_ERROR(errno); @@ -1418,7 +1418,7 @@ int32_t streamTaskDownloadCheckpointData(const char* id, char* path, int64_t che if (strlen(tsSnodeAddress) != 0) { return downloadByRsync(id, path, checkpointId); } else if (tsS3StreamEnabled) { - return s3GetObjectsByPrefix(id, path); + return tcsGetObjectsByPrefix(id, path); } return 0; @@ -1432,7 +1432,7 @@ int32_t deleteCheckpoint(const char* id) { if (strlen(tsSnodeAddress) != 0) { return deleteRsync(id); } else if (tsS3StreamEnabled) { - s3DeleteObjectsByPrefix(id); + tcsDeleteObjectsByPrefix(id); } return 0; } @@ -1446,7 +1446,7 @@ int32_t deleteCheckpointFile(const char* id, const char* name) { } char* tmp = object; - int32_t code = s3DeleteObjects((const char**)&tmp, 1); + int32_t code = tcsDeleteObjects((const char**)&tmp, 1); if (code != 0) { return TSDB_CODE_THIRDPARTY_ERROR; } @@ -1488,4 +1488,4 @@ int32_t streamTaskSendCheckpointsourceRsp(SStreamTask* pTask) { streamMutexUnlock(&pTask->lock); return code; -} \ No newline at end of file +} diff --git a/source/libs/stream/test/CMakeLists.txt b/source/libs/stream/test/CMakeLists.txt index c472207b27..ed66563225 100644 --- a/source/libs/stream/test/CMakeLists.txt +++ b/source/libs/stream/test/CMakeLists.txt @@ -1,81 +1,77 @@ - - # bloomFilterTest -#TARGET_LINK_LIBRARIES(streamUpdateTest - #PUBLIC os util common gtest gtest_main stream executor index - #) +# TARGET_LINK_LIBRARIES(streamUpdateTest +# PUBLIC os util common gtest gtest_main stream executor index +# ) -#TARGET_INCLUDE_DIRECTORIES( - #streamUpdateTest - #PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/" - #PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" -#) +# TARGET_INCLUDE_DIRECTORIES( +# streamUpdateTest +# PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/" +# PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" +# ) -#ADD_EXECUTABLE(checkpointTest checkpointTest.cpp) -#TARGET_LINK_LIBRARIES( - #checkpointTest - #PUBLIC os common gtest stream executor qcom index transport util -#) +# ADD_EXECUTABLE(checkpointTest checkpointTest.cpp) +# TARGET_LINK_LIBRARIES( +# checkpointTest +# PUBLIC os common gtest stream executor qcom index transport util +# ) -#TARGET_INCLUDE_DIRECTORIES( - #checkpointTest - #PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" -#) +# TARGET_INCLUDE_DIRECTORIES( +# checkpointTest +# PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" +# ) -#add_executable(backendTest "") +# add_executable(backendTest "") -#target_sources(backendTest - #PRIVATE - #"backendTest.cpp" -#) +# target_sources(backendTest +# PRIVATE +# "backendTest.cpp" +# ) -#TARGET_LINK_LIBRARIES( - #backendTest - #PUBLIC rocksdb - #PUBLIC os common gtest stream executor 
qcom index transport util -#) +# TARGET_LINK_LIBRARIES( +# backendTest +# PUBLIC rocksdb +# PUBLIC os common gtest stream executor qcom index transport util +# ) -#TARGET_INCLUDE_DIRECTORIES( - #backendTest - #PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/" - #PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" -#) +# TARGET_INCLUDE_DIRECTORIES( +# backendTest +# PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/" +# PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" +# ) -#add_test( - #NAME streamUpdateTest - #COMMAND streamUpdateTest -#) +# add_test( +# NAME streamUpdateTest +# COMMAND streamUpdateTest +# ) -#add_test( - #NAME checkpointTest - #COMMAND checkpointTest -#) -#add_test( - #NAME backendTest - #COMMAND backendTest -#) +# add_test( +# NAME checkpointTest +# COMMAND checkpointTest +# ) +# add_test( +# NAME backendTest +# COMMAND backendTest +# ) +# add_executable(backendTest "") -#add_executable(backendTest "") +# target_sources(backendTest +# PUBLIC +# "backendTest.cpp" +# ) -#target_sources(backendTest - #PUBLIC - #"backendTest.cpp" -#) - -#target_include_directories( - #backendTest - #PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/" - #PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" -#) - -#target_link_libraries( - #backendTest - #PUBLIC rocksdb - #PUBLIC os common gtest stream executor qcom index transport util -#) +# target_include_directories( +# backendTest +# PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/" +# PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" +# ) +# target_link_libraries( +# backendTest +# PUBLIC rocksdb +# PUBLIC os common gtest stream executor qcom index transport util +# ) MESSAGE(STATUS "build parser unit test") @@ -86,19 +82,19 @@ IF(NOT TD_DARWIN) ADD_EXECUTABLE(backendTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES( - backendTest - PUBLIC rocksdb - PUBLIC os common gtest stream executor qcom index transport util vnode + backendTest + PUBLIC rocksdb + PUBLIC os common gtest stream executor qcom index transport util vnode ) TARGET_INCLUDE_DIRECTORIES( - backendTest + backendTest PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/" PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc" ) ADD_TEST( - NAME backendTest - COMMAND backendTest + NAME backendTest + COMMAND backendTest ) -ENDIF () \ No newline at end of file +ENDIF() diff --git a/source/libs/tcs/CMakeLists.txt b/source/libs/tcs/CMakeLists.txt new file mode 100644 index 0000000000..95c167d737 --- /dev/null +++ b/source/libs/tcs/CMakeLists.txt @@ -0,0 +1,23 @@ +aux_source_directory(src TCS_SRC) + +add_library(tcs STATIC ${TCS_SRC}) +target_include_directories( + tcs + PUBLIC "${TD_SOURCE_DIR}/include/libs/tcs" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" +) + +target_link_libraries( + tcs + PUBLIC az + PUBLIC common + + # PUBLIC cjson + # PUBLIC os + # PUBLIC util + # PUBLIC crypt +) + +if(${BUILD_TEST}) + add_subdirectory(test) +endif(${BUILD_TEST}) diff --git a/source/libs/tcs/inc/tcsInt.h b/source/libs/tcs/inc/tcsInt.h new file mode 100644 index 0000000000..b24a47aa98 --- /dev/null +++ b/source/libs/tcs/inc/tcsInt.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _TD_TCS_INT_H_ +#define _TD_TCS_INT_H_ + +#include "os.h" +#include "tarray.h" +#include "tdef.h" +#include "tlog.h" +#include "tmsg.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern int8_t tsS3Ablob; + +typedef enum { + TOS_PROTO_NIL, + TOS_PROTO_S3, + TOS_PROTO_ABLOB, +} STosProto; + +typedef struct { + int32_t (*Begin)(); + void (*End)(); + int32_t (*CheckCfg)(); + + int32_t (*PutObjectFromFileOffset)(const char* file, const char* object_name, int64_t offset, int64_t size); + int32_t (*GetObjectBlock)(const char* object_name, int64_t offset, int64_t size, bool check, uint8_t** ppBlock); + + void (*DeleteObjectsByPrefix)(const char* prefix); + + int32_t (*PutObjectFromFile2)(const char* file, const char* object, int8_t withcp); + int32_t (*GetObjectsByPrefix)(const char* prefix, const char* path); + int32_t (*DeleteObjects)(const char* object_name[], int nobject); + int32_t (*GetObjectToFile)(const char* object_name, const char* fileName); +} STcs; + +extern STcs tcs; + +#ifdef __cplusplus +} +#endif + +#endif // _TD_TCS_INT_H_ diff --git a/source/libs/tcs/src/tcs.c b/source/libs/tcs/src/tcs.c new file mode 100644 index 0000000000..a668eac60f --- /dev/null +++ b/source/libs/tcs/src/tcs.c @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "tcs.h" +#include "os.h" +#include "taoserror.h" +#include "tcsInt.h" +#include "tglobal.h" + +#include "az.h" +#include "cos.h" + +int32_t tcsInit() { + int32_t code = 0; + + STosProto proto = tsS3Ablob ? 
TOS_PROTO_ABLOB : TOS_PROTO_S3; + + if (TOS_PROTO_S3 == proto) { + tcs.Begin = s3Begin; + tcs.End = s3End; + tcs.CheckCfg = s3CheckCfg; + + tcs.PutObjectFromFileOffset = s3PutObjectFromFileOffset; + tcs.GetObjectBlock = s3GetObjectBlock; + + tcs.DeleteObjectsByPrefix = s3DeleteObjectsByPrefix; + + tcs.PutObjectFromFile2 = s3PutObjectFromFile2; + tcs.GetObjectsByPrefix = s3GetObjectsByPrefix; + tcs.DeleteObjects = s3DeleteObjects; + tcs.GetObjectToFile = s3GetObjectToFile; + + } else if (TOS_PROTO_ABLOB == proto) { + tcs.Begin = azBegin; + tcs.End = azEnd; + tcs.CheckCfg = azCheckCfg; + + tcs.PutObjectFromFileOffset = azPutObjectFromFileOffset; + tcs.GetObjectBlock = azGetObjectBlock; + + tcs.DeleteObjectsByPrefix = azDeleteObjectsByPrefix; + + tcs.PutObjectFromFile2 = azPutObjectFromFile2; + tcs.GetObjectsByPrefix = azGetObjectsByPrefix; + tcs.DeleteObjects = azDeleteObjects; + tcs.GetObjectToFile = azGetObjectToFile; + + } else { + code = TSDB_CODE_INVALID_PARA; + return code; + } + + code = tcs.Begin(); + + return code; +} + +void tcsUninit() { tcs.End(); } + +int32_t tcsCheckCfg() { + int32_t code = 0; + + if (!tsS3Enabled) { + (void)fprintf(stderr, "tcs not configured.\n"); + TAOS_RETURN(code); + } + + code = tcsInit(); + if (code != 0) { + (void)fprintf(stderr, "failed to initialize tcs.\n"); + TAOS_RETURN(code); + } + + code = tcs.CheckCfg(); + if (code != 0) { + (void)fprintf(stderr, "failed to check tcs.\n"); + TAOS_RETURN(code); + } + + tcsUninit(); + + return code; +} + +int32_t tcsPutObjectFromFileOffset(const char* file, const char* object_name, int64_t offset, int64_t size) { + return tcs.PutObjectFromFileOffset(file, object_name, offset, size); +} + +int32_t tcsGetObjectBlock(const char* object_name, int64_t offset, int64_t size, bool check, uint8_t** ppBlock) { + return tcs.GetObjectBlock(object_name, offset, size, check, ppBlock); +} + +void tcsDeleteObjectsByPrefix(const char* prefix) { return tcs.DeleteObjectsByPrefix(prefix); } diff --git a/source/libs/tcs/src/tcsStream.c b/source/libs/tcs/src/tcsStream.c new file mode 100644 index 0000000000..f73bb028ba --- /dev/null +++ b/source/libs/tcs/src/tcsStream.c @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "tcs.h" +#include "tcsInt.h" + +STcs tcs; + +int32_t tcsPutObjectFromFile2(const char* file, const char* object, int8_t withcp) { + return tcs.PutObjectFromFile2(file, object, withcp); +} + +int32_t tcsGetObjectsByPrefix(const char* prefix, const char* path) { return tcs.GetObjectsByPrefix(prefix, path); } + +int32_t tcsDeleteObjects(const char* object_name[], int nobject) { return tcs.DeleteObjects(object_name, nobject); } + +int32_t tcsGetObjectToFile(const char* object_name, const char* fileName) { + return tcs.GetObjectToFile(object_name, fileName); +} diff --git a/source/libs/tcs/test/CMakeLists.txt b/source/libs/tcs/test/CMakeLists.txt new file mode 100644 index 0000000000..909128db37 --- /dev/null +++ b/source/libs/tcs/test/CMakeLists.txt @@ -0,0 +1,20 @@ +if(TD_LINUX) + aux_source_directory(. TCS_TEST_SRC) + + add_executable(tcsTest ${TCS_TEST_SRC}) + target_include_directories(tcsTest + PUBLIC + "${TD_SOURCE_DIR}/include/libs/tcs" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) + + target_link_libraries(tcsTest + tcs + gtest_main + ) + enable_testing() + add_test( + NAME tcs_test + COMMAND tcsTest + ) +endif() diff --git a/source/libs/tcs/test/tcsTest.cpp b/source/libs/tcs/test/tcsTest.cpp new file mode 100644 index 0000000000..4b5afc5b85 --- /dev/null +++ b/source/libs/tcs/test/tcsTest.cpp @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include <gtest/gtest.h> + +#include <fstream> +#include <iostream> +#include <queue> + +#include "tcs.h" +#include "tcsInt.h" + +int32_t tcsInitEnv(int8_t isBlob) { + int32_t code = 0; + + extern char tsS3Hostname[][TSDB_FQDN_LEN]; + extern char tsS3AccessKeyId[][TSDB_FQDN_LEN]; + extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN]; + extern char tsS3BucketName[TSDB_FQDN_LEN]; + + /* TCS parameter format + tsS3Hostname[0] = "/.blob.core.windows.net"; + tsS3AccessKeyId[0] = ""; + tsS3AccessKeySecret[0] = ""; + tsS3BucketName = ""; + */ + + tsS3Ablob = isBlob; + if (isBlob) { + const char *hostname = "/.blob.core.windows.net"; + const char *accessKeyId = ""; + const char *accessKeySecret = ""; + const char *bucketName = ""; + + if (hostname[0] != '<') { + tstrncpy(&tsS3Hostname[0][0], hostname, TSDB_FQDN_LEN); + tstrncpy(&tsS3AccessKeyId[0][0], accessKeyId, TSDB_FQDN_LEN); + tstrncpy(&tsS3AccessKeySecret[0][0], accessKeySecret, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, bucketName, TSDB_FQDN_LEN); + } else { + const char *accountId = getenv("ablob_account_id"); + if (!accountId) { + return -1; + } + + const char *accountSecret = getenv("ablob_account_secret"); + if (!accountSecret) { + return -1; + } + + const char *containerName = getenv("ablob_container"); + if (!containerName) { + return -1; + } + + TAOS_STRCPY(&tsS3Hostname[0][0], accountId); + TAOS_STRCAT(&tsS3Hostname[0][0], ".blob.core.windows.net"); + TAOS_STRCPY(&tsS3AccessKeyId[0][0], accountId); + TAOS_STRCPY(&tsS3AccessKeySecret[0][0], accountSecret); + TAOS_STRCPY(tsS3BucketName, containerName); + } + } else { + /* + const char *hostname = "endpoint/.blob.core.windows.net"; + const char *accessKeyId = ""; + const char *accessKeySecret = ""; + const char *bucketName = ""; + */ + + // const char *hostname = "http://192.168.1.52:9000"; + // const char *accessKeyId = "zOgllR6bSnw2Ah3mCNel"; + // const char *accessKeySecret = "cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX"; + // const char *bucketName = "test-bucket"; + const char *hostname = "192.168.1.52:9000"; + const char *accessKeyId = "fGPPyYjzytw05nw44ViA"; + const char *accessKeySecret = "vK1VcwxgSOykicx6hk8fL1x15uEtyDSFU3w4hTaZ"; + + const char *bucketName = "ci-bucket19"; + + tstrncpy(&tsS3Hostname[0][0], hostname, TSDB_FQDN_LEN); + tstrncpy(&tsS3AccessKeyId[0][0], accessKeyId, TSDB_FQDN_LEN); + tstrncpy(&tsS3AccessKeySecret[0][0], accessKeySecret, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, bucketName, TSDB_FQDN_LEN); + + // setup s3 env + extern int8_t tsS3EpNum; + extern int8_t tsS3Https[TSDB_MAX_EP_NUM]; + + tsS3EpNum = 1; + tsS3Https[0] = false; + } + + tstrncpy(tsTempDir, "/tmp/", PATH_MAX); + + tsS3Enabled = true; + + return code; +} + +// TEST(TcsTest, DISABLED_InterfaceTest) { +TEST(TcsTest, InterfaceTest) { + int code = 0; + bool check = false; + bool withcp = false; + + code = tcsInitEnv(true); + if (code) { + std::cout << "ablob env init failed with: " << code << std::endl; + return; + } + + GTEST_ASSERT_EQ(code, 0); + GTEST_ASSERT_EQ(tsS3Enabled, 1); + GTEST_ASSERT_EQ(tsS3Ablob, 1); + + code = tcsInit(); + GTEST_ASSERT_EQ(code, 0); + + code = tcsCheckCfg(); + GTEST_ASSERT_EQ(code, 0); + + const int size = 4096; + char data[size] = {0}; + for (int i = 0; i < size / 2; ++i) { + data[i * 2 + 1] = 1; + } + + const char object_name[] = "tcsut.bin"; + char path[PATH_MAX] = {0}; + char path_download[PATH_MAX] = {0}; + int ds_len = strlen(TD_DIRSEP); + int tmp_len = strlen(tsTempDir); + + (void)snprintf(path, PATH_MAX, "%s", tsTempDir); + if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) { + 
(void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP); + (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", object_name); + } else { + (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", object_name); + } + + tstrncpy(path_download, path, strlen(path) + 1); + tstrncpy(path_download + strlen(path), ".download", strlen(".download") + 1); + + TdFilePtr fp = taosOpenFile(path, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_WRITE_THROUGH); + GTEST_ASSERT_NE(fp, nullptr); + + int n = taosWriteFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + code = tcsPutObjectFromFileOffset(path, object_name, 0, size); + GTEST_ASSERT_EQ(code, 0); + + uint8_t *pBlock = NULL; + code = tcsGetObjectBlock(object_name, 0, size, check, &pBlock); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(pBlock[i * 2], 0); + GTEST_ASSERT_EQ(pBlock[i * 2 + 1], 1); + } + + taosMemoryFree(pBlock); + + code = tcsGetObjectToFile(object_name, path_download); + GTEST_ASSERT_EQ(code, 0); + + { + TdFilePtr fp = taosOpenFile(path, TD_FILE_READ); + GTEST_ASSERT_NE(fp, nullptr); + + (void)memset(data, 0, size); + + int64_t n = taosReadFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(data[i * 2], 0); + GTEST_ASSERT_EQ(data[i * 2 + 1], 1); + } + } + + tcsDeleteObjectsByPrefix(object_name); + // list object to check + + code = tcsPutObjectFromFile2(path, object_name, withcp); + GTEST_ASSERT_EQ(code, 0); + + code = tcsGetObjectsByPrefix(object_name, tsTempDir); + GTEST_ASSERT_EQ(code, 0); + + { + TdFilePtr fp = taosOpenFile(path, TD_FILE_READ); + GTEST_ASSERT_NE(fp, nullptr); + + (void)memset(data, 0, size); + + int64_t n = taosReadFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(data[i * 2], 0); + GTEST_ASSERT_EQ(data[i * 2 + 1], 1); + } + } + + const char *object_name_arr[] = {object_name}; + code = tcsDeleteObjects(object_name_arr, 1); + GTEST_ASSERT_EQ(code, 0); + + tcsUninit(); +} + +// TEST(TcsTest, DISABLED_InterfaceNonBlobTest) { +TEST(TcsTest, InterfaceNonBlobTest) { + int code = 0; + bool check = false; + bool withcp = false; + + code = tcsInitEnv(false); + GTEST_ASSERT_EQ(code, 0); + GTEST_ASSERT_EQ(tsS3Enabled, 1); + GTEST_ASSERT_EQ(tsS3Ablob, 0); + + code = tcsInit(); + GTEST_ASSERT_EQ(code, 0); + + code = tcsCheckCfg(); + GTEST_ASSERT_EQ(code, 0); + + const int size = 4096; + char data[size] = {0}; + for (int i = 0; i < size / 2; ++i) { + data[i * 2 + 1] = 1; + } + + const char object_name[] = "tcsut.bin"; + char path[PATH_MAX] = {0}; + char path_download[PATH_MAX] = {0}; + int ds_len = strlen(TD_DIRSEP); + int tmp_len = strlen(tsTempDir); + + (void)snprintf(path, PATH_MAX, "%s", tsTempDir); + if (strncmp(tsTempDir + tmp_len - ds_len, TD_DIRSEP, ds_len) != 0) { + (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", TD_DIRSEP); + (void)snprintf(path + tmp_len + ds_len, PATH_MAX - tmp_len - ds_len, "%s", object_name); + } else { + (void)snprintf(path + tmp_len, PATH_MAX - tmp_len, "%s", object_name); + } + + tstrncpy(path_download, path, strlen(path) + 1); + tstrncpy(path_download + strlen(path), ".download", strlen(".download") + 1); + + TdFilePtr fp = taosOpenFile(path, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_WRITE_THROUGH); + GTEST_ASSERT_NE(fp, nullptr); 
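// The rest of this test is a straight round-trip check: the alternating
// 0/1 byte pattern prepared above is written to a local file, pushed through
// the tcs API, read back, and compared byte-for-byte. A compact sketch of
// the verifier the test repeats inline (verifyAlternatingPattern is an
// illustrative helper, not part of the patch):
static int verifyAlternatingPattern(const unsigned char* buf, int size) {
  for (int i = 0; i + 1 < size; i += 2) {
    if (buf[i] != 0 || buf[i + 1] != 1) return -1;  // corrupted byte pair
  }
  return 0;  // pattern survived the upload/download round trip
}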
+ + int n = taosWriteFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + code = tcsPutObjectFromFileOffset(path, object_name, 0, size); + GTEST_ASSERT_EQ(code, 0); + + uint8_t *pBlock = NULL; + code = tcsGetObjectBlock(object_name, 0, size, check, &pBlock); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(pBlock[i * 2], 0); + GTEST_ASSERT_EQ(pBlock[i * 2 + 1], 1); + } + + taosMemoryFree(pBlock); + + code = tcsGetObjectToFile(object_name, path_download); + GTEST_ASSERT_EQ(code, 0); + + { + TdFilePtr fp = taosOpenFile(path, TD_FILE_READ); + GTEST_ASSERT_NE(fp, nullptr); + + (void)memset(data, 0, size); + + int64_t n = taosReadFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(data[i * 2], 0); + GTEST_ASSERT_EQ(data[i * 2 + 1], 1); + } + } + + tcsDeleteObjectsByPrefix(object_name); + // list object to check + + code = tcsPutObjectFromFile2(path, object_name, withcp); + GTEST_ASSERT_EQ(code, 0); + + code = tcsGetObjectsByPrefix(object_name, tsTempDir); + GTEST_ASSERT_EQ(code, 0); + + { + TdFilePtr fp = taosOpenFile(path, TD_FILE_READ); + GTEST_ASSERT_NE(fp, nullptr); + + (void)memset(data, 0, size); + + int64_t n = taosReadFile(fp, data, size); + GTEST_ASSERT_EQ(n, size); + + code = taosCloseFile(&fp); + GTEST_ASSERT_EQ(code, 0); + + for (int i = 0; i < size / 2; ++i) { + GTEST_ASSERT_EQ(data[i * 2], 0); + GTEST_ASSERT_EQ(data[i * 2 + 1], 1); + } + } + + const char *object_name_arr[] = {object_name}; + code = tcsDeleteObjects(object_name_arr, 1); + GTEST_ASSERT_EQ(code, 0); + + tcsUninit(); +} diff --git a/source/libs/tfs/CMakeLists.txt b/source/libs/tfs/CMakeLists.txt index ef1afa01a1..98572f94d8 100644 --- a/source/libs/tfs/CMakeLists.txt +++ b/source/libs/tfs/CMakeLists.txt @@ -9,5 +9,5 @@ target_include_directories( target_link_libraries(tfs os util common monitor) if(${BUILD_TEST}) - add_subdirectory(test) + add_subdirectory(test) endif(${BUILD_TEST}) \ No newline at end of file diff --git a/source/libs/transport/CMakeLists.txt b/source/libs/transport/CMakeLists.txt index a48926d2d4..6ad130017a 100644 --- a/source/libs/transport/CMakeLists.txt +++ b/source/libs/transport/CMakeLists.txt @@ -1,34 +1,30 @@ aux_source_directory(src TRANSPORT_SRC) add_library(transport STATIC ${TRANSPORT_SRC}) target_include_directories( - transport - PUBLIC "${TD_SOURCE_DIR}/include/libs/transport" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + transport + PUBLIC "${TD_SOURCE_DIR}/include/libs/transport" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( - transport - PUBLIC lz4_static - PUBLIC os - PUBLIC util - PUBLIC common - PUBLIC zlibstatic + transport + PUBLIC lz4_static + PUBLIC os + PUBLIC util + PUBLIC common + PUBLIC zlibstatic ) -if (${BUILD_WITH_UV_TRANS}) -if (${BUILD_WITH_UV}) - target_link_libraries( - transport - PUBLIC uv_a - ) - add_definitions(-DUSE_UV) -endif(${BUILD_WITH_UV}) -endif(${BUILD_WITH_UV_TRANS}) -if (${BUILD_TEST}) +if(${BUILD_WITH_UV_TRANS}) + if(${BUILD_WITH_UV}) + target_link_libraries( + transport + PUBLIC uv_a + ) + add_definitions(-DUSE_UV) + endif(${BUILD_WITH_UV}) +endif(${BUILD_WITH_UV_TRANS}) + +if(${BUILD_TEST}) add_subdirectory(test) endif(${BUILD_TEST}) - - - - - diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 18bedab5c7..c3e214b5e3 100644 --- a/source/libs/transport/src/transCli.c +++ 
b/source/libs/transport/src/transCli.c @@ -27,7 +27,7 @@ typedef struct { typedef struct SConnList { queue conns; int32_t size; - int32_t totaSize; + int32_t totalSize; } SConnList; typedef struct { @@ -703,8 +703,9 @@ void cliHandleResp(SCliConn* conn) { code = cliBuildRespFromCont(pReq, &resp, pHead); STraceId* trace = &resp.info.traceId; - tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, seq:%" PRId64 ", sid:%" PRId64 "", - CONN_GET_INST_LABEL(conn), conn, TMSG_INFO(resp.msgType), conn->dst, conn->src, pHead->msgLen, seq, qId); + tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, seq:%" PRId64 ", sid:%" PRId64 ", code:%s", + CONN_GET_INST_LABEL(conn), conn, TMSG_INFO(resp.msgType), conn->dst, conn->src, pHead->msgLen, seq, qId, + tstrerror(pHead->code)); code = cliNotifyCb(conn, pReq, &resp); if (code == TSDB_CODE_RPC_ASYNC_IN_PROCESS) { @@ -854,10 +855,9 @@ static int32_t cliGetConnFromPool(SCliThrd* pThrd, const char* key, SCliConn** p } if (QUEUE_IS_EMPTY(&plist->conns)) { - if (plist->size >= pInst->connLimitNum) { + if (plist->totalSize >= pInst->connLimitNum) { return TSDB_CODE_RPC_MAX_SESSIONS; } - plist->totaSize += 1; return TSDB_CODE_RPC_NETWORK_BUSY; } @@ -1045,7 +1045,7 @@ static int32_t cliCreateConn(SCliThrd* pThrd, SCliConn** pCliConn, char* ip, int conn->hostThrd = pThrd; conn->seq = 0; - conn->pQTable = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + conn->pQTable = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); if (conn->pQTable == NULL) { TAOS_CHECK_GOTO(terrno, NULL, _failed); } @@ -1248,7 +1248,7 @@ static void cliHandleException(SCliConn* conn) { cliDestroyAllQidFromThrd(conn); QUEUE_REMOVE(&conn->q); if (conn->list) { - conn->list->totaSize -= 1; + conn->list->totalSize -= 1; conn->list = NULL; } @@ -1547,10 +1547,15 @@ static int32_t cliDoConn(SCliThrd* pThrd, SCliConn* conn) { } transRefCliHandle(conn); + + conn->list = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr)); + if (conn->list != NULL) { + conn->list->totalSize += 1; + } + ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); if (ret != 0) { tError("failed connect to %s since %s", conn->dstAddr, uv_err_name(ret)); - TAOS_CHECK_GOTO(TSDB_CODE_THIRDPARTY_ERROR, &lino, _exception1); } @@ -2362,7 +2367,7 @@ static int32_t createThrdObj(void* trans, SCliThrd** ppThrd) { } } - pThrd->pool = createConnPool(4); + pThrd->pool = createConnPool(128); if (pThrd->pool == NULL) { code = terrno; TAOS_CHECK_GOTO(terrno, NULL, _end); @@ -2381,22 +2386,22 @@ static int32_t createThrdObj(void* trans, SCliThrd** ppThrd) { pThrd->destroyAhandleFp = pInst->destroyFp; - pThrd->fqdn2ipCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pThrd->fqdn2ipCache = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (pThrd->fqdn2ipCache == NULL) { TAOS_CHECK_GOTO(terrno, NULL, _end); } - pThrd->batchCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pThrd->batchCache = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (pThrd->batchCache == NULL) { TAOS_CHECK_GOTO(terrno, NULL, _end); } - pThrd->connHeapCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pThrd->connHeapCache = taosHashInit(1024, 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (pThrd->connHeapCache == NULL) { TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); } - pThrd->pIdConnTable = taosHashInit(512, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + pThrd->pIdConnTable = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); if (pThrd->connHeapCache == NULL) { TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, NULL, _end); } @@ -2956,12 +2961,13 @@ int32_t cliNotifyCb(SCliConn* pConn, SCliReq* pReq, STransMsg* pResp) { STrans* pInst = pThrd->pInst; if (pReq != NULL) { - if (cliMayRetry(pConn, pReq, pResp)) { - return TSDB_CODE_RPC_ASYNC_IN_PROCESS; + if (pResp->code != TSDB_CODE_SUCCESS) { + if (cliMayRetry(pConn, pReq, pResp)) { + return TSDB_CODE_RPC_ASYNC_IN_PROCESS; + } + cliMayResetRespCode(pReq, pResp); } - cliMayResetRespCode(pReq, pResp); - if (cliTryUpdateEpset(pReq, pResp)) { cliPerfLog_epset(pConn, pReq); } @@ -3737,7 +3743,7 @@ static FORCE_INLINE int8_t shouldSWitchToOtherConn(SCliConn* pConn, char* key) { tTrace("conn %p get list %p from pool for key:%s", pConn, pConn->list, key); } } - if (pConn->list && pConn->list->totaSize >= pInst->connLimitNum / 4) { + if (pConn->list && pConn->list->totalSize >= pInst->connLimitNum / 4) { tWarn("%s conn %p try to remove timeout msg since too many conn created", transLabel(pInst), pConn); if (cliConnRemoveTimeoutMsg(pConn)) { diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index a7c24f3fae..5723f2ff23 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -239,7 +239,7 @@ SIpWhiteListTab* uvWhiteListCreate() { return NULL; } - pWhiteList->pList = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), 0, HASH_NO_LOCK); + pWhiteList->pList = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), 0, HASH_NO_LOCK); if (pWhiteList->pList == NULL) { taosMemoryFree(pWhiteList); return NULL; @@ -1333,7 +1333,7 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { QUEUE_INIT(&exh->q); tTrace("%s handle %p, conn %p created, refId:%" PRId64, transLabel(pInst), exh, pConn, pConn->refId); - pConn->pQTable = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + pConn->pQTable = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); if (pConn->pQTable == NULL) { TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _end); } diff --git a/source/util/CMakeLists.txt b/source/util/CMakeLists.txt index 4972e9f50b..063988ea00 100644 --- a/source/util/CMakeLists.txt +++ b/source/util/CMakeLists.txt @@ -1,19 +1,20 @@ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/src/version.c.in" "${CMAKE_CURRENT_SOURCE_DIR}/src/version.c") aux_source_directory(src UTIL_SRC) add_library(util STATIC ${UTIL_SRC}) -if (DEFINED GRANT_CFG_INCLUDE_DIR) - add_definitions(-DGRANTS_CFG) + +if(DEFINED GRANT_CFG_INCLUDE_DIR) + add_definitions(-DGRANTS_CFG) endif() -IF (${ASSERT_NOT_CORE}) +if(${ASSERT_NOT_CORE}) ADD_DEFINITIONS(-DASSERT_NOT_CORE) MESSAGE(STATUS "disable assert core") -ELSE () +else() MESSAGE(STATUS "enable assert core") -ENDIF (${ASSERT_NOT_CORE}) +endif(${ASSERT_NOT_CORE}) if(${BUILD_WITH_ANALYSIS}) - add_definitions(-DUSE_ANAL) + add_definitions(-DUSE_ANAL) endif() target_include_directories( @@ -34,7 +35,7 @@ target_link_directories( PUBLIC "${TD_SOURCE_DIR}/contrib/pcre2" ) -if (TD_LINUX) +if(TD_LINUX) target_link_libraries( util 
PUBLIC os common @@ -43,10 +44,10 @@ if (TD_LINUX) ) else() target_link_libraries( - util - PUBLIC os common - PUBLIC lz4_static pcre2-8 - PUBLIC api cjson geos_c TSZ + util + PUBLIC os common + PUBLIC lz4_static pcre2-8 + PUBLIC api cjson geos_c TSZ ) endif() diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 45c8a2f6c2..3ca148a625 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -26,7 +26,7 @@ #define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024) #define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128) -#define LOG_FILE_DAY_LEN 64 +#define LOG_FILE_DAY_LEN 64 #define LOG_DEFAULT_BUF_SIZE (20 * 1024 * 1024) // 20MB #define LOG_SLOW_BUF_SIZE (10 * 1024 * 1024) // 10MB @@ -113,6 +113,7 @@ int32_t rpcDebugFlag = 131; int32_t qDebugFlag = 131; int32_t stDebugFlag = 131; int32_t wDebugFlag = 131; +int32_t azDebugFlag = 131; int32_t sDebugFlag = 131; int32_t tsdbDebugFlag = 131; int32_t tdbDebugFlag = 131; @@ -151,7 +152,7 @@ static int32_t taosStartLog() { return 0; } -static void getDay(char* buf, int32_t bufSize){ +static void getDay(char *buf, int32_t bufSize) { time_t t = taosTime(NULL); struct tm tmInfo; if (taosLocalTime(&t, &tmInfo, buf, bufSize) != NULL) { @@ -172,7 +173,7 @@ static int64_t getTimestampToday() { return (int64_t)taosMktime(&tm); } -static void getFullPathName(char* fullName, const char* logName){ +static void getFullPathName(char *fullName, const char *logName) { if (strlen(tsLogDir) != 0) { char lastC = tsLogDir[strlen(tsLogDir) - 1]; if (lastC == '\\' || lastC == '/') { @@ -225,7 +226,7 @@ int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc) { } TAOS_CHECK_RETURN(taosInitNormalLog(logName, maxFiles)); - if (tsc){ + if (tsc) { TAOS_CHECK_RETURN(taosInitSlowLog()); } TAOS_CHECK_RETURN(taosStartLog()); @@ -397,7 +398,7 @@ static int32_t taosOpenNewLogFile() { OldFileKeeper *oldFileKeeper = taosOpenNewFile(); if (!oldFileKeeper) { - TAOS_UNUSED(taosThreadMutexUnlock(&tsLogObj.logMutex)); + TAOS_UNUSED(taosThreadMutexUnlock(&tsLogObj.logMutex)); return terrno; } if (taosThreadCreate(&thread, &attr, taosThreadToCloseOldFile, oldFileKeeper) != 0) { @@ -433,7 +434,7 @@ static void taosOpenNewSlowLogFile() { char day[TD_TIME_STR_LEN] = {0}; getDay(day, sizeof(day)); TdFilePtr pFile = NULL; - char name[PATH_MAX + TD_TIME_STR_LEN] = {0}; + char name[PATH_MAX + TD_TIME_STR_LEN] = {0}; (void)snprintf(name, PATH_MAX + TD_TIME_STR_LEN, "%s.%s", tsLogObj.slowLogName, day); pFile = taosOpenFile(name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (pFile == NULL) { @@ -455,7 +456,7 @@ void taosResetLog() { if (tsLogObj.logHandle) { int32_t code = taosOpenNewLogFile(); - if(code != 0){ + if (code != 0) { uError("failed to open new log file, reason:%s", tstrerror(code)); } uInfo("=================================="); @@ -508,12 +509,12 @@ static void decideLogFileName(const char *fn, int32_t maxFileNum) { } } -static void decideLogFileNameFlag(){ +static void decideLogFileNameFlag() { char name[PATH_MAX + 50] = "\0"; int32_t logstat0_mtime = 0; int32_t logstat1_mtime = 0; - bool log0Exist = false; - bool log1Exist = false; + bool log0Exist = false; + bool log1Exist = false; if (strlen(tsLogObj.logName) < PATH_MAX + 50 - 2) { strcpy(name, tsLogObj.logName); @@ -535,7 +536,7 @@ static void decideLogFileNameFlag(){ } } -static void processLogFileName(const char* logName , int32_t maxFileNum){ +static void processLogFileName(const char *logName, int32_t maxFileNum) { char fullName[PATH_MAX] = {0}; getFullPathName(fullName, logName); 
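  // resolve tsLogDir + logName into an absolute file name, then choose the
  // .0/.1 rotation slot and, from file existence and mtimes, decide which
  // slot is currently active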
decideLogFileName(fullName, maxFileNum);
@@ -872,7 +873,7 @@ static int32_t taosGetLogRemainSize(SLogBuff *pLogBuf, int32_t start, int32_t en
   return rSize >= 0 ? rSize : LOG_BUF_SIZE(pLogBuf) + rSize;
 }
-static void taosWriteSlowLog(SLogBuff *pLogBuf){
+static void taosWriteSlowLog(SLogBuff *pLogBuf) {
   int32_t lock = atomic_val_compare_exchange_32(&pLogBuf->lock, 0, 1);
   if (lock == 1) return;
   taosWriteLog(pLogBuf);
diff --git a/tests/army/storage/blob/ablob.py b/tests/army/storage/blob/ablob.py
new file mode 100644
index 0000000000..fae492a3df
--- /dev/null
+++ b/tests/army/storage/blob/ablob.py
@@ -0,0 +1,344 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################

+# -*- coding: utf-8 -*-

+import sys
+import time
+import random

+import taos
+import frame
+import frame.etool
+import frame.eos
+import frame.eutil

+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+from frame.caseBase import *
+from frame.srvCtl import *
+from frame import *
+from frame.eos import *


+class TDTestCase(TBase):
+    index = eutil.cpuRand(20) + 1
+    bucketName = f"ci-bucket{index}"
+    updatecfgDict = {
+        "supportVnodes":"1000",
+        's3EndPoint': 'https://.blob.core.windows.net',
+        's3AccessKey': ':',
+        's3BucketName': '',
+        's3PageCacheSize': '10240',
+        "s3UploadDelaySec": "10",
+        's3MigrateIntervalSec': '600',
+        's3MigrateEnabled': '1'
+    }

+    tdLog.info(f"assigned bucketName is {bucketName}\n")
+    maxFileSize = (128 + 10) * 1024 * 1024  # 128M plus a 10M buffer

+    def insertData(self):
+        tdLog.info(f"insert data.")
+        # taosBenchmark run
+        json = etool.curFile(__file__, "s3Basic.json")
+        etool.benchMark(json=json)

+        tdSql.execute(f"use {self.db}")
+        # these values come from s3Basic.json
+        self.childtable_count = 6
+        self.insert_rows = 2000000
+        self.timestamp_step = 100

+    def createStream(self, sname):
+        sql = f"create stream {sname} fill_history 1 into stm1 as select count(*) from {self.db}.{self.stb} interval(10s);"
+        tdSql.execute(sql)

+    def migrateDbS3(self):
+        sql = f"s3migrate database {self.db}"
+        tdSql.execute(sql, show=True)

+    def checkDataFile(self, lines, maxFileSize):
+        # ls -l
+        # -rwxrwxrwx 1 root root 41652224 Apr 17 14:47 vnode2/tsdb/v2f1974ver47.3.data
+        overCnt = 0
+        for line in lines:
+            cols = line.split()
+            fileSize = int(cols[4])
+            fileName = cols[8]
+            #print(f" filesize={fileSize} fileName={fileName}  line={line}")
+            if fileSize > maxFileSize:
+                tdLog.info(f"error, {fileSize} exceeds max size({maxFileSize}) {fileName}\n")
+                overCnt += 1
+            else:
+                tdLog.info(f"{fileName}({fileSize}) check size passed.")

+        return overCnt

+    def checkUploadToS3(self):
+        rootPath = sc.clusterRootPath()
+        cmd = f"ls -l {rootPath}/dnode*/data/vnode/vnode*/tsdb/*.data"
+        tdLog.info(cmd)
+        loop = 0
+        rets = []
+        overCnt = 0
+        while loop < 200:
+            time.sleep(3)

+            # check upload to s3
+            rets = eos.runRetList(cmd)
+            cnt = len(rets)
+            if cnt == 0:
+                overCnt = 0
+                tdLog.info("All data files have been uploaded to the server.")
+                break
+            overCnt = self.checkDataFile(rets, self.maxFileSize)
+            if overCnt == 0:
+                tdLog.info(f"All data files({len(rets)}) size below {self.maxFileSize},
check upload to s3 ok.")
+                break

+            tdLog.info(f"loop={loop}: {overCnt} data files not uploaded yet, wait 3s and retry ...")
+            if loop == 3:
+                sc.dnodeStop(1)
+                time.sleep(2)
+                sc.dnodeStart(1)
+            loop += 1
+            # migrate
+            self.migrateDbS3()

+        # fail if any file is still over the size limit
+        if overCnt > 0:
+            tdLog.exit(f"s3 still has {overCnt} files over the size limit.")


+    def doAction(self):
+        tdLog.info(f"do action.")

+        self.flushDb(show=True)
+        #self.compactDb(show=True)

+        # trigger migration to s3
+        self.migrateDbS3()

+        # check upload to s3
+        self.checkUploadToS3()

+    def checkStreamCorrect(self):
+        sql = f"select count(*) from {self.db}.stm1"
+        count = 0
+        for i in range(120):
+            tdSql.query(sql)
+            count = tdSql.getData(0, 0)
+            if count == 100000 or count == 100001:
+                return True
+            time.sleep(1)

+        tdLog.exit(f"stream count is not as expected. expect=100000 or 100001 real={count} sql={sql}")


+    def checkCreateDb(self, keepLocal, chunkSize, compact):
+        # keywords
+        kw1 = kw2 = kw3 = ""
+        if keepLocal is not None:
+            kw1 = f"s3_keeplocal {keepLocal}"
+        if chunkSize is not None:
+            kw2 = f"s3_chunksize {chunkSize}"
+        if compact is not None:
+            kw3 = f"s3_compact {compact}"

+        sql = f"create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}"
+        tdSql.execute(sql, show=True)
+        #sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';"
+        sql = f"select * from information_schema.ins_databases where name='db1';"
+        tdSql.query(sql)
+        # columns 29/30/31 -> s3_chunksize, s3_keeplocal, s3_compact
+        if chunkSize is not None:
+            tdSql.checkData(0, 29, chunkSize)
+        if keepLocal is not None:
+            keepLocalm = keepLocal * 24 * 60
+            tdSql.checkData(0, 30, f"{keepLocalm}m")
+        if compact is not None:
+            tdSql.checkData(0, 31, compact)
+        sql = "drop database db1"
+        tdSql.execute(sql)

+    def checkExcept(self):
+        # statements that must fail
+        sqls = [
+            f"create database db2 s3_keeplocal -1",
+            f"create database db2 s3_keeplocal 0",
+            f"create database db2 s3_keeplocal 365001",
+            f"create database db2 s3_chunksize -1",
+            f"create database db2 s3_chunksize 0",
+            f"create database db2 s3_chunksize 900000000",
+            f"create database db2 s3_compact -1",
+            f"create database db2 s3_compact 100",
+            f"create database db2 duration 1d s3_keeplocal 1d"
+        ]
+        tdSql.errors(sqls)


+    def checkBasic(self):
+        # create db
+        keeps = [1, 256, 1024, 365000, None]
+        chunks = [131072, 600000, 820000, 1048576, None]
+        comps = [0, 1, None]

+        for keep in keeps:
+            for chunk in chunks:
+                for comp in comps:
+                    self.checkCreateDb(keep, chunk, comp)


+        # --checks3
+        idx = 1
+        taosd = sc.taosdFile(idx)
+        cfg = sc.dnodeCfgPath(idx)
+        cmd = f"{taosd} -c {cfg} --checks3"

+        eos.exe(cmd)
+        #output, error = eos.run(cmd)
+        #print(lines)

+        '''
+        tips = [
+            "put object s3test.txt: success",
+            "listing bucket ci-bucket: success",
+            "get object s3test.txt: success",
+            "delete object s3test.txt: success"
+        ]
+        pos = 0
+        for tip in tips:
+            pos = output.find(tip, pos)
+            #if pos == -1:
+            #    tdLog.exit(f"checks3 failed not found {tip}.
cmd={cmd} output={output}")
+        '''

+        # except
+        self.checkExcept()

+    # create and drop a throwaway database a random number of times
+    def preDb(self, vgroups):
+        cnt = int(time.time()) % 2 + 1
+        for i in range(cnt):
+            vg = eutil.cpuRand(9) + 1
+            sql = f"create database predb vgroups {vg}"
+            tdSql.execute(sql, show=True)
+        sql = "drop database predb"
+        tdSql.execute(sql, show=True)

+    # history
+    def insertHistory(self):
+        tdLog.info(f"insert history data.")
+        # taosBenchmark run
+        json = etool.curFile(__file__, "s3Basic1.json")
+        etool.benchMark(json=json)

+        # these values come from s3Basic1.json
+        self.insert_rows += self.insert_rows/4
+        self.timestamp_step = 50

+    # delete
+    def checkDelete(self):
+        # delete 100 rows (every second 500ms-aligned timestamp)
+        start = 1600000000000
+        drows = 200
+        for i in range(1, drows, 2):
+            sql = f"from {self.db}.{self.stb} where ts = {start + i*500}"
+            tdSql.execute("delete " + sql, show=True)
+            tdSql.query("select * " + sql)
+            tdSql.checkRows(0)

+        # flush and compact, then account for the deleted rows
+        self.flushDb()
+        self.compactDb()
+        self.insert_rows -= drows/2
+        sql = f"select count(*) from {self.db}.{self.stb}"
+        tdSql.checkAgg(sql, self.insert_rows * self.childtable_count)

+        # delete 100000 rows starting from row 100000
+        drows = 100000
+        sdel = start + 100000 * self.timestamp_step
+        edel = start + 100000 * self.timestamp_step + drows * self.timestamp_step
+        sql = f"from {self.db}.{self.stb} where ts >= {sdel} and ts < {edel}"
+        tdSql.execute("delete " + sql, show=True)
+        tdSql.query("select * " + sql)
+        tdSql.checkRows(0)

+        self.insert_rows -= drows
+        sql = f"select count(*) from {self.db}.{self.stb}"
+        tdSql.checkAgg(sql, self.insert_rows * self.childtable_count)


+    # run
+    def run(self):
+        tdLog.debug(f"start to execute {__file__}")
+        self.sname = "stream1"
+        if eos.isArm64Cpu():
+            tdLog.success(f"{__file__} skipped on arm64")
+        else:

+            self.preDb(10)

+            # insert data
+            self.insertData()

+            # create stream
+            self.createStream(self.sname)

+            # check insert data correct
+            #self.checkInsertCorrect()

+            # save
+            self.snapshotAgg()

+            # do action
+            self.doAction()

+            # check saved agg result correct
+            self.checkAggCorrect()

+            # check insert correct again
+            self.checkInsertCorrect()


+            # check stream correct and drop stream
+            #self.checkStreamCorrect()

+            # drop stream
+            self.dropStream(self.sname)

+            # insert out-of-order history data
+            self.insertHistory()

+            # checkBasic
+            self.checkBasic()

+            #self.checkInsertCorrect()
+            self.snapshotAgg()
+            self.doAction()
+            self.checkAggCorrect()
+            self.checkInsertCorrect(difCnt=self.childtable_count*1499999)
+            self.checkDelete()
+            self.doAction()

+            # drop database and free s3 files
+            self.dropDb()


+        tdLog.success(f"{__file__} successfully executed")



+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/army/storage/blob/s3Basic.json b/tests/army/storage/blob/s3Basic.json
new file mode 100644
index 0000000000..ee341b2096
--- /dev/null
+++ b/tests/army/storage/blob/s3Basic.json
@@ -0,0 +1,66 @@
+{
+    "filetype": "insert",
+    "cfgdir": "/etc/taos",
+    "host": "127.0.0.1",
+    "port": 6030,
+    "user": "root",
+    "password": "taosdata",
+    "connection_pool_size": 8,
+    "num_of_records_per_req": 4000,
+    "prepared_rand": 500,
+    "thread_count": 4,
+    "create_table_thread_count": 1,
+    "confirm_parameter_prompt": "no",
+    "databases": [
+        {
+            "dbinfo": {
+                "name": "db",
+                "drop": "yes",
+                "vgroups": 2,
+                "replica": 1,
+                "duration":"10d",
+                "s3_keeplocal":"30d",
+                "s3_chunksize":"131072",
+                "tsdb_pagesize":"1",
+                "s3_compact":"1",
+                "wal_retention_size":"1",
+                "wal_retention_period":"1",
"flush_each_batch":"no", + "keep": "3650d" + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 6, + "insert_rows": 2000000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 100, + "start_timestamp": 1600000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti"}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" ,"max": 1,"min": 1}, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { "type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { "type": "binary", "name": "bin", "len": 50}, + { "type": "nchar", "name": "nch", "len": 100} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} diff --git a/tests/army/storage/blob/s3Basic1.json b/tests/army/storage/blob/s3Basic1.json new file mode 100644 index 0000000000..02be308443 --- /dev/null +++ b/tests/army/storage/blob/s3Basic1.json @@ -0,0 +1,66 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 5000, + "prepared_rand": 500, + "thread_count": 4, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "no", + "vgroups": 2, + "replica": 1, + "duration":"10d", + "s3_keeplocal":"30d", + "s3_chunksize":"131072", + "tsdb_pagesize":"1", + "s3_compact":"1", + "wal_retention_size":"1", + "wal_retention_period":"1", + "flush_each_batch":"no", + "keep": "3650d" + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "yes", + "childtable_count": 6, + "insert_rows": 1000000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 50, + "start_timestamp": 1600000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti"}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" ,"max": 1,"min": 1}, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { "type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { "type": "binary", "name": "bin", "len": 50}, + { "type": "nchar", "name": "nch", "len": 100} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 2d4e701012..9244d37456 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -129,7 +129,7 @@ def scan_files_path(source_file_path): def input_files(change_files): # scan_dir_list = ["source", "include", "docs/examples", "tests/script/api", "src/plugins"] scan_dir_list = ["source", "include", "docs/examples", "src/plugins"] - scan_skip_file_list = 
[f"{TD_project_path}/TDinternal/community/tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", "/test/", "contrib", "debug", "deps", f"{TD_project_path}/TDinternal/community/source/libs/parser/src/sql.c", f"{TD_project_path}/TDinternal/community/source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"] + scan_skip_file_list = ["tools/taosws-rs/target/release/build/openssl-sys-7811e597b848e397/out/openssl-build/install/include/openssl", "/test/", "contrib", "debug", "deps", "source/libs/parser/src/sql.c", "source/libs/azure", "source/client/jni/windows/win32/bridge/AccessBridgeCalls.c"] with open(change_files, 'r') as file: for line in file: file_name = line.strip() @@ -141,7 +141,7 @@ def input_files(change_files): tdc_file_path = os.path.join(TD_project_path, "community/") file_name = os.path.join(tdc_file_path, file_name) all_file_path.append(file_name) - # print(f"all_file_path:{all_file_path}") + print(f"all_file_path:{all_file_path}") logger.info("Found %s files" % len(all_file_path)) file_res_path = "" diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 6e51d9c501..d5480871c3 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -223,6 +223,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-32548.py ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False @@ -1254,7 +1255,7 @@ ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim -,,y,script,./test.sh -f tsim/query/sort-pre-cols.sim +,,y,script,./test.sh -f tsim/query/sort-pre-cols.sim ,,y,script,./test.sh -f tsim/query/groupby.sim ,,y,script,./test.sh -f tsim/query/groupby_distinct.sim ,,y,script,./test.sh -f tsim/query/event.sim @@ -1262,7 +1263,7 @@ ,,y,script,./test.sh -f tsim/query/emptyTsRange.sim ,,y,script,./test.sh -f tsim/query/emptyTsRange_scl.sim ,,y,script,./test.sh -f tsim/query/partitionby.sim -,,y,script,./test.sh -f tsim/query/tableCount.sim +,,y,script,./test.sh -f tsim/query/tableCount.sim ,,y,script,./test.sh -f tsim/query/show_db_table_kind.sim ,,y,script,./test.sh -f tsim/query/bi_star_table.sim ,,y,script,./test.sh -f tsim/query/bi_tag_scan.sim @@ -1560,8 +1561,8 @@ ,,n,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim ,,y,script,./test.sh -f tsim/tagindex/indexOverflow.sim ,,y,script,./test.sh -f tsim/view/view.sim -,,y,script,./test.sh -f tsim/query/cache_last.sim -,,y,script,./test.sh -f tsim/query/const.sim +,,y,script,./test.sh -f tsim/query/cache_last.sim +,,y,script,./test.sh -f tsim/query/const.sim ,,y,script,./test.sh -f tsim/query/nestedJoinView.sim @@ -1594,4 +1595,3 @@ ,,n,docs-examples-test,bash rust.sh ,,n,docs-examples-test,bash go.sh ,,n,docs-examples-test,bash test_R.sh - diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py index 588609e524..1964cea51f 100644 --- a/tests/system-test/2-query/db.py +++ b/tests/system-test/2-query/db.py @@ -57,7 +57,7 @@ class TDTestCase: tdSql.checkData(0, 2, 0) 
tdSql.query("show dnode 1 variables like '%debugFlag'") - tdSql.checkRows(23) + tdSql.checkRows(24) tdSql.query("show dnode 1 variables like '____debugFlag'") tdSql.checkRows(2) diff --git a/tests/system-test/2-query/fill_with_group.py b/tests/system-test/2-query/fill_with_group.py index 2139bbbfb3..3b98ec30ce 100644 --- a/tests/system-test/2-query/fill_with_group.py +++ b/tests/system-test/2-query/fill_with_group.py @@ -237,11 +237,123 @@ class TDTestCase: tdSql.checkData(12, 1, None) tdSql.checkData(13, 1, None) + def test_fill_with_complex_expr(self): + sql = "SELECT _wstart, _wstart + 1d, count(*), now, 1+1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' INTERVAL(5m) FILL(NULL)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + for i in range(0, 12, 2): + tdSql.checkData(i, 2, 10) + for i in range(1, 12, 2): + tdSql.checkData(i, 2, None) + for i in range(0, 12): + firstCol = tdSql.getData(i, 0) + secondCol = tdSql.getData(i, 1) + tdLog.debug(f"firstCol: {firstCol}, secondCol: {secondCol}, secondCol - firstCol: {secondCol - firstCol}") + if secondCol - firstCol != timedelta(days=1): + tdLog.exit(f"query error: secondCol - firstCol: {secondCol - firstCol}") + nowCol = tdSql.getData(i, 3) + if nowCol is None: + tdLog.exit(f"query error: nowCol: {nowCol}") + constCol = tdSql.getData(i, 4) + if constCol != 2: + tdLog.exit(f"query error: constCol: {constCol}") + + sql = "SELECT _wstart + 1d, count(*), last(ts) + 1a, timediff(_wend, last(ts)) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' INTERVAL(5m) FILL(NULL)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + for i in range(0, 12, 2): + tdSql.checkData(i, 1, 10) + tdSql.checkData(i, 3, 300000) + for i in range(1, 12, 2): + tdSql.checkData(i, 1, None) + tdSql.checkData(i, 2, None) + tdSql.checkData(i, 3, None) + + sql = "SELECT count(*), tbname FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(NULL)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + + sql = "SELECT * from (SELECT count(*), timediff(_wend, last(ts)) + t1, tbname FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) LIMIT 1) order by tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(10) + j = 0 + for i in range(0, 10): + tdSql.checkData(i, 1, 300000 + j) + j = j + 1 + if j == 5: + j = 0 + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, tbname,t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) ORDER BY timediff(last(ts), _wstart)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(timediff(last(ts), _wstart)+ t1 >= 1) ORDER BY timediff(last(ts), _wstart)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(48) + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(timediff(last(ts), _wstart) + t1 >= 1) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + 
tdSql.checkRows(48) + + sql = "SELECT count(*) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(timediff(last(ts), _wstart) >= 0)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(60) + + sql = "SELECT count(*) + 1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(count(*) > 1)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(0) + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(value, 0, 0) HAVING(timediff(last(ts), _wstart) + t1 >= 1) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(48) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(value, 0, 0) HAVING(count(*) >= 0) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(value, 0, 0) HAVING(count(*) > 0) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(60) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(linear) HAVING(count(*) >= 0 and t1 <= 1) ORDER BY timediff(last(ts), _wstart), tbname, t1" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(44) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(prev) HAVING(count(*) >= 0 and t1 > 1) ORDER BY timediff(last(ts), _wstart), tbname, t1" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(72) + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(linear) ORDER BY tbname, _wstart;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + for i in range(11, 120, 12): + tdSql.checkData(i, 1, None) + for i in range(0, 120): + tdSql.checkData(i, 0, 2) + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(linear) HAVING(count(*) >= 0) ORDER BY tbname;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(110) + for i in range(0, 110, 11): + lastCol = 
tdSql.getData(i, 3) + tdLog.debug(f"lastCol: {lastCol}") + if lastCol[-1:] != str(i//11): + tdLog.exit(f"query error: lastCol: {lastCol}") + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1,t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY t1 INTERVAL(5m) FILL(linear) ORDER BY t1, _wstart;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(60) + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1,t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY t1 INTERVAL(5m) FILL(linear) HAVING(count(*) > 0) ORDER BY t1, _wstart;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(55) + + # TODO Fix Me! + sql = "explain SELECT count(*), timediff(_wend, last(ts)), timediff('2018-09-20 01:00:00', _wstart) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY concat(tbname, 'asd') INTERVAL(5m) having(concat(tbname, 'asd') like '%asd');" + tdSql.error(sql, -2147473664) # Error: Planner internal error + def run(self): self.prepareTestEnv() self.test_partition_by_with_interval_fill_prev_new_group_fill_error() self.test_fill_with_order_by() self.test_fill_with_order_by2() + self.test_fill_with_complex_expr() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/td-32548.py b/tests/system-test/2-query/td-32548.py new file mode 100644 index 0000000000..45611b8372 --- /dev/null +++ b/tests/system-test/2-query/td-32548.py @@ -0,0 +1,32 @@ +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + + tdSql.execute("drop database if exists td_32548;") + tdSql.execute("create database td_32548 cachemodel 'last_row' keep 3650,3650,3650;") + + def run(self): + tdSql.execute("use td_32548;") + + tdSql.execute("create table ntb1 (ts timestamp, ival int);") + tdSql.execute("insert into ntb1 values ('2024-07-08 17:54:49.675', 54);") + + tdSql.execute("flush database td_32548;") + + tdSql.execute("insert into ntb1 values ('2024-07-08 17:53:49.675', 53);") + tdSql.execute("insert into ntb1 values ('2024-07-08 17:52:49.675', 52);") + tdSql.execute("delete from ntb1 where ts = '2024-07-08 17:54:49.675';") + + tdSql.query('select last_row(ts) from ntb1;') + tdSql.checkData(0, 0, '2024-07-08 17:53:49.675') + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index a16a03d30a..87630b773b 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -1,21 +1,23 @@ -IF (TD_WEBSOCKET) - IF (TD_LINUX) +IF(TD_WEBSOCKET) + IF(TD_LINUX) SET(websocket_lib_file "libtaosws.so") - ELSEIF (TD_DARWIN) + ELSEIF(TD_DARWIN) SET(websocket_lib_file "libtaosws.dylib") - ENDIF () + ENDIF() + MESSAGE("${Green} use libtaos-ws${ColourReset}") - IF (TD_ALPINE) + + IF(TD_ALPINE) include(ExternalProject) ExternalProject_Add(taosws-rs - PREFIX "taosws-rs" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs - BUILD_ALWAYS off - DEPENDS taos - BUILD_IN_SOURCE 1 + PREFIX "taosws-rs" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND - 
COMMAND git clean -f -d + COMMAND git clean -f -d BUILD_COMMAND COMMAND cargo update COMMAND RUSTFLAGS=-Ctarget-feature=-crt-static cargo build --release -p taos-ws-sys --features rustls @@ -23,18 +25,18 @@ IF (TD_WEBSOCKET) COMMAND cp target/release/${websocket_lib_file} ${CMAKE_BINARY_DIR}/build/lib COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include COMMAND cmake -E copy target/release/taosws.h ${CMAKE_BINARY_DIR}/build/include - ) - ELSEIF (TD_WINDOWS) + ) + ELSEIF(TD_WINDOWS) include(ExternalProject) ExternalProject_Add(taosws-rs - PREFIX "taosws-rs" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs - BUILD_ALWAYS off - DEPENDS taos - BUILD_IN_SOURCE 1 + PREFIX "taosws-rs" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND - COMMAND git clean -f -d + COMMAND git clean -f -d BUILD_COMMAND COMMAND cargo update COMMAND cargo build --release -p taos-ws-sys --features rustls @@ -43,18 +45,18 @@ IF (TD_WEBSOCKET) COMMAND cp target/release/taosws.dll.lib ${CMAKE_BINARY_DIR}/build/lib/taosws.lib COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include COMMAND cmake -E copy target/release/taosws.h ${CMAKE_BINARY_DIR}/build/include - ) + ) ELSE() include(ExternalProject) ExternalProject_Add(taosws-rs - PREFIX "taosws-rs" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs - BUILD_ALWAYS off - DEPENDS taos - BUILD_IN_SOURCE 1 + PREFIX "taosws-rs" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosws-rs + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosws-rs no need cmake to config" PATCH_COMMAND - COMMAND git clean -f -d + COMMAND git clean -f -d BUILD_COMMAND COMMAND cargo update COMMAND cargo build --release -p taos-ws-sys --features rustls @@ -62,11 +64,11 @@ IF (TD_WEBSOCKET) COMMAND cp target/release/${websocket_lib_file} ${CMAKE_BINARY_DIR}/build/lib COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include COMMAND cmake -E copy target/release/taosws.h ${CMAKE_BINARY_DIR}/build/include - ) - ENDIF () -ENDIF () + ) + ENDIF() +ENDIF() -IF (TD_TAOS_TOOLS) +IF(TD_TAOS_TOOLS) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/tools/taos_tools/deps/avro/lang/c/src) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/include/client) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/include/common) @@ -74,69 +76,74 @@ IF (TD_TAOS_TOOLS) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/include/os) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/include/libs/transport) ADD_SUBDIRECTORY(taos-tools) -ENDIF () +ENDIF() add_subdirectory(shell) -IF (TD_BUILD_HTTP) + +IF(TD_BUILD_HTTP) MESSAGE("") MESSAGE("${Yellow} use original embedded httpd ${ColourReset}") MESSAGE("") - # ADD_SUBDIRECTORY(http) + +# ADD_SUBDIRECTORY(http) ELSEIF(TD_BUILD_TAOSA_INTERNAL) MESSAGE("${Yellow} use taosa internal as httpd ${ColourReset}") -ELSE () +ELSE() MESSAGE("") MESSAGE("${Green} use taosadapter as httpd, platform is ${PLATFORM_ARCH_STR} ${ColourReset}") EXECUTE_PROCESS( - COMMAND git rev-parse --abbrev-ref HEAD - RESULT_VARIABLE result_taos_version - OUTPUT_VARIABLE taos_version + COMMAND git rev-parse --abbrev-ref HEAD + RESULT_VARIABLE result_taos_version + OUTPUT_VARIABLE taos_version ) STRING(FIND ${taos_version} release is_release_branch) - IF ("${is_release_branch}" STREQUAL "0") + IF("${is_release_branch}" STREQUAL "0") STRING(SUBSTRING "${taos_version}" 12 -1 taos_version) STRING(STRIP "${taos_version}" taos_version) - ELSE () + ELSE() STRING(CONCAT taos_version "_branch_" 
"${taos_version}") STRING(STRIP "${taos_version}" taos_version) - ENDIF () + ENDIF() + EXECUTE_PROCESS( - COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter + COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter ) EXECUTE_PROCESS( - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter - COMMAND git rev-parse HEAD - RESULT_VARIABLE commit_sha1 - OUTPUT_VARIABLE taosadapter_commit_sha1 + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter + COMMAND git rev-parse HEAD + RESULT_VARIABLE commit_sha1 + OUTPUT_VARIABLE taosadapter_commit_sha1 ) - IF ("${taosadapter_commit_sha1}" STREQUAL "") + + IF("${taosadapter_commit_sha1}" STREQUAL "") SET(taosadapter_commit_sha1 "unknown") - ELSE () -# STRING(SUBSTRING "${taosadapter_commit_sha1}" 0 7 taosadapter_commit_sha1) + ELSE() + # STRING(SUBSTRING "${taosadapter_commit_sha1}" 0 7 taosadapter_commit_sha1) STRING(STRIP "${taosadapter_commit_sha1}" taosadapter_commit_sha1) - ENDIF () + ENDIF() + SET(taos_version ${TD_VER_NUMBER}) MESSAGE("${Green} taosAdapter will use ${taos_version} and commit ${taosadapter_commit_sha1} as version ${ColourReset}") EXECUTE_PROCESS( - COMMAND cd .. + COMMAND cd .. ) MESSAGE("CURRENT SOURCE DIR ${CMAKE_CURRENT_SOURCE_DIR}") - IF (TD_WINDOWS) + IF(TD_WINDOWS) MESSAGE("Building taosAdapter on Windows") INCLUDE(ExternalProject) ExternalProject_Add(taosadapter - PREFIX "taosadapter" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter - BUILD_ALWAYS off - DEPENDS taos - BUILD_IN_SOURCE 1 + PREFIX "taosadapter" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND - COMMAND git clean -f -d + COMMAND git clean -f -d BUILD_COMMAND COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib @@ -153,58 +160,61 @@ ELSE () COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E echo "Copy taosadapter-debug.exe" COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin - ) - ELSEIF (TD_DARWIN) + ) + ELSEIF(TD_DARWIN) MESSAGE("Building taosAdapter on MACOS") INCLUDE(ExternalProject) ExternalProject_Add(taosadapter - PREFIX "taosadapter" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter - BUILD_ALWAYS off - DEPENDS taos - BUILD_IN_SOURCE 1 - CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" - PATCH_COMMAND - COMMAND git clean -f -d - BUILD_COMMAND - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" - INSTALL_COMMAND - COMMAND cmake -E echo "Copy taosadapter" - COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin - COMMAND cmake -E make_directory 
${CMAKE_BINARY_DIR}/test/cfg/ - COMMAND cmake -E echo "Copy taosadapter.toml" - COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ - COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ - COMMAND cmake -E echo "Copy taosadapter-debug" - COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin - ) - ELSE () - MESSAGE("Building taosAdapter on non-Windows") - INCLUDE(ExternalProject) - ExternalProject_Add(taosadapter - PREFIX "taosadapter" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter - BUILD_ALWAYS off - DEPENDS taos - BUILD_IN_SOURCE 1 + PREFIX "taosadapter" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND - COMMAND git clean -f -d + COMMAND git clean -f -d BUILD_COMMAND - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" -# COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" INSTALL_COMMAND -# COMMAND cmake -E echo "Comparessing taosadapter.exe" -# COMMAND upx taosadapter || : COMMAND cmake -E echo "Copy taosadapter" COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E echo "Copy taosadapter.toml" COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ -# COMMAND cmake -E echo "Copy taosadapter-debug" -# COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin - ) - ENDIF () -ENDIF () + COMMAND cmake -E echo "Copy taosadapter-debug" + COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin + ) + ELSE() + MESSAGE("Building taosAdapter on non-Windows") + INCLUDE(ExternalProject) + ExternalProject_Add(taosadapter + PREFIX "taosadapter" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 + 
CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + + # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X 'github.com/taosdata/taosadapter/v3/version.Version=${taos_version}' -X 'github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}' -X 'github.com/taosdata/taosadapter/v3/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + + # COMMAND cmake -E echo "Comparessing taosadapter.exe" + # COMMAND upx taosadapter || : + COMMAND cmake -E echo "Copy taosadapter" + COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taosadapter.toml" + COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ + + # COMMAND cmake -E echo "Copy taosadapter-debug" + # COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin + ) + ENDIF() +ENDIF() diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt index 0ce181808f..fd46870ac5 100644 --- a/tools/shell/CMakeLists.txt +++ b/tools/shell/CMakeLists.txt @@ -2,41 +2,41 @@ aux_source_directory(src SHELL_SRC) add_executable(shell ${SHELL_SRC}) -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) +IF(TD_LINUX_64 AND JEMALLOC_ENABLED) ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") ADD_DEPENDENCIES(shell jemalloc) -ELSE () +ELSE() SET(LINK_JEMALLOC "") -ENDIF () +ENDIF() -IF (TD_LINUX AND TD_WEBSOCKET) +IF(TD_LINUX AND TD_WEBSOCKET) ADD_DEFINITIONS(-DWEBSOCKET -I${CMAKE_BINARY_DIR}/build/include -ltaosws) SET(LINK_WEBSOCKET "-L${CMAKE_BINARY_DIR}/build/lib -ltaosws") ADD_DEPENDENCIES(shell taosws-rs) -ELSEIF (TD_DARWIN AND TD_WEBSOCKET) +ELSEIF(TD_DARWIN AND TD_WEBSOCKET) ADD_DEFINITIONS(-DWEBSOCKET -I${CMAKE_BINARY_DIR}/build/include) SET(LINK_WEBSOCKET "${CMAKE_BINARY_DIR}/build/lib/libtaosws.dylib") ADD_DEPENDENCIES(shell taosws-rs) -ELSEIF (TD_WINDOWS AND TD_WEBSOCKET) +ELSEIF(TD_WINDOWS AND TD_WEBSOCKET) ADD_DEFINITIONS(-DWEBSOCKET -I${CMAKE_BINARY_DIR}/build/include) SET(LINK_WEBSOCKET "${CMAKE_BINARY_DIR}/build/lib/taosws.lib") ADD_DEPENDENCIES(shell taosws-rs) -ELSE () +ELSE() SET(LINK_WEBSOCKET "") -ENDIF () +ENDIF() -IF (TD_LINUX AND TD_ALPINE) +IF(TD_LINUX AND TD_ALPINE) SET(LINK_ARGP "/usr/lib/libargp.a") -ELSE () +ELSE() SET(LINK_ARGP "") -ENDIF () +ENDIF() if(TD_WINDOWS) target_link_libraries(shell PUBLIC taos_static ${LINK_WEBSOCKET}) else() target_link_libraries(shell PUBLIC taos ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) -endif () +endif() target_link_libraries( shell diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 6f0c3b5247..9872a9dc55 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -1,8 +1,8 @@ 
-#ADD_SUBDIRECTORY(examples/c) +# ADD_SUBDIRECTORY(examples/c) ADD_SUBDIRECTORY(tsim) ADD_SUBDIRECTORY(test/c) -#ADD_SUBDIRECTORY(comparisonTest/tdengine) -IF (NOT "${TSZ_ENABLED}" MATCHES "false") +# ADD_SUBDIRECTORY(comparisonTest/tdengine) +IF(NOT "${TSZ_ENABLED}" MATCHES "false") ADD_SUBDIRECTORY(TSZ) ENDIF() \ No newline at end of file diff --git a/utils/TSZ/CMakeLists.txt b/utils/TSZ/CMakeLists.txt index e3f4dce3c9..ba335fe769 100644 --- a/utils/TSZ/CMakeLists.txt +++ b/utils/TSZ/CMakeLists.txt @@ -6,22 +6,20 @@ INCLUDE_DIRECTORIES(sz/inc) INCLUDE_DIRECTORIES(zstd/) INCLUDE_DIRECTORIES(zstd/common/) - # source -AUX_SOURCE_DIRECTORY(sz/src SRC1) +AUX_SOURCE_DIRECTORY(sz/src SRC1) AUX_SOURCE_DIRECTORY(zstd/dictBuilder SRC2) -AUX_SOURCE_DIRECTORY(zstd/common SRC3) -AUX_SOURCE_DIRECTORY(zstd/compress SRC4) -AUX_SOURCE_DIRECTORY(zstd/decompress SRC5) -AUX_SOURCE_DIRECTORY(zstd/deprecated SRC6) -AUX_SOURCE_DIRECTORY(zstd/legacy SRC7) - +AUX_SOURCE_DIRECTORY(zstd/common SRC3) +AUX_SOURCE_DIRECTORY(zstd/compress SRC4) +AUX_SOURCE_DIRECTORY(zstd/decompress SRC5) +AUX_SOURCE_DIRECTORY(zstd/deprecated SRC6) +AUX_SOURCE_DIRECTORY(zstd/legacy SRC7) # archive ADD_LIBRARY(TSZ STATIC ${SRC1} ${SRC2} ${SRC3} ${SRC4} ${SRC5} ${SRC6} ${SRC7}) TARGET_INCLUDE_DIRECTORIES(TSZ PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/sz/inc ${TD_SOURCE_DIR}/include) # windows ignore warning -IF (TD_WINDOWS) - SET_TARGET_PROPERTIES(TSZ PROPERTIES COMPILE_FLAGS -w) -ENDIF () +IF(TD_WINDOWS) + SET_TARGET_PROPERTIES(TSZ PROPERTIES COMPILE_FLAGS -w) +ENDIF() diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index 8701f208bb..7589d11840 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -17,7 +17,7 @@ add_executable(varbinary_test varbinary_test.c) add_executable(replay_test replay_test.c) if(${TD_LINUX}) -add_executable(tsz_test tsz_test.c) + add_executable(tsz_test tsz_test.c) endif(${TD_LINUX}) target_link_libraries( @@ -124,7 +124,6 @@ target_link_libraries( PUBLIC common PUBLIC os PUBLIC geometry - ) target_link_libraries( @@ -144,11 +143,11 @@ target_link_libraries( ) if(${TD_LINUX}) -target_link_libraries( - tsz_test - PUBLIC taos - PUBLIC util - PUBLIC common - PUBLIC os -) + target_link_libraries( + tsz_test + PUBLIC taos + PUBLIC util + PUBLIC common + PUBLIC os + ) endif(${TD_LINUX}) \ No newline at end of file
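Note on the transCli.c hunks above: the connection-pool counter (the renamed totalSize) is now incremented where a connection is actually created, in cliDoConn after the pool lookup, and decremented in cliHandleException, instead of counting a not-yet-created connection inside cliGetConnFromPool. A minimal standalone sketch of that increment-on-create / decrement-on-teardown pattern, with hypothetical names and none of the transport code's locking or libuv plumbing:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for SConnList: counts every live connection to one
 * destination, whether idle in the pool or in flight. */
typedef struct {
  int totalSize; /* live connections, cf. SConnList.totalSize */
  int limit;     /* cap, cf. pInst->connLimitNum */
} ConnList;

typedef struct {
  ConnList *list; /* back-pointer, cf. conn->list */
} Conn;

/* Create path: count the connection at the moment it is bound to its list,
 * mirroring the increment added next to uv_tcp_connect in cliDoConn. */
static Conn *connCreate(ConnList *list) {
  if (list->totalSize >= list->limit) return NULL; /* cf. TSDB_CODE_RPC_MAX_SESSIONS */
  Conn *c = calloc(1, sizeof(Conn));
  if (c == NULL) return NULL;
  c->list = list;
  list->totalSize += 1;
  return c;
}

/* Teardown/exception path: undo the count exactly once, cf. cliHandleException. */
static void connDestroy(Conn *c) {
  if (c == NULL) return;
  if (c->list != NULL) {
    c->list->totalSize -= 1;
    c->list = NULL;
  }
  free(c);
}

int main(void) {
  ConnList list = {.totalSize = 0, .limit = 2};
  Conn *a = connCreate(&list);
  Conn *b = connCreate(&list);
  printf("live=%d\n", list.totalSize);              /* live=2 */
  printf("third=%p\n", (void *)connCreate(&list));  /* NULL: at the cap */
  connDestroy(a);
  connDestroy(b);
  printf("live=%d\n", list.totalSize);              /* live=0 */
  return 0;
}

Because creation and destruction are the only places the counter moves, the limit check in the pool lookup can no longer leak a count when the connect attempt itself fails, which is what the old increment inside cliGetConnFromPool allowed.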