other: merge 3.0
This commit is contained in:
commit
97637d2435
|
@ -121,6 +121,7 @@ TAGS
|
||||||
contrib/*
|
contrib/*
|
||||||
!contrib/CMakeLists.txt
|
!contrib/CMakeLists.txt
|
||||||
!contrib/test
|
!contrib/test
|
||||||
|
!contrib/azure-cmake
|
||||||
sql
|
sql
|
||||||
debug*/
|
debug*/
|
||||||
.env
|
.env
|
||||||
|
@ -138,3 +139,24 @@ tags
|
||||||
*CMakeCache*
|
*CMakeCache*
|
||||||
*CMakeFiles*
|
*CMakeFiles*
|
||||||
.history/
|
.history/
|
||||||
|
*.txt
|
||||||
|
*.tcl
|
||||||
|
*.pc
|
||||||
|
contrib/geos
|
||||||
|
contrib/libuv
|
||||||
|
contrib/pcre2
|
||||||
|
contrib/zlib
|
||||||
|
deps_tmp_CMakeLists.txt.in
|
||||||
|
*.a
|
||||||
|
*.ctest
|
||||||
|
pcre2-config
|
||||||
|
pcre2_test.sh
|
||||||
|
pcre2_grep_test.sh
|
||||||
|
pcre2_chartables.c
|
||||||
|
geos-config
|
||||||
|
config.h
|
||||||
|
pcre2.h
|
||||||
|
zconf.h
|
||||||
|
version.h
|
||||||
|
geos_c.h
|
||||||
|
|
||||||
|
|
|
@ -6,8 +6,8 @@ project(
|
||||||
DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)"
|
DESCRIPTION "An open-source big data platform designed and optimized for the Internet of Things(IOT)"
|
||||||
)
|
)
|
||||||
|
|
||||||
if (NOT DEFINED TD_SOURCE_DIR)
|
if(NOT DEFINED TD_SOURCE_DIR)
|
||||||
set( TD_SOURCE_DIR ${PROJECT_SOURCE_DIR} )
|
set(TD_SOURCE_DIR ${PROJECT_SOURCE_DIR})
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
|
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
|
||||||
|
@ -15,13 +15,11 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
|
||||||
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
|
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
|
||||||
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
|
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
|
||||||
|
|
||||||
|
|
||||||
include(${TD_SUPPORT_DIR}/cmake.platform)
|
include(${TD_SUPPORT_DIR}/cmake.platform)
|
||||||
include(${TD_SUPPORT_DIR}/cmake.define)
|
include(${TD_SUPPORT_DIR}/cmake.define)
|
||||||
include(${TD_SUPPORT_DIR}/cmake.options)
|
include(${TD_SUPPORT_DIR}/cmake.options)
|
||||||
include(${TD_SUPPORT_DIR}/cmake.version)
|
include(${TD_SUPPORT_DIR}/cmake.version)
|
||||||
|
|
||||||
|
|
||||||
# contrib
|
# contrib
|
||||||
add_subdirectory(contrib)
|
add_subdirectory(contrib)
|
||||||
|
|
||||||
|
|
|
@ -426,6 +426,10 @@ pipeline {
|
||||||
cd ${WKC}/tests/parallel_test
|
cd ${WKC}/tests/parallel_test
|
||||||
./run_check_assert_container.sh -d ${WKDIR}
|
./run_check_assert_container.sh -d ${WKDIR}
|
||||||
'''
|
'''
|
||||||
|
sh '''
|
||||||
|
cd ${WKC}/tests/parallel_test
|
||||||
|
./run_check_void_container.sh -d ${WKDIR}
|
||||||
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
date
|
date
|
||||||
rm -rf ${WKC}/debug
|
rm -rf ${WKC}/debug
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
# azure
|
||||||
|
ExternalProject_Add(azure
|
||||||
|
URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
|
||||||
|
URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
|
||||||
|
DOWNLOAD_NO_PROGRESS 1
|
||||||
|
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||||
|
SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"
|
||||||
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
#BUILD_ALWAYS 1
|
||||||
|
#UPDATE_COMMAND ""
|
||||||
|
CONFIGURE_COMMAND ""
|
||||||
|
BUILD_COMMAND ""
|
||||||
|
INSTALL_COMMAND ""
|
||||||
|
TEST_COMMAND ""
|
||||||
|
)
|
|
@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.0)
|
||||||
set(CMAKE_VERBOSE_MAKEFILE FALSE)
|
set(CMAKE_VERBOSE_MAKEFILE FALSE)
|
||||||
set(TD_BUILD_TAOSA_INTERNAL FALSE)
|
set(TD_BUILD_TAOSA_INTERNAL FALSE)
|
||||||
|
|
||||||
#set output directory
|
# set output directory
|
||||||
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
|
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
|
||||||
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)
|
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)
|
||||||
SET(TD_TESTS_OUTPUT_DIR ${PROJECT_BINARY_DIR}/test)
|
SET(TD_TESTS_OUTPUT_DIR ${PROJECT_BINARY_DIR}/test)
|
||||||
|
@ -12,190 +12,248 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
|
||||||
MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
|
MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
|
||||||
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
|
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
|
||||||
|
|
||||||
if (NOT DEFINED TD_GRANT)
|
IF(NOT DEFINED TD_GRANT)
|
||||||
SET(TD_GRANT FALSE)
|
SET(TD_GRANT FALSE)
|
||||||
endif()
|
ENDIF()
|
||||||
|
|
||||||
IF (NOT DEFINED BUILD_WITH_RAND_ERR)
|
IF(NOT DEFINED BUILD_WITH_RAND_ERR)
|
||||||
SET(BUILD_WITH_RAND_ERR FALSE)
|
SET(BUILD_WITH_RAND_ERR FALSE)
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(BUILD_WITH_RAND_ERR TRUE)
|
SET(BUILD_WITH_RAND_ERR TRUE)
|
||||||
endif()
|
ENDIF()
|
||||||
|
|
||||||
IF ("${WEBSOCKET}" MATCHES "true")
|
IF("${WEBSOCKET}" MATCHES "true")
|
||||||
SET(TD_WEBSOCKET TRUE)
|
SET(TD_WEBSOCKET TRUE)
|
||||||
MESSAGE("Enable websocket")
|
MESSAGE("Enable websocket")
|
||||||
ADD_DEFINITIONS(-DWEBSOCKET)
|
ADD_DEFINITIONS(-DWEBSOCKET)
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(TD_WEBSOCKET FALSE)
|
SET(TD_WEBSOCKET FALSE)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
IF ("${BUILD_HTTP}" STREQUAL "")
|
IF("${BUILD_HTTP}" STREQUAL "")
|
||||||
IF (TD_LINUX)
|
IF(TD_LINUX)
|
||||||
IF (TD_ARM_32)
|
IF(TD_ARM_32)
|
||||||
SET(TD_BUILD_HTTP TRUE)
|
SET(TD_BUILD_HTTP TRUE)
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(TD_BUILD_HTTP TRUE)
|
SET(TD_BUILD_HTTP TRUE)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
ELSEIF (TD_DARWIN)
|
ELSEIF(TD_DARWIN)
|
||||||
SET(TD_BUILD_HTTP TRUE)
|
SET(TD_BUILD_HTTP TRUE)
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(TD_BUILD_HTTP TRUE)
|
SET(TD_BUILD_HTTP TRUE)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
ELSEIF (${BUILD_HTTP} MATCHES "false")
|
ELSEIF(${BUILD_HTTP} MATCHES "false")
|
||||||
SET(TD_BUILD_HTTP FALSE)
|
SET(TD_BUILD_HTTP FALSE)
|
||||||
ELSEIF (${BUILD_HTTP} MATCHES "true")
|
ELSEIF(${BUILD_HTTP} MATCHES "true")
|
||||||
SET(TD_BUILD_HTTP TRUE)
|
SET(TD_BUILD_HTTP TRUE)
|
||||||
ELSEIF (${BUILD_HTTP} MATCHES "internal")
|
ELSEIF(${BUILD_HTTP} MATCHES "internal")
|
||||||
SET(TD_BUILD_HTTP FALSE)
|
SET(TD_BUILD_HTTP FALSE)
|
||||||
SET(TD_BUILD_TAOSA_INTERNAL TRUE)
|
SET(TD_BUILD_TAOSA_INTERNAL TRUE)
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(TD_BUILD_HTTP TRUE)
|
SET(TD_BUILD_HTTP TRUE)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
IF (TD_BUILD_HTTP)
|
IF(TD_BUILD_HTTP)
|
||||||
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
|
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
IF ("${BUILD_TOOLS}" STREQUAL "")
|
IF("${BUILD_TOOLS}" STREQUAL "")
|
||||||
IF (TD_LINUX)
|
IF(TD_LINUX)
|
||||||
IF (TD_ARM_32)
|
IF(TD_ARM_32)
|
||||||
SET(BUILD_TOOLS "false")
|
SET(BUILD_TOOLS "false")
|
||||||
ELSEIF (TD_ARM_64)
|
ELSEIF(TD_ARM_64)
|
||||||
SET(BUILD_TOOLS "false")
|
SET(BUILD_TOOLS "false")
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(BUILD_TOOLS "false")
|
SET(BUILD_TOOLS "false")
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
ELSEIF (TD_DARWIN)
|
ELSEIF(TD_DARWIN)
|
||||||
SET(BUILD_TOOLS "false")
|
SET(BUILD_TOOLS "false")
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(BUILD_TOOLS "false")
|
SET(BUILD_TOOLS "false")
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
IF ("${BUILD_TOOLS}" MATCHES "false")
|
IF("${BUILD_TOOLS}" MATCHES "false")
|
||||||
MESSAGE("${Yellow} Will _not_ build taos_tools! ${ColourReset}")
|
MESSAGE("${Yellow} Will _not_ build taos_tools! ${ColourReset}")
|
||||||
SET(TD_TAOS_TOOLS FALSE)
|
SET(TD_TAOS_TOOLS FALSE)
|
||||||
ELSE ()
|
ELSE()
|
||||||
MESSAGE("")
|
MESSAGE("")
|
||||||
MESSAGE("${Green} Will build taos_tools! ${ColourReset}")
|
MESSAGE("${Green} Will build taos_tools! ${ColourReset}")
|
||||||
MESSAGE("")
|
MESSAGE("")
|
||||||
SET(TD_TAOS_TOOLS TRUE)
|
SET(TD_TAOS_TOOLS TRUE)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
IF (${TD_WINDOWS})
|
IF(${TD_WINDOWS})
|
||||||
SET(TAOS_LIB taos_static)
|
SET(TAOS_LIB taos_static)
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(TAOS_LIB taos)
|
SET(TAOS_LIB taos)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
# build TSZ by default
|
# build TSZ by default
|
||||||
IF ("${TSZ_ENABLED}" MATCHES "false")
|
IF("${TSZ_ENABLED}" MATCHES "false")
|
||||||
set(VAR_TSZ "" CACHE INTERNAL "global variant empty" )
|
set(VAR_TSZ "" CACHE INTERNAL "global variant empty")
|
||||||
ELSE()
|
ELSE()
|
||||||
# define add
|
# define add
|
||||||
MESSAGE(STATUS "build with TSZ enabled")
|
MESSAGE(STATUS "build with TSZ enabled")
|
||||||
ADD_DEFINITIONS(-DTD_TSZ)
|
ADD_DEFINITIONS(-DTD_TSZ)
|
||||||
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
|
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz")
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
# force set all platform to JEMALLOC_ENABLED = false
|
# force set all platform to JEMALLOC_ENABLED = false
|
||||||
SET(JEMALLOC_ENABLED OFF)
|
SET(JEMALLOC_ENABLED OFF)
|
||||||
IF (TD_WINDOWS)
|
|
||||||
|
IF(TD_WINDOWS)
|
||||||
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
|
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
|
||||||
IF (${CMAKE_BUILD_TYPE} MATCHES "Release")
|
|
||||||
|
IF(${CMAKE_BUILD_TYPE} MATCHES "Release")
|
||||||
MESSAGE("${Green} will build Release version! ${ColourReset}")
|
MESSAGE("${Green} will build Release version! ${ColourReset}")
|
||||||
SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")
|
SET(COMMON_FLAGS "/W3 /D_WIN32 /DWIN32 /Zi- /O2 /GL /MD")
|
||||||
|
|
||||||
ELSE ()
|
ELSE()
|
||||||
MESSAGE("${Green} will build Debug version! ${ColourReset}")
|
MESSAGE("${Green} will build Debug version! ${ColourReset}")
|
||||||
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
||||||
|
|
||||||
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
||||||
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
||||||
# ENDIF ()
|
# ENDIF ()
|
||||||
IF (CMAKE_DEPFILE_FLAGS_C)
|
IF(CMAKE_DEPFILE_FLAGS_C)
|
||||||
SET(CMAKE_DEPFILE_FLAGS_C "")
|
SET(CMAKE_DEPFILE_FLAGS_C "")
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
IF (CMAKE_DEPFILE_FLAGS_CXX)
|
|
||||||
|
IF(CMAKE_DEPFILE_FLAGS_CXX)
|
||||||
SET(CMAKE_DEPFILE_FLAGS_CXX "")
|
SET(CMAKE_DEPFILE_FLAGS_CXX "")
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
IF (CMAKE_C_FLAGS_DEBUG)
|
|
||||||
|
IF(CMAKE_C_FLAGS_DEBUG)
|
||||||
SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE)
|
SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
IF (CMAKE_CXX_FLAGS_DEBUG)
|
|
||||||
|
IF(CMAKE_CXX_FLAGS_DEBUG)
|
||||||
SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE)
|
SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
|
||||||
|
|
||||||
ELSE ()
|
ELSE()
|
||||||
IF (${TD_DARWIN})
|
IF(${TD_DARWIN})
|
||||||
set(CMAKE_MACOSX_RPATH 0)
|
set(CMAKE_MACOSX_RPATH 0)
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
IF (${COVER} MATCHES "true")
|
|
||||||
|
IF(${COVER} MATCHES "true")
|
||||||
MESSAGE(STATUS "Test coverage mode, add extra flags")
|
MESSAGE(STATUS "Test coverage mode, add extra flags")
|
||||||
SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
|
SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
|
||||||
SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage")
|
SET(GCC_COVERAGE_LINK_FLAGS "-lgcov --coverage")
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
|
||||||
# disable all assert
|
# disable all assert
|
||||||
IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
|
IF((${DISABLE_ASSERT} MATCHES "true") OR(${DISABLE_ASSERTS} MATCHES "true"))
|
||||||
ADD_DEFINITIONS(-DDISABLE_ASSERT)
|
ADD_DEFINITIONS(-DDISABLE_ASSERT)
|
||||||
MESSAGE(STATUS "Disable all asserts")
|
MESSAGE(STATUS "Disable all asserts")
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
INCLUDE(CheckCCompilerFlag)
|
INCLUDE(CheckCCompilerFlag)
|
||||||
IF (TD_ARM_64 OR TD_ARM_32)
|
|
||||||
|
IF(TD_ARM_64 OR TD_ARM_32)
|
||||||
SET(COMPILER_SUPPORT_SSE42 false)
|
SET(COMPILER_SUPPORT_SSE42 false)
|
||||||
ELSEIF (("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR ("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang"))
|
ELSEIF(("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang"))
|
||||||
SET(COMPILER_SUPPORT_SSE42 true)
|
SET(COMPILER_SUPPORT_SSE42 true)
|
||||||
MESSAGE(STATUS "Always enable sse4.2 for Clang/AppleClang")
|
MESSAGE(STATUS "Always enable sse4.2 for Clang/AppleClang")
|
||||||
ELSE()
|
ELSE()
|
||||||
CHECK_C_COMPILER_FLAG("-msse4.2" COMPILER_SUPPORT_SSE42)
|
CHECK_C_COMPILER_FLAG("-msse4.2" COMPILER_SUPPORT_SSE42)
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
|
IF(TD_ARM_64 OR TD_ARM_32)
|
||||||
|
SET(COMPILER_SUPPORT_FMA false)
|
||||||
|
SET(COMPILER_SUPPORT_AVX false)
|
||||||
|
SET(COMPILER_SUPPORT_AVX2 false)
|
||||||
|
SET(COMPILER_SUPPORT_AVX512F false)
|
||||||
|
SET(COMPILER_SUPPORT_AVX512BMI false)
|
||||||
|
SET(COMPILER_SUPPORT_AVX512VL false)
|
||||||
|
ELSE()
|
||||||
CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
|
CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
|
||||||
CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
|
|
||||||
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
|
|
||||||
CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
|
CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
|
||||||
CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
|
CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
|
||||||
CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
|
CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
|
||||||
|
|
||||||
IF (COMPILER_SUPPORT_SSE42)
|
INCLUDE(CheckCSourceRuns)
|
||||||
|
SET(CMAKE_REQUIRED_FLAGS "-mavx")
|
||||||
|
check_c_source_runs("
|
||||||
|
#include <immintrin.h>
|
||||||
|
int main() {
|
||||||
|
__m256d a, b, c;
|
||||||
|
double buf[4] = {0};
|
||||||
|
a = _mm256_loadu_pd(buf);
|
||||||
|
b = _mm256_loadu_pd(buf);
|
||||||
|
c = _mm256_add_pd(a, b);
|
||||||
|
_mm256_storeu_pd(buf, c);
|
||||||
|
for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
|
||||||
|
IF (buf[i] != 0) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
" COMPILER_SUPPORT_AVX)
|
||||||
|
|
||||||
|
SET(CMAKE_REQUIRED_FLAGS "-mavx2")
|
||||||
|
check_c_source_runs("
|
||||||
|
#include <immintrin.h>
|
||||||
|
int main() {
|
||||||
|
__m256i a, b, c;
|
||||||
|
int buf[8] = {0};
|
||||||
|
a = _mm256_loadu_si256((__m256i *)buf);
|
||||||
|
b = _mm256_loadu_si256((__m256i *)buf);
|
||||||
|
c = _mm256_and_si256(a, b);
|
||||||
|
_mm256_storeu_si256((__m256i *)buf, c);
|
||||||
|
for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
|
||||||
|
IF (buf[i] != 0) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
" COMPILER_SUPPORT_AVX2)
|
||||||
|
ENDIF()
|
||||||
|
|
||||||
|
IF(COMPILER_SUPPORT_SSE42)
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
IF ("${SIMD_SUPPORT}" MATCHES "true")
|
IF("${SIMD_SUPPORT}" MATCHES "true")
|
||||||
IF (COMPILER_SUPPORT_FMA)
|
IF(COMPILER_SUPPORT_FMA)
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
|
||||||
ENDIF()
|
MESSAGE(STATUS "FMA instructions is ACTIVATED")
|
||||||
IF (COMPILER_SUPPORT_AVX)
|
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
|
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
|
|
||||||
ENDIF()
|
|
||||||
IF (COMPILER_SUPPORT_AVX2)
|
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
|
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
|
|
||||||
ENDIF()
|
|
||||||
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
|
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
IF ("${SIMD_AVX512_SUPPORT}" MATCHES "true")
|
IF(COMPILER_SUPPORT_AVX)
|
||||||
IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
|
||||||
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
|
||||||
|
MESSAGE(STATUS "AVX instructions is ACTIVATED")
|
||||||
|
ENDIF()
|
||||||
|
|
||||||
|
IF(COMPILER_SUPPORT_AVX2)
|
||||||
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
|
||||||
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
|
||||||
|
MESSAGE(STATUS "AVX2 instructions is ACTIVATED")
|
||||||
|
ENDIF()
|
||||||
|
ENDIF()
|
||||||
|
|
||||||
|
IF("${SIMD_AVX512_SUPPORT}" MATCHES "true")
|
||||||
|
IF(COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
|
||||||
MESSAGE(STATUS "avx512f/avx512bmi enabled by compiler")
|
MESSAGE(STATUS "avx512f/avx512bmi enabled by compiler")
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
IF (COMPILER_SUPPORT_AVX512VL)
|
IF(COMPILER_SUPPORT_AVX512VL)
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
|
||||||
MESSAGE(STATUS "avx512vl enabled by compiler")
|
MESSAGE(STATUS "avx512vl enabled by compiler")
|
||||||
|
@ -206,16 +264,17 @@ ELSE ()
|
||||||
SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||||
SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||||
|
|
||||||
IF (${BUILD_SANITIZER})
|
IF(${BUILD_SANITIZER})
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
|
||||||
|
# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||||
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||||
MESSAGE(STATUS "Compile with Address Sanitizer!")
|
MESSAGE(STATUS "Compile with Address Sanitizer!")
|
||||||
ELSEIF (${BUILD_RELEASE})
|
ELSEIF(${BUILD_RELEASE})
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
||||||
ELSE ()
|
ELSE()
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||||
ENDIF ()
|
ENDIF()
|
||||||
|
ENDIF()
|
||||||
ENDIF ()
|
|
||||||
|
|
|
@ -7,7 +7,17 @@ ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}
|
||||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/cfg/
|
COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/cfg/
|
||||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/log/
|
COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/log/
|
||||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/data/
|
COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/data/
|
||||||
COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TD_TESTS_OUTPUT_DIR}/data > ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
COMMAND ${CMAKE_COMMAND} -E echo firstEp localhost:6030 > ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo fqdn localhost >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo serverPort 6030 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo debugFlag 135 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo asyncLog 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo supportVnodes 1024 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo numOfLogLines 300000000 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo logKeepDays -1 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo checkpointInterval 60 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo snodeAddress 127.0.0.1:873 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TD_TESTS_OUTPUT_DIR}/data >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
COMMAND ${CMAKE_COMMAND} -E echo logDir ${TD_TESTS_OUTPUT_DIR}/log >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
COMMAND ${CMAKE_COMMAND} -E echo logDir ${TD_TESTS_OUTPUT_DIR}/log >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
COMMAND ${CMAKE_COMMAND} -E echo monitor 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
COMMAND ${CMAKE_COMMAND} -E echo monitor 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
|
||||||
|
|
|
@ -144,6 +144,12 @@ option(
|
||||||
OFF
|
OFF
|
||||||
)
|
)
|
||||||
|
|
||||||
|
option(
|
||||||
|
BUILD_WITH_ANALYSIS
|
||||||
|
"If build with analysis"
|
||||||
|
ON
|
||||||
|
)
|
||||||
|
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF(NOT TD_ENTERPRISE)
|
IF(NOT TD_ENTERPRISE)
|
||||||
|
@ -151,8 +157,15 @@ MESSAGE("switch s3 off with community version")
|
||||||
set(BUILD_S3 OFF)
|
set(BUILD_S3 OFF)
|
||||||
set(BUILD_WITH_S3 OFF)
|
set(BUILD_WITH_S3 OFF)
|
||||||
set(BUILD_WITH_COS OFF)
|
set(BUILD_WITH_COS OFF)
|
||||||
|
set(BUILD_WITH_ANALYSIS OFF)
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
|
IF(${BUILD_WITH_ANALYSIS})
|
||||||
|
message("build with analysis")
|
||||||
|
set(BUILD_S3 ON)
|
||||||
|
set(BUILD_WITH_S3 ON)
|
||||||
|
ENDIF()
|
||||||
|
|
||||||
IF(${BUILD_S3})
|
IF(${BUILD_S3})
|
||||||
|
|
||||||
IF(${BUILD_WITH_S3})
|
IF(${BUILD_WITH_S3})
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "3.3.3.0.alpha")
|
SET(TD_VER_NUMBER "3.3.4.0.alpha")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# stub
|
# stub
|
||||||
ExternalProject_Add(stub
|
ExternalProject_Add(stub
|
||||||
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
|
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
|
||||||
GIT_TAG 5e903b8e
|
GIT_TAG 3137465194014d66a8402941e80d2bccc6346f51
|
||||||
GIT_SUBMODULES "src"
|
GIT_SUBMODULES "src"
|
||||||
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
|
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
|
||||||
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
|
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
|
||||||
|
@ -10,5 +10,4 @@ ExternalProject_Add(stub
|
||||||
BUILD_COMMAND ""
|
BUILD_COMMAND ""
|
||||||
INSTALL_COMMAND ""
|
INSTALL_COMMAND ""
|
||||||
TEST_COMMAND ""
|
TEST_COMMAND ""
|
||||||
GIT_SHALLOW true
|
|
||||||
)
|
)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taosadapter
|
# taosadapter
|
||||||
ExternalProject_Add(taosadapter
|
ExternalProject_Add(taosadapter
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||||
GIT_TAG 3.0
|
GIT_TAG main
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taos-tools
|
# taos-tools
|
||||||
ExternalProject_Add(taos-tools
|
ExternalProject_Add(taos-tools
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||||
GIT_TAG 3.0
|
GIT_TAG main
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -10,39 +10,36 @@ if(${BUILD_WITH_S3})
|
||||||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
|
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
|
||||||
|
|
||||||
elseif(${BUILD_WITH_COS})
|
elseif(${BUILD_WITH_COS})
|
||||||
|
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
|
||||||
|
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||||
|
|
||||||
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
|
if(${BUILD_WITH_COS})
|
||||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
|
||||||
|
|
||||||
if(${BUILD_WITH_COS})
|
|
||||||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
|
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
|
||||||
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||||
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||||
endif(${BUILD_WITH_COS})
|
endif(${BUILD_WITH_COS})
|
||||||
|
|
||||||
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||||
|
|
||||||
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
|
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
|
||||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
||||||
|
|
||||||
if(${BUILD_WITH_COS})
|
if(${BUILD_WITH_COS})
|
||||||
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
||||||
endif(${BUILD_WITH_COS})
|
endif(${BUILD_WITH_COS})
|
||||||
|
|
||||||
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||||
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
|
||||||
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
|
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
|
||||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
|
@ -59,7 +56,7 @@ endif()
|
||||||
# taosadapter
|
# taosadapter
|
||||||
if(${BUILD_HTTP})
|
if(${BUILD_HTTP})
|
||||||
MESSAGE("BUILD_HTTP is on")
|
MESSAGE("BUILD_HTTP is on")
|
||||||
else ()
|
else()
|
||||||
MESSAGE("BUILD_HTTP is off, use taosAdapter")
|
MESSAGE("BUILD_HTTP is off, use taosAdapter")
|
||||||
cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
endif()
|
endif()
|
||||||
|
@ -110,19 +107,18 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
# xz
|
# xz
|
||||||
#cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
#lzma2
|
# lzma2
|
||||||
cat("${TD_SUPPORT_DIR}/lzma_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/lzma_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
|
if(${BUILD_CONTRIB})
|
||||||
if (${BUILD_CONTRIB})
|
|
||||||
if(${BUILD_WITH_ROCKSDB})
|
if(${BUILD_WITH_ROCKSDB})
|
||||||
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
add_definitions(-DUSE_ROCKSDB)
|
add_definitions(-DUSE_ROCKSDB)
|
||||||
endif()
|
endif()
|
||||||
else()
|
else()
|
||||||
if (NOT ${TD_LINUX})
|
if(NOT ${TD_LINUX})
|
||||||
if(${BUILD_WITH_ROCKSDB})
|
if(${BUILD_WITH_ROCKSDB})
|
||||||
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
add_definitions(-DUSE_ROCKSDB)
|
add_definitions(-DUSE_ROCKSDB)
|
||||||
|
@ -134,9 +130,9 @@ else()
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
#cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/zstd_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
#libuv
|
# libuv
|
||||||
if(${BUILD_WITH_UV})
|
if(${BUILD_WITH_UV})
|
||||||
cat("${TD_SUPPORT_DIR}/libuv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/libuv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
endif(${BUILD_WITH_UV})
|
endif(${BUILD_WITH_UV})
|
||||||
|
@ -152,17 +148,17 @@ if(${BUILD_WITH_S3})
|
||||||
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
add_definitions(-DUSE_S3)
|
add_definitions(-DUSE_S3)
|
||||||
|
|
||||||
# cos
|
# cos
|
||||||
elseif(${BUILD_WITH_COS})
|
elseif(${BUILD_WITH_COS})
|
||||||
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
add_definitions(-DUSE_COS)
|
add_definitions(-DUSE_COS)
|
||||||
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# crashdump
|
# crashdump
|
||||||
|
@ -206,25 +202,27 @@ if(${BUILD_TEST})
|
||||||
gtest
|
gtest
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src>
|
||||||
)
|
)
|
||||||
|
|
||||||
if(${TD_WINDOWS})
|
if(${TD_WINDOWS})
|
||||||
target_include_directories(
|
target_include_directories(
|
||||||
gtest
|
gtest
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_win>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_win>
|
||||||
)
|
)
|
||||||
endif(${TD_WINDOWS})
|
endif(${TD_WINDOWS})
|
||||||
|
|
||||||
if(${TD_LINUX})
|
if(${TD_LINUX})
|
||||||
target_include_directories(
|
target_include_directories(
|
||||||
gtest
|
gtest
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_linux>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_linux>
|
||||||
)
|
)
|
||||||
endif(${TD_LINUX})
|
endif(${TD_LINUX})
|
||||||
|
|
||||||
if(${TD_DARWIN})
|
if(${TD_DARWIN})
|
||||||
target_include_directories(
|
target_include_directories(
|
||||||
gtest
|
gtest
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_darwin>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_darwin>
|
||||||
)
|
)
|
||||||
endif(${TD_DARWIN})
|
endif(${TD_DARWIN})
|
||||||
|
|
||||||
endif(${BUILD_TEST})
|
endif(${BUILD_TEST})
|
||||||
|
|
||||||
# cJson
|
# cJson
|
||||||
|
@ -236,15 +234,16 @@ option(CJSON_BUILD_SHARED_LIBS "Overrides BUILD_SHARED_LIBS if CJSON_OVERRIDE_BU
|
||||||
add_subdirectory(cJson EXCLUDE_FROM_ALL)
|
add_subdirectory(cJson EXCLUDE_FROM_ALL)
|
||||||
target_include_directories(
|
target_include_directories(
|
||||||
cjson
|
cjson
|
||||||
|
|
||||||
# see https://stackoverflow.com/questions/25676277/cmake-target-include-directories-prints-an-error-when-i-try-to-add-the-source
|
# see https://stackoverflow.com/questions/25676277/cmake-target-include-directories-prints-an-error-when-i-try-to-add-the-source
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cJson>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cJson>
|
||||||
)
|
)
|
||||||
unset(CMAKE_PROJECT_INCLUDE_BEFORE)
|
unset(CMAKE_PROJECT_INCLUDE_BEFORE)
|
||||||
|
|
||||||
# xml2
|
# xml2
|
||||||
#if(${BUILD_WITH_S3})
|
# if(${BUILD_WITH_S3})
|
||||||
# add_subdirectory(xml2 EXCLUDE_FROM_ALL)
|
# add_subdirectory(xml2 EXCLUDE_FROM_ALL)
|
||||||
#endif()
|
# endif()
|
||||||
|
|
||||||
# lz4
|
# lz4
|
||||||
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
|
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
|
||||||
|
@ -255,10 +254,12 @@ target_include_directories(
|
||||||
|
|
||||||
# zlib
|
# zlib
|
||||||
set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")
|
set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")
|
||||||
|
|
||||||
if(${TD_DARWIN})
|
if(${TD_DARWIN})
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=deprecated-non-prototype")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=deprecated-non-prototype")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-non-prototype")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-non-prototype")
|
||||||
endif(${TD_DARWIN})
|
endif(${TD_DARWIN})
|
||||||
|
|
||||||
add_subdirectory(zlib EXCLUDE_FROM_ALL)
|
add_subdirectory(zlib EXCLUDE_FROM_ALL)
|
||||||
target_include_directories(
|
target_include_directories(
|
||||||
zlibstatic
|
zlibstatic
|
||||||
|
@ -291,24 +292,27 @@ endif(${BUILD_WITH_LEVELDB})
|
||||||
|
|
||||||
# rocksdb
|
# rocksdb
|
||||||
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
|
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
|
||||||
if (${BUILD_WITH_UV})
|
if(${BUILD_WITH_UV})
|
||||||
if(${TD_LINUX})
|
if(${TD_LINUX})
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
||||||
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
|
||||||
|
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
||||||
SET(CMAKE_BUILD_TYPE Release)
|
SET(CMAKE_BUILD_TYPE Release)
|
||||||
endif()
|
endif()
|
||||||
endif(${TD_LINUX})
|
endif(${TD_LINUX})
|
||||||
endif (${BUILD_WITH_UV})
|
endif(${BUILD_WITH_UV})
|
||||||
|
|
||||||
if (${BUILD_WITH_ROCKSDB})
|
if(${BUILD_WITH_ROCKSDB})
|
||||||
if (${BUILD_CONTRIB})
|
if(${BUILD_CONTRIB})
|
||||||
if(${TD_LINUX})
|
if(${TD_LINUX})
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
|
||||||
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
|
||||||
|
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
||||||
SET(CMAKE_BUILD_TYPE Release)
|
SET(CMAKE_BUILD_TYPE Release)
|
||||||
endif()
|
endif()
|
||||||
endif(${TD_LINUX})
|
endif(${TD_LINUX})
|
||||||
|
|
||||||
MESSAGE(STATUS "ROCKSDB CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
|
MESSAGE(STATUS "ROCKSDB CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
|
||||||
MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
|
MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
|
||||||
|
|
||||||
|
@ -316,22 +320,23 @@ if (${BUILD_WITH_ROCKSDB})
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
||||||
endif(${TD_DARWIN})
|
endif(${TD_DARWIN})
|
||||||
|
|
||||||
if (${TD_DARWIN_ARM64})
|
if(${TD_DARWIN_ARM64})
|
||||||
set(HAS_ARMV8_CRC true)
|
set(HAS_ARMV8_CRC true)
|
||||||
endif(${TD_DARWIN_ARM64})
|
endif(${TD_DARWIN_ARM64})
|
||||||
|
|
||||||
if (${TD_WINDOWS})
|
if(${TD_WINDOWS})
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
|
||||||
option(WITH_JNI "" OFF)
|
option(WITH_JNI "" OFF)
|
||||||
|
|
||||||
if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
|
if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
|
||||||
message("Rocksdb build runtime lib use /MT or /MTd")
|
message("Rocksdb build runtime lib use /MT or /MTd")
|
||||||
option(WITH_MD_LIBRARY "build with MD" OFF)
|
option(WITH_MD_LIBRARY "build with MD" OFF)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
||||||
endif(${TD_WINDOWS})
|
endif(${TD_WINDOWS})
|
||||||
|
|
||||||
|
|
||||||
if(${TD_DARWIN})
|
if(${TD_DARWIN})
|
||||||
option(HAVE_THREAD_LOCAL "" OFF)
|
option(HAVE_THREAD_LOCAL "" OFF)
|
||||||
option(WITH_IOSTATS_CONTEXT "" OFF)
|
option(WITH_IOSTATS_CONTEXT "" OFF)
|
||||||
|
@ -357,30 +362,32 @@ if (${BUILD_WITH_ROCKSDB})
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
|
||||||
)
|
)
|
||||||
else()
|
else()
|
||||||
if (NOT ${TD_LINUX})
|
if(NOT ${TD_LINUX})
|
||||||
MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
|
MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
|
||||||
MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
|
MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
|
||||||
|
|
||||||
if(${TD_DARWIN})
|
if(${TD_DARWIN})
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=maybe-uninitialized")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=maybe-uninitialized")
|
||||||
endif(${TD_DARWIN})
|
endif(${TD_DARWIN})
|
||||||
|
|
||||||
if (${TD_DARWIN_ARM64})
|
if(${TD_DARWIN_ARM64})
|
||||||
set(HAS_ARMV8_CRC true)
|
set(HAS_ARMV8_CRC true)
|
||||||
endif(${TD_DARWIN_ARM64})
|
endif(${TD_DARWIN_ARM64})
|
||||||
|
|
||||||
if (${TD_WINDOWS})
|
if(${TD_WINDOWS})
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819 /std:c++17")
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244 /wd4819")
|
||||||
option(WITH_JNI "" OFF)
|
option(WITH_JNI "" OFF)
|
||||||
|
|
||||||
if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
|
if(CMAKE_C_FLAGS MATCHES "/MT" OR CMAKE_C_FLAGS MATCHES "/MTd")
|
||||||
message("Rocksdb build runtime lib use /MT or /MTd")
|
message("Rocksdb build runtime lib use /MT or /MTd")
|
||||||
option(WITH_MD_LIBRARY "build with MD" OFF)
|
option(WITH_MD_LIBRARY "build with MD" OFF)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
||||||
endif(${TD_WINDOWS})
|
endif(${TD_WINDOWS})
|
||||||
|
|
||||||
|
|
||||||
if(${TD_DARWIN})
|
if(${TD_DARWIN})
|
||||||
option(HAVE_THREAD_LOCAL "" OFF)
|
option(HAVE_THREAD_LOCAL "" OFF)
|
||||||
option(WITH_IOSTATS_CONTEXT "" OFF)
|
option(WITH_IOSTATS_CONTEXT "" OFF)
|
||||||
|
@ -406,22 +413,23 @@ if (${BUILD_WITH_ROCKSDB})
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
|
||||||
)
|
)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(${BUILD_WITH_S3})
|
if(${BUILD_WITH_S3})
|
||||||
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.2/include)
|
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.2/include)
|
||||||
MESSAGE("build with s3: ${BUILD_WITH_S3}")
|
MESSAGE("build with s3: ${BUILD_WITH_S3}")
|
||||||
|
|
||||||
# cos
|
# cos
|
||||||
elseif(${BUILD_WITH_COS})
|
elseif(${BUILD_WITH_COS})
|
||||||
if(${TD_LINUX})
|
if(${TD_LINUX})
|
||||||
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
|
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
|
||||||
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
|
|
||||||
|
# ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
|
||||||
option(ENABLE_TEST "Enable the tests" OFF)
|
option(ENABLE_TEST "Enable the tests" OFF)
|
||||||
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
|
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
|
||||||
#MESSAGE("$ENV{HOME}/.cos-local.1/include")
|
|
||||||
|
|
||||||
|
# MESSAGE("$ENV{HOME}/.cos-local.1/include")
|
||||||
set(CMAKE_BUILD_TYPE Release)
|
set(CMAKE_BUILD_TYPE Release)
|
||||||
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
|
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
|
||||||
set(CMAKE_PROJECT_NAME cos_c_sdk)
|
set(CMAKE_PROJECT_NAME cos_c_sdk)
|
||||||
|
@ -434,16 +442,15 @@ elseif(${BUILD_WITH_COS})
|
||||||
|
|
||||||
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
|
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
|
||||||
else()
|
else()
|
||||||
|
|
||||||
endif(${TD_LINUX})
|
endif(${TD_LINUX})
|
||||||
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# pthread
|
# pthread
|
||||||
if(${BUILD_PTHREAD})
|
if(${BUILD_PTHREAD})
|
||||||
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
||||||
SET(CMAKE_BUILD_TYPE Release)
|
SET(CMAKE_BUILD_TYPE Release)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
add_definitions(-DPTW32_STATIC_LIB)
|
add_definitions(-DPTW32_STATIC_LIB)
|
||||||
add_subdirectory(pthread EXCLUDE_FROM_ALL)
|
add_subdirectory(pthread EXCLUDE_FROM_ALL)
|
||||||
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
|
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
|
||||||
|
@ -451,7 +458,6 @@ if(${BUILD_PTHREAD})
|
||||||
target_link_libraries(pthread INTERFACE libpthreadVC3)
|
target_link_libraries(pthread INTERFACE libpthreadVC3)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
|
||||||
# jemalloc
|
# jemalloc
|
||||||
if(${JEMALLOC_ENABLED})
|
if(${JEMALLOC_ENABLED})
|
||||||
include(ExternalProject)
|
include(ExternalProject)
|
||||||
|
@ -514,12 +520,13 @@ endif(${BUILD_WCWIDTH})
|
||||||
|
|
||||||
# LIBUV
|
# LIBUV
|
||||||
if(${BUILD_WITH_UV})
|
if(${BUILD_WITH_UV})
|
||||||
if (TD_WINDOWS)
|
if(TD_WINDOWS)
|
||||||
# There is no GetHostNameW function on win7.
|
# There is no GetHostNameW function on win7.
|
||||||
file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT)
|
file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT)
|
||||||
string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}")
|
string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}")
|
||||||
file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}")
|
file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}")
|
||||||
endif ()
|
endif()
|
||||||
|
|
||||||
add_subdirectory(libuv EXCLUDE_FROM_ALL)
|
add_subdirectory(libuv EXCLUDE_FROM_ALL)
|
||||||
endif(${BUILD_WITH_UV})
|
endif(${BUILD_WITH_UV})
|
||||||
|
|
||||||
|
@ -535,6 +542,7 @@ if(${BUILD_WITH_SQLITE})
|
||||||
INTERFACE m
|
INTERFACE m
|
||||||
INTERFACE pthread
|
INTERFACE pthread
|
||||||
)
|
)
|
||||||
|
|
||||||
if(NOT TD_WINDOWS)
|
if(NOT TD_WINDOWS)
|
||||||
target_link_libraries(sqlite
|
target_link_libraries(sqlite
|
||||||
INTERFACE dl
|
INTERFACE dl
|
||||||
|
@ -545,36 +553,38 @@ endif(${BUILD_WITH_SQLITE})
|
||||||
# addr2line
|
# addr2line
|
||||||
if(${BUILD_ADDR2LINE})
|
if(${BUILD_ADDR2LINE})
|
||||||
if(NOT ${TD_WINDOWS})
|
if(NOT ${TD_WINDOWS})
|
||||||
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
|
check_include_file("sys/types.h" HAVE_SYS_TYPES_H)
|
||||||
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
|
check_include_file("sys/stat.h" HAVE_SYS_STAT_H)
|
||||||
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
|
check_include_file("inttypes.h" HAVE_INTTYPES_H)
|
||||||
check_include_file( "stddef.h" HAVE_STDDEF_H )
|
check_include_file("stddef.h" HAVE_STDDEF_H)
|
||||||
check_include_file( "stdlib.h" HAVE_STDLIB_H )
|
check_include_file("stdlib.h" HAVE_STDLIB_H)
|
||||||
check_include_file( "string.h" HAVE_STRING_H )
|
check_include_file("string.h" HAVE_STRING_H)
|
||||||
check_include_file( "memory.h" HAVE_MEMORY_H )
|
check_include_file("memory.h" HAVE_MEMORY_H)
|
||||||
check_include_file( "strings.h" HAVE_STRINGS_H )
|
check_include_file("strings.h" HAVE_STRINGS_H)
|
||||||
check_include_file( "stdint.h" HAVE_STDINT_H )
|
check_include_file("stdint.h" HAVE_STDINT_H)
|
||||||
check_include_file( "unistd.h" HAVE_UNISTD_H )
|
check_include_file("unistd.h" HAVE_UNISTD_H)
|
||||||
check_include_file( "sgidefs.h" HAVE_SGIDEFS_H )
|
check_include_file("sgidefs.h" HAVE_SGIDEFS_H)
|
||||||
check_include_file( "stdafx.h" HAVE_STDAFX_H )
|
check_include_file("stdafx.h" HAVE_STDAFX_H)
|
||||||
check_include_file( "elf.h" HAVE_ELF_H )
|
check_include_file("elf.h" HAVE_ELF_H)
|
||||||
check_include_file( "libelf.h" HAVE_LIBELF_H )
|
check_include_file("libelf.h" HAVE_LIBELF_H)
|
||||||
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
|
check_include_file("libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
|
||||||
check_include_file( "alloca.h" HAVE_ALLOCA_H )
|
check_include_file("alloca.h" HAVE_ALLOCA_H)
|
||||||
check_include_file( "elfaccess.h" HAVE_ELFACCESS_H)
|
check_include_file("elfaccess.h" HAVE_ELFACCESS_H)
|
||||||
check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H )
|
check_include_file("sys/elf_386.h" HAVE_SYS_ELF_386_H)
|
||||||
check_include_file( "sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
|
check_include_file("sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
|
||||||
check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
|
check_include_file("sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
|
||||||
check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H )
|
check_include_file("sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H)
|
||||||
set(VERSION 0.3.1)
|
set(VERSION 0.3.1)
|
||||||
set(PACKAGE_VERSION "\"${VERSION}\"")
|
set(PACKAGE_VERSION "\"${VERSION}\"")
|
||||||
configure_file(libdwarf/cmake/config.h.cmake config.h)
|
configure_file(libdwarf/cmake/config.h.cmake config.h)
|
||||||
file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
|
file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
|
||||||
add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
|
add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
|
||||||
set_target_properties(libdwarf PROPERTIES OUTPUT_NAME "libdwarf")
|
set_target_properties(libdwarf PROPERTIES OUTPUT_NAME "libdwarf")
|
||||||
|
|
||||||
if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H)
|
if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H)
|
||||||
target_link_libraries(libdwarf PUBLIC libelf)
|
target_link_libraries(libdwarf PUBLIC libelf)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR})
|
target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR})
|
||||||
file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT)
|
file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT)
|
||||||
string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
|
string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
|
||||||
|
@ -583,7 +593,7 @@ if(${BUILD_ADDR2LINE})
|
||||||
file(WRITE "addr2line/addr2line.c" "${ADDR2LINE_CONTENT}")
|
file(WRITE "addr2line/addr2line.c" "${ADDR2LINE_CONTENT}")
|
||||||
add_library(addr2line STATIC "addr2line/addr2line.c")
|
add_library(addr2line STATIC "addr2line/addr2line.c")
|
||||||
target_link_libraries(addr2line PUBLIC libdwarf dl z)
|
target_link_libraries(addr2line PUBLIC libdwarf dl z)
|
||||||
target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf" )
|
target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf")
|
||||||
endif(NOT ${TD_WINDOWS})
|
endif(NOT ${TD_WINDOWS})
|
||||||
endif(${BUILD_ADDR2LINE})
|
endif(${BUILD_ADDR2LINE})
|
||||||
|
|
||||||
|
@ -592,31 +602,41 @@ if(${BUILD_GEOS})
|
||||||
if(${TD_LINUX})
|
if(${TD_LINUX})
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
||||||
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
|
||||||
|
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
||||||
SET(CMAKE_BUILD_TYPE Release)
|
SET(CMAKE_BUILD_TYPE Release)
|
||||||
endif()
|
endif()
|
||||||
endif(${TD_LINUX})
|
endif(${TD_LINUX})
|
||||||
|
|
||||||
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
|
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
|
||||||
add_subdirectory(geos EXCLUDE_FROM_ALL)
|
add_subdirectory(geos EXCLUDE_FROM_ALL)
|
||||||
if (${TD_WINDOWS})
|
|
||||||
|
if(${TD_WINDOWS})
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
|
||||||
else ()
|
else()
|
||||||
unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
|
unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
|
||||||
endif(${TD_WINDOWS})
|
endif(${TD_WINDOWS})
|
||||||
|
|
||||||
target_include_directories(
|
target_include_directories(
|
||||||
geos_c
|
geos_c
|
||||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
|
||||||
)
|
)
|
||||||
endif(${BUILD_GEOS})
|
endif(${BUILD_GEOS})
|
||||||
|
|
||||||
if (${BUILD_PCRE2})
|
if(${BUILD_PCRE2})
|
||||||
add_subdirectory(pcre2 EXCLUDE_FROM_ALL)
|
add_subdirectory(pcre2 EXCLUDE_FROM_ALL)
|
||||||
endif(${BUILD_PCRE2})
|
endif(${BUILD_PCRE2})
|
||||||
|
|
||||||
|
if(${TD_LINUX} AND ${BUILD_WITH_S3})
|
||||||
|
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
|
||||||
|
endif()
|
||||||
|
|
||||||
# ================================================================================================
|
# ================================================================================================
|
||||||
# Build test
|
# Build test
|
||||||
# ================================================================================================
|
# ================================================================================================
|
||||||
|
MESSAGE("build with dependency tests: ${BUILD_DEPENDENCY_TESTS}")
|
||||||
|
|
||||||
if(${BUILD_DEPENDENCY_TESTS})
|
if(${BUILD_DEPENDENCY_TESTS})
|
||||||
add_subdirectory(test EXCLUDE_FROM_ALL)
|
add_subdirectory(test EXCLUDE_FROM_ALL)
|
||||||
endif(${BUILD_DEPENDENCY_TESTS})
|
endif(${BUILD_DEPENDENCY_TESTS})
|
||||||
|
|
|
@ -0,0 +1,73 @@
|
||||||
|
# lib_azure_sdk
|
||||||
|
set(AZURE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1")
|
||||||
|
set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk")
|
||||||
|
|
||||||
|
file(GLOB AZURE_SDK_SRC
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/credentials/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
|
||||||
|
)
|
||||||
|
|
||||||
|
file(GLOB AZURE_SDK_UNIFIED_SRC
|
||||||
|
${AZURE_SDK_SRC}
|
||||||
|
)
|
||||||
|
|
||||||
|
set(AZURE_SDK_INCLUDES
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/inc/"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/inc/"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/inc/"
|
||||||
|
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/"
|
||||||
|
)
|
||||||
|
|
||||||
|
add_library(_azure_sdk STATIC ${AZURE_SDK_UNIFIED_SRC})
|
||||||
|
target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER)
|
||||||
|
|
||||||
|
target_include_directories(
|
||||||
|
_azure_sdk
|
||||||
|
PUBLIC "$ENV{HOME}/.cos-local.2/include"
|
||||||
|
)
|
||||||
|
|
||||||
|
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
|
||||||
|
# find_library(CURL_LIBRARY curl)
|
||||||
|
# find_library(XML2_LIBRARY xml2)
|
||||||
|
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
|
||||||
|
# find_library(CoreFoundation_Library CoreFoundation)
|
||||||
|
# find_library(SystemConfiguration_Library SystemConfiguration)
|
||||||
|
target_link_libraries(
|
||||||
|
_azure_sdk
|
||||||
|
PRIVATE ${CURL_LIBRARY}
|
||||||
|
PRIVATE ${SSL_LIBRARY}
|
||||||
|
PRIVATE ${CRYPTO_LIBRARY}
|
||||||
|
PRIVATE ${XML2_LIBRARY}
|
||||||
|
|
||||||
|
# PRIVATE xml2
|
||||||
|
PRIVATE zlib
|
||||||
|
|
||||||
|
# PRIVATE ${CoreFoundation_Library}
|
||||||
|
# PRIVATE ${SystemConfiguration_Library}
|
||||||
|
)
|
||||||
|
|
||||||
|
# Originally, on Windows azure-core is built with bcrypt and crypt32 by default
|
||||||
|
if(TARGET OpenSSL::SSL)
|
||||||
|
target_link_libraries(_azure_sdk PRIVATE OpenSSL::Crypto OpenSSL::SSL)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
# Originally, on Windows azure-core is built with winhttp by default
|
||||||
|
if(TARGET td_contrib::curl)
|
||||||
|
target_link_libraries(_azure_sdk PRIVATE td_contrib::curl)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
target_include_directories(_azure_sdk SYSTEM BEFORE PUBLIC ${AZURE_SDK_INCLUDES})
|
||||||
|
add_library(td_contrib::azure_sdk ALIAS _azure_sdk)
|
|
@ -28,5 +28,6 @@ if(${BUILD_WITH_TRAFT})
|
||||||
# add_subdirectory(traft)
|
# add_subdirectory(traft)
|
||||||
endif(${BUILD_WITH_TRAFT})
|
endif(${BUILD_WITH_TRAFT})
|
||||||
|
|
||||||
|
add_subdirectory(azure)
|
||||||
add_subdirectory(tdev)
|
add_subdirectory(tdev)
|
||||||
add_subdirectory(lz4)
|
add_subdirectory(lz4)
|
||||||
|
|
|
@ -0,0 +1,27 @@
|
||||||
|
set(CMAKE_CXX_STANDARD 14)
|
||||||
|
set(CMAKE_CXX_STANDARD_REQUIRED True)
|
||||||
|
|
||||||
|
add_executable(
|
||||||
|
azure-test
|
||||||
|
main.cpp
|
||||||
|
)
|
||||||
|
|
||||||
|
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
|
||||||
|
# find_library(XML2_LIBRARY xml2)
|
||||||
|
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||||
|
|
||||||
|
# find_library(CoreFoundation_Library CoreFoundation)
|
||||||
|
# find_library(SystemConfiguration_Library SystemConfiguration)
|
||||||
|
target_link_libraries(
|
||||||
|
azure-test
|
||||||
|
PRIVATE _azure_sdk
|
||||||
|
PRIVATE ${CURL_LIBRARY}
|
||||||
|
PRIVATE ${XML2_LIBRARY}
|
||||||
|
PRIVATE ${SSL_LIBRARY}
|
||||||
|
PRIVATE ${CRYPTO_LIBRARY}
|
||||||
|
PRIVATE dl
|
||||||
|
PRIVATE pthread
|
||||||
|
)
|
|
@ -0,0 +1,99 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
|
||||||
|
// Include the necessary SDK headers
|
||||||
|
#include <azure/core.hpp>
|
||||||
|
#include <azure/storage/blobs.hpp>
|
||||||
|
|
||||||
|
// Add appropriate using namespace directives
|
||||||
|
using namespace Azure::Storage;
|
||||||
|
using namespace Azure::Storage::Blobs;
|
||||||
|
|
||||||
|
// Secrets should be stored & retrieved from secure locations such as Azure::KeyVault. For
|
||||||
|
// convenience and brevity of samples, the secrets are retrieved from environment variables.
|
||||||
|
|
||||||
|
std::string GetEndpointUrl() {
|
||||||
|
// return std::getenv("AZURE_STORAGE_ACCOUNT_URL");
|
||||||
|
std::string accountId = getenv("ablob_account_id");
|
||||||
|
if (accountId.empty()) {
|
||||||
|
return accountId;
|
||||||
|
}
|
||||||
|
|
||||||
|
return accountId + ".blob.core.windows.net";
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string GetAccountName() {
|
||||||
|
// return std::getenv("AZURE_STORAGE_ACCOUNT_NAME");
|
||||||
|
return getenv("ablob_account_id");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string GetAccountKey() {
|
||||||
|
// return std::getenv("AZURE_STORAGE_ACCOUNT_KEY");
|
||||||
|
|
||||||
|
return getenv("ablob_account_secret");
|
||||||
|
}
|
||||||
|
|
||||||
|
int main() {
|
||||||
|
std::string endpointUrl = GetEndpointUrl();
|
||||||
|
std::string accountName = GetAccountName();
|
||||||
|
std::string accountKey = GetAccountKey();
|
||||||
|
|
||||||
|
try {
|
||||||
|
auto sharedKeyCredential = std::make_shared<StorageSharedKeyCredential>(accountName, accountKey);
|
||||||
|
|
||||||
|
std::string accountURL = "https://fd2d01cd892f844eeaa2273.blob.core.windows.net";
|
||||||
|
BlobServiceClient blobServiceClient(accountURL, sharedKeyCredential);
|
||||||
|
|
||||||
|
std::string containerName = "myblobcontainer";
|
||||||
|
// auto containerClient = blobServiceClient.GetBlobContainerClient("myblobcontainer");
|
||||||
|
auto containerClient = blobServiceClient.GetBlobContainerClient("td-test");
|
||||||
|
|
||||||
|
// Create the container if it does not exist
|
||||||
|
std::cout << "Creating container: " << containerName << std::endl;
|
||||||
|
// containerClient.CreateIfNotExists();
|
||||||
|
|
||||||
|
std::string blobName = "blob.txt";
|
||||||
|
uint8_t blobContent[] = "Hello Azure!";
|
||||||
|
// Create the block blob client
|
||||||
|
BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName);
|
||||||
|
|
||||||
|
// Upload the blob
|
||||||
|
std::cout << "Uploading blob: " << blobName << std::endl;
|
||||||
|
blobClient.UploadFrom(blobContent, sizeof(blobContent));
|
||||||
|
/*
|
||||||
|
auto blockBlobClient = BlockBlobClient(endpointUrl, sharedKeyCredential);
|
||||||
|
|
||||||
|
// Create some data to upload into the blob.
|
||||||
|
std::vector<uint8_t> data = {1, 2, 3, 4};
|
||||||
|
Azure::Core::IO::MemoryBodyStream stream(data);
|
||||||
|
|
||||||
|
Azure::Response<Models::UploadBlockBlobResult> response = blockBlobClient.Upload(stream);
|
||||||
|
|
||||||
|
Models::UploadBlockBlobResult model = response.Value;
|
||||||
|
std::cout << "Last modified date of uploaded blob: " << model.LastModified.ToString()
|
||||||
|
<< std::endl;
|
||||||
|
*/
|
||||||
|
} catch (const Azure::Core::RequestFailedException& e) {
|
||||||
|
std::cout << "Status Code: " << static_cast<int>(e.StatusCode) << ", Reason Phrase: " << e.ReasonPhrase
|
||||||
|
<< std::endl;
|
||||||
|
std::cout << e.what() << std::endl;
|
||||||
|
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
|
@ -80,7 +80,7 @@ These pseudocolumns occur after the aggregation clause.
|
||||||
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
||||||
|
|
||||||
1. NONE: No fill (the default fill mode)
|
1. NONE: No fill (the default fill mode)
|
||||||
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`.
|
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that only exprs in select list that contains normal cols need to specify fill value, exprs like `_wstart`, `_wend`, `_wduration`, `_wstart + 1a`, `now`, `1+1`, partition keys like tbname(when using partition by) don't need to specify fill value. But exprs like `timediff(last(ts), _wstart)` need to specify fill value.
|
||||||
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
|
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
|
||||||
4. NULL: Fill with NULL, `FILL(NULL)`
|
4. NULL: Fill with NULL, `FILL(NULL)`
|
||||||
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
|
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||||
|
|
|
@ -19,7 +19,7 @@ After TDengine server or client installation, `taos.h` is located at
|
||||||
The dynamic libraries for the TDengine client driver are located in.
|
The dynamic libraries for the TDengine client driver are located in.
|
||||||
|
|
||||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||||
- Windows: `C:\TDengine\taos.dll`
|
- Windows: `C:\TDengine\driver\taos.dll`
|
||||||
- macOS: `/usr/local/lib/libtaos.dylib`
|
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||||
|
|
||||||
## Supported platforms
|
## Supported platforms
|
||||||
|
|
|
@ -20,6 +20,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.3.3.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.3.3.0" />
|
||||||
|
|
||||||
## 3.3.2.0
|
## 3.3.2.0
|
||||||
|
|
||||||
<Release type="tdengine" version="3.3.2.0" />
|
<Release type="tdengine" version="3.3.2.0" />
|
||||||
|
|
|
@ -19,7 +19,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.3.3</version>
|
<version>3.4.0</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.locationtech.jts</groupId>
|
<groupId>org.locationtech.jts</groupId>
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
|
|
||||||
## TDengine Spring JDBC Template Demo
|
## TDengine Spring JDBC Template Demo
|
||||||
|
|
||||||
`Spring JDBC Template` 简化了原生 JDBC Connection 获取释放等操作,使得操作数据库更加方便。
|
`Spring JDBC Template` simplifies the operations of acquiring and releasing native JDBC Connections, making database operations more convenient.
|
||||||
|
|
||||||
### 配置
|
### Configuration
|
||||||
|
|
||||||
修改 `src/main/resources/applicationContext.xml` 文件中 TDengine 的配置信息:
|
Modify the TDengine configuration in the `src/main/resources/applicationContext.xml` file:
|
||||||
|
|
||||||
```xml
|
```xml
|
||||||
<bean id="dataSource" class="org.springframework.jdbc.datasource.DriverManagerDataSource">
|
<bean id="dataSource" class="org.springframework.jdbc.datasource.DriverManagerDataSource">
|
||||||
|
@ -20,13 +20,15 @@
|
||||||
</bean>
|
</bean>
|
||||||
```
|
```
|
||||||
|
|
||||||
### 打包运行
|
### Package and run
|
||||||
|
|
||||||
|
Navigate to the `TDengine/tests/examples/JDBC/SpringJdbcTemplate` directory and execute the following commands to generate an executable jar file.
|
||||||
|
|
||||||
进入 `TDengine/tests/examples/JDBC/SpringJdbcTemplate` 目录下,执行以下命令可以生成可执行 jar 包。
|
|
||||||
```shell
|
```shell
|
||||||
mvn clean package
|
mvn clean package
|
||||||
```
|
```
|
||||||
打包成功之后,进入 `target/` 目录下,执行以下命令就可运行测试:
|
After successfully packaging, navigate to the `target/` directory and execute the following commands to run the tests:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
java -jar target/SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar
|
java -jar target/SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar
|
||||||
```
|
```
|
|
@ -18,7 +18,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.3.3</version>
|
<version>3.4.0</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- druid -->
|
<!-- druid -->
|
||||||
<dependency>
|
<dependency>
|
||||||
|
|
|
@ -1,18 +1,18 @@
|
||||||
### 设置###
|
### Settings###
|
||||||
log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
|
log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
|
||||||
### 输出信息到控制抬 ###
|
### Output information to the console ###
|
||||||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||||
log4j.appender.stdout.Target=System.out
|
log4j.appender.stdout.Target=System.out
|
||||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||||
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
|
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
|
||||||
### 输出DEBUG 级别以上的日志到=logs/debug.log
|
### Output logs of DEBUG level and above to logs/debug.log
|
||||||
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
|
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
|
||||||
log4j.appender.DebugLog.File=logs/debug.log
|
log4j.appender.DebugLog.File=logs/debug.log
|
||||||
log4j.appender.DebugLog.Append=true
|
log4j.appender.DebugLog.Append=true
|
||||||
log4j.appender.DebugLog.Threshold=DEBUG
|
log4j.appender.DebugLog.Threshold=DEBUG
|
||||||
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
|
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
|
||||||
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||||
### 输出ERROR 级别以上的日志到=logs/error.log
|
### Output logs of ERROR level and above to logs/error.log
|
||||||
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
|
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
|
||||||
log4j.appender.ErrorLog.File=logs/error.log
|
log4j.appender.ErrorLog.File=logs/error.log
|
||||||
log4j.appender.ErrorLog.Append=true
|
log4j.appender.ErrorLog.Append=true
|
||||||
|
|
|
@ -1,27 +1,28 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<something-else-entirely>
|
<something-else-entirely>
|
||||||
<proxool>
|
<proxool>
|
||||||
|
<!-- Alias for the data source -->
|
||||||
<alias>ds</alias>
|
<alias>ds</alias>
|
||||||
<!--数据源的别名-->
|
<!-- URL connection string -->
|
||||||
<driver-url>jdbc:TAOS-RS://127.0.0.1:6041/log</driver-url>
|
<driver-url>jdbc:TAOS-RS://127.0.0.1:6041/log</driver-url>
|
||||||
<!--url连接串-->
|
<!-- Driver class -->
|
||||||
<driver-class>com.taosdata.jdbc.rs.RestfulDriver</driver-class>
|
<driver-class>com.taosdata.jdbc.rs.RestfulDriver</driver-class>
|
||||||
<!--驱动类-->
|
|
||||||
<driver-properties>
|
<driver-properties>
|
||||||
<property name="user" value="root"/>
|
<property name="user" value="root"/>
|
||||||
<property name="password" value="taosdata"/>
|
<property name="password" value="taosdata"/>
|
||||||
</driver-properties>
|
</driver-properties>
|
||||||
<!--最大连接数(默认5个),超过了这个连接数,再有请求时,就排在队列中等候,最大的等待请求数由maximum-new-connections决定 -->
|
<!-- Maximum connection count (default is 5). If this number is exceeded, new requests will be queued. The maximum number of queued requests is determined by maximum-new-connections -->
|
||||||
<maximum-connection-count>100</maximum-connection-count>
|
<maximum-connection-count>100</maximum-connection-count>
|
||||||
<!-- 定义连接池中的最大连接数 -->
|
<!-- Defines the maximum number of connections in the connection pool -->
|
||||||
<maximum-active-time>100</maximum-active-time>
|
<maximum-active-time>100</maximum-active-time>
|
||||||
<!--最少保持的空闲连接数(默认2个)-->
|
<!-- Minimum number of idle connections to maintain (default is 2) -->
|
||||||
<prototype-count>1</prototype-count>
|
<prototype-count>1</prototype-count>
|
||||||
<!--最小连接数(默认2个)-->
|
<!-- Minimum connection count (default is 2) -->
|
||||||
<minimum-connection-count>5</minimum-connection-count>
|
<minimum-connection-count>5</minimum-connection-count>
|
||||||
<!--proxool自动侦察各个连接状态的时间间隔(毫秒),侦察到空闲的连接就马上回收,超时的销毁 默认30秒-->
|
<!-- Interval (in milliseconds) for Proxool to automatically check the status of each connection. Idle connections are immediately reclaimed, and timed-out connections are destroyed. Default is 30 seconds -->
|
||||||
<house-keeping-sleep-time>30000</house-keeping-sleep-time>
|
<house-keeping-sleep-time>30000</house-keeping-sleep-time>
|
||||||
<!--用于保持连接的测试语句 -->
|
<!-- Test statement used to maintain the connection -->
|
||||||
<house-keeping-test-sql>select server_version()</house-keeping-test-sql>
|
<house-keeping-test-sql>select server_version()</house-keeping-test-sql>
|
||||||
</proxool>
|
</proxool>
|
||||||
</something-else-entirely>
|
</something-else-entirely>
|
|
@ -17,7 +17,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.3.3</version>
|
<version>3.4.0</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
|
|
|
@ -35,17 +35,18 @@ public class Worker implements Runnable {
|
||||||
public void run() {
|
public void run() {
|
||||||
while (!Thread.interrupted()) {
|
while (!Thread.interrupted()) {
|
||||||
try {
|
try {
|
||||||
// 控制请求频率
|
// Control request rate
|
||||||
if (semaphore.tryAcquire()) {
|
if (semaphore.tryAcquire()) {
|
||||||
ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(sleepTime));
|
ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(sleepTime));
|
||||||
pool.submit(() -> {
|
pool.submit(() -> {
|
||||||
RateLimiter limiter = RateLimiter.create(rate);
|
RateLimiter limiter = RateLimiter.create(rate);
|
||||||
try {
|
try {
|
||||||
for (ConsumerRecord<Bean> record : records) {
|
for (ConsumerRecord<Bean> record : records) {
|
||||||
// 流量控制
|
// Traffic control
|
||||||
limiter.acquire();
|
limiter.acquire();
|
||||||
// 业务处理数据
|
// Business data processing
|
||||||
System.out.println("[" + LocalDateTime.now() + "] Thread id:" + Thread.currentThread().getId() + " -> " + record.value());
|
System.out.println("[" + LocalDateTime.now() + "] Thread id:"
|
||||||
|
+ Thread.currentThread().getId() + " -> " + record.value());
|
||||||
}
|
}
|
||||||
} finally {
|
} finally {
|
||||||
semaphore.release();
|
semaphore.release();
|
||||||
|
|
|
@ -1,13 +1,13 @@
|
||||||
# 使用说明
|
# Instructions
|
||||||
|
|
||||||
## 创建使用db
|
## Create and use the database
|
||||||
```shell
|
```shell
|
||||||
$ taos
|
$ taos
|
||||||
|
|
||||||
> create database mp_test
|
> create database mp_test
|
||||||
```
|
```
|
||||||
|
|
||||||
## 执行测试用例
|
## Execute test cases
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ mvn clean test
|
$ mvn clean test
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
## TDengine SpringBoot + Mybatis Demo
|
## TDengine SpringBoot + Mybatis Demo
|
||||||
|
|
||||||
## 需要提前创建 test 数据库
|
## Need to create a test database in advance
|
||||||
|
|
||||||
```
|
```
|
||||||
$ taos -s 'create database if not exists test'
|
$ taos -s 'create database if not exists test'
|
||||||
|
@ -8,7 +8,7 @@ $ taos -s 'create database if not exists test'
|
||||||
$ curl http://localhost:8080/weather/init
|
$ curl http://localhost:8080/weather/init
|
||||||
```
|
```
|
||||||
|
|
||||||
### 配置 application.properties
|
### Configure application.properties
|
||||||
```properties
|
```properties
|
||||||
# datasource config
|
# datasource config
|
||||||
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
|
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
|
||||||
|
@ -38,9 +38,9 @@ mybatis.mapper-locations=classpath:mapper/*.xml
|
||||||
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
||||||
```
|
```
|
||||||
|
|
||||||
### 主要功能
|
### Main functions
|
||||||
|
|
||||||
* 创建数据库和表
|
* Create databases and tables
|
||||||
```xml
|
```xml
|
||||||
<!-- weatherMapper.xml -->
|
<!-- weatherMapper.xml -->
|
||||||
<update id="createDB" >
|
<update id="createDB" >
|
||||||
|
@ -52,14 +52,14 @@ logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
||||||
</update>
|
</update>
|
||||||
```
|
```
|
||||||
|
|
||||||
* 插入单条记录
|
* Insert a single record
|
||||||
```xml
|
```xml
|
||||||
<!-- weatherMapper.xml -->
|
<!-- weatherMapper.xml -->
|
||||||
<insert id="insert" parameterType="Weather" >
|
<insert id="insert" parameterType="Weather" >
|
||||||
insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT})
|
insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT})
|
||||||
</insert>
|
</insert>
|
||||||
```
|
```
|
||||||
* 插入多条记录
|
* Insert multiple records
|
||||||
```xml
|
```xml
|
||||||
<!-- weatherMapper.xml -->
|
<!-- weatherMapper.xml -->
|
||||||
<insert id="batchInsert" parameterType="java.util.List" >
|
<insert id="batchInsert" parameterType="java.util.List" >
|
||||||
|
@ -69,7 +69,7 @@ logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
||||||
</foreach>
|
</foreach>
|
||||||
</insert>
|
</insert>
|
||||||
```
|
```
|
||||||
* 分页查询
|
* Pagination query
|
||||||
```xml
|
```xml
|
||||||
<!-- weatherMapper.xml -->
|
<!-- weatherMapper.xml -->
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
|
|
@ -67,7 +67,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.3.3</version>
|
<version>3.4.0</version>
|
||||||
<!-- <scope>system</scope>-->
|
<!-- <scope>system</scope>-->
|
||||||
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
|
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
|
@ -1,11 +1,14 @@
|
||||||
```
|
```
|
||||||
cd tests/examples/JDBC/taosdemo
|
cd tests/examples/JDBC/taosdemo
|
||||||
mvn clean package -Dmaven.test.skip=true
|
mvn clean package -Dmaven.test.skip=true
|
||||||
# 先建表,再插入的
|
# Create tables first, then insert data
|
||||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||||
# 不建表,直接插入的
|
# Insert data directly without creating tables
|
||||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||||
```
|
```
|
||||||
|
|
||||||
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||||
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。
|
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。
|
||||||
|
|
||||||
|
|
||||||
|
If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library.
|
||||||
|
|
|
@ -24,14 +24,14 @@ public class TaosDemoApplication {
|
||||||
private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);
|
private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);
|
||||||
|
|
||||||
public static void main(String[] args) throws IOException {
|
public static void main(String[] args) throws IOException {
|
||||||
// 读配置参数
|
// Read configuration parameters
|
||||||
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
|
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
|
||||||
boolean isHelp = Arrays.asList(args).contains("--help");
|
boolean isHelp = Arrays.asList(args).contains("--help");
|
||||||
if (isHelp || config.host == null || config.host.isEmpty()) {
|
if (isHelp || config.host == null || config.host.isEmpty()) {
|
||||||
JdbcTaosdemoConfig.printHelp();
|
JdbcTaosdemoConfig.printHelp();
|
||||||
System.exit(0);
|
System.exit(0);
|
||||||
}
|
}
|
||||||
// 初始化
|
//
|
||||||
final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user,
|
final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user,
|
||||||
config.password);
|
config.password);
|
||||||
if (config.executeSql != null && !config.executeSql.isEmpty()
|
if (config.executeSql != null && !config.executeSql.isEmpty()
|
||||||
|
@ -50,7 +50,7 @@ public class TaosDemoApplication {
|
||||||
final SuperTableService superTableService = new SuperTableService(dataSource);
|
final SuperTableService superTableService = new SuperTableService(dataSource);
|
||||||
final SubTableService subTableService = new SubTableService(dataSource);
|
final SubTableService subTableService = new SubTableService(dataSource);
|
||||||
|
|
||||||
// 创建数据库
|
// create database
|
||||||
long start = System.currentTimeMillis();
|
long start = System.currentTimeMillis();
|
||||||
Map<String, String> databaseParam = new HashMap<>();
|
Map<String, String> databaseParam = new HashMap<>();
|
||||||
databaseParam.put("database", config.database);
|
databaseParam.put("database", config.database);
|
||||||
|
@ -81,13 +81,13 @@ public class TaosDemoApplication {
|
||||||
config.prefixOfFields, config.numOfTags, config.prefixOfTags);
|
config.prefixOfFields, config.numOfTags, config.prefixOfTags);
|
||||||
}
|
}
|
||||||
/**********************************************************************************/
|
/**********************************************************************************/
|
||||||
// 建表
|
// create table
|
||||||
start = System.currentTimeMillis();
|
start = System.currentTimeMillis();
|
||||||
if (config.doCreateTable) {
|
if (config.doCreateTable) {
|
||||||
superTableService.drop(superTableMeta.getDatabase(), superTableMeta.getName());
|
superTableService.drop(superTableMeta.getDatabase(), superTableMeta.getName());
|
||||||
superTableService.create(superTableMeta);
|
superTableService.create(superTableMeta);
|
||||||
if (!config.autoCreateTable) {
|
if (!config.autoCreateTable) {
|
||||||
// 批量建子表
|
// create sub tables in batch
|
||||||
subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable,
|
subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable,
|
||||||
config.numOfThreadsForCreate);
|
config.numOfThreadsForCreate);
|
||||||
}
|
}
|
||||||
|
@ -95,7 +95,7 @@ public class TaosDemoApplication {
|
||||||
end = System.currentTimeMillis();
|
end = System.currentTimeMillis();
|
||||||
logger.info(">>> create table time cost : " + (end - start) + " ms.");
|
logger.info(">>> create table time cost : " + (end - start) + " ms.");
|
||||||
/**********************************************************************************/
|
/**********************************************************************************/
|
||||||
// 插入
|
// insert data
|
||||||
long tableSize = config.numOfTables;
|
long tableSize = config.numOfTables;
|
||||||
int threadSize = config.numOfThreadsForInsert;
|
int threadSize = config.numOfThreadsForInsert;
|
||||||
long startTime = getProperStartTime(config.startTime, config.days);
|
long startTime = getProperStartTime(config.startTime, config.days);
|
||||||
|
@ -111,10 +111,9 @@ public class TaosDemoApplication {
|
||||||
end = System.currentTimeMillis();
|
end = System.currentTimeMillis();
|
||||||
logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms");
|
logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms");
|
||||||
/**********************************************************************************/
|
/**********************************************************************************/
|
||||||
// 查询
|
|
||||||
|
|
||||||
/**********************************************************************************/
|
/**********************************************************************************/
|
||||||
// 删除表
|
// drop table
|
||||||
if (config.dropTable) {
|
if (config.dropTable) {
|
||||||
superTableService.drop(config.database, config.superTable);
|
superTableService.drop(config.database, config.superTable);
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,21 +9,22 @@ import java.util.List;
|
||||||
@Repository
|
@Repository
|
||||||
public interface SubTableMapper {
|
public interface SubTableMapper {
|
||||||
|
|
||||||
// 创建:子表
|
// Create: SubTable
|
||||||
void createUsingSuperTable(SubTableMeta subTableMeta);
|
void createUsingSuperTable(SubTableMeta subTableMeta);
|
||||||
|
|
||||||
// 插入:一张子表多个values
|
// Insert: Multiple records into one SubTable
|
||||||
int insertOneTableMultiValues(SubTableValue subTableValue);
|
int insertOneTableMultiValues(SubTableValue subTableValue);
|
||||||
|
|
||||||
// 插入:一张子表多个values, 自动建表
|
// Insert: Multiple records into one SubTable, auto create SubTables
|
||||||
int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue);
|
int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue);
|
||||||
|
|
||||||
// 插入:多张表多个values
|
// Insert: Multiple records into multiple SubTable
|
||||||
int insertMultiTableMultiValues(List<SubTableValue> tables);
|
int insertMultiTableMultiValues(List<SubTableValue> tables);
|
||||||
|
|
||||||
// 插入:多张表多个values,自动建表
|
// Insert: Multiple records into multiple SubTable, auto create SubTables
|
||||||
int insertMultiTableMultiValuesUsingSuperTable(List<SubTableValue> tables);
|
int insertMultiTableMultiValuesUsingSuperTable(List<SubTableValue> tables);
|
||||||
|
|
||||||
//<!-- TODO:修改子表标签值 alter table ${tablename} set tag tagName=newTagValue-->
|
// <!-- TODO: Modify SubTable tag value: alter table ${tablename} set tag
|
||||||
|
// tagName=newTagValue-->
|
||||||
|
|
||||||
}
|
}
|
|
@ -6,24 +6,26 @@ import org.springframework.stereotype.Repository;
|
||||||
@Repository
|
@Repository
|
||||||
public interface SuperTableMapper {
|
public interface SuperTableMapper {
|
||||||
|
|
||||||
// 创建超级表 create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...)
|
// Create super table: create table if not exists xxx.xxx (f1 type1, f2 type2,
|
||||||
|
// ... ) tags( t1 type1, t2 type2 ...)
|
||||||
void createSuperTable(SuperTableMeta tableMetadata);
|
void createSuperTable(SuperTableMeta tableMetadata);
|
||||||
|
|
||||||
// 删除超级表 drop table if exists xxx;
|
// Drop super table: drop table if exists xxx;
|
||||||
void dropSuperTable(String database, String name);
|
void dropSuperTable(String database, String name);
|
||||||
|
|
||||||
//<!-- TODO:查询所有超级表信息 show stables -->
|
// <!-- TODO: Query all super table information show stables -->
|
||||||
|
|
||||||
//<!-- TODO:查询表结构 describe stable -->
|
// <!-- TODO: Query table structure describe stable -->
|
||||||
|
|
||||||
//<!-- TODO:增加列 alter table ${tablename} add column fieldName dataType -->
|
// <!-- TODO: Add column alter table ${tablename} add column fieldName dataType
|
||||||
|
// -->
|
||||||
|
|
||||||
//<!-- TODO:删除列 alter table ${tablename} drop column fieldName -->
|
// <!-- TODO: Drop column alter table ${tablename} drop column fieldName -->
|
||||||
|
|
||||||
//<!-- TODO:添加标签 alter table ${tablename} add tag new_tagName tag_type -->
|
// <!-- TODO: Add tag alter table ${tablename} add tag new_tagName tag_type -->
|
||||||
|
|
||||||
//<!-- TODO:删除标签 alter table ${tablename} drop tag_name -->
|
// <!-- TODO: Drop tag alter table ${tablename} drop tag_name -->
|
||||||
|
|
||||||
//<!-- TODO:修改标签名 alter table ${tablename} change tag old_tagName new_tagName -->
|
|
||||||
|
|
||||||
|
// <!-- TODO: Change tag name alter table ${tablename} change tag old_tagName
|
||||||
|
// new_tagName -->
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,19 +9,18 @@ import java.util.List;
|
||||||
@Repository
|
@Repository
|
||||||
public interface TableMapper {
|
public interface TableMapper {
|
||||||
|
|
||||||
// 创建:普通表
|
// Create: Normal table
|
||||||
void create(TableMeta tableMeta);
|
void create(TableMeta tableMeta);
|
||||||
|
|
||||||
// 插入:一张表多个value
|
// Insert: Multiple records into one table
|
||||||
int insertOneTableMultiValues(TableValue values);
|
int insertOneTableMultiValues(TableValue values);
|
||||||
|
|
||||||
// 插入: 一张表多个value,指定的列
|
// Insert: Multiple records into one table, specified columns
|
||||||
int insertOneTableMultiValuesWithColumns(TableValue values);
|
int insertOneTableMultiValuesWithColumns(TableValue values);
|
||||||
|
|
||||||
// 插入:多个表多个value
|
// Insert: Multiple records into multiple tables
|
||||||
int insertMultiTableMultiValues(List<TableValue> tables);
|
int insertMultiTableMultiValues(List<TableValue> tables);
|
||||||
|
|
||||||
// 插入:多个表多个value, 指定的列
|
// Insert: Multiple records into multiple tables, specified columns
|
||||||
int insertMultiTableMultiValuesWithColumns(List<TableValue> tables);
|
int insertMultiTableMultiValuesWithColumns(List<TableValue> tables);
|
||||||
|
|
||||||
}
|
}
|
|
@ -14,12 +14,12 @@ public class DatabaseService {
|
||||||
this.databaseMapper = new DatabaseMapperImpl(dataSource);
|
this.databaseMapper = new DatabaseMapperImpl(dataSource);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 建库,指定 name
|
// Create database with specified name
|
||||||
public void createDatabase(String database) {
|
public void createDatabase(String database) {
|
||||||
databaseMapper.createDatabase(database);
|
databaseMapper.createDatabase(database);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 建库,指定参数 keep,days,replica等
|
// Create database with specified parameters such as keep, days, replica, etc.
|
||||||
public void createDatabase(Map<String, String> map) {
|
public void createDatabase(Map<String, String> map) {
|
||||||
if (map.isEmpty())
|
if (map.isEmpty())
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -27,7 +27,8 @@ public class SubTableService extends AbstractService {
|
||||||
this.mapper = new SubTableMapperImpl(datasource);
|
this.mapper = new SubTableMapperImpl(datasource);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable, int numOfThreadsForCreate) {
|
public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable,
|
||||||
|
int numOfThreadsForCreate) {
|
||||||
ExecutorService executor = Executors.newFixedThreadPool(numOfThreadsForCreate);
|
ExecutorService executor = Executors.newFixedThreadPool(numOfThreadsForCreate);
|
||||||
for (long i = 0; i < numOfTables; i++) {
|
for (long i = 0; i < numOfTables; i++) {
|
||||||
long tableIndex = i;
|
long tableIndex = i;
|
||||||
|
@ -35,54 +36,58 @@ public class SubTableService extends AbstractService {
|
||||||
}
|
}
|
||||||
executor.shutdown();
|
executor.shutdown();
|
||||||
try {
|
try {
|
||||||
executor.awaitTermination(Long.MAX_VALUE,TimeUnit.NANOSECONDS);
|
executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
|
||||||
} catch (InterruptedException e) {
|
} catch (InterruptedException e) {
|
||||||
e.printStackTrace();
|
e.printStackTrace();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public void createSubTable(SuperTableMeta superTableMeta, String tableName) {
|
public void createSubTable(SuperTableMeta superTableMeta, String tableName) {
|
||||||
// 构造数据
|
// Construct data
|
||||||
SubTableMeta meta = SubTableMetaGenerator.generate(superTableMeta, tableName);
|
SubTableMeta meta = SubTableMetaGenerator.generate(superTableMeta, tableName);
|
||||||
createSubTable(meta);
|
createSubTable(meta);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 创建一张子表,可以指定database,supertable,tablename,tag值
|
// Create a sub-table, specifying database, super table, table name, and tag
|
||||||
|
// values
|
||||||
public void createSubTable(SubTableMeta subTableMeta) {
|
public void createSubTable(SubTableMeta subTableMeta) {
|
||||||
mapper.createUsingSuperTable(subTableMeta);
|
mapper.createUsingSuperTable(subTableMeta);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*************************************************************************************************************************/
|
/*************************************************************************************************************************/
|
||||||
// 插入:多线程,多表
|
// Insert: Multi-threaded, multiple tables
|
||||||
public int insert(List<SubTableValue> subTableValues, int threadSize, int frequency) {
|
public int insert(List<SubTableValue> subTableValues, int threadSize, int frequency) {
|
||||||
ExecutorService executor = Executors.newFixedThreadPool(threadSize);
|
ExecutorService executor = Executors.newFixedThreadPool(threadSize);
|
||||||
Future<Integer> future = executor.submit(() -> insert(subTableValues));
|
Future<Integer> future = executor.submit(() -> insert(subTableValues));
|
||||||
executor.shutdown();
|
executor.shutdown();
|
||||||
//TODO:frequency
|
// TODO:frequency
|
||||||
return getAffectRows(future);
|
return getAffectRows(future);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 插入:单表,insert into xxx values(),()...
|
// Insert: Single table, insert into xxx values(),()...
|
||||||
public int insert(SubTableValue subTableValue) {
|
public int insert(SubTableValue subTableValue) {
|
||||||
return mapper.insertOneTableMultiValues(subTableValue);
|
return mapper.insertOneTableMultiValues(subTableValue);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 插入: 多表,insert into xxx values(),()... xxx values(),()...
|
// Insert: Multiple tables, insert into xxx values(),()... xxx values(),()...
|
||||||
public int insert(List<SubTableValue> subTableValues) {
|
public int insert(List<SubTableValue> subTableValues) {
|
||||||
return mapper.insertMultiTableMultiValues(subTableValues);
|
return mapper.insertMultiTableMultiValues(subTableValues);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 插入:单表,自动建表, insert into xxx using xxx tags(...) values(),()...
|
// Insert: Single table, auto-create table, insert into xxx using xxx tags(...)
|
||||||
|
// values(),()...
|
||||||
public int insertAutoCreateTable(SubTableValue subTableValue) {
|
public int insertAutoCreateTable(SubTableValue subTableValue) {
|
||||||
return mapper.insertOneTableMultiValuesUsingSuperTable(subTableValue);
|
return mapper.insertOneTableMultiValuesUsingSuperTable(subTableValue);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 插入:多表,自动建表, insert into xxx using XXX tags(...) values(),()... xxx using XXX tags(...) values(),()...
|
// Insert: Multiple tables, auto-create tables, insert into xxx using XXX
|
||||||
|
// tags(...) values(),()... xxx using XXX tags(...) values(),()...
|
||||||
public int insertAutoCreateTable(List<SubTableValue> subTableValues) {
|
public int insertAutoCreateTable(List<SubTableValue> subTableValues) {
|
||||||
return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues);
|
return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues);
|
||||||
}
|
}
|
||||||
|
|
||||||
public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime, long gap, JdbcTaosdemoConfig config) {
|
public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime,
|
||||||
|
long gap, JdbcTaosdemoConfig config) {
|
||||||
List<FutureTask> taskList = new ArrayList<>();
|
List<FutureTask> taskList = new ArrayList<>();
|
||||||
List<Thread> threads = IntStream.range(0, threadSize)
|
List<Thread> threads = IntStream.range(0, threadSize)
|
||||||
.mapToObj(i -> {
|
.mapToObj(i -> {
|
||||||
|
@ -94,8 +99,7 @@ public class SubTableService extends AbstractService {
|
||||||
startTime, config.timeGap,
|
startTime, config.timeGap,
|
||||||
config.numOfRowsPerTable, config.numOfTablesPerSQL, config.numOfValuesPerSQL,
|
config.numOfRowsPerTable, config.numOfTablesPerSQL, config.numOfValuesPerSQL,
|
||||||
config.order, config.rate, config.range,
|
config.order, config.rate, config.range,
|
||||||
config.prefixOfTable, config.autoCreateTable)
|
config.prefixOfTable, config.autoCreateTable));
|
||||||
);
|
|
||||||
taskList.add(task);
|
taskList.add(task);
|
||||||
return new Thread(task, "InsertThread-" + i);
|
return new Thread(task, "InsertThread-" + i);
|
||||||
}).collect(Collectors.toList());
|
}).collect(Collectors.toList());
|
||||||
|
@ -159,7 +163,6 @@ public class SubTableService extends AbstractService {
|
||||||
this.autoCreateTable = autoCreateTable;
|
this.autoCreateTable = autoCreateTable;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Integer call() {
|
public Integer call() {
|
||||||
|
|
||||||
|
@ -171,23 +174,27 @@ public class SubTableService extends AbstractService {
|
||||||
|
|
||||||
int affectRows = 0;
|
int affectRows = 0;
|
||||||
// row
|
// row
|
||||||
for (long rowCnt = 0; rowCnt < numOfRowsPerTable; ) {
|
for (long rowCnt = 0; rowCnt < numOfRowsPerTable;) {
|
||||||
long rowSize = numOfValuesPerSQL;
|
long rowSize = numOfValuesPerSQL;
|
||||||
if (rowCnt + rowSize > numOfRowsPerTable) {
|
if (rowCnt + rowSize > numOfRowsPerTable) {
|
||||||
rowSize = numOfRowsPerTable - rowCnt;
|
rowSize = numOfRowsPerTable - rowCnt;
|
||||||
}
|
}
|
||||||
//table
|
// table
|
||||||
for (long tableCnt = startTableInd; tableCnt < endTableInd; ) {
|
for (long tableCnt = startTableInd; tableCnt < endTableInd;) {
|
||||||
long tableSize = numOfTablesPerSQL;
|
long tableSize = numOfTablesPerSQL;
|
||||||
if (tableCnt + tableSize > endTableInd) {
|
if (tableCnt + tableSize > endTableInd) {
|
||||||
tableSize = endTableInd - tableCnt;
|
tableSize = endTableInd - tableCnt;
|
||||||
}
|
}
|
||||||
long startTime = this.startTime + rowCnt * timeGap;
|
long startTime = this.startTime + rowCnt * timeGap;
|
||||||
// System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt + ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: " + timeGap + "");
|
// System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " +
|
||||||
|
// rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt +
|
||||||
|
// ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: "
|
||||||
|
// + timeGap + "");
|
||||||
/***********************************************/
|
/***********************************************/
|
||||||
// 生成数据
|
// Construct data
|
||||||
List<SubTableValue> data = SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt, tableSize, rowSize, startTime, timeGap);
|
List<SubTableValue> data = SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt,
|
||||||
// 乱序
|
tableSize, rowSize, startTime, timeGap);
|
||||||
|
// disorder
|
||||||
if (order != 0)
|
if (order != 0)
|
||||||
SubTableValueGenerator.disrupt(data, rate, range);
|
SubTableValueGenerator.disrupt(data, rate, range);
|
||||||
// insert
|
// insert
|
||||||
|
@ -205,5 +212,4 @@ public class SubTableService extends AbstractService {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,7 +14,7 @@ public class SuperTableService {
|
||||||
this.superTableMapper = new SuperTableMapperImpl(dataSource);
|
this.superTableMapper = new SuperTableMapperImpl(dataSource);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 创建超级表,指定每个field的名称和类型,每个tag的名称和类型
|
// Create super table, specifying the name and type of each field and each tag
|
||||||
public void create(SuperTableMeta superTableMeta) {
|
public void create(SuperTableMeta superTableMeta) {
|
||||||
superTableMapper.createSuperTable(superTableMeta);
|
superTableMapper.createSuperTable(superTableMeta);
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,15 +11,14 @@ public class TableService extends AbstractService {
|
||||||
|
|
||||||
private TableMapper tableMapper;
|
private TableMapper tableMapper;
|
||||||
|
|
||||||
//创建一张表
|
// Create a table
|
||||||
public void create(TableMeta tableMeta) {
|
public void create(TableMeta tableMeta) {
|
||||||
tableMapper.create(tableMeta);
|
tableMapper.create(tableMeta);
|
||||||
}
|
}
|
||||||
|
|
||||||
//创建多张表
|
// Create multiple tables
|
||||||
public void create(List<TableMeta> tables) {
|
public void create(List<TableMeta> tables) {
|
||||||
tables.stream().forEach(this::create);
|
tables.stream().forEach(this::create);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,7 +11,8 @@ public class FieldValueGenerator {
|
||||||
|
|
||||||
public static Random random = new Random(System.currentTimeMillis());
|
public static Random random = new Random(System.currentTimeMillis());
|
||||||
|
|
||||||
// 生成start到end的时间序列,时间戳为顺序,不含有乱序,field的value为随机生成
|
// Generate a time series from start to end, timestamps are in order without
|
||||||
|
// disorder, field values are randomly generated
|
||||||
public static List<RowValue> generate(long start, long end, long timeGap, List<FieldMeta> fieldMetaList) {
|
public static List<RowValue> generate(long start, long end, long timeGap, List<FieldMeta> fieldMetaList) {
|
||||||
List<RowValue> values = new ArrayList<>();
|
List<RowValue> values = new ArrayList<>();
|
||||||
|
|
||||||
|
@ -29,9 +30,12 @@ public class FieldValueGenerator {
|
||||||
return values;
|
return values;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 生成start到end的时间序列,时间戳为顺序,含有乱序,rate为乱序的比例,range为乱序前跳范围,field的value为随机生成
|
// Generate a time series from start to end, timestamps are in order but include
|
||||||
|
// disorder, rate is the proportion of disorder, range is the jump range before
|
||||||
|
// disorder, field values are randomly generated
|
||||||
public static List<RowValue> disrupt(List<RowValue> values, int rate, long range) {
|
public static List<RowValue> disrupt(List<RowValue> values, int rate, long range) {
|
||||||
long timeGap = (long) (values.get(1).getFields().get(0).getValue()) - (long) (values.get(0).getFields().get(0).getValue());
|
long timeGap = (long) (values.get(1).getFields().get(0).getValue())
|
||||||
|
- (long) (values.get(0).getFields().get(0).getValue());
|
||||||
int bugSize = values.size() * rate / 100;
|
int bugSize = values.size() * rate / 100;
|
||||||
Set<Integer> bugIndSet = new HashSet<>();
|
Set<Integer> bugIndSet = new HashSet<>();
|
||||||
while (bugIndSet.size() < bugSize) {
|
while (bugIndSet.size() < bugSize) {
|
||||||
|
|
|
@ -9,7 +9,7 @@ import java.util.List;
|
||||||
|
|
||||||
public class SubTableMetaGenerator {
|
public class SubTableMetaGenerator {
|
||||||
|
|
||||||
// 创建tableSize张子表,使用tablePrefix作为子表名的前缀,使用superTableMeta的元数据
|
// Create tableSize sub-tables, using tablePrefix as the prefix for sub-table names, and using the metadata from superTableMeta
|
||||||
// create table xxx using XXX tags(XXX)
|
// create table xxx using XXX tags(XXX)
|
||||||
public static List<SubTableMeta> generate(SuperTableMeta superTableMeta, int tableSize, String tablePrefix) {
|
public static List<SubTableMeta> generate(SuperTableMeta superTableMeta, int tableSize, String tablePrefix) {
|
||||||
List<SubTableMeta> subTableMetaList = new ArrayList<>();
|
List<SubTableMeta> subTableMetaList = new ArrayList<>();
|
||||||
|
|
|
@ -10,10 +10,11 @@ import java.util.List;
|
||||||
|
|
||||||
public class SuperTableMetaGenerator {
|
public class SuperTableMetaGenerator {
|
||||||
|
|
||||||
// 创建超级表,使用指定SQL语句
|
// Create super table using the specified SQL statement
|
||||||
public static SuperTableMeta generate(String superTableSQL) {
|
public static SuperTableMeta generate(String superTableSQL) {
|
||||||
SuperTableMeta tableMeta = new SuperTableMeta();
|
SuperTableMeta tableMeta = new SuperTableMeta();
|
||||||
// for example : create table superTable (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)
|
// for example : create table superTable (ts timestamp, temperature float,
|
||||||
|
// humidity int) tags(location nchar(64), groupId int)
|
||||||
superTableSQL = superTableSQL.trim().toLowerCase();
|
superTableSQL = superTableSQL.trim().toLowerCase();
|
||||||
if (!superTableSQL.startsWith("create"))
|
if (!superTableSQL.startsWith("create"))
|
||||||
throw new RuntimeException("invalid create super table SQL");
|
throw new RuntimeException("invalid create super table SQL");
|
||||||
|
@ -54,8 +55,9 @@ public class SuperTableMetaGenerator {
|
||||||
return tableMeta;
|
return tableMeta;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 创建超级表,指定field和tag的个数
|
// Create super table with specified number of fields and tags
|
||||||
public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize, String tagPrefix) {
|
public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize,
|
||||||
|
String tagPrefix) {
|
||||||
if (fieldSize < 2 || tagSize < 1) {
|
if (fieldSize < 2 || tagSize < 1) {
|
||||||
throw new RuntimeException("create super table but fieldSize less than 2 or tagSize less than 1");
|
throw new RuntimeException("create super table but fieldSize less than 2 or tagSize less than 1");
|
||||||
}
|
}
|
||||||
|
@ -66,7 +68,8 @@ public class SuperTableMetaGenerator {
|
||||||
List<FieldMeta> fields = new ArrayList<>();
|
List<FieldMeta> fields = new ArrayList<>();
|
||||||
fields.add(new FieldMeta("ts", "timestamp"));
|
fields.add(new FieldMeta("ts", "timestamp"));
|
||||||
for (int i = 1; i <= fieldSize; i++) {
|
for (int i = 1; i <= fieldSize; i++) {
|
||||||
fields.add(new FieldMeta(fieldPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length]));
|
fields.add(
|
||||||
|
new FieldMeta(fieldPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length]));
|
||||||
}
|
}
|
||||||
tableMetadata.setFields(fields);
|
tableMetadata.setFields(fields);
|
||||||
// tags
|
// tags
|
||||||
|
|
|
@ -9,7 +9,7 @@ import java.util.List;
|
||||||
|
|
||||||
public class TagValueGenerator {
|
public class TagValueGenerator {
|
||||||
|
|
||||||
// 创建标签值:使用tagMetas
|
// Create tag values using tagMetas
|
||||||
public static List<TagValue> generate(List<TagMeta> tagMetas) {
|
public static List<TagValue> generate(List<TagMeta> tagMetas) {
|
||||||
List<TagValue> tagValues = new ArrayList<>();
|
List<TagValue> tagValues = new ArrayList<>();
|
||||||
for (int i = 0; i < tagMetas.size(); i++) {
|
for (int i = 0; i < tagMetas.size(); i++) {
|
||||||
|
|
|
@ -41,17 +41,17 @@ public class TimeStampUtil {
|
||||||
if (start == 0)
|
if (start == 0)
|
||||||
start = now - size * timeGap;
|
start = now - size * timeGap;
|
||||||
|
|
||||||
// 如果size小于1异常
|
// If size is less than 1, throw an exception
|
||||||
if (size < 1)
|
if (size < 1)
|
||||||
throw new IllegalArgumentException("size less than 1.");
|
throw new IllegalArgumentException("size less than 1.");
|
||||||
// 如果timeGap为1,已经超长,需要前移start
|
// If timeGap is 1 and it exceeds the limit, move start forward
|
||||||
if (start + size > now) {
|
if (start + size > now) {
|
||||||
start = now - size;
|
start = now - size;
|
||||||
return new TimeTuple(start, now, 1);
|
return new TimeTuple(start, now, 1);
|
||||||
}
|
}
|
||||||
long end = start + (long) (timeGap * size);
|
long end = start + (long) (timeGap * size);
|
||||||
if (end > now) {
|
if (end > now) {
|
||||||
//压缩timeGap
|
// Compress timeGap
|
||||||
end = now;
|
end = now;
|
||||||
double gap = (end - start) / (size * 1.0f);
|
double gap = (end - start) / (size * 1.0f);
|
||||||
if (gap < 1.0f) {
|
if (gap < 1.0f) {
|
||||||
|
|
|
@ -1,18 +1,18 @@
|
||||||
### 设置###
|
### Settings ###
|
||||||
log4j.rootLogger=info,stdout
|
log4j.rootLogger=info,stdout
|
||||||
### 输出信息到控制抬 ###
|
### Output information to the console ###
|
||||||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||||
log4j.appender.stdout.Target=System.out
|
log4j.appender.stdout.Target=System.out
|
||||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||||
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
|
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
|
||||||
### 输出DEBUG 级别以上的日志到=logs/debug.log
|
### Output logs of DEBUG level and above to logs/debug.log ###
|
||||||
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
|
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
|
||||||
log4j.appender.DebugLog.File=logs/debug.log
|
log4j.appender.DebugLog.File=logs/debug.log
|
||||||
log4j.appender.DebugLog.Append=true
|
log4j.appender.DebugLog.Append=true
|
||||||
log4j.appender.DebugLog.Threshold=DEBUG
|
log4j.appender.DebugLog.Threshold=DEBUG
|
||||||
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
|
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
|
||||||
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
|
||||||
### 输出ERROR 级别以上的日志到=logs/error.log
|
### Output logs of ERROR level and above to logs/error.log ###
|
||||||
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
|
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
|
||||||
log4j.appender.ErrorLog.File=logs/error.log
|
log4j.appender.ErrorLog.File=logs/error.log
|
||||||
log4j.appender.ErrorLog.Append=true
|
log4j.appender.ErrorLog.Append=true
|
||||||
|
|
|
@ -0,0 +1,3 @@
|
||||||
|
*
|
||||||
|
!*.c
|
||||||
|
!.gitignore
|
|
@ -0,0 +1,21 @@
|
||||||
|
// compile with
|
||||||
|
// gcc connect_example.c -o connect_example -ltaos
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
int main() {
|
||||||
|
ws_enable_log("debug");
|
||||||
|
char *dsn = "ws://localhost:6041";
|
||||||
|
WS_TAOS *taos = ws_connect(dsn);
|
||||||
|
if (taos == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
fprintf(stdout, "Connected to %s successfully.\n", dsn);
|
||||||
|
|
||||||
|
/* put your code here for read and write */
|
||||||
|
|
||||||
|
// close & clean
|
||||||
|
ws_close(taos);
|
||||||
|
}
|
|
@ -0,0 +1,69 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||||
|
// to compile: gcc -o create_db_demo create_db_demo.c -ltaos
|
||||||
|
|
||||||
|
#include <inttypes.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
static int DemoCreateDB() {
|
||||||
|
ws_enable_log("debug");
|
||||||
|
// ANCHOR: create_db_and_table
|
||||||
|
int code = 0;
|
||||||
|
char *dsn = "ws://localhost:6041";
|
||||||
|
// connect
|
||||||
|
WS_TAOS *taos = ws_connect(dsn);
|
||||||
|
|
||||||
|
if (taos == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// create database
|
||||||
|
WS_RES *result = ws_query(taos, "CREATE DATABASE IF NOT EXISTS power");
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
ws_free_result(result);
|
||||||
|
fprintf(stdout, "Create database power successfully.\n");
|
||||||
|
|
||||||
|
// create table
|
||||||
|
const char *sql =
|
||||||
|
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId "
|
||||||
|
"INT, location BINARY(24))";
|
||||||
|
result = ws_query(taos, sql);
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to create stable power.meters, ErrCode: 0x%x, ErrMessage: %s\n.", code, ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
ws_free_result(result);
|
||||||
|
fprintf(stdout, "Create stable power.meters successfully.\n");
|
||||||
|
|
||||||
|
// close & clean
|
||||||
|
ws_close(taos);
|
||||||
|
return 0;
|
||||||
|
// ANCHOR_END: create_db_and_table
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char *argv[]) { return DemoCreateDB(); }
|
|
@ -0,0 +1,67 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||||
|
// to compile: gcc -o insert_data_demo insert_data_demo.c -ltaos
|
||||||
|
|
||||||
|
#include <inttypes.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
static int DemoInsertData() {
|
||||||
|
// ANCHOR: insert_data
|
||||||
|
int code = 0;
|
||||||
|
char *dsn = "ws://localhost:6041";
|
||||||
|
// connect
|
||||||
|
WS_TAOS *taos = ws_connect(dsn);
|
||||||
|
if (taos == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert data, please make sure the database and table are already created
|
||||||
|
const char *sql =
|
||||||
|
"INSERT INTO "
|
||||||
|
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') "
|
||||||
|
"VALUES "
|
||||||
|
"(NOW + 1a, 10.30000, 219, 0.31000) "
|
||||||
|
"(NOW + 2a, 12.60000, 218, 0.33000) "
|
||||||
|
"(NOW + 3a, 12.30000, 221, 0.31000) "
|
||||||
|
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') "
|
||||||
|
"VALUES "
|
||||||
|
"(NOW + 1a, 10.30000, 218, 0.25000) ";
|
||||||
|
WS_RES *result = ws_query(taos, sql);
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to insert data to power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: %s\n.", sql, code,
|
||||||
|
ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// you can check affectedRows here
|
||||||
|
int rows = ws_affected_rows(result);
|
||||||
|
fprintf(stdout, "Successfully inserted %d rows into power.meters.\n", rows);
|
||||||
|
|
||||||
|
// close & clean
|
||||||
|
ws_close(taos);
|
||||||
|
return 0;
|
||||||
|
// ANCHOR_END: insert_data
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char *argv[]) { return DemoInsertData(); }
|
|
@ -0,0 +1,70 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||||
|
// to compile: gcc -o query_data_demo query_data_demo.c -ltaos
|
||||||
|
|
||||||
|
#include <inttypes.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
static int DemoQueryData() {
|
||||||
|
// ANCHOR: query_data
|
||||||
|
int code = 0;
|
||||||
|
char *dsn = "ws://localhost:6041";
|
||||||
|
|
||||||
|
// connect
|
||||||
|
WS_TAOS *taos = ws_connect(dsn);
|
||||||
|
if (taos == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// query data, please make sure the database and table are already created
|
||||||
|
const char *sql = "SELECT ts, current, location FROM power.meters limit 100";
|
||||||
|
WS_RES *result = ws_query(taos, sql);
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to query data from power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: %s\n.", sql, code,
|
||||||
|
ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
WS_ROW row = NULL;
|
||||||
|
int rows = 0;
|
||||||
|
int num_fields = ws_field_count(result);
|
||||||
|
const WS_FIELD *fields = ws_fetch_fields(result);
|
||||||
|
|
||||||
|
fprintf(stdout, "query successfully, got %d fields, the sql is: %s.\n", num_fields, sql);
|
||||||
|
|
||||||
|
// fetch the records row by row
|
||||||
|
while ((row = ws_fetch_row(result))) {
|
||||||
|
// Add your data processing logic here
|
||||||
|
|
||||||
|
rows++;
|
||||||
|
}
|
||||||
|
fprintf(stdout, "total rows: %d\n", rows);
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// close & clean
|
||||||
|
ws_close(taos);
|
||||||
|
return 0;
|
||||||
|
// ANCHOR_END: query_data
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char *argv[]) { return DemoQueryData(); }
|
|
@ -0,0 +1,121 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||||
|
// to compile: gcc -o sml_insert_demo sml_insert_demo.c -ltaos
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
static int DemoSmlInsert() {
|
||||||
|
// ANCHOR: schemaless
|
||||||
|
int code = 0;
|
||||||
|
char *dsn = "ws://localhost:6041";
|
||||||
|
|
||||||
|
// connect
|
||||||
|
WS_TAOS *taos = ws_connect(dsn);
|
||||||
|
if (taos == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// create database
|
||||||
|
WS_RES *result = ws_query(taos, "CREATE DATABASE IF NOT EXISTS power");
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// use database
|
||||||
|
result = ws_query(taos, "USE power");
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to execute use power, ErrCode: 0x%x, ErrMessage: %s\n.", code, ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// schemaless demo data
|
||||||
|
char *line_demo =
|
||||||
|
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 "
|
||||||
|
"1626006833639";
|
||||||
|
char *telnet_demo = "metric_telnet 1707095283260 4 host=host0 interface=eth0";
|
||||||
|
char *json_demo =
|
||||||
|
"{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, "
|
||||||
|
"\"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||||
|
|
||||||
|
// influxdb line protocol
|
||||||
|
char *lines[] = {line_demo};
|
||||||
|
int totalLines = 0;
|
||||||
|
result = ws_schemaless_insert_raw(taos, line_demo, strlen(line_demo), &totalLines, WS_TSDB_SML_LINE_PROTOCOL,
|
||||||
|
WS_TSDB_SML_TIMESTAMP_MILLI_SECONDS);
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to insert schemaless line data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", line_demo,
|
||||||
|
code, ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stdout, "Insert %d rows of schemaless line data successfully.\n", totalLines);
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// opentsdb telnet protocol
|
||||||
|
totalLines = 0;
|
||||||
|
result = ws_schemaless_insert_raw(taos, telnet_demo, strlen(telnet_demo), &totalLines, WS_TSDB_SML_TELNET_PROTOCOL,
|
||||||
|
WS_TSDB_SML_TIMESTAMP_MILLI_SECONDS);
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to insert schemaless telnet data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", telnet_demo,
|
||||||
|
code, ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stdout, "Insert %d rows of schemaless telnet data successfully.\n", totalLines);
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// opentsdb json protocol
|
||||||
|
char *jsons[1] = {0};
|
||||||
|
// allocate memory for json data. can not use static memory.
|
||||||
|
totalLines = 0;
|
||||||
|
result = ws_schemaless_insert_raw(taos, json_demo, strlen(json_demo), &totalLines, WS_TSDB_SML_JSON_PROTOCOL,
|
||||||
|
WS_TSDB_SML_TIMESTAMP_MILLI_SECONDS);
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
free(jsons[0]);
|
||||||
|
fprintf(stderr, "Failed to insert schemaless json data, Server: %s, ErrCode: 0x%x, ErrMessage: %s\n.", json_demo,
|
||||||
|
code, ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
free(jsons[0]);
|
||||||
|
|
||||||
|
fprintf(stdout, "Insert %d rows of schemaless json data successfully.\n", totalLines);
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// close & clean
|
||||||
|
ws_close(taos);
|
||||||
|
return 0;
|
||||||
|
// ANCHOR_END: schemaless
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point: run the schemaless-insert demo and propagate its status code.
int main(int argc, char *argv[]) {
  (void)argc;
  (void)argv;
  return DemoSmlInsert();
}
|
|
@ -0,0 +1,183 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||||
|
// to compile: gcc -o stmt_insert_demo stmt_insert_demo.c -ltaos
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <sys/time.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief execute sql only.
|
||||||
|
*
|
||||||
|
* @param taos
|
||||||
|
* @param sql
|
||||||
|
*/
|
||||||
|
/**
 * @brief Run a SQL statement for its side effects; terminate the process on failure.
 *
 * @param taos open WebSocket connection
 * @param sql  statement to execute
 */
void executeSQL(WS_TAOS *taos, const char *sql) {
  WS_RES *result = ws_query(taos, sql);
  int     status = ws_errno(result);
  if (status == 0) {
    ws_free_result(result);
    return;
  }
  // report the server-side error, release everything, then abort the demo
  fprintf(stderr, "%s\n", ws_errstr(result));
  ws_free_result(result);
  ws_close(taos);
  exit(EXIT_FAILURE);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief check return status and exit program when error occur.
|
||||||
|
*
|
||||||
|
* @param stmt
|
||||||
|
* @param code
|
||||||
|
* @param msg
|
||||||
|
*/
|
||||||
|
/**
 * @brief Validate a stmt-API return code; on error print a message, close the
 * statement handle and exit the process.
 *
 * @param stmt statement handle used to fetch the error string
 * @param code return code from the preceding ws_stmt_* call (0 means success)
 * @param msg  caller-supplied context for the error report
 */
void checkErrorCode(WS_STMT *stmt, int code, const char *msg) {
  if (code == 0) {
    return;
  }
  fprintf(stderr, "%s. code: %d, error: %s\n", msg, code, ws_stmt_errstr(stmt));
  ws_stmt_close(stmt);
  exit(EXIT_FAILURE);
}
|
||||||
|
|
||||||
|
// One sample row for power.meters (matches the stable's column order).
typedef struct {
  int64_t ts;       // timestamp, milliseconds
  float   current;  // measured current
  int     voltage;  // measured voltage
  float   phase;    // measured phase
} Row;

// Demo sizing: how many sub-tables to create and how many rows to bind per
// sub-table; total_affected accumulates the rows the server reports inserted.
int num_of_sub_table = 10;
int num_of_row = 10;
int total_affected = 0;
|
||||||
|
/**
|
||||||
|
* @brief insert data using stmt API
|
||||||
|
*
|
||||||
|
* @param taos
|
||||||
|
*/
|
||||||
|
/**
 * @brief Insert generated rows into power.meters through the parameter-binding
 * (stmt) API: one sub-table per group, num_of_row rows per sub-table.
 *
 * @param taos open WebSocket connection
 */
void insertData(WS_TAOS *taos) {
  // init
  WS_STMT *stmt = ws_stmt_init(taos);
  if (stmt == NULL) {
    fprintf(stderr, "Failed to init ws_stmt, error: %s\n", ws_stmt_errstr(NULL));
    exit(EXIT_FAILURE);
  }
  // prepare
  const char *sql = "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)";
  int code = ws_stmt_prepare(stmt, sql, 0);
  checkErrorCode(stmt, code, "Failed to execute ws_stmt_prepare");
  for (int i = 1; i <= num_of_sub_table; i++) {
    char table_name[20];
    sprintf(table_name, "d_bind_%d", i);
    char location[20];
    sprintf(location, "location_%d", i);

    // set table name and tags
    WS_MULTI_BIND tags[2];
    // groupId
    tags[0].buffer_type = TSDB_DATA_TYPE_INT;
    tags[0].buffer_length = sizeof(int);
    tags[0].length = (int32_t *)&tags[0].buffer_length;
    tags[0].buffer = &i;
    tags[0].is_null = NULL;
    tags[0].num = 1;
    // location
    tags[1].buffer_type = TSDB_DATA_TYPE_BINARY;
    tags[1].buffer_length = strlen(location);
    tags[1].length = (int32_t *)&tags[1].buffer_length;
    tags[1].buffer = location;
    tags[1].is_null = NULL;
    tags[1].num = 1;
    code = ws_stmt_set_tbname_tags(stmt, table_name, tags, 2);
    checkErrorCode(stmt, code, "Failed to set table name and tags\n");

    // column bindings reused for every row of this sub-table; only the
    // buffer pointers change per row
    WS_MULTI_BIND params[4];
    // ts
    params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
    params[0].buffer_length = sizeof(int64_t);
    params[0].length = (int32_t *)&params[0].buffer_length;
    params[0].is_null = NULL;
    params[0].num = 1;
    // current
    params[1].buffer_type = TSDB_DATA_TYPE_FLOAT;
    params[1].buffer_length = sizeof(float);
    params[1].length = (int32_t *)&params[1].buffer_length;
    params[1].is_null = NULL;
    params[1].num = 1;
    // voltage
    params[2].buffer_type = TSDB_DATA_TYPE_INT;
    params[2].buffer_length = sizeof(int);
    params[2].length = (int32_t *)&params[2].buffer_length;
    params[2].is_null = NULL;
    params[2].num = 1;
    // phase
    params[3].buffer_type = TSDB_DATA_TYPE_FLOAT;
    params[3].buffer_length = sizeof(float);
    params[3].length = (int32_t *)&params[3].buffer_length;
    params[3].is_null = NULL;
    params[3].num = 1;

    for (int j = 0; j < num_of_row; j++) {
      struct timeval tv;
      gettimeofday(&tv, NULL);
      long long milliseconds = tv.tv_sec * 1000LL + tv.tv_usec / 1000;  // current timestamp in milliseconds
      int64_t ts = milliseconds + j;
      float   current = (float)rand() / RAND_MAX * 30;
      int     voltage = rand() % 300;
      float   phase = (float)rand() / RAND_MAX;
      params[0].buffer = &ts;
      params[1].buffer = &current;
      params[2].buffer = &voltage;
      params[3].buffer = &phase;
      // bind param
      code = ws_stmt_bind_param_batch(stmt, params, 4);
      checkErrorCode(stmt, code, "Failed to bind param");
      // fix: the bound row must be added to the batch before the next bind;
      // the original called ws_stmt_add_batch once after the loop, so every
      // bind overwrote the previous row and only the last row was batched.
      code = ws_stmt_add_batch(stmt);
      checkErrorCode(stmt, code, "Failed to add batch");
    }
    // execute batch
    int affected_rows = 0;
    code = ws_stmt_execute(stmt, &affected_rows);
    checkErrorCode(stmt, code, "Failed to exec stmt");
    // get affected rows
    int affected = ws_stmt_affected_rows_once(stmt);
    total_affected += affected;
  }
  fprintf(stdout, "Successfully inserted %d rows to power.meters.\n", total_affected);
  ws_stmt_close(stmt);
}
|
||||||
|
|
||||||
|
int main() {
|
||||||
|
int code = 0;
|
||||||
|
char *dsn = "ws://localhost:6041";
|
||||||
|
WS_TAOS *taos = ws_connect(dsn);
|
||||||
|
if (taos == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
exit(EXIT_FAILURE);
|
||||||
|
}
|
||||||
|
// create database and table
|
||||||
|
executeSQL(taos, "CREATE DATABASE IF NOT EXISTS power");
|
||||||
|
executeSQL(taos, "USE power");
|
||||||
|
executeSQL(taos,
|
||||||
|
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
|
||||||
|
"(groupId INT, location BINARY(24))");
|
||||||
|
insertData(taos);
|
||||||
|
ws_close(taos);
|
||||||
|
}
|
|
@ -0,0 +1,488 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// to compile: gcc -o tmq_demo tmq_demo.c -ltaos -lpthread
|
||||||
|
|
||||||
|
#include <assert.h>
|
||||||
|
#include <pthread.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
volatile int thread_stop = 0;
|
||||||
|
static int running = 1;
|
||||||
|
static int count = 0;
|
||||||
|
const char* topic_name = "topic_meters";
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
const char* enable_auto_commit;
|
||||||
|
const char* auto_commit_interval_ms;
|
||||||
|
const char* group_id;
|
||||||
|
const char* client_id;
|
||||||
|
const char* td_connect_host;
|
||||||
|
const char* td_connect_port;
|
||||||
|
const char* td_connect_user;
|
||||||
|
const char* td_connect_pass;
|
||||||
|
const char* auto_offset_reset;
|
||||||
|
} ConsumerConfig;
|
||||||
|
|
||||||
|
ConsumerConfig config = {.enable_auto_commit = "true",
|
||||||
|
.auto_commit_interval_ms = "1000",
|
||||||
|
.group_id = "group1",
|
||||||
|
.client_id = "client1",
|
||||||
|
.td_connect_host = "localhost",
|
||||||
|
.td_connect_port = "6030",
|
||||||
|
.td_connect_user = "root",
|
||||||
|
.td_connect_pass = "taosdata",
|
||||||
|
.auto_offset_reset = "latest"};
|
||||||
|
|
||||||
|
void* prepare_data(void* arg) {
|
||||||
|
int code = 0;
|
||||||
|
char* dsn = "ws://localhost:6041";
|
||||||
|
WS_TAOS* pConn = ws_connect(dsn);
|
||||||
|
if (pConn == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
WS_RES* pRes;
|
||||||
|
int i = 1;
|
||||||
|
|
||||||
|
while (!thread_stop) {
|
||||||
|
char buf[200] = {0};
|
||||||
|
i++;
|
||||||
|
snprintf(
|
||||||
|
buf, sizeof(buf),
|
||||||
|
"INSERT INTO power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') VALUES (NOW + %da, 10.30000, "
|
||||||
|
"219, 0.31000)",
|
||||||
|
i);
|
||||||
|
|
||||||
|
pRes = ws_query(pConn, buf);
|
||||||
|
code = ws_errno(pRes);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to insert data to power.meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
|
||||||
|
}
|
||||||
|
ws_free_result(pRes);
|
||||||
|
sleep(1);
|
||||||
|
}
|
||||||
|
fprintf(stdout, "Prepare data thread exit\n");
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ANCHOR: msg_process
|
||||||
|
/**
 * @brief Drain every row from one polled TMQ message.
 *
 * @param msg message returned by ws_tmq_consumer_poll
 * @return number of rows contained in the message
 */
int32_t msg_process(WS_RES* msg) {
  int32_t rows = 0;
  const char* topicName = ws_tmq_get_topic_name(msg);
  const char* dbName = ws_tmq_get_db_name(msg);
  int32_t     vgroupId = ws_tmq_get_vgroup_id(msg);
  // fix: these metadata values were fetched but never referenced, producing
  // unused-variable warnings; mark them as intentionally available for the
  // reader's own processing logic.
  (void)topicName;
  (void)dbName;
  (void)vgroupId;

  while (true) {
    // get one row data from message
    WS_ROW row = ws_fetch_row(msg);
    if (row == NULL) break;

    // Add your data processing logic here

    rows++;
  }

  return rows;
}
|
||||||
|
// ANCHOR_END: msg_process
|
||||||
|
|
||||||
|
WS_TAOS* init_env() {
|
||||||
|
int code = 0;
|
||||||
|
char* dsn = "ws://localhost:6041";
|
||||||
|
WS_TAOS* pConn = ws_connect(dsn);
|
||||||
|
if (pConn == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
WS_RES* pRes;
|
||||||
|
// drop database if exists
|
||||||
|
pRes = ws_query(pConn, "DROP TOPIC IF EXISTS topic_meters");
|
||||||
|
code = ws_errno(pRes);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to drop topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
|
||||||
|
goto END;
|
||||||
|
}
|
||||||
|
ws_free_result(pRes);
|
||||||
|
|
||||||
|
pRes = ws_query(pConn, "DROP DATABASE IF EXISTS power");
|
||||||
|
code = ws_errno(pRes);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to drop database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
|
||||||
|
goto END;
|
||||||
|
}
|
||||||
|
ws_free_result(pRes);
|
||||||
|
|
||||||
|
// create database
|
||||||
|
pRes = ws_query(pConn, "CREATE DATABASE power PRECISION 'ms' WAL_RETENTION_PERIOD 3600");
|
||||||
|
code = ws_errno(pRes);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to create power, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
|
||||||
|
goto END;
|
||||||
|
}
|
||||||
|
ws_free_result(pRes);
|
||||||
|
|
||||||
|
// create super table
|
||||||
|
pRes =
|
||||||
|
ws_query(pConn,
|
||||||
|
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
|
||||||
|
"(groupId INT, location BINARY(24))");
|
||||||
|
code = ws_errno(pRes);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to create super table meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
|
||||||
|
goto END;
|
||||||
|
}
|
||||||
|
ws_free_result(pRes);
|
||||||
|
|
||||||
|
return pConn;
|
||||||
|
|
||||||
|
END:
|
||||||
|
ws_free_result(pRes);
|
||||||
|
ws_close(pConn);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release the demo connection if one was established.
void deinit_env(WS_TAOS* pConn) {
  if (pConn != NULL) {
    ws_close(pConn);
  }
}
|
||||||
|
|
||||||
|
/**
 * @brief Create the demo topic `topic_meters` over power.meters.
 *
 * @param pConn open connection (must not be NULL)
 * @return 0 on success, -1 on failure
 */
int32_t create_topic(WS_TAOS* pConn) {
  WS_RES* pRes;
  int     code = 0;

  if (!pConn) {
    fprintf(stderr, "Invalid input parameter.\n");
    return -1;
  }

  pRes = ws_query(pConn, "USE power");
  code = ws_errno(pRes);
  // fix: the original re-called ws_errno(pRes) here instead of using `code`
  if (code != 0) {
    fprintf(stderr, "Failed to use power, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
    ws_free_result(pRes);  // fix: result set was leaked on this error path
    return -1;
  }
  ws_free_result(pRes);

  pRes = ws_query(
      pConn,
      "CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters");
  code = ws_errno(pRes);
  if (code != 0) {
    fprintf(stderr, "Failed to create topic topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
    ws_free_result(pRes);  // fix: result set was leaked on this error path
    return -1;
  }
  ws_free_result(pRes);
  return 0;
}
|
||||||
|
|
||||||
|
/**
 * @brief Drop the demo topic `topic_meters` if it exists.
 *
 * @param pConn open connection (must not be NULL)
 * @return 0 on success, -1 on failure
 */
int32_t drop_topic(WS_TAOS* pConn) {
  WS_RES* pRes;
  int     code = 0;

  if (!pConn) {
    fprintf(stderr, "Invalid input parameter.\n");
    return -1;
  }

  pRes = ws_query(pConn, "USE power");
  code = ws_errno(pRes);
  // fix: the original re-called ws_errno(pRes) here instead of using `code`
  if (code != 0) {
    fprintf(stderr, "Failed to use power, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
    ws_free_result(pRes);  // fix: result set was leaked on this error path
    return -1;
  }
  ws_free_result(pRes);

  pRes = ws_query(pConn, "DROP TOPIC IF EXISTS topic_meters");
  code = ws_errno(pRes);
  if (code != 0) {
    fprintf(stderr, "Failed to drop topic topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, ws_errstr(pRes));
    ws_free_result(pRes);  // fix: result set was leaked on this error path
    return -1;
  }
  ws_free_result(pRes);
  return 0;
}
|
||||||
|
|
||||||
|
// Commit callback: bump the global invocation counter and log the commit result.
void tmq_commit_cb_print(ws_tmq_t* tmq, int32_t code, void* param) {
  count += 1;
  fprintf(stdout, "tmq_commit_cb_print() code: %d, tmq: %p, param: %p, count: %d.\n", code, tmq, param, count);
}
|
||||||
|
|
||||||
|
// ANCHOR: create_consumer_1
|
||||||
|
ws_tmq_t* build_consumer(const ConsumerConfig* config) {
|
||||||
|
ws_tmq_conf_res_t code;
|
||||||
|
ws_tmq_t* tmq = NULL;
|
||||||
|
|
||||||
|
// create a configuration object
|
||||||
|
ws_tmq_conf_t* conf = ws_tmq_conf_new();
|
||||||
|
|
||||||
|
// set the configuration parameters
|
||||||
|
code = ws_tmq_conf_set(conf, "enable.auto.commit", config->enable_auto_commit);
|
||||||
|
if (WS_TMQ_CONF_OK != code) {
|
||||||
|
ws_tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
code = ws_tmq_conf_set(conf, "auto.commit.interval.ms", config->auto_commit_interval_ms);
|
||||||
|
if (WS_TMQ_CONF_OK != code) {
|
||||||
|
ws_tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
code = ws_tmq_conf_set(conf, "group.id", config->group_id);
|
||||||
|
if (WS_TMQ_CONF_OK != code) {
|
||||||
|
ws_tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
code = ws_tmq_conf_set(conf, "client.id", config->client_id);
|
||||||
|
if (WS_TMQ_CONF_OK != code) {
|
||||||
|
ws_tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
code = ws_tmq_conf_set(conf, "auto.offset.reset", config->auto_offset_reset);
|
||||||
|
if (WS_TMQ_CONF_OK != code) {
|
||||||
|
ws_tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a consumer object
|
||||||
|
tmq = ws_tmq_consumer_new(conf, "taos://localhost:6041", NULL, 0);
|
||||||
|
|
||||||
|
_end:
|
||||||
|
// destroy the configuration object
|
||||||
|
ws_tmq_conf_destroy(conf);
|
||||||
|
return tmq;
|
||||||
|
}
|
||||||
|
// ANCHOR_END: create_consumer_1
|
||||||
|
|
||||||
|
// ANCHOR: build_topic_list
|
||||||
|
// build a topic list used to subscribe
|
||||||
|
ws_tmq_list_t* build_topic_list() {
|
||||||
|
// create a empty topic list
|
||||||
|
ws_tmq_list_t* topicList = ws_tmq_list_new();
|
||||||
|
|
||||||
|
// append topic name to the list
|
||||||
|
int32_t code = ws_tmq_list_append(topicList, topic_name);
|
||||||
|
if (code) {
|
||||||
|
// if failed, destroy the list and return NULL
|
||||||
|
ws_tmq_list_destroy(topicList);
|
||||||
|
fprintf(stderr,
|
||||||
|
"Failed to create topic_list, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n",
|
||||||
|
topic_name, config.group_id, config.client_id, code, ws_tmq_errstr(NULL));
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
// if success, return the list
|
||||||
|
return topicList;
|
||||||
|
}
|
||||||
|
// ANCHOR_END: build_topic_list
|
||||||
|
|
||||||
|
// ANCHOR: basic_consume_loop
|
||||||
|
// Poll the consumer until either `running` is cleared or more than 50
// messages have been processed, then report totals.
void basic_consume_loop(ws_tmq_t* tmq) {
  int32_t row_total = 0;    // total rows consumed
  int32_t msg_total = 0;    // total messages consumed
  int32_t poll_timeout = 5000;  // poll timeout in ms

  while (running) {
    // poll message from TDengine
    WS_RES* message = ws_tmq_consumer_poll(tmq, poll_timeout);
    if (message != NULL) {
      msg_total++;

      // Add your data processing logic here
      row_total += msg_process(message);

      // free the message
      ws_free_result(message);
    }
    // consume 50 messages and break
    if (msg_total > 50) break;
  }

  // print the result: total messages and total rows consumed
  fprintf(stdout, "%d msg consumed, include %d rows\n", msg_total, row_total);
}
|
||||||
|
// ANCHOR_END: basic_consume_loop
|
||||||
|
|
||||||
|
// ANCHOR: consume_repeatly
|
||||||
|
// Rewind every partition of the demo topic to its beginning and consume again.
void consume_repeatly(ws_tmq_t* tmq) {
  int32_t                  numOfAssignment = 0;
  ws_tmq_topic_assignment* pAssign = NULL;

  // get the topic assignment
  int32_t code = ws_tmq_get_topic_assignment(tmq, topic_name, &pAssign, &numOfAssignment);
  if (code != 0 || pAssign == NULL || numOfAssignment == 0) {
    fprintf(stderr, "Failed to get assignment, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n",
            topic_name, config.group_id, config.client_id, code, ws_tmq_errstr(tmq));
    return;
  }

  // seek every vgroup back to its earliest offset; stop on first failure
  for (int32_t idx = 0; idx < numOfAssignment; ++idx) {
    ws_tmq_topic_assignment* assignment = &pAssign[idx];

    code = ws_tmq_offset_seek(tmq, topic_name, assignment->vgId, assignment->begin);
    if (code != 0) {
      fprintf(stderr,
              "Failed to seek offset, topic: %s, groupId: %s, clientId: %s, vgId: %d, ErrCode: 0x%x, ErrMessage: %s.\n",
              topic_name, config.group_id, config.client_id, assignment->vgId, code, ws_tmq_errstr(tmq));
      break;
    }
  }
  if (code == 0) fprintf(stdout, "Assignment seek to beginning successfully.\n");

  // free the assignment array
  ws_tmq_free_assignment(pAssign, numOfAssignment);

  // let's consume the messages again
  basic_consume_loop(tmq);
}
|
||||||
|
// ANCHOR_END: consume_repeatly
|
||||||
|
|
||||||
|
// ANCHOR: manual_commit
|
||||||
|
// Consume messages and commit each offset synchronously by hand; stop when
// `running` is cleared, a commit fails, or more than 50 messages were handled.
void manual_commit(ws_tmq_t* tmq) {
  int32_t row_total = 0;        // total rows consumed
  int32_t msg_total = 0;        // total messages consumed
  int32_t poll_timeout = 5000;  // poll timeout in ms

  while (running) {
    // poll message from TDengine
    WS_RES* message = ws_tmq_consumer_poll(tmq, poll_timeout);
    if (message != NULL) {
      msg_total++;
      // process the message
      row_total += msg_process(message);
      // commit the message
      int32_t code = ws_tmq_commit_sync(tmq, message);
      if (code != 0) {
        fprintf(stderr,
                "Failed to commit offset, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n",
                topic_name, config.group_id, config.client_id, code, ws_tmq_errstr(tmq));
        // free the message before aborting the loop
        ws_free_result(message);
        break;
      } else {
        fprintf(stdout, "Commit offset manually successfully.\n");
      }
      // free the message
      ws_free_result(message);
    }
    // consume 50 messages and break
    if (msg_total > 50) break;
  }

  // print the result: total messages and total rows consumed
  fprintf(stdout, "%d msg consumed, include %d rows.\n", msg_total, row_total);
}
|
||||||
|
// ANCHOR_END: manual_commit
|
||||||
|
|
||||||
|
int main(int argc, char* argv[]) {
|
||||||
|
int32_t code;
|
||||||
|
pthread_t thread_id;
|
||||||
|
|
||||||
|
WS_TAOS* pConn = init_env();
|
||||||
|
if (pConn == NULL) {
|
||||||
|
fprintf(stderr, "Failed to init env.\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (create_topic(pConn) < 0) {
|
||||||
|
fprintf(stderr, "Failed to create topic.\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pthread_create(&thread_id, NULL, &prepare_data, NULL)) {
|
||||||
|
fprintf(stderr, "Failed to create thread.\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ANCHOR: create_consumer_2
|
||||||
|
ws_tmq_t* tmq = build_consumer(&config);
|
||||||
|
if (NULL == tmq) {
|
||||||
|
fprintf(stderr, "Failed to create native consumer, host: %s, groupId: %s, , clientId: %s.\n",
|
||||||
|
config.td_connect_host, config.group_id, config.client_id);
|
||||||
|
return -1;
|
||||||
|
} else {
|
||||||
|
fprintf(stdout, "Create consumer successfully, host: %s, groupId: %s, clientId: %s.\n", config.td_connect_host,
|
||||||
|
config.group_id, config.client_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ANCHOR_END: create_consumer_2
|
||||||
|
|
||||||
|
// ANCHOR: subscribe_3
|
||||||
|
ws_tmq_list_t* topic_list = build_topic_list();
|
||||||
|
if (NULL == topic_list) {
|
||||||
|
fprintf(stderr, "Failed to create topic_list, topic: %s, groupId: %s, clientId: %s.\n", topic_name, config.group_id,
|
||||||
|
config.client_id);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((code = ws_tmq_subscribe(tmq, topic_list))) {
|
||||||
|
fprintf(stderr,
|
||||||
|
"Failed to subscribe topic_list, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n",
|
||||||
|
topic_name, config.group_id, config.client_id, code, ws_tmq_errstr(tmq));
|
||||||
|
} else {
|
||||||
|
fprintf(stdout, "Subscribe topics successfully.\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
ws_tmq_list_destroy(topic_list);
|
||||||
|
|
||||||
|
basic_consume_loop(tmq);
|
||||||
|
// ANCHOR_END: subscribe_3
|
||||||
|
|
||||||
|
consume_repeatly(tmq);
|
||||||
|
|
||||||
|
manual_commit(tmq);
|
||||||
|
|
||||||
|
// ANCHOR: unsubscribe_and_close
|
||||||
|
// unsubscribe the topic
|
||||||
|
code = ws_tmq_unsubscribe(tmq);
|
||||||
|
if (code) {
|
||||||
|
fprintf(stderr,
|
||||||
|
"Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n",
|
||||||
|
topic_name, config.group_id, config.client_id, code, ws_tmq_errstr(tmq));
|
||||||
|
} else {
|
||||||
|
fprintf(stdout, "Consumer unsubscribed successfully.\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
// close the consumer
|
||||||
|
code = ws_tmq_consumer_close(tmq);
|
||||||
|
if (code) {
|
||||||
|
fprintf(stderr, "Failed to close consumer, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n",
|
||||||
|
topic_name, config.group_id, config.client_id, code, ws_tmq_errstr(tmq));
|
||||||
|
} else {
|
||||||
|
fprintf(stdout, "Consumer closed successfully.\n");
|
||||||
|
}
|
||||||
|
// ANCHOR_END: unsubscribe_and_close
|
||||||
|
|
||||||
|
thread_stop = 1;
|
||||||
|
pthread_join(thread_id, NULL);
|
||||||
|
|
||||||
|
if (drop_topic(pConn) < 0) {
|
||||||
|
fprintf(stderr, "Failed to drop topic.\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
deinit_env(pConn);
|
||||||
|
return 0;
|
||||||
|
}
|
|
@ -0,0 +1,71 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||||
|
// to compile: gcc -o with_reqid_demo with_reqid_demo.c -ltaos
|
||||||
|
|
||||||
|
#include <inttypes.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include "taosws.h"
|
||||||
|
|
||||||
|
static int DemoWithReqId() {
|
||||||
|
// ANCHOR: with_reqid
|
||||||
|
int code = 0;
|
||||||
|
char *dsn = "ws://localhost:6041";
|
||||||
|
|
||||||
|
// connect
|
||||||
|
WS_TAOS *taos = ws_connect(dsn);
|
||||||
|
if (taos == NULL) {
|
||||||
|
fprintf(stderr, "Failed to connect to %s, ErrCode: 0x%x, ErrMessage: %s.\n", dsn, ws_errno(NULL), ws_errstr(NULL));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char *sql = "SELECT ts, current, location FROM power.meters limit 1";
|
||||||
|
// query data with reqid
|
||||||
|
long reqid = 3L;
|
||||||
|
WS_RES *result = ws_query_with_reqid(taos, sql, reqid);
|
||||||
|
code = ws_errno(result);
|
||||||
|
if (code != 0) {
|
||||||
|
fprintf(stderr, "Failed to execute sql withQID: %ld, ErrCode: 0x%x, ErrMessage: %s\n.", reqid, code,
|
||||||
|
ws_errstr(result));
|
||||||
|
ws_close(taos);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
WS_ROW row = NULL;
|
||||||
|
int rows = 0;
|
||||||
|
int num_fields = ws_field_count(result);
|
||||||
|
const WS_FIELD *fields = ws_fetch_fields(result);
|
||||||
|
|
||||||
|
fprintf(stdout, "query successfully, got %d fields, the sql is: %s.\n", num_fields, sql);
|
||||||
|
|
||||||
|
// fetch the records row by row
|
||||||
|
while ((row = ws_fetch_row(result))) {
|
||||||
|
// Add your data processing logic here
|
||||||
|
|
||||||
|
rows++;
|
||||||
|
}
|
||||||
|
fprintf(stdout, "total rows: %d\n", rows);
|
||||||
|
ws_free_result(result);
|
||||||
|
|
||||||
|
// close & clean
|
||||||
|
ws_close(taos);
|
||||||
|
return 0;
|
||||||
|
// ANCHOR_END: with_reqid
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point: run the request-id demo and propagate its status code.
int main(int argc, char *argv[]) {
  (void)argc;
  (void)argv;
  return DemoWithReqId();
}
|
|
@ -0,0 +1,101 @@
|
||||||
|
PROJECT(TDengine)
|
||||||
|
|
||||||
|
IF (TD_LINUX)
|
||||||
|
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
|
||||||
|
AUX_SOURCE_DIRECTORY(. SRC)
|
||||||
|
|
||||||
|
add_executable(docs_connect_example "")
|
||||||
|
add_executable(docs_create_db_demo "")
|
||||||
|
add_executable(docs_insert_data_demo "")
|
||||||
|
add_executable(docs_query_data_demo "")
|
||||||
|
add_executable(docs_with_reqid_demo "")
|
||||||
|
add_executable(docs_sml_insert_demo "")
|
||||||
|
add_executable(docs_stmt_insert_demo "")
|
||||||
|
add_executable(docs_tmq_demo "")
|
||||||
|
|
||||||
|
target_sources(docs_connect_example
|
||||||
|
PRIVATE
|
||||||
|
"connect_example.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_sources(docs_create_db_demo
|
||||||
|
PRIVATE
|
||||||
|
"create_db_demo.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_sources(docs_insert_data_demo
|
||||||
|
PRIVATE
|
||||||
|
"insert_data_demo.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_sources(docs_query_data_demo
|
||||||
|
PRIVATE
|
||||||
|
"query_data_demo.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_sources(docs_with_reqid_demo
|
||||||
|
PRIVATE
|
||||||
|
"with_reqid_demo.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_sources(docs_sml_insert_demo
|
||||||
|
PRIVATE
|
||||||
|
"sml_insert_demo.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_sources(docs_stmt_insert_demo
|
||||||
|
PRIVATE
|
||||||
|
"stmt_insert_demo.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_sources(docs_tmq_demo
|
||||||
|
PRIVATE
|
||||||
|
"tmq_demo.c"
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_connect_example
|
||||||
|
taos
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_create_db_demo
|
||||||
|
taos
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_insert_data_demo
|
||||||
|
taos
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_query_data_demo
|
||||||
|
taos
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_with_reqid_demo
|
||||||
|
taos
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_sml_insert_demo
|
||||||
|
taos
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_stmt_insert_demo
|
||||||
|
taos
|
||||||
|
)
|
||||||
|
|
||||||
|
target_link_libraries(docs_tmq_demo
|
||||||
|
taos
|
||||||
|
pthread
|
||||||
|
)
|
||||||
|
|
||||||
|
SET_TARGET_PROPERTIES(docs_connect_example PROPERTIES OUTPUT_NAME docs_connect_example)
|
||||||
|
SET_TARGET_PROPERTIES(docs_create_db_demo PROPERTIES OUTPUT_NAME docs_create_db_demo)
|
||||||
|
SET_TARGET_PROPERTIES(docs_insert_data_demo PROPERTIES OUTPUT_NAME docs_insert_data_demo)
|
||||||
|
SET_TARGET_PROPERTIES(docs_query_data_demo PROPERTIES OUTPUT_NAME docs_query_data_demo)
|
||||||
|
SET_TARGET_PROPERTIES(docs_with_reqid_demo PROPERTIES OUTPUT_NAME docs_with_reqid_demo)
|
||||||
|
SET_TARGET_PROPERTIES(docs_sml_insert_demo PROPERTIES OUTPUT_NAME docs_sml_insert_demo)
|
||||||
|
SET_TARGET_PROPERTIES(docs_stmt_insert_demo PROPERTIES OUTPUT_NAME docs_stmt_insert_demo)
|
||||||
|
SET_TARGET_PROPERTIES(docs_tmq_demo PROPERTIES OUTPUT_NAME docs_tmq_demo)
|
||||||
|
ENDIF ()
|
||||||
|
IF (TD_DARWIN)
|
||||||
|
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
|
||||||
|
AUX_SOURCE_DIRECTORY(. SRC)
|
||||||
|
ENDIF ()
|
|
@ -0,0 +1,34 @@
|
||||||
|
# Makefile for building TDengine examples on TD Linux platform
|
||||||
|
|
||||||
|
INCLUDE_DIRS =
|
||||||
|
|
||||||
|
TARGETS = connect_example \
|
||||||
|
create_db_demo \
|
||||||
|
insert_data_demo \
|
||||||
|
query_data_demo \
|
||||||
|
with_reqid_demo \
|
||||||
|
sml_insert_demo \
|
||||||
|
stmt_insert_demo \
|
||||||
|
tmq_demo
|
||||||
|
|
||||||
|
SOURCES = connect_example.c \
|
||||||
|
create_db_demo.c \
|
||||||
|
insert_data_demo.c \
|
||||||
|
query_data_demo.c \
|
||||||
|
with_reqid_demo.c \
|
||||||
|
sml_insert_demo.c \
|
||||||
|
stmt_insert_demo.c \
|
||||||
|
tmq_demo.c
|
||||||
|
|
||||||
|
LIBS = -ltaos -lpthread
|
||||||
|
|
||||||
|
|
||||||
|
CFLAGS = -g
|
||||||
|
|
||||||
|
all: $(TARGETS)
|
||||||
|
|
||||||
|
$(TARGETS):
|
||||||
|
$(CC) $(CFLAGS) -o $@ $(wildcard $(@F).c) $(LIBS)
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -f $(TARGETS)
|
|
@ -22,7 +22,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.3.3</version>
|
<version>3.4.0</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- ANCHOR_END: dep-->
|
<!-- ANCHOR_END: dep-->
|
||||||
|
|
||||||
|
|
|
@ -17,8 +17,8 @@ public class SchemalessWsTest {
|
||||||
private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||||
|
|
||||||
public static void main(String[] args) throws SQLException {
|
public static void main(String[] args) throws SQLException {
|
||||||
final String url = "jdbc:TAOS-RS://" + host + ":6041?user=root&password=taosdata&batchfetch=true";
|
final String url = "jdbc:TAOS-WS://" + host + ":6041?user=root&password=taosdata";
|
||||||
try(Connection connection = DriverManager.getConnection(url)){
|
try (Connection connection = DriverManager.getConnection(url)) {
|
||||||
init(connection);
|
init(connection);
|
||||||
AbstractConnection conn = connection.unwrap(AbstractConnection.class);
|
AbstractConnection conn = connection.unwrap(AbstractConnection.class);
|
||||||
|
|
||||||
|
|
|
@ -12,9 +12,9 @@ public class WSConnectExample {
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
// use
|
// use
|
||||||
// String jdbcUrl =
|
// String jdbcUrl =
|
||||||
// "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata&batchfetch=true";
|
// "jdbc:TAOS-WS://localhost:6041/dbName?user=root&password=taosdata";
|
||||||
// if you want to connect a specified database named "dbName".
|
// if you want to connect a specified database named "dbName".
|
||||||
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata&batchfetch=true";
|
String jdbcUrl = "jdbc:TAOS-WS://localhost:6041?user=root&password=taosdata";
|
||||||
Properties connProps = new Properties();
|
Properties connProps = new Properties();
|
||||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
|
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
|
||||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||||
|
|
|
@ -15,7 +15,7 @@ public class WSParameterBindingBasicDemo {
|
||||||
|
|
||||||
public static void main(String[] args) throws SQLException {
|
public static void main(String[] args) throws SQLException {
|
||||||
|
|
||||||
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041";
|
||||||
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
||||||
init(conn);
|
init(conn);
|
||||||
|
|
||||||
|
@ -40,7 +40,7 @@ public class WSParameterBindingBasicDemo {
|
||||||
pstmt.setFloat(4, random.nextFloat());
|
pstmt.setFloat(4, random.nextFloat());
|
||||||
pstmt.addBatch();
|
pstmt.addBatch();
|
||||||
}
|
}
|
||||||
int [] exeResult = pstmt.executeBatch();
|
int[] exeResult = pstmt.executeBatch();
|
||||||
// you can check exeResult here
|
// you can check exeResult here
|
||||||
System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
|
System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
|
||||||
}
|
}
|
||||||
|
@ -60,7 +60,8 @@ public class WSParameterBindingBasicDemo {
|
||||||
try (Statement stmt = conn.createStatement()) {
|
try (Statement stmt = conn.createStatement()) {
|
||||||
stmt.execute("CREATE DATABASE IF NOT EXISTS power");
|
stmt.execute("CREATE DATABASE IF NOT EXISTS power");
|
||||||
stmt.execute("USE power");
|
stmt.execute("USE power");
|
||||||
stmt.execute("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
|
stmt.execute(
|
||||||
|
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,7 +40,7 @@ public class WSParameterBindingFullDemo {
|
||||||
|
|
||||||
public static void main(String[] args) throws SQLException {
|
public static void main(String[] args) throws SQLException {
|
||||||
|
|
||||||
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041/";
|
||||||
|
|
||||||
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
||||||
|
|
||||||
|
@ -51,8 +51,10 @@ public class WSParameterBindingFullDemo {
|
||||||
stmtAll(conn);
|
stmtAll(conn);
|
||||||
|
|
||||||
} catch (SQLException ex) {
|
} catch (SQLException ex) {
|
||||||
// handle any errors, please refer to the JDBC specifications for detailed exceptions info
|
// handle any errors, please refer to the JDBC specifications for detailed
|
||||||
System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
|
// exceptions info
|
||||||
|
System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: "
|
||||||
|
+ ex.getMessage());
|
||||||
throw ex;
|
throw ex;
|
||||||
} catch (Exception ex) {
|
} catch (Exception ex) {
|
||||||
System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage());
|
System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage());
|
||||||
|
@ -104,30 +106,29 @@ public class WSParameterBindingFullDemo {
|
||||||
pstmt.setTagBoolean(3, true);
|
pstmt.setTagBoolean(3, true);
|
||||||
pstmt.setTagString(4, "binary_value");
|
pstmt.setTagString(4, "binary_value");
|
||||||
pstmt.setTagNString(5, "nchar_value");
|
pstmt.setTagNString(5, "nchar_value");
|
||||||
pstmt.setTagVarbinary(6, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
pstmt.setTagVarbinary(6, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
|
||||||
pstmt.setTagGeometry(7, new byte[]{
|
pstmt.setTagGeometry(7, new byte[] {
|
||||||
0x01, 0x01, 0x00, 0x00,
|
0x01, 0x01, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x59,
|
0x00, 0x00, 0x00, 0x59,
|
||||||
0x40, 0x00, 0x00, 0x00,
|
0x40, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
0x00, 0x00, 0x00, 0x59, 0x40 });
|
||||||
|
|
||||||
long current = System.currentTimeMillis();
|
long current = System.currentTimeMillis();
|
||||||
|
|
||||||
|
|
||||||
pstmt.setTimestamp(1, new Timestamp(current));
|
pstmt.setTimestamp(1, new Timestamp(current));
|
||||||
pstmt.setInt(2, 1);
|
pstmt.setInt(2, 1);
|
||||||
pstmt.setDouble(3, 1.1);
|
pstmt.setDouble(3, 1.1);
|
||||||
pstmt.setBoolean(4, true);
|
pstmt.setBoolean(4, true);
|
||||||
pstmt.setString(5, "binary_value");
|
pstmt.setString(5, "binary_value");
|
||||||
pstmt.setNString(6, "nchar_value");
|
pstmt.setNString(6, "nchar_value");
|
||||||
pstmt.setVarbinary(7, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
pstmt.setVarbinary(7, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
|
||||||
pstmt.setGeometry(8, new byte[]{
|
pstmt.setGeometry(8, new byte[] {
|
||||||
0x01, 0x01, 0x00, 0x00,
|
0x01, 0x01, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x59,
|
0x00, 0x00, 0x00, 0x59,
|
||||||
0x40, 0x00, 0x00, 0x00,
|
0x40, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
0x00, 0x00, 0x00, 0x59, 0x40 });
|
||||||
pstmt.addBatch();
|
pstmt.addBatch();
|
||||||
pstmt.executeBatch();
|
pstmt.executeBatch();
|
||||||
System.out.println("Successfully inserted rows to example_all_type_stmt.ntb");
|
System.out.println("Successfully inserted rows to example_all_type_stmt.ntb");
|
||||||
|
|
|
@ -4,7 +4,6 @@
|
||||||
"main": "index.js",
|
"main": "index.js",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@tdengine/client": "^3.0.1",
|
"@tdengine/websocket": "^3.1.0"
|
||||||
"@tdengine/rest": "^3.0.0"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,6 +34,7 @@ async function json_tag_example() {
|
||||||
|
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
} finally {
|
} finally {
|
||||||
if (wsSql) {
|
if (wsSql) {
|
||||||
await wsSql.close();
|
await wsSql.close();
|
||||||
|
@ -81,6 +82,7 @@ async function all_type_example() {
|
||||||
|
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
} finally {
|
} finally {
|
||||||
if (wsSql) {
|
if (wsSql) {
|
||||||
await wsSql.close();
|
await wsSql.close();
|
||||||
|
|
|
@ -46,6 +46,7 @@ async function json_tag_example() {
|
||||||
|
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err
|
||||||
} finally {
|
} finally {
|
||||||
if (wsSql) {
|
if (wsSql) {
|
||||||
await wsSql.close();
|
await wsSql.close();
|
||||||
|
@ -125,6 +126,7 @@ async function all_type_example() {
|
||||||
|
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
} finally {
|
} finally {
|
||||||
if (stmt) {
|
if (stmt) {
|
||||||
await stmt.close();
|
await stmt.close();
|
||||||
|
@ -136,10 +138,7 @@ async function all_type_example() {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
async function test() {
|
async function test() {
|
||||||
taos.setLevel("debug")
|
|
||||||
await json_tag_example()
|
await json_tag_example()
|
||||||
await all_type_example()
|
await all_type_example()
|
||||||
taos.destroy();
|
taos.destroy();
|
||||||
|
|
|
@ -1,53 +0,0 @@
|
||||||
const taos = require("@tdengine/websocket");
|
|
||||||
|
|
||||||
var host = null;
|
|
||||||
for(var i = 2; i < global.process.argv.length; i++){
|
|
||||||
var key = global.process.argv[i].split("=")[0];
|
|
||||||
var value = global.process.argv[i].split("=")[1];
|
|
||||||
if("host" == key){
|
|
||||||
host = value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if(host == null){
|
|
||||||
console.log("Usage: node nodejsChecker.js host=<hostname> port=<port>");
|
|
||||||
process.exit(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
let dbData = ["{\"metric\": \"meter_current\",\"timestamp\": 1626846402,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}",
|
|
||||||
"{\"metric\": \"meter_current\",\"timestamp\": 1626846403,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1002\"}}",
|
|
||||||
"{\"metric\": \"meter_current\",\"timestamp\": 1626846404,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1003\"}}"]
|
|
||||||
|
|
||||||
async function createConnect() {
|
|
||||||
let dsn = 'ws://' + host + ':6041'
|
|
||||||
let conf = new taos.WSConfig(dsn);
|
|
||||||
conf.setUser('root');
|
|
||||||
conf.setPwd('taosdata');
|
|
||||||
conf.setDb('power');
|
|
||||||
return await taos.sqlConnect(conf);
|
|
||||||
}
|
|
||||||
|
|
||||||
async function test() {
|
|
||||||
let wsSql = null;
|
|
||||||
let wsRows = null;
|
|
||||||
let reqId = 0;
|
|
||||||
try {
|
|
||||||
wsSql = await createConnect()
|
|
||||||
await wsSql.exec('CREATE DATABASE IF NOT EXISTS power KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;', reqId++);
|
|
||||||
await wsSql.schemalessInsert([dbData], taos.SchemalessProto.OpenTSDBJsonFormatProtocol, taos.Precision.SECONDS, 0);
|
|
||||||
}
|
|
||||||
catch (err) {
|
|
||||||
console.error(err.code, err.message);
|
|
||||||
}
|
|
||||||
finally {
|
|
||||||
if (wsRows) {
|
|
||||||
await wsRows.close();
|
|
||||||
}
|
|
||||||
if (wsSql) {
|
|
||||||
await wsSql.close();
|
|
||||||
}
|
|
||||||
taos.destroy();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
test()
|
|
|
@ -15,8 +15,8 @@ async function createConnect() {
|
||||||
return wsSql;
|
return wsSql;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
async function test() {
|
async function test() {
|
||||||
let dsn = 'ws://localhost:6041'
|
|
||||||
let wsSql = null;
|
let wsSql = null;
|
||||||
let wsRows = null;
|
let wsRows = null;
|
||||||
let ttl = 0;
|
let ttl = 0;
|
||||||
|
@ -29,6 +29,7 @@ async function test() {
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
console.error(`Failed to insert data with schemaless, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to insert data with schemaless, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
}
|
}
|
||||||
finally {
|
finally {
|
||||||
if (wsRows) {
|
if (wsRows) {
|
||||||
|
@ -40,4 +41,5 @@ async function test() {
|
||||||
taos.destroy();
|
taos.destroy();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
test()
|
test()
|
||||||
|
|
|
@ -10,11 +10,9 @@ for(var i = 2; i < global.process.argv.length; i++){
|
||||||
}
|
}
|
||||||
|
|
||||||
if(host == null){
|
if(host == null){
|
||||||
console.log("Usage: node nodejsChecker.js host=<hostname> port=<port>");
|
host = 'localhost';
|
||||||
process.exit(0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
async function createConnect() {
|
async function createConnect() {
|
||||||
let dsn = 'ws://' + host + ':6041'
|
let dsn = 'ws://' + host + ':6041'
|
||||||
console.log(dsn)
|
console.log(dsn)
|
||||||
|
@ -41,7 +39,7 @@ async function test() {
|
||||||
taosResult = await wsSql.exec('USE power', reqId++);
|
taosResult = await wsSql.exec('USE power', reqId++);
|
||||||
console.log(taosResult);
|
console.log(taosResult);
|
||||||
|
|
||||||
taosResult = await wsSql.exec('CREATE STABLE IF NOT EXISTS meters (_ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);', reqId++);
|
taosResult = await wsSql.exec('CREATE STABLE IF NOT EXISTS meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);', reqId++);
|
||||||
console.log(taosResult);
|
console.log(taosResult);
|
||||||
|
|
||||||
taosResult = await wsSql.exec('DESCRIBE meters', reqId++);
|
taosResult = await wsSql.exec('DESCRIBE meters', reqId++);
|
||||||
|
@ -62,6 +60,7 @@ async function test() {
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
console.error(err.code, err.message);
|
console.error(err.code, err.message);
|
||||||
|
throw err;
|
||||||
}
|
}
|
||||||
finally {
|
finally {
|
||||||
if (wsRows) {
|
if (wsRows) {
|
||||||
|
|
|
@ -41,6 +41,7 @@ async function createDbAndTable() {
|
||||||
console.log("Create stable power.meters successfully");
|
console.log("Create stable power.meters successfully");
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error(`Failed to create database power or stable meters, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to create database power or stable meters, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
} finally {
|
} finally {
|
||||||
if (wsSql) {
|
if (wsSql) {
|
||||||
await wsSql.close();
|
await wsSql.close();
|
||||||
|
@ -53,21 +54,23 @@ async function createDbAndTable() {
|
||||||
// ANCHOR: insertData
|
// ANCHOR: insertData
|
||||||
async function insertData() {
|
async function insertData() {
|
||||||
let wsSql = null
|
let wsSql = null
|
||||||
try {
|
|
||||||
wsSql = await createConnect();
|
|
||||||
let insertQuery = "INSERT INTO " +
|
let insertQuery = "INSERT INTO " +
|
||||||
"power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " +
|
"power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " +
|
||||||
"VALUES " +
|
"VALUES " +
|
||||||
"(NOW + 1a, 10.30000, 219, 0.31000) " +
|
"(NOW + 1a, 10.30000, 219, 0.31000) " +
|
||||||
"(NOW + 2a, 12.60000, 218, 0.33000) " +
|
"(NOW + 2a, 12.60000, 218, 0.33000) " +
|
||||||
"(NOW + 3a, 12.30000, 221, 0.31000) " +
|
"(NOW + 3a, 12.30000, 221, 0.31000) " +
|
||||||
"power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " +
|
"power.d1002 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 3) " +
|
||||||
"VALUES " +
|
"VALUES " +
|
||||||
"(NOW + 1a, 10.30000, 218, 0.25000) ";
|
"(NOW + 1a, 10.30000, 218, 0.25000) ";
|
||||||
|
|
||||||
|
try {
|
||||||
|
wsSql = await createConnect();
|
||||||
taosResult = await wsSql.exec(insertQuery);
|
taosResult = await wsSql.exec(insertQuery);
|
||||||
console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to power.meters.");
|
console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to power.meters.");
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error(`Failed to insert data to power.meters, sql: ${insertQuery}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to insert data to power.meters, sql: ${insertQuery}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
} finally {
|
} finally {
|
||||||
if (wsSql) {
|
if (wsSql) {
|
||||||
await wsSql.close();
|
await wsSql.close();
|
||||||
|
@ -91,6 +94,7 @@ async function queryData() {
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
console.error(`Failed to query data from power.meters, sql: ${sql}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to query data from power.meters, sql: ${sql}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
}
|
}
|
||||||
finally {
|
finally {
|
||||||
if (wsRows) {
|
if (wsRows) {
|
||||||
|
@ -118,6 +122,7 @@ async function sqlWithReqid() {
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
console.error(`Failed to query data from power.meters, reqId: ${reqId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to query data from power.meters, reqId: ${reqId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
}
|
}
|
||||||
finally {
|
finally {
|
||||||
if (wsRows) {
|
if (wsRows) {
|
||||||
|
|
|
@ -23,7 +23,7 @@ async function prepare() {
|
||||||
return wsSql
|
return wsSql
|
||||||
}
|
}
|
||||||
|
|
||||||
(async () => {
|
async function test() {
|
||||||
let stmt = null;
|
let stmt = null;
|
||||||
let connector = null;
|
let connector = null;
|
||||||
try {
|
try {
|
||||||
|
@ -60,6 +60,7 @@ async function prepare() {
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
console.error(`Failed to insert to table meters using stmt, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to insert to table meters using stmt, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
}
|
}
|
||||||
finally {
|
finally {
|
||||||
if (stmt) {
|
if (stmt) {
|
||||||
|
@ -70,4 +71,6 @@ async function prepare() {
|
||||||
}
|
}
|
||||||
taos.destroy();
|
taos.destroy();
|
||||||
}
|
}
|
||||||
})();
|
}
|
||||||
|
|
||||||
|
test()
|
|
@ -1,58 +0,0 @@
|
||||||
const taos = require("@tdengine/websocket");
|
|
||||||
|
|
||||||
var host = null;
|
|
||||||
for(var i = 2; i < global.process.argv.length; i++){
|
|
||||||
var key = global.process.argv[i].split("=")[0];
|
|
||||||
var value = global.process.argv[i].split("=")[1];
|
|
||||||
if("host" == key){
|
|
||||||
host = value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if(host == null){
|
|
||||||
console.log("Usage: node nodejsChecker.js host=<hostname> port=<port>");
|
|
||||||
process.exit(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
let dbData = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
|
|
||||||
"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
|
|
||||||
"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
|
|
||||||
"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
|
|
||||||
"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
|
|
||||||
"meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
|
|
||||||
"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
|
|
||||||
"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",];
|
|
||||||
|
|
||||||
async function createConnect() {
|
|
||||||
let dsn = 'ws://' + host + ':6041'
|
|
||||||
let conf = new taos.WSConfig(dsn);
|
|
||||||
conf.setUser('root');
|
|
||||||
conf.setPwd('taosdata');
|
|
||||||
|
|
||||||
return await taos.sqlConnect(conf);
|
|
||||||
}
|
|
||||||
|
|
||||||
async function test() {
|
|
||||||
let wsSql = null;
|
|
||||||
let wsRows = null;
|
|
||||||
let reqId = 0;
|
|
||||||
try {
|
|
||||||
wsSql = await createConnect()
|
|
||||||
await wsSql.exec('create database if not exists power KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;', reqId++);
|
|
||||||
await wsSql.exec('use power', reqId++);
|
|
||||||
await wsSql.schemalessInsert(dbData, taos.SchemalessProto.OpenTSDBTelnetLineProtocol, taos.Precision.MILLI_SECONDS, 0);
|
|
||||||
}
|
|
||||||
catch (err) {
|
|
||||||
console.error(err.code, err.message);
|
|
||||||
}
|
|
||||||
finally {
|
|
||||||
if (wsRows) {
|
|
||||||
await wsRows.close();
|
|
||||||
}
|
|
||||||
if (wsSql) {
|
|
||||||
await wsSql.close();
|
|
||||||
}
|
|
||||||
taos.destroy();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
test()
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
const { sleep } = require("@tdengine/websocket");
|
||||||
const taos = require("@tdengine/websocket");
|
const taos = require("@tdengine/websocket");
|
||||||
|
|
||||||
// ANCHOR: create_consumer
|
// ANCHOR: create_consumer
|
||||||
|
@ -49,12 +50,20 @@ async function prepare() {
|
||||||
|
|
||||||
let createTopic = `CREATE TOPIC IF NOT EXISTS ${topics[0]} AS SELECT * FROM ${db}.${stable}`;
|
let createTopic = `CREATE TOPIC IF NOT EXISTS ${topics[0]} AS SELECT * FROM ${db}.${stable}`;
|
||||||
await wsSql.exec(createTopic);
|
await wsSql.exec(createTopic);
|
||||||
|
await wsSql.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
async function insert() {
|
||||||
for (let i = 0; i < 10; i++) {
|
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||||
|
conf.setUser('root');
|
||||||
|
conf.setPwd('taosdata');
|
||||||
|
conf.setDb('power');
|
||||||
|
let wsSql = await taos.sqlConnect(conf);
|
||||||
|
for (let i = 0; i < 50; i++) {
|
||||||
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
|
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
|
||||||
|
await sleep(100);
|
||||||
}
|
}
|
||||||
wsSql.close();
|
await wsSql.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
async function subscribe(consumer) {
|
async function subscribe(consumer) {
|
||||||
|
@ -82,13 +91,17 @@ async function test() {
|
||||||
let consumer = null;
|
let consumer = null;
|
||||||
try {
|
try {
|
||||||
await prepare();
|
await prepare();
|
||||||
consumer = await createConsumer()
|
consumer = await createConsumer();
|
||||||
await subscribe(consumer)
|
const allPromises = [];
|
||||||
|
allPromises.push(subscribe(consumer));
|
||||||
|
allPromises.push(insert());
|
||||||
|
await Promise.all(allPromises);
|
||||||
await consumer.unsubscribe();
|
await consumer.unsubscribe();
|
||||||
console.log("Consumer unsubscribed successfully.");
|
console.log("Consumer unsubscribed successfully.");
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
console.error(`Failed to unsubscribe consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to unsubscribe consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
}
|
}
|
||||||
finally {
|
finally {
|
||||||
if (consumer) {
|
if (consumer) {
|
||||||
|
|
|
@ -1,41 +1,45 @@
|
||||||
|
const { sleep } = require("@tdengine/websocket");
|
||||||
const taos = require("@tdengine/websocket");
|
const taos = require("@tdengine/websocket");
|
||||||
|
|
||||||
const db = 'power';
|
const db = 'power';
|
||||||
const stable = 'meters';
|
const stable = 'meters';
|
||||||
|
const url = 'ws://localhost:6041';
|
||||||
const topic = 'topic_meters'
|
const topic = 'topic_meters'
|
||||||
const topics = [topic];
|
const topics = [topic];
|
||||||
const groupId = "group1";
|
const groupId = "group1";
|
||||||
const clientId = "client1";
|
const clientId = "client1";
|
||||||
|
|
||||||
|
|
||||||
// ANCHOR: create_consumer
|
|
||||||
async function createConsumer() {
|
async function createConsumer() {
|
||||||
|
|
||||||
|
let groupId = "group1";
|
||||||
|
let clientId = "client1";
|
||||||
let configMap = new Map([
|
let configMap = new Map([
|
||||||
[taos.TMQConstants.GROUP_ID, "group1"],
|
[taos.TMQConstants.GROUP_ID, groupId],
|
||||||
[taos.TMQConstants.CLIENT_ID, 'client1'],
|
[taos.TMQConstants.CLIENT_ID, clientId],
|
||||||
[taos.TMQConstants.CONNECT_USER, "root"],
|
[taos.TMQConstants.CONNECT_USER, "root"],
|
||||||
[taos.TMQConstants.CONNECT_PASS, "taosdata"],
|
[taos.TMQConstants.CONNECT_PASS, "taosdata"],
|
||||||
[taos.TMQConstants.AUTO_OFFSET_RESET, "latest"],
|
[taos.TMQConstants.AUTO_OFFSET_RESET, "latest"],
|
||||||
[taos.TMQConstants.WS_URL, 'ws://localhost:6041'],
|
[taos.TMQConstants.WS_URL, url],
|
||||||
[taos.TMQConstants.ENABLE_AUTO_COMMIT, 'true'],
|
[taos.TMQConstants.ENABLE_AUTO_COMMIT, 'true'],
|
||||||
[taos.TMQConstants.AUTO_COMMIT_INTERVAL_MS, '1000']
|
[taos.TMQConstants.AUTO_COMMIT_INTERVAL_MS, '1000']
|
||||||
]);
|
]);
|
||||||
try {
|
try {
|
||||||
return await taos.tmqConnect(configMap);
|
conn = await taos.tmqConnect(configMap);
|
||||||
|
console.log(`Create consumer successfully, host: ${url}, groupId: ${groupId}, clientId: ${clientId}`)
|
||||||
|
return conn;
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error(err);
|
console.error(`Failed to create websocket consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
throw err;
|
throw err;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
// ANCHOR_END: create_consumer
|
|
||||||
|
|
||||||
async function prepare() {
|
async function prepare() {
|
||||||
let conf = new taos.WSConfig('ws://localhost:6041');
|
let conf = new taos.WSConfig('ws://192.168.1.98:6041');
|
||||||
conf.setUser('root');
|
conf.setUser('root');
|
||||||
conf.setPwd('taosdata');
|
conf.setPwd('taosdata');
|
||||||
conf.setDb('power');
|
conf.setDb('power');
|
||||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db} KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;`;
|
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||||
|
|
||||||
let wsSql = await taos.sqlConnect(conf);
|
let wsSql = await taos.sqlConnect(conf);
|
||||||
|
@ -44,58 +48,63 @@ async function prepare() {
|
||||||
|
|
||||||
let createTopic = `CREATE TOPIC IF NOT EXISTS ${topics[0]} AS SELECT * FROM ${db}.${stable}`;
|
let createTopic = `CREATE TOPIC IF NOT EXISTS ${topics[0]} AS SELECT * FROM ${db}.${stable}`;
|
||||||
await wsSql.exec(createTopic);
|
await wsSql.exec(createTopic);
|
||||||
|
await wsSql.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
async function insert() {
|
||||||
for (let i = 0; i < 10; i++) {
|
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||||
|
conf.setUser('root');
|
||||||
|
conf.setPwd('taosdata');
|
||||||
|
conf.setDb('power');
|
||||||
|
let wsSql = await taos.sqlConnect(conf);
|
||||||
|
for (let i = 0; i < 1; i++) {
|
||||||
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
|
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
|
||||||
}
|
}
|
||||||
await wsSql.close();
|
await wsSql.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
// ANCHOR: subscribe
|
// ANCHOR: offset
|
||||||
async function subscribe(consumer) {
|
async function subscribe(consumer) {
|
||||||
try {
|
try {
|
||||||
await consumer.subscribe(['topic_meters']);
|
|
||||||
for (let i = 0; i < 50; i++) {
|
|
||||||
let res = await consumer.poll(100);
|
|
||||||
for (let [key, value] of res) {
|
|
||||||
// Add your data processing logic here
|
|
||||||
console.log(`data: ${key} ${value}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (err) {
|
|
||||||
console.error(`Failed to poll data, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
|
||||||
throw err;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
// ANCHOR_END: subscribe
|
|
||||||
|
|
||||||
// ANCHOR: offset
|
|
||||||
async function test() {
|
|
||||||
let consumer = null;
|
|
||||||
try {
|
|
||||||
await prepare();
|
|
||||||
let consumer = await createConsumer()
|
|
||||||
await consumer.subscribe(['topic_meters']);
|
await consumer.subscribe(['topic_meters']);
|
||||||
let res = new Map();
|
let res = new Map();
|
||||||
while (res.size == 0) {
|
while (res.size == 0) {
|
||||||
res = await consumer.poll(100);
|
res = await consumer.poll(100);
|
||||||
|
await consumer.commit();
|
||||||
}
|
}
|
||||||
|
|
||||||
let assignment = await consumer.assignment();
|
let assignment = await consumer.assignment();
|
||||||
await consumer.seekToBeginning(assignment);
|
await consumer.seekToBeginning(assignment);
|
||||||
console.log("Assignment seek to beginning successfully");
|
console.log("Assignment seek to beginning successfully");
|
||||||
|
} catch (err) {
|
||||||
|
console.error(`Failed to seek offset, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// ANCHOR_END: offset
|
||||||
|
|
||||||
|
async function test() {
|
||||||
|
let consumer = null;
|
||||||
|
try {
|
||||||
|
await prepare();
|
||||||
|
consumer = await createConsumer();
|
||||||
|
const allPromises = [];
|
||||||
|
allPromises.push(subscribe(consumer));
|
||||||
|
allPromises.push(insert());
|
||||||
|
await Promise.all(allPromises);
|
||||||
|
await consumer.unsubscribe();
|
||||||
|
console.log("Consumer unsubscribed successfully.");
|
||||||
}
|
}
|
||||||
catch (err) {
|
catch (err) {
|
||||||
console.error(`Failed to seek offset, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
console.error(`Failed to consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||||
|
throw err;
|
||||||
}
|
}
|
||||||
finally {
|
finally {
|
||||||
if (consumer) {
|
if (consumer) {
|
||||||
await consumer.close();
|
await consumer.close();
|
||||||
|
console.log("Consumer closed successfully.");
|
||||||
}
|
}
|
||||||
taos.destroy();
|
taos.destroy();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// ANCHOR_END: offset
|
|
||||||
test()
|
test()
|
||||||
|
|
|
@ -15,6 +15,7 @@ def create_connection():
|
||||||
print(f"Connected to {host}:{port} successfully.");
|
print(f"Connected to {host}:{port} successfully.");
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to connect to {host}:{port} , ErrMessage:{err}")
|
print(f"Failed to connect to {host}:{port} , ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -15,7 +15,7 @@ def create_connection():
|
||||||
print(f"Connected to {host}:{port} successfully.");
|
print(f"Connected to {host}:{port} successfully.");
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to connect to {host}:{port} , ErrMessage:{err}")
|
print(f"Failed to connect to {host}:{port} , ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
return conn
|
return conn
|
||||||
# ANCHOR_END: connect
|
# ANCHOR_END: connect
|
||||||
|
|
||||||
|
@ -28,6 +28,7 @@ def create_db_table(conn):
|
||||||
conn.execute("CREATE TABLE IF NOT EXISTS `d0` USING `meters` (groupId, location) TAGS(0, 'Los Angles')")
|
conn.execute("CREATE TABLE IF NOT EXISTS `d0` USING `meters` (groupId, location) TAGS(0, 'Los Angles')")
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f'Exception {err}')
|
print(f'Exception {err}')
|
||||||
|
raise err
|
||||||
# ANCHOR_END: create_db
|
# ANCHOR_END: create_db
|
||||||
|
|
||||||
def insert(conn):
|
def insert(conn):
|
||||||
|
@ -42,9 +43,10 @@ def insert(conn):
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
inserted = conn.execute(sql)
|
inserted = conn.execute(sql)
|
||||||
assert inserted == 8
|
assert inserted == 4
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f'Exception111 {err}')
|
print(f'Exception111 {err}')
|
||||||
|
raise err
|
||||||
# ANCHOR_END: insert
|
# ANCHOR_END: insert
|
||||||
|
|
||||||
def query(conn):
|
def query(conn):
|
||||||
|
@ -58,6 +60,7 @@ def query(conn):
|
||||||
print(row)
|
print(row)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f'Exception {err}')
|
print(f'Exception {err}')
|
||||||
|
raise err
|
||||||
# ANCHOR_END: query
|
# ANCHOR_END: query
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|
|
@ -21,6 +21,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to create database power or stable meters, ErrMessage:{err}")
|
print(f"Failed to create database power or stable meters, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -20,6 +20,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to create database power or stable meters, ErrMessage:{err}")
|
print(f"Failed to create database power or stable meters, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -21,6 +21,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to create database power or stable meters, ErrMessage:{err}")
|
print(f"Failed to create database power or stable meters, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -22,6 +22,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.")
|
print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -21,6 +21,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to insert data to power.meters, sql:{sql}, ErrMessage:{err}.")
|
print(f"Failed to insert data to power.meters, sql:{sql}, ErrMessage:{err}.")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -22,6 +22,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.")
|
print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -16,6 +16,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}")
|
print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -15,3 +15,4 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}")
|
print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
|
|
|
@ -15,6 +15,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}")
|
print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -18,7 +18,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
|
print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -16,3 +16,4 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
|
print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
|
|
|
@ -19,6 +19,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
|
print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -35,6 +35,7 @@ try:
|
||||||
print("Inserted data with schemaless successfully.");
|
print("Inserted data with schemaless successfully.");
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to insert data with schemaless, ErrMessage:{err}")
|
print(f"Failed to insert data with schemaless, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if conn:
|
if conn:
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
|
@ -75,8 +75,6 @@ def schemaless_insert():
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
try:
|
|
||||||
prepare()
|
prepare()
|
||||||
schemaless_insert()
|
schemaless_insert()
|
||||||
except Exception as err:
|
|
||||||
print(f"Failed to insert data with schemaless, err:{err}")
|
|
||||||
|
|
|
@ -57,6 +57,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to insert to table meters using stmt, ErrMessage:{err}")
|
print(f"Failed to insert to table meters using stmt, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if stmt:
|
if stmt:
|
||||||
stmt.close()
|
stmt.close()
|
||||||
|
|
|
@ -62,6 +62,7 @@ try:
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to insert to table meters using stmt, ErrMessage:{err}")
|
print(f"Failed to insert to table meters using stmt, ErrMessage:{err}")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if stmt:
|
if stmt:
|
||||||
stmt.close()
|
stmt.close()
|
||||||
|
|
|
@ -152,6 +152,7 @@ def unsubscribe(consumer):
|
||||||
print("Consumer unsubscribed successfully.");
|
print("Consumer unsubscribed successfully.");
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to unsubscribe consumer. topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.")
|
print(f"Failed to unsubscribe consumer. topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if consumer:
|
if consumer:
|
||||||
consumer.close()
|
consumer.close()
|
||||||
|
@ -166,7 +167,6 @@ if __name__ == "__main__":
|
||||||
subscribe(consumer)
|
subscribe(consumer)
|
||||||
seek_offset(consumer)
|
seek_offset(consumer)
|
||||||
commit_offset(consumer)
|
commit_offset(consumer)
|
||||||
except Exception as err:
|
|
||||||
print(f"Failed to execute consumer example, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.")
|
|
||||||
finally:
|
finally:
|
||||||
|
if consumer:
|
||||||
unsubscribe(consumer);
|
unsubscribe(consumer);
|
||||||
|
|
|
@ -31,7 +31,7 @@ def prepareMeta():
|
||||||
|
|
||||||
# create super table
|
# create super table
|
||||||
rowsAffected = conn.execute(
|
rowsAffected = conn.execute(
|
||||||
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
|
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(64))"
|
||||||
)
|
)
|
||||||
assert rowsAffected == 0
|
assert rowsAffected == 0
|
||||||
|
|
||||||
|
@ -155,6 +155,7 @@ def unsubscribe(consumer):
|
||||||
print("Consumer unsubscribed successfully.");
|
print("Consumer unsubscribed successfully.");
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Failed to unsubscribe consumer. topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.")
|
print(f"Failed to unsubscribe consumer. topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.")
|
||||||
|
raise err
|
||||||
finally:
|
finally:
|
||||||
if consumer:
|
if consumer:
|
||||||
consumer.close()
|
consumer.close()
|
||||||
|
@ -170,7 +171,6 @@ if __name__ == "__main__":
|
||||||
subscribe(consumer)
|
subscribe(consumer)
|
||||||
seek_offset(consumer)
|
seek_offset(consumer)
|
||||||
commit_offset(consumer)
|
commit_offset(consumer)
|
||||||
except Exception as err:
|
|
||||||
print(f"Failed to execute consumer example, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.")
|
|
||||||
finally:
|
finally:
|
||||||
|
if consumer:
|
||||||
unsubscribe(consumer)
|
unsubscribe(consumer)
|
||||||
|
|
|
@ -6,7 +6,7 @@ slug: /
|
||||||
|
|
||||||
TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的<a href="https://www.taosdata.com/" data-internallinksmanager029f6b8e52c="2" title="时序数据库" target="_blank" rel="noopener">时序数据库</a>(<a href="https://www.taosdata.com/time-series-database" data-internallinksmanager029f6b8e52c="9" title="Time Series DataBase" target="_blank" rel="noopener">Time Series Database</a>, <a href="https://www.taosdata.com/tsdb" data-internallinksmanager029f6b8e52c="8" title="TSDB" target="_blank" rel="noopener">TSDB</a>), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept)
|
TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的<a href="https://www.taosdata.com/" data-internallinksmanager029f6b8e52c="2" title="时序数据库" target="_blank" rel="noopener">时序数据库</a>(<a href="https://www.taosdata.com/time-series-database" data-internallinksmanager029f6b8e52c="9" title="Time Series DataBase" target="_blank" rel="noopener">Time Series Database</a>, <a href="https://www.taosdata.com/tsdb" data-internallinksmanager029f6b8e52c="8" title="TSDB" target="_blank" rel="noopener">TSDB</a>), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept)
|
||||||
|
|
||||||
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[快速入门](./basic)一章。
|
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[数据模型](./basic/model)一章。
|
||||||
|
|
||||||
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。
|
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。
|
||||||
|
|
||||||
|
|
|
@ -15,7 +15,7 @@ TDengine Cloud 大幅减轻了用户在部署、运维等方面的人力负担
|
||||||
|
|
||||||
要在 TDengine Cloud 注册新用户,请遵循以下简易步骤完成注册流程:
|
要在 TDengine Cloud 注册新用户,请遵循以下简易步骤完成注册流程:
|
||||||
|
|
||||||
1. 打开浏览器,访问 TDengine Cloud 的首页:https://cloud.taosdata.com,在右边的“注册”部分,填入自己的姓名以及企业邮箱地址,点击“获取验证码”按钮。
|
1. 打开浏览器,访问 [TDengine Cloud](https://cloud.taosdata.com),在右边的“注册”部分,填入自己的姓名以及企业邮箱地址,点击“获取验证码”按钮。
|
||||||
|
|
||||||
2. 检查企业邮箱,找到主题为“你的 TDengine Cloud 注册账户验证码”的邮件。从邮件内容中复制 6 位验证码,并将其粘贴到注册页面上的“验证码”输入框中。接着,点击“注册 TDengine Cloud”按钮,进入客户信息补全页面。
|
2. 检查企业邮箱,找到主题为“你的 TDengine Cloud 注册账户验证码”的邮件。从邮件内容中复制 6 位验证码,并将其粘贴到注册页面上的“验证码”输入框中。接着,点击“注册 TDengine Cloud”按钮,进入客户信息补全页面。
|
||||||
|
|
||||||
|
|
|
@ -111,7 +111,7 @@ TDengine 还支持直接向超级表写入数据。需要注意的是,超级
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
insert into meters (tbname, ts, current, voltage, phase, location, group_id)
|
insert into meters (tbname, ts, current, voltage, phase, location, group_id)
|
||||||
values( "d1001v, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
|
values( "d1001, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
|
||||||
```
|
```
|
||||||
|
|
||||||
### 零代码写入
|
### 零代码写入
|
||||||
|
|
|
@ -27,7 +27,7 @@ PI 系统是一套用于数据收集、查找、分析、传递和可视化的
|
||||||
|
|
||||||
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 基本配置
|
### 基本配置
|
||||||
|
|
||||||
|
|
|
@ -208,3 +208,15 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
||||||
### 8. 创建完成
|
### 8. 创建完成
|
||||||
|
|
||||||
点击 **提交** 按钮,完成创建 OPC UA 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
点击 **提交** 按钮,完成创建 OPC UA 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
||||||
|
|
||||||
|
## 增加数据点位
|
||||||
|
|
||||||
|
在任务运行中,点击 **编辑**,点击 **增加数据点位** 按钮,追加数据点位到 CSV 文件中。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
在弹出的表单中,填写数据点位的信息。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
点击 **确定** 按钮,完成数据点位的追加。
|
|
@ -182,3 +182,15 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
||||||
### 7. 创建完成
|
### 7. 创建完成
|
||||||
|
|
||||||
点击 **提交** 按钮,完成创建 OPC DA 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
点击 **提交** 按钮,完成创建 OPC DA 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
||||||
|
|
||||||
|
## 增加数据点位
|
||||||
|
|
||||||
|
在任务运行中,点击 **编辑**,点击 **增加数据点位** 按钮,追加数据点位到 CSV 文件中。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
在弹出的表单中,填写数据点位的信息。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
点击 **确定** 按钮,完成数据点位的追加。
|
|
@ -33,13 +33,14 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
||||||
|
|
||||||
### 3. 配置连接和认证信息
|
### 3. 配置连接和认证信息
|
||||||
|
|
||||||
在 **MQTT地址** 中填写 MQTT 代理的地址,例如:`192.168.1.42:1883`
|
在 **MQTT 地址** 中填写 MQTT 代理的地址,例如:`192.168.1.42`
|
||||||
|
|
||||||
|
在 **MQTT 端口** 中填写 MQTT 代理的端口,例如:`1883`
|
||||||
|
|
||||||
在 **用户** 中填写 MQTT 代理的用户名。
|
在 **用户** 中填写 MQTT 代理的用户名。
|
||||||
|
|
||||||
在 **密码** 中填写 MQTT 代理的密码。
|
在 **密码** 中填写 MQTT 代理的密码。
|
||||||
|
|
||||||
点击 **连通性检查** 按钮,检查数据源是否可用。
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
@ -64,6 +65,8 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
||||||
|
|
||||||
在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称。使用如下格式设置: `topic1::0,topic2::1`。
|
在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称。使用如下格式设置: `topic1::0,topic2::1`。
|
||||||
|
|
||||||
|
点击 **检查连通性** 按钮,检查数据源是否可用。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 6. 配置 MQTT Payload 解析
|
### 6. 配置 MQTT Payload 解析
|
||||||
|
|
|
@ -44,8 +44,50 @@ TDengine 可以高效地从 Kafka 读取数据并将其写入 TDengine,以实
|
||||||
|
|
||||||
如果服务端开启了 SASL 认证机制,此处需要启用 SASL 并配置相关内容,目前支持 PLAIN/SCRAM-SHA-256/GSSAPI 三种认证机制,请按实际情况进行选择。
|
如果服务端开启了 SASL 认证机制,此处需要启用 SASL 并配置相关内容,目前支持 PLAIN/SCRAM-SHA-256/GSSAPI 三种认证机制,请按实际情况进行选择。
|
||||||
|
|
||||||
|
#### 4.1. PLAIN 认证
|
||||||
|
|
||||||
|
选择 `PLAIN` 认证机制,输入用户名和密码:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
#### 4.1. SCRAM(SCRAM-SHA-256) 认证
|
||||||
|
|
||||||
|
选择 `SCRAM-SHA-256` 认证机制,输入用户名和密码:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
#### 4.3. GSSAPI 认证
|
||||||
|
|
||||||
|
选择 `GSSAPI` ,将通过 [RDkafka 客户端](https://github.com/confluentinc/librdkafka) 调用 GSSAPI 应用 Kerberos 认证机制:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
需要输入的信息有:
|
||||||
|
|
||||||
|
- Kerberos 服务名,一般是 `kafka`;
|
||||||
|
- Kerberos 认证主体,即认证用户名,例如 `kafkaclient`;
|
||||||
|
- Kerberos 初始化命令(可选,一般不用填写);
|
||||||
|
- Kerberos 密钥表,需提供文件并上传;
|
||||||
|
|
||||||
|
以上信息均需由 Kafka 服务管理者提供。
|
||||||
|
|
||||||
|
除此之外,在服务器上需要配置 [Kerberos](https://web.mit.edu/kerberos/) 认证服务。在 Ubuntu 下使用 `apt install krb5-user` ;在 CentOS 下,使用 `yum install krb5-workstation`;即可。
|
||||||
|
|
||||||
|
配置完成后,可以使用 [kcat](https://github.com/edenhill/kcat) 工具进行 Kafka 主题消费验证:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kcat <topic> \
|
||||||
|
-b <kafka-server:port> \
|
||||||
|
-G kcat \
|
||||||
|
-X security.protocol=SASL_PLAINTEXT \
|
||||||
|
-X sasl.mechanism=GSSAPI \
|
||||||
|
-X sasl.kerberos.keytab=</path/to/kafkaclient.keytab> \
|
||||||
|
-X sasl.kerberos.principal=<kafkaclient> \
|
||||||
|
-X sasl.kerberos.service.name=kafka
|
||||||
|
```
|
||||||
|
|
||||||
|
如果出现错误:“Server xxxx not found in kerberos database”,则需要配置 Kafka 节点对应的域名并在 Kerberos 客户端配置文件 `/etc/krb5.conf` 中配置反向域名解析 `rdns = true`。
|
||||||
|
|
||||||
### 5. 配置 SSL 证书
|
### 5. 配置 SSL 证书
|
||||||
|
|
||||||
如果服务端开启了 SSL 加密认证,此处需要启用 SSL 并配置相关内容。
|
如果服务端开启了 SSL 加密认证,此处需要启用 SSL 并配置相关内容。
|
||||||
|
@ -60,7 +102,7 @@ TDengine 可以高效地从 Kafka 读取数据并将其写入 TDengine,以实
|
||||||
|
|
||||||
在 **主题** 中填写要消费的 Topic 名称。可以配置多个 Topic , Topic 之间用逗号分隔。例如:`tp1,tp2`。
|
在 **主题** 中填写要消费的 Topic 名称。可以配置多个 Topic , Topic 之间用逗号分隔。例如:`tp1,tp2`。
|
||||||
|
|
||||||
在 **Client ID** 中填写客户端标识,填写后会生成带有 `taosx` 前缀的客户端 ID (例如,如果填写的标识为 `foo`,则生成的客户端 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的客户端 ID 形如 `taosx100foo`)。连接到同一个 Kafka 集群的所有客户端 ID 必须保证唯一。
|
在 **Client ID** 中填写客户端标识,填写后会生成带有 `taosx` 前缀的客户端 ID (例如,如果填写的标识为 `foo`,则生成的客户端 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的客户端 ID 形如 `taosx100foo`)。需要注意的是,当使用多个 taosX 订阅同一 Topic 需要进行负载均衡时,必须填写一致的客户端 ID 才能达到均衡效果。
|
||||||
|
|
||||||
在 **消费者组 ID** 中填写消费者组标识,填写后会生成带有 `taosx` 前缀的消费者组 ID (例如,如果填写的标识为 `foo`,则生成的消费者组 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的消费者组 ID 形如 `taosx100foo`)。
|
在 **消费者组 ID** 中填写消费者组标识,填写后会生成带有 `taosx` 前缀的消费者组 ID (例如,如果填写的标识为 `foo`,则生成的消费者组 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的消费者组 ID 形如 `taosx100foo`)。
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,7 @@ MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品
|
||||||
|
|
||||||
### 1. 新增数据源
|
### 1. 新增数据源
|
||||||
|
|
||||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
在数据写入页面中点击右上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue