diff --git a/CMakeLists.txt b/CMakeLists.txt index 315036d115..7bb36fe1b0 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) IF (CMAKE_VERSION VERSION_LESS 3.0) PROJECT(TDengine CXX) SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}") @@ -15,6 +15,7 @@ SET(TD_ADMIN FALSE) SET(TD_GRANT FALSE) SET(TD_MQTT FALSE) SET(TD_TSDB_PLUGINS FALSE) +SET(TD_STORAGE FALSE) SET(TD_COVER FALSE) SET(TD_MEM_CHECK FALSE) diff --git a/Jenkinsfile b/Jenkinsfile index 516b179dce..d7d2ab6a9a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -5,7 +5,7 @@ node { git url: 'https://github.com/taosdata/TDengine.git' } - +def kipstage=0 def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -45,6 +45,7 @@ def pre_test(){ git pull git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD + git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile' || exit 0 cd ${WK} git reset --hard HEAD~10 git checkout develop @@ -62,6 +63,7 @@ def pre_test(){ ''' return 1 } + pipeline { agent none @@ -71,21 +73,46 @@ pipeline { } stages { - + stage('pre_build'){ + agent{label 'master'} + when { + changeRequest() + } + steps { + sh''' + cp -r ${WORKSPACE} ${WORKSPACE}.tes + cd ${WORKSPACE}.tes + git checkout develop + git pull + git fetch origin +refs/pull/${CHANGE_ID}/merge + git checkout -qf FETCH_HEAD + ''' + script{ + skipstage=sh(script:"git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) + } + sh''' + rm -rf ${WORKSPACE}.tes + ''' + } + } stage('Parallel test stage') { //only build pr when { changeRequest() + expression { + skipstage != 0 + } } parallel { - stage('python_1') { + stage('python_1_s1') { agent{label 'p1'} steps { pre_test() - timeout(time: 90, unit: 'MINUTES'){ + timeout(time: 45, unit: 'MINUTES'){ sh ''' + date cd ${WKC}/tests find pytest -name '*'sql|xargs rm -rf ./test-all.sh p1 @@ -94,26 +121,38 @@ pipeline { } } - stage('python_2') { + stage('python_2_s5') { agent{label 'p2'} steps { pre_test() - sh ''' - cd ${WKC}/tests - find pytest -name '*'sql|xargs rm -rf - ./test-all.sh p2 - date''' - sh ''' - cd ${WKC}/tests - ./test-all.sh b4fq - ''' + timeout(time: 45, unit: 'MINUTES'){ + sh ''' + date + cd ${WKC}/tests + find pytest -name '*'sql|xargs rm -rf + ./test-all.sh p2 + date''' + } } } - stage('test_b1') { + stage('python_3_s6') { + agent{label 'p3'} + steps { + timeout(time: 45, unit: 'MINUTES'){ + pre_test() + sh ''' + date + cd ${WKC}/tests + ./test-all.sh p3 + date''' + } + } + } + stage('test_b1_s2') { agent{label 'b1'} steps { - timeout(time: 90, unit: 'MINUTES'){ + timeout(time: 45, unit: 'MINUTES'){ pre_test() sh ''' cd ${WKC}/tests @@ -123,8 +162,9 @@ pipeline { } } - stage('test_crash_gen') { + stage('test_crash_gen_s3') { agent{label "b2"} + steps { pre_test() catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { @@ -136,21 +176,24 @@ pipeline { catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* ./handle_crash_gen_val_log.sh ''' - } - timeout(time: 90, unit: 'MINUTES'){ - sh ''' - date - cd ${WKC}/tests - ./test-all.sh b2fq - date - ''' - } + } + timeout(time: 45, unit: 'MINUTES'){ + sh ''' + date + cd ${WKC}/tests + ./test-all.sh b2fq + date + ''' + } + } } - stage('test_valgrind') { + 
stage('test_valgrind_s4') { agent{label "b3"} steps { @@ -162,7 +205,7 @@ pipeline { ./handle_val_log.sh ''' } - timeout(time: 90, unit: 'MINUTES'){ + timeout(time: 45, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -171,17 +214,68 @@ pipeline { } } } - - + stage('test_b4_s7') { + agent{label 'b4'} + steps { + timeout(time: 45, unit: 'MINUTES'){ + pre_test() + sh ''' + date + cd ${WKC}/tests + ./test-all.sh b4fq + cd ${WKC}/tests + ./test-all.sh p4 + date''' + } + } + } + stage('test_b5_s8') { + agent{label 'b5'} + steps { + timeout(time: 45, unit: 'MINUTES'){ + pre_test() + sh ''' + date + cd ${WKC}/tests + ./test-all.sh b5fq + date''' + } + } + } + stage('test_b6_s9') { + agent{label 'b6'} + steps { + timeout(time: 45, unit: 'MINUTES'){ + pre_test() + sh ''' + date + cd ${WKC}/tests + ./test-all.sh b6fq + date''' + } + } + } + stage('test_b7_s10') { + agent{label 'b7'} + steps { + timeout(time: 45, unit: 'MINUTES'){ + pre_test() + sh ''' + date + cd ${WKC}/tests + ./test-all.sh b7fq + date''' + } + } + } } } } - post { - + post { success { emailext ( - subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' + subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS", + body: """ @@ -197,29 +291,29 @@ pipeline { - ''', + """, to: "${env.CHANGE_AUTHOR_EMAIL}", from: "support@taosdata.com" ) } failure { emailext ( - subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' + subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL", + body: """ @@ -235,21 +329,21 @@ pipeline { - ''', + """, to: "${env.CHANGE_AUTHOR_EMAIL}", from: "support@taosdata.com" ) diff --git a/cmake/define.inc b/cmake/define.inc index 91adfa64c3..ae90410f2d 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -21,6 +21,10 @@ IF (TD_TSDB_PLUGINS) ADD_DEFINITIONS(-D_TSDB_PLUGINS) ENDIF () +IF (TD_STORAGE) + ADD_DEFINITIONS(-D_STORAGE) +ENDIF () + IF (TD_GODLL) ADD_DEFINITIONS(-D_TD_GO_DLL_) ENDIF () diff --git a/cmake/install.inc b/cmake/install.inc index 2f0404334c..ec0ec64519 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
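(Aside on the TD_STORAGE switch added in cmake/define.inc above: it only injects a -D_STORAGE compile definition. A minimal, hypothetical sketch of how a translation unit would gate code on it; nothing below is taken from the TDengine sources, the messages are placeholders.)

#include <stdio.h>

/* Hypothetical illustration only: the compile-time branch that a build with
 * SET(TD_STORAGE TRUE) + ADD_DEFINITIONS(-D_STORAGE) enables. */
int main(void) {
#ifdef _STORAGE
    printf("built with TD_STORAGE: storage-plugin code paths compiled in\n");
#else
    printf("community build: default storage code paths\n");
#endif
    return 0;
}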
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.18-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.19-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/version.inc b/cmake/version.inc index f9927bf1c6..07356bb216 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -13,28 +13,40 @@ ELSE () SET(TD_VER_COMPATIBLE "2.0.0.0") ENDIF () +find_program(HAVE_GIT NAMES git) + IF (DEFINED GITINFO) SET(TD_VER_GIT ${GITINFO}) +ELSEIF (HAVE_GIT) + execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${TD_COMMUNITY_DIR} OUTPUT_VARIABLE GIT_COMMITID) + message(STATUS "git log result:${GIT_COMMITID}") + IF (GIT_COMMITID) + string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID}) + SET(TD_VER_GIT ${GIT_COMMITID}) + ELSE () + message(STATUS "not a git repository") + SET(TD_VER_GIT "no git commit id") + ENDIF () ELSE () - execute_process( - COMMAND git log -1 --format=%H - WORKING_DIRECTORY ${TD_COMMUNITY_DIR} - OUTPUT_VARIABLE GIT_COMMITID - ) - string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID}) - SET(TD_VER_GIT ${GIT_COMMITID}) + message(STATUS "no git cmd") + SET(TD_VER_GIT "no git commit id") ENDIF () IF (DEFINED GITINFOI) SET(TD_VER_GIT_INTERNAL ${GITINFOI}) +ELSEIF (HAVE_GIT) + execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMITID) + message(STATUS "git log result:${GIT_COMMITID}") + IF (GIT_COMMITID) + string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID}) + SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID}) + ELSE () + message(STATUS "not a git repository") + SET(TD_VER_GIT "no git commit id") + ENDIF () ELSE () - execute_process( - COMMAND git log -1 --format=%H - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - OUTPUT_VARIABLE GIT_COMMITID - ) - string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID}) - SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID}) + message(STATUS "no git cmd") + SET(TD_VER_GIT_INTERNAL "no git commit id") ENDIF () IF (DEFINED VERDATE) diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index 1e59396c70..cfc17442f5 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) ADD_SUBDIRECTORY(zlib-1.2.11) @@ -9,6 +9,7 @@ ADD_SUBDIRECTORY(lz4) ADD_SUBDIRECTORY(cJson) ADD_SUBDIRECTORY(wepoll) ADD_SUBDIRECTORY(MsvcLibX) +ADD_SUBDIRECTORY(rmonotonic) IF (TD_LINUX AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) diff --git a/deps/MQTT-C/CMakeLists.txt b/deps/MQTT-C/CMakeLists.txt index 36ede467ac..15b3552521 100644 --- a/deps/MQTT-C/CMakeLists.txt +++ b/deps/MQTT-C/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) # MQTT-C build options option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" 
OFF) diff --git a/deps/MsvcLibX/CMakeLists.txt b/deps/MsvcLibX/CMakeLists.txt index c02e4c7a4d..4428579e1c 100644 --- a/deps/MsvcLibX/CMakeLists.txt +++ b/deps/MsvcLibX/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/MsvcLibX/include/msvcTime.h b/deps/MsvcLibX/include/msvcTime.h index 1897da5857..6f8b5dac8f 100644 --- a/deps/MsvcLibX/include/msvcTime.h +++ b/deps/MsvcLibX/include/msvcTime.h @@ -38,6 +38,7 @@ typedef int clockid_t; /* Supported values for clockid_t */ #define CLOCK_REALTIME 0 +#define CLOCK_MONOTONIC 1 int clock_gettime(clockid_t clock_id, struct timespec *tp); diff --git a/deps/MsvcLibX/include/msvcUnistd.h b/deps/MsvcLibX/include/msvcUnistd.h index 9ad60625e0..9b59bae7f0 100644 --- a/deps/MsvcLibX/include/msvcUnistd.h +++ b/deps/MsvcLibX/include/msvcUnistd.h @@ -89,11 +89,12 @@ pid_t getppid(void); /* Get parent PID */ /* Path management */ #if defined(_WIN32) -#if defined(_UTF8_SOURCE) || defined(_BSD_SOURCE) || defined(_GNU_SOURCE) #define realpath realpathU +#if defined(_UTF8_SOURCE) || defined(_BSD_SOURCE) || defined(_GNU_SOURCE) +// #define realpath realpathU #define CompactPath CompactPathU #else /* _ANSI_SOURCE */ -#define realpath realpathA +// #define realpath realpathA #define CompactPath CompactPathA #endif #endif /* defined(_WIN32) */ diff --git a/deps/MsvcLibX/src/clock_gettime.c b/deps/MsvcLibX/src/clock_gettime.c index fee7532e2e..cbddf7107a 100644 --- a/deps/MsvcLibX/src/clock_gettime.c +++ b/deps/MsvcLibX/src/clock_gettime.c @@ -34,15 +34,56 @@ #include "msvcTime.h" #include "sys/msvcStat.h" /* For MsvcLibX's Filetime2Timespec */ -int clock_gettime(clockid_t clock_id, struct timespec *pTS) { - FILETIME ft; - if (clock_id != CLOCK_REALTIME) { - errno = EINVAL; - return -1; +#define MS_PER_SEC 1000ULL // MS = milliseconds +#define US_PER_MS 1000ULL // US = microseconds +#define HNS_PER_US 10ULL // HNS = hundred-nanoseconds (e.g., 1 hns = 100 ns) +#define NS_PER_US 1000ULL + +#define HNS_PER_SEC (MS_PER_SEC * US_PER_MS * HNS_PER_US) +#define NS_PER_HNS (100ULL) // NS = nanoseconds +#define NS_PER_SEC (MS_PER_SEC * US_PER_MS * NS_PER_US) + +int clock_gettime_monotonic(struct timespec *tv) { + static LARGE_INTEGER ticksPerSec; + LARGE_INTEGER ticks; + double seconds; + + if (!ticksPerSec.QuadPart) { + QueryPerformanceFrequency(&ticksPerSec); + if (!ticksPerSec.QuadPart) { + errno = ENOTSUP; + return -1; + } } - GetSystemTimeAsFileTime(&ft); - Filetime2Timespec(&ft, pTS); + + QueryPerformanceCounter(&ticks); + + seconds = (double) ticks.QuadPart / (double) ticksPerSec.QuadPart; + tv->tv_sec = (time_t)seconds; + tv->tv_nsec = (long)((ULONGLONG)(seconds * NS_PER_SEC) % NS_PER_SEC); + return 0; } +int clock_gettime_realtime(struct timespec *pTS) { + FILETIME ft; + + GetSystemTimeAsFileTime(&ft); + Filetime2Timespec(&ft, pTS); + + return 0; +} + +int clock_gettime(clockid_t clock_id, struct timespec *pTS) { + if (clock_id == CLOCK_MONOTONIC) { + return clock_gettime_monotonic(pTS); + } else if (clock_id == CLOCK_REALTIME) { + return clock_gettime_realtime(pTS); + } + + errno = ENOTSUP; + + return -1; +} + #endif /* defined(_WIN32) */ diff --git a/deps/iconv/CMakeLists.txt b/deps/iconv/CMakeLists.txt index f26f109735..286070fa90 100644 --- a/deps/iconv/CMakeLists.txt +++ b/deps/iconv/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git 
a/deps/pthread/CMakeLists.txt b/deps/pthread/CMakeLists.txt
index dcd9ed0358..04e5be7472 100644
--- a/deps/pthread/CMakeLists.txt
+++ b/deps/pthread/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 
 IF (TD_WINDOWS)
diff --git a/deps/regex/CMakeLists.txt b/deps/regex/CMakeLists.txt
index c7e983b992..054b093d07 100644
--- a/deps/regex/CMakeLists.txt
+++ b/deps/regex/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 
 IF (TD_WINDOWS)
diff --git a/deps/rmonotonic/CMakeLists.txt b/deps/rmonotonic/CMakeLists.txt
new file mode 100644
index 0000000000..b870b7d6d7
--- /dev/null
+++ b/deps/rmonotonic/CMakeLists.txt
@@ -0,0 +1,11 @@
+AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SOURCE_LIST)
+
+add_definitions(-DUSE_PROCESSOR_CLOCK)
+
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../MsvcLibX/include)
+
+ADD_LIBRARY(rmonotonic ${SOURCE_LIST})
+TARGET_INCLUDE_DIRECTORIES(rmonotonic PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/inc)
+IF (TD_WINDOWS)
+  TARGET_LINK_LIBRARIES(rmonotonic MsvcLibXw)
+ENDIF ()
diff --git a/deps/rmonotonic/inc/monotonic.h b/deps/rmonotonic/inc/monotonic.h
new file mode 100644
index 0000000000..0850548d36
--- /dev/null
+++ b/deps/rmonotonic/inc/monotonic.h
@@ -0,0 +1,56 @@
+#ifndef __MONOTONIC_H
+#define __MONOTONIC_H
+/* The monotonic clock is an always increasing clock source. It is unrelated to
+ * the actual time of day and should only be used for relative timings. The
+ * monotonic clock is also not guaranteed to be chronologically precise; there
+ * may be slight skew/shift from a precise clock.
+ *
+ * Depending on system architecture, the monotonic time may be able to be
+ * retrieved much faster than a normal clock source by using an instruction
+ * counter on the CPU. On x86 architectures (for example), the RDTSC
+ * instruction is a very fast clock source for this purpose.
+ */
+
+//#include "fmacros.h"
+#include <stdint.h>
+//#include <unistd.h>
+
+#if defined(_WIN32) || defined(_WIN64)
+    #define inline
+#endif
+
+/* A counter in micro-seconds. The 'monotime' type is provided for variables
+ * holding a monotonic time. This will help distinguish & document that the
+ * variable is associated with the monotonic clock and should not be confused
+ * with other types of time.*/
+typedef uint64_t monotime;
+
+/* Retrieve counter of micro-seconds relative to an arbitrary point in time. */
+extern monotime (*getMonotonicUs)(void);
+
+
+/* Call once at startup to initialize the monotonic clock. Though this only
+ * needs to be called once, it may be called additional times without impact.
+ * Returns a printable string indicating the type of clock initialized.
+ * (The returned string is static and doesn't need to be freed.) */
+const char * monotonicInit();
+
+
+/* Functions to measure elapsed time. Example:
+ *     monotime myTimer;
+ *     elapsedStart(&myTimer);
+ *     while (elapsedMs(myTimer) < 10) {} // loops for 10ms
+ */
+static inline void elapsedStart(monotime *start_time) {
+    *start_time = getMonotonicUs();
+}
+
+static inline uint64_t elapsedUs(monotime start_time) {
+    return getMonotonicUs() - start_time;
+}
+
+static inline uint64_t elapsedMs(monotime start_time) {
+    return elapsedUs(start_time) / 1000;
+}
+
+#endif
diff --git a/deps/rmonotonic/src/monotonic.c b/deps/rmonotonic/src/monotonic.c
new file mode 100644
index 0000000000..1470f91b56
--- /dev/null
+++ b/deps/rmonotonic/src/monotonic.c
@@ -0,0 +1,173 @@
+#include "monotonic.h"
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+
+#undef NDEBUG
+#include <assert.h>
+
+#if defined(_WIN32) || defined(_WIN64)
+#include "msvcTime.h"
+#include "msvcStdio.h"
+#endif
+
+/* The function pointer for clock retrieval. */
+monotime (*getMonotonicUs)(void) = NULL;
+
+static char monotonic_info_string[32];
+
+
+/* Using the processor clock (aka TSC on x86) can provide improved performance
+ * throughout Redis wherever the monotonic clock is used. The processor clock
+ * is significantly faster than calling 'clock_gettime' (POSIX). While this is
+ * generally safe on modern systems, this link provides additional information
+ * about use of the x86 TSC: http://oliveryang.net/2015/09/pitfalls-of-TSC-usage
+ *
+ * To use the processor clock, either uncomment this line, or build with
+ *   CFLAGS="-DUSE_PROCESSOR_CLOCK"
+#define USE_PROCESSOR_CLOCK
+ */
+
+
+#if defined(USE_PROCESSOR_CLOCK) && defined(__x86_64__) && defined(__linux__)
+#include <regex.h>
+#include <x86intrin.h>
+
+static long mono_ticksPerMicrosecond = 0;
+
+static monotime getMonotonicUs_x86() {
+    return __rdtsc() / mono_ticksPerMicrosecond;
+}
+
+static void monotonicInit_x86linux() {
+    const int bufflen = 256;
+    char buf[bufflen];
+    regex_t cpuGhzRegex, constTscRegex;
+    const size_t nmatch = 2;
+    regmatch_t pmatch[nmatch];
+    int constantTsc = 0;
+    int rc;
+
+    /* Determine the number of TSC ticks in a micro-second. This is
+     * a constant value matching the standard speed of the processor.
+     * On modern processors, this speed remains constant even though
+     * the actual clock speed varies dynamically for each core. */
+    rc = regcomp(&cpuGhzRegex, "^model name\\s+:.*@ ([0-9.]+)GHz", REG_EXTENDED);
+    assert(rc == 0);
+
+    /* Also check that the constant_tsc flag is present. (It should be
+     * unless this is a really old CPU.
*/ + rc = regcomp(&constTscRegex, "^flags\\s+:.* constant_tsc", REG_EXTENDED); + assert(rc == 0); + + FILE *cpuinfo = fopen("/proc/cpuinfo", "r"); + if (cpuinfo != NULL) { + while (fgets(buf, bufflen, cpuinfo) != NULL) { + if (regexec(&cpuGhzRegex, buf, nmatch, pmatch, 0) == 0) { + buf[pmatch[1].rm_eo] = '\0'; + double ghz = atof(&buf[pmatch[1].rm_so]); + mono_ticksPerMicrosecond = (long)(ghz * 1000); + break; + } + } + while (fgets(buf, bufflen, cpuinfo) != NULL) { + if (regexec(&constTscRegex, buf, nmatch, pmatch, 0) == 0) { + constantTsc = 1; + break; + } + } + + fclose(cpuinfo); + } + regfree(&cpuGhzRegex); + regfree(&constTscRegex); + + if (mono_ticksPerMicrosecond == 0) { + //fprintf(stderr, "monotonic: x86 linux, unable to determine clock rate"); + return; + } + if (!constantTsc) { + //fprintf(stderr, "monotonic: x86 linux, 'constant_tsc' flag not present"); + return; + } + + snprintf(monotonic_info_string, sizeof(monotonic_info_string), + "X86 TSC @ %ld ticks/us", mono_ticksPerMicrosecond); + getMonotonicUs = getMonotonicUs_x86; +} +#endif + + +#if defined(USE_PROCESSOR_CLOCK) && defined(__aarch64__) +static long mono_ticksPerMicrosecond = 0; + +/* Read the clock value. */ +static inline uint64_t __cntvct() { + uint64_t virtual_timer_value; + __asm__ volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); + return virtual_timer_value; +} + +/* Read the Count-timer Frequency. */ +static inline uint32_t cntfrq_hz() { + uint64_t virtual_freq_value; + __asm__ volatile("mrs %0, cntfrq_el0" : "=r"(virtual_freq_value)); + return (uint32_t)virtual_freq_value; /* top 32 bits are reserved */ +} + +static monotime getMonotonicUs_aarch64() { + return __cntvct() / mono_ticksPerMicrosecond; +} + +static void monotonicInit_aarch64() { + mono_ticksPerMicrosecond = (long)cntfrq_hz() / 1000L / 1000L; + if (mono_ticksPerMicrosecond == 0) { + fprintf(stderr, "monotonic: aarch64, unable to determine clock rate"); + return; + } + + snprintf(monotonic_info_string, sizeof(monotonic_info_string), + "ARM CNTVCT @ %ld ticks/us", mono_ticksPerMicrosecond); + getMonotonicUs = getMonotonicUs_aarch64; +} +#endif + +static monotime getMonotonicUs_posix(void) { + /* clock_gettime() is specified in POSIX.1b (1993). Even so, some systems + * did not support this until much later. CLOCK_MONOTONIC is technically + * optional and may not be supported - but it appears to be universal. + * If this is not supported, provide a system-specific alternate version. */ + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ((uint64_t)ts.tv_sec) * 1000000 + ts.tv_nsec / 1000; +} + +static void monotonicInit_posix() { + /* Ensure that CLOCK_MONOTONIC is supported. This should be supported + * on any reasonably current OS. If the assertion below fails, provide + * an appropriate alternate implementation. 
*/ + struct timespec ts; + int rc = clock_gettime(CLOCK_MONOTONIC, &ts); + assert(rc == 0); + + snprintf(monotonic_info_string, sizeof(monotonic_info_string), + "POSIX clock_gettime"); + getMonotonicUs = getMonotonicUs_posix; +} + + + +const char * monotonicInit() { + #if defined(USE_PROCESSOR_CLOCK) && defined(__x86_64__) && defined(__linux__) + if (getMonotonicUs == NULL) monotonicInit_x86linux(); + #endif + + #if defined(USE_PROCESSOR_CLOCK) && defined(__aarch64__) + if (getMonotonicUs == NULL) monotonicInit_aarch64(); + #endif + + if (getMonotonicUs == NULL) monotonicInit_posix(); + + return monotonic_info_string; +} diff --git a/deps/wepoll/CMakeLists.txt b/deps/wepoll/CMakeLists.txt index a8b3411221..a81fd782bb 100644 --- a/deps/wepoll/CMakeLists.txt +++ b/deps/wepoll/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/zlib-1.2.11/CMakeLists.txt b/deps/zlib-1.2.11/CMakeLists.txt index a8750471d6..f83aa70085 100644 --- a/deps/zlib-1.2.11/CMakeLists.txt +++ b/deps/zlib-1.2.11/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 0da2883605..75c3fb897e 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -290,7 +290,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic - **修改子表标签值** ```mysql - ALTER STABLE tb_name SET TAG tag_name=new_tag_value; + ALTER TABLE tb_name SET TAG tag_name=new_tag_value; ``` 说明:除了更新标签的值的操作是针对子表进行,其他所有的标签操作(添加标签、删除标签等)均只能作用于 STable,不能对单个子表操作。对 STable 添加标签以后,依托于该 STable 建立的所有表将自动增加了一个标签,所有新增标签的默认值都是 NULL。 @@ -757,7 +757,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 - 适用于:表。(从 2.0.15 版本开始,本函数也支持超级表) + 适用于:表。(从 2.0.15.1 版本开始,本函数也支持超级表) 示例: ```mysql diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index bcaabe3c0a..6736eea7c7 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -852,7 +852,7 @@ npm install td2.0-connector ### Linux - `python` (建议`v2.7` , `v3.x.x` 目前还不支持) -- `node` 必须采用v10.x版本,其他版本存在包兼容性的问题。 +- `node` 2.0.6支持v12.x和v10.x,2.0.5及更早版本支持v10.x版本,其他版本可能存在包兼容性的问题。 - `make` - c语言编译器比如GCC diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 5ac0e39dcc..a1178e2eef 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -34,7 +34,7 @@ # 1.0: all CPU cores are available for query processing [default]. # 0.5: only half of the CPU cores are available for query. # 0.0: only one core available. 
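(Usage aside for the rmonotonic library added in deps/rmonotonic above: a minimal sketch, not part of this diff; the include path and link step are illustrative assumptions.)

#include <stdio.h>
#include <unistd.h>     /* usleep(), POSIX only; Windows builds go through MsvcLibX */
#include "monotonic.h"  /* from deps/rmonotonic/inc, link against the new rmonotonic library */

int main(void) {
    /* monotonicInit() picks a clock source (POSIX clock_gettime, x86 TSC or
     * ARM CNTVCT) and returns a static, human-readable description of it. */
    printf("clock source: %s\n", monotonicInit());

    monotime t;
    elapsedStart(&t);          /* records getMonotonicUs() into t */
    usleep(25 * 1000);         /* stand-in for real work (~25 ms) */
    printf("elapsed: %llu us (~%llu ms)\n",
           (unsigned long long)elapsedUs(t),
           (unsigned long long)elapsedMs(t));
    return 0;
}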
-# tsRatioOfQueryCores 1.0 +# ratioOfQueryCores 1.0 # the last_row/first/last aggregator will not change the original column name in the result fields # keepColumnName 0 diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 9cec9963af..0fed7b531f 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -147,8 +147,8 @@ done #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" -function kill_taosd() { - pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -168,7 +168,10 @@ function install_main_path() { if [ "$verMode" == "cluster" ]; then ${csudo} mkdir -p ${nginx_dir} fi - ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + + if [[ -e ${script_dir}/email ]]; then + ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + fi } function install_bin() { @@ -680,7 +683,7 @@ function install_service() { install_service_on_sysvinit else # must manual stop taosd - kill_taosd + kill_process taosd fi } @@ -749,11 +752,24 @@ function update_TDengine() { elif ((${service_mod}==1)); then ${csudo} service taosd stop || : else - kill_taosd + kill_process taosd fi sleep 1 fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi + fi + install_main_path install_log diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh index 89b5ce5b4f..58e19b1399 100755 --- a/packaging/tools/install_power.sh +++ b/packaging/tools/install_power.sh @@ -146,8 +146,8 @@ done #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" -function kill_powerd() { - pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}') +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -652,7 +652,7 @@ function install_service() { install_service_on_sysvinit else # must manual stop powerd - kill_powerd + kill_process powerd fi } @@ -721,9 +721,21 @@ function update_PowerDB() { elif ((${service_mod}==1)); then ${csudo} service powerd stop || : else - kill_powerd + kill_process powerd fi sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi fi install_main_path diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 04bc61ed9e..b0f2cc0a48 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,10 +1,11 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) # Base compile ADD_SUBDIRECTORY(os) ADD_SUBDIRECTORY(common) ADD_SUBDIRECTORY(util) +ADD_SUBDIRECTORY(tfs) ADD_SUBDIRECTORY(rpc) ADD_SUBDIRECTORY(client) ADD_SUBDIRECTORY(query) diff --git a/src/balance/CMakeLists.txt b/src/balance/CMakeLists.txt index bcb3769087..967635e52c 100644 --- a/src/balance/CMakeLists.txt +++ b/src/balance/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt 
index 660ad564a5..fb43751b9e 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index e1370513ef..15ef54b7b1 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -33,6 +33,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index); void tscHandleMasterJoinQuery(SSqlObj* pSql); int32_t tscHandleMasterSTableQuery(SSqlObj *pSql); +int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql); int32_t tscHandleMultivnodeInsert(SSqlObj *pSql); diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index ce623cdc03..e78e259eb2 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -132,8 +132,9 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo); bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex); bool tscQueryTags(SQueryInfo* pQueryInfo); +bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t tableIndex); -SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, +SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType); int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql); @@ -174,6 +175,7 @@ SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIn SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size); size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo); +void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex); SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index); int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index c9702ad1fc..bdb0173e9c 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -224,7 +224,9 @@ typedef struct SQueryInfo { int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX int16_t resColumnId; // result column id bool distinctTag; // distinct tag or not - + int32_t round; // 0/1/.... 
+ int32_t bufLen; + char* buf; } SQueryInfo; typedef struct { @@ -412,10 +414,9 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code); int tscProcessLocalCmd(SSqlObj *pSql); int tscCfgDynamicOptions(char *msg); -int taos_retrieve(TAOS_RES *res); -int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo); -void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo); +int32_t tscTansformFuncForSTableQuery(SQueryInfo *pQueryInfo); +void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo); int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo); void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo); diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 8e5f621b37..7d5b76cc2b 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -273,14 +273,15 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) { taosScheduleTask(tscQhandle, &schedMsg); } -void tscAsyncResultOnError(SSqlObj *pSql) { +static void tscAsyncResultCallback(SSchedMsg *pMsg) { + SSqlObj* pSql = pMsg->ahandle; if (pSql == NULL || pSql->signature != pSql) { tscDebug("%p SqlObj is freed, not add into queue async res", pSql); return; } assert(pSql->res.code != TSDB_CODE_SUCCESS); - tscError("%p invoke user specified function due to error occured, code:%s", pSql, tstrerror(pSql->res.code)); + tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code)); SSqlRes *pRes = &pSql->res; if (pSql->fp == NULL || pSql->fetchFp == NULL){ @@ -291,6 +292,16 @@ void tscAsyncResultOnError(SSqlObj *pSql) { (*pSql->fp)(pSql->param, pSql, pRes->code); } +void tscAsyncResultOnError(SSqlObj* pSql) { + SSchedMsg schedMsg = {0}; + schedMsg.fp = tscAsyncResultCallback; + schedMsg.ahandle = pSql; + schedMsg.thandle = (void *)1; + schedMsg.msg = 0; + taosScheduleTask(tscQhandle, &schedMsg); +} + + int tscSendMsgToServer(SSqlObj *pSql); void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { @@ -322,7 +333,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { code = tscGetTableMeta(pSql, pTableMetaInfo); assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { taosReleaseRef(tscObjRef, pSql->self); return; } diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index bb015bce3d..820572859e 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -892,7 +892,12 @@ int tscProcessLocalCmd(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; if (pCmd->command == TSDB_SQL_CFG_LOCAL) { - pRes->code = (uint8_t)taosCfgDynamicOptions(pCmd->payload); + if (taosCfgDynamicOptions(pCmd->payload)) { + pRes->code = TSDB_CODE_SUCCESS; + } else { + pRes->code = TSDB_CODE_COM_INVALID_CFG_MSG; + } + pRes->numOfRows = 0; } else if (pCmd->command == TSDB_SQL_DESCRIBE_TABLE) { pRes->code = (uint8_t)tscProcessDescribeTable(pSql); } else if (pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 97426b2dd1..23fb0ab67c 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -68,7 +68,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr SQLFunctionCtx *pCtx = &pReducer->pCtx[i]; SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i); - pCtx->aOutputBuf = pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity; + pCtx->pOutput = 
pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity; pCtx->order = pQueryInfo->order.order; pCtx->functionId = pExpr->functionId; @@ -76,7 +76,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr int16_t offset = getColumnModelOffset(pDesc->pColumnModel, i); SSchema *pSchema = getColumnModelSchema(pDesc->pColumnModel, i); - pCtx->aInputElemBuf = pReducer->pTempBuffer->data + offset; + pCtx->pInput = pReducer->pTempBuffer->data + offset; // input data format comes from pModel pCtx->inputType = pSchema->type; @@ -86,7 +86,6 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr pCtx->outputBytes = pExpr->resBytes; pCtx->outputType = pExpr->resType; - pCtx->startOffset = 0; pCtx->size = 1; pCtx->hasNull = true; pCtx->currentStage = MERGE_STAGE; @@ -94,7 +93,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr // for top/bottom function, the output of timestamp is the first column int32_t functionId = pExpr->functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf; + pCtx->ptsOutputBuf = pReducer->pCtx[0].pOutput; pCtx->param[2].i64 = pQueryInfo->order.order; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; pCtx->param[1].i64 = pQueryInfo->order.orderColId; @@ -118,7 +117,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr if (pExpr->functionId == TSDB_FUNC_TAG_DUMMY || pExpr->functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pExpr->resBytes; pTagCtx[n++] = &pReducer->pCtx[i]; - } else if ((aAggs[pExpr->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { + } else if ((aAggs[pExpr->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { pCtx = &pReducer->pCtx[i]; } } @@ -311,7 +310,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde pReducer->pCtx = (SQLFunctionCtx *)calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SQLFunctionCtx)); pReducer->rowSize = pMemBuffer[0]->nElemSize; - tscRestoreSQLFuncForSTableQuery(pQueryInfo); + tscRestoreFuncForSTableQuery(pQueryInfo); tscFieldInfoUpdateOffset(pQueryInfo); if (pReducer->rowSize > pMemBuffer[0]->pageSize) { @@ -383,7 +382,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde if (pQueryInfo->fillType != TSDB_FILL_NONE) { SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); - pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, + pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); } @@ -720,7 +719,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr SSchema p1 = {0}; if (pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { - p1 = tGetTableNameColumnSchema(); + p1 = *tGetTbnameColumnSchema(); } else if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) { p1.bytes = pExpr->resBytes; p1.type = (uint8_t) pExpr->resType; @@ -744,6 +743,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr functionId = TSDB_FUNC_FIRST; } else if (functionId == TSDB_FUNC_LAST_DST) { functionId = TSDB_FUNC_LAST; + } else if (functionId == TSDB_FUNC_STDDEV_DST) { + functionId = TSDB_FUNC_STDDEV; 
} int32_t ret = getResultDataInfo(p1.type, p1.bytes, functionId, 0, &type, &bytes, &inter, 0, false); @@ -1041,7 +1042,7 @@ static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) { pLocalMerge->hasPrevRow = true; } -static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) { +static void doExecuteFinalMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) { // the tag columns need to be set before all functions execution SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); @@ -1053,7 +1054,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bo int32_t functionId = pCtx->functionId; if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) { tVariantDestroy(&pCtx->tag); - char* input = pCtx->aInputElemBuf; + char* input = pCtx->pInput; if (pCtx->inputType == TSDB_DATA_TYPE_BINARY || pCtx->inputType == TSDB_DATA_TYPE_NCHAR) { assert(varDataLen(input) <= pCtx->inputBytes); @@ -1061,6 +1062,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bo } else { tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType); } + } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j); pCtx->param[0].i64 = pExpr->param[0].i64; @@ -1086,7 +1088,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bo static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) { if (pLocalMerge->hasUnprocessedRow) { pLocalMerge->hasUnprocessedRow = false; - doExecuteSecondaryMerge(pCmd, pLocalMerge, true); + doExecuteFinalMerge(pCmd, pLocalMerge, true); savePreviousRow(pLocalMerge, tmpBuffer); } } @@ -1142,11 +1144,11 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo int32_t inc = numOfRes - 1; // tsdb_func_tag function only produce one row of result memset(buf, 0, (size_t)maxBufSize); - memcpy(buf, pCtx->aOutputBuf, (size_t)pCtx->outputBytes); + memcpy(buf, pCtx->pOutput, (size_t)pCtx->outputBytes); for (int32_t i = 0; i < inc; ++i) { - pCtx->aOutputBuf += pCtx->outputBytes; - memcpy(pCtx->aOutputBuf, buf, (size_t)pCtx->outputBytes); + pCtx->pOutput += pCtx->outputBytes; + memcpy(pCtx->pOutput, buf, (size_t)pCtx->outputBytes); } } @@ -1289,10 +1291,10 @@ void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset size_t t = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < t; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - pLocalMerge->pCtx[i].aOutputBuf = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity; + pLocalMerge->pCtx[i].pOutput = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity; if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) { - pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].aOutputBuf; + pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].pOutput; } } @@ -1404,7 +1406,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) { for (int32_t k = 0; k < size; ++k) { SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k]; - pCtx->aOutputBuf += pCtx->outputBytes * numOfRes; + pCtx->pOutput += pCtx->outputBytes * numOfRes; // set the correct output timestamp column position if (pCtx->functionId == TSDB_FUNC_TOP || pCtx->functionId == 
TSDB_FUNC_BOTTOM) { @@ -1412,7 +1414,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) { } } - doExecuteSecondaryMerge(pCmd, pLocalMerge, true); + doExecuteFinalMerge(pCmd, pLocalMerge, true); } int32_t tscDoLocalMerge(SSqlObj *pSql) { @@ -1504,7 +1506,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { if (pLocalMerge->hasPrevRow) { if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) { // belong to the group of the previous row, continue process it - doExecuteSecondaryMerge(pCmd, pLocalMerge, false); + doExecuteFinalMerge(pCmd, pLocalMerge, false); // copy to buffer savePreviousRow(pLocalMerge, tmpBuffer); @@ -1576,7 +1578,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { } } } else { - doExecuteSecondaryMerge(pCmd, pLocalMerge, true); + doExecuteFinalMerge(pCmd, pLocalMerge, true); savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer } diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index f813ff85d9..9203dcfbba 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -233,6 +233,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { // We extract the lock to tscBuildHeartBeatMsg function. + int64_t now = taosGetTimestampMs(); SSqlObj *pSql = pObj->sqlList; while (pSql) { /* @@ -247,7 +248,8 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { tstrncpy(pQdesc->sql, pSql->sqlstr, sizeof(pQdesc->sql)); pQdesc->stime = htobe64(pSql->stime); pQdesc->queryId = htonl(pSql->queryId); - pQdesc->useconds = htobe64(pSql->res.useconds); + //pQdesc->useconds = htobe64(pSql->res.useconds); + pQdesc->useconds = htobe64(now - pSql->stime); pQdesc->qHandle = htobe64(pSql->res.qhandle); pHeartbeat->numOfQueries++; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4811a3b35d..27222b1daa 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -129,6 +129,7 @@ static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid); +static bool validateDebugFlag(int32_t flag); int16_t getNewResColId(SQueryInfo* pQueryInfo) { return pQueryInfo->resColumnId--; @@ -173,6 +174,16 @@ static uint8_t convertOptr(SStrToken *pToken) { } } +static bool validateDebugFlag(int32_t v) { + const static int validFlag[] = {131, 135, 143}; + + for (int i = 0; i < tListLen(validFlag); i++) { + if (v == validFlag[i]) { + return true; + } + } + return false; +} /* * Used during parsing query sql. Since the query sql usually small in length, error position * is not needed in the final error message. 
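(Aside on the tscProfile.c change above: the heartbeat now reports how long a query has been running so far, now - pSql->stime, instead of the final res.useconds. A small stand-alone sketch of that arithmetic and the byte swap for the wire; the values are stand-ins, and htobe64 comes from the Linux <endian.h> as in the client code.)

#include <stdio.h>
#include <stdint.h>
#include <endian.h>   /* htobe64(), Linux/glibc */

int main(void) {
    uint64_t stime = 1000;            /* stand-in for pSql->stime, in ms */
    uint64_t now   = 1250;            /* stand-in for taosGetTimestampMs() */
    uint64_t elapsed = now - stime;   /* 250 ms: time the query has run so far */
    uint64_t wire = htobe64(elapsed); /* big-endian, as the SQueryDesc field expects */
    printf("elapsed=%llu ms wire=0x%016llx\n",
           (unsigned long long)elapsed, (unsigned long long)wire);
    return 0;
}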
@@ -565,16 +576,16 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } int32_t numOfToken = (int32_t) taosArrayGetSize(pMiscInfo->a); - SStrToken* t = taosArrayGet(pMiscInfo->a, 0); - SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1); + assert(numOfToken >= 1 && numOfToken <= 2); + SStrToken* t = taosArrayGet(pMiscInfo->a, 0); strncpy(pCmd->payload, t->z, t->n); if (numOfToken == 2) { + SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1); pCmd->payload[t->n] = ' '; // add sep strncpy(&pCmd->payload[t->n + 1], t1->z, t1->n); - } - - break; + } + return TSDB_CODE_SUCCESS; } case TSDB_SQL_CREATE_TABLE: { @@ -787,10 +798,10 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQ } SSchema s = {.bytes = TSDB_KEYSIZE, .type = TSDB_DATA_TYPE_TIMESTAMP, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tstrncpy(s.name, aAggs[TSDB_FUNC_TS].aName, sizeof(s.name)); + tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name)); SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL); + tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL); if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; @@ -1319,7 +1330,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL; } -static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { +void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; tscColumnListInsert(pQueryInfo->colList, &tsCol); } @@ -1401,7 +1412,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); // add ts column - tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); tbufCloseWriter(&bw); taosArrayDestroy(colList); @@ -1506,7 +1517,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) { // add the timestamp column into the output columns SColumnIndex index = {0}; // primary timestamp column info int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - tscAddSpecialColumnForSelect(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL); + tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL); SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); pSupInfo->visible = false; @@ -1602,7 +1613,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel * in dealing with super table queries such as: count/first/last */ if (isSTable) { - tscTansformSQLFuncForSTableQuery(pQueryInfo); + tscTansformFuncForSTableQuery(pQueryInfo); if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { return TSDB_CODE_TSC_INVALID_SQL; @@ -1656,7 +1667,7 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tabl (functionId == TSDB_FUNC_TAGPRJ)); } -SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, +SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* 
pIndex, SSchema* pColSchema, int16_t flag) { int16_t colId = getNewResColId(pQueryInfo); @@ -1738,7 +1749,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } // add the primary timestamp column even though it is not required by user - tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); } else if (optr == TK_STRING || optr == TK_INTEGER || optr == TK_FLOAT) { // simple column projection query SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -1748,7 +1759,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->val, &pItem->pNode->token, pItem->aliasName); SSqlExpr* pExpr = - tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC); + tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC); // NOTE: the first parameter is reserved for the tag column id during join query process. pExpr->numOfParams = 2; @@ -1761,11 +1772,11 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - SSchema colSchema = tGetTableNameColumnSchema(); - tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, TSDB_COL_TAG); + SSchema* colSchema = tGetTbnameColumnSchema(); + tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, colSchema, TSDB_COL_TAG); } else if (index.columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) { SSchema colSchema = tGetBlockDistColumnSchema(); - tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_TAG); + tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_TAG); } else { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -1779,7 +1790,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } // add the primary timestamp column even though it is not required by user - tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); } else { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1849,9 +1860,9 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT if (tsKeepOriginalColumnName) { // keep the original column name tstrncpy(name, uname, TSDB_COL_NAME_LEN); } else { - int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1; - char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1] = {0}; - snprintf(tmp, size, "%s(%s)", aAggs[functionId].aName, uname); + int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1; + char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1] = {0}; + snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, uname); tstrncpy(name, tmp, TSDB_COL_NAME_LEN); } @@ -1966,7 +1977,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // the time stamp may be always needed if (index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { - tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); } return TSDB_CODE_SUCCESS; @@ -2036,7 +2047,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col getNewResColId(pQueryInfo), TSDB_KEYSIZE, false); 
SColumnList ids = getColumnList(1, 0, 0); - insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName, pExpr); + insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); } // functions can not be applied to tags @@ -2079,7 +2090,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } } - tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); return TSDB_CODE_SUCCESS; } case TK_FIRST: @@ -2285,7 +2296,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (convertFunctionId(optr, &functionId) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } - tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); + tscInsertPrimaryTsSourceColumn(pQueryInfo, &index); colIndex += 1; // the first column is ts pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); @@ -2308,12 +2319,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnIndex index1 = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo), TSDB_KEYSIZE, false); - tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].aName, sizeof(pExpr->aliasName)); + tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; SColumnList ids = getColumnList(1, 0, TS_COLUMN_INDEX); insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, - aAggs[TSDB_FUNC_TS].aName, pExpr); + aAggs[TSDB_FUNC_TS].name, pExpr); colIndex += 1; // the first column is ts @@ -2384,7 +2395,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SSchema s = {0}; if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - s = tGetTableNameColumnSchema(); + s = *tGetTbnameColumnSchema(); } else { s = pTagSchema[index.columnIndex]; } @@ -2400,7 +2411,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col s.bytes = bytes; TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); - tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG); + tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG); return TSDB_CODE_SUCCESS; } @@ -2778,7 +2789,7 @@ bool validateIpAddress(const char* ip, size_t size) { return epAddr != INADDR_NONE; } -int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) { +int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (pTableMetaInfo->pTableMeta == NULL || !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -2800,7 +2811,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) { SSchema* pSrcSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, colIndex); if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) || - (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) || + (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) { if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->param[0].i64, &type, &bytes, &interBytes, 0, 
true) != TSDB_CODE_SUCCESS) { @@ -2818,7 +2829,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) { } /* transfer the field-info back to original input format */ -void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) { +void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { return; @@ -2842,6 +2853,8 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) { functionId = TSDB_FUNC_FIRST; } else if (functionId == TSDB_FUNC_LAST_DST) { functionId = TSDB_FUNC_LAST; + } else if (functionId == TSDB_FUNC_STDDEV_DST) { + functionId = TSDB_FUNC_STDDEV; } getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->resType, &pExpr->resBytes, @@ -2858,7 +2871,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) size_t size = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId; - if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_STABLE) == 0) { + if ((aAggs[functionId].status & TSDB_FUNCSTATE_STABLE) == 0) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return true; } @@ -2968,7 +2981,6 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) STableMeta* pTableMeta = NULL; SSchema* pSchema = NULL; - SSchema s = tGetTbnameColumnSchema(); int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; @@ -2995,7 +3007,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) int32_t numOfCols = tscGetNumOfColumns(pTableMeta); if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - pSchema = &s; + pSchema = tGetTbnameColumnSchema(); } else { pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); } @@ -3440,6 +3452,7 @@ static int32_t getTagCondString(tSQLExpr* pExpr, char** str) { static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SStringBuilder* sb) { const char* msg0 = "invalid table name list"; + const char* msg1 = "not string following like"; if (pTableCond == NULL) { return TSDB_CODE_SUCCESS; @@ -3457,6 +3470,10 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* if (pTableCond->nSQLOptr == TK_IN) { ret = tablenameListToString(pRight, sb); } else if (pTableCond->nSQLOptr == TK_LIKE) { + if (pRight->nSQLOptr != TK_STRING) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + ret = tablenameCondToString(pRight, sb); } @@ -3547,38 +3564,6 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* return TSDB_CODE_SUCCESS; } -// todo error handle / such as and /or mixed with +/-/*/ -int32_t doArithmeticExprToString(tSQLExpr* pExpr, char** exprString) { - tSQLExpr* pLeft = pExpr->pLeft; - tSQLExpr* pRight = pExpr->pRight; - - *(*exprString)++ = '('; - - if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) { - doArithmeticExprToString(pLeft, exprString); - } else { - int32_t ret = tSQLExprNodeToString(pLeft, exprString); - if (ret != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - } - - optrToString(pExpr, exprString); - - if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) { - doArithmeticExprToString(pRight, exprString); - } else { - int32_t ret = tSQLExprNodeToString(pRight, exprString); - if (ret != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - } - - *(*exprString)++ = ')'; - - return TSDB_CODE_SUCCESS; -} - static 
int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type, uint64_t* uid) { if (pExpr->nSQLOptr == TK_ID) { @@ -4762,7 +4747,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) { const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; - const char* msg2 = "only support order by primary timestamp or first tag in groupby clause allowed"; + const char* msg2 = "order by primary timestamp or first tag in groupby clause allowed"; const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; setDefaultOrderInfo(pQueryInfo); @@ -5228,7 +5213,7 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; ++i) { int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId; - if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) { + if (!IS_STREAM_QUERY_VALID(aAggs[functId].status)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } } @@ -5251,7 +5236,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu bool hasSelectivity = false; for (int32_t j = 0; j < size; ++j) { SSqlExpr* pEx = tscSqlExprGet(pQueryInfo, j); - if ((aAggs[pEx->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) { + if ((aAggs[pEx->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) { hasSelectivity = true; break; } @@ -5382,13 +5367,15 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) { SDNodeDynConfOption LOCAL_DYNAMIC_CFG_OPTIONS[6] = {{"resetLog", 8}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12}, {"cDebugFlag", 10}, {"uDebugFlag", 10}, {"debugFlag", 9}}; + SStrToken* pOptionToken = taosArrayGet(pOptions->a, 0); if (numOfToken == 1) { // reset log does not need value for (int32_t i = 0; i < 1; ++i) { SDNodeDynConfOption* pOption = &LOCAL_DYNAMIC_CFG_OPTIONS[i]; - if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { + if ((pOption->len == pOptionToken->n) && + (strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0)) { return TSDB_CODE_SUCCESS; } } @@ -5396,15 +5383,14 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) { SStrToken* pValToken = taosArrayGet(pOptions->a, 1); int32_t val = strtol(pValToken->z, NULL, 10); - if (val < 131 || val > 199) { - // options value is out of valid range + if (!validateDebugFlag(val)) { return TSDB_CODE_TSC_INVALID_SQL; } for (int32_t i = 1; i < tListLen(LOCAL_DYNAMIC_CFG_OPTIONS); ++i) { SDNodeDynConfOption* pOption = &LOCAL_DYNAMIC_CFG_OPTIONS[i]; - if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { - // options is valid + if ((pOption->len == pOptionToken->n) + && (strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0)) { return TSDB_CODE_SUCCESS; } } @@ -5705,7 +5691,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) { SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->colIndex); SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pColIndex->colIndex}; - tscAddSpecialColumnForSelect(pQueryInfo, (int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema, TSDB_COL_NORMAL); + tscAddFuncInSelectClause(pQueryInfo, 
(int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema, TSDB_COL_NORMAL); int32_t numOfFields = tscNumOfFields(pQueryInfo); SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfFields - 1); @@ -5869,7 +5855,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) continue; } - if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { + if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { numOfSelectivity++; } else { numOfAggregation++; @@ -5901,7 +5887,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) for (int32_t i = 0; i < numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int16_t functionId = pExpr->functionId; - if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == 0) { + if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == 0) { continue; } @@ -5944,7 +5930,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - SSchema s = tGetTableNameColumnSchema(); + SSchema s = *tGetTbnameColumnSchema(); SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); int16_t bytes = 0; int16_t type = 0; @@ -6088,7 +6074,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { } } - if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM && + if (IS_MULTIOUTPUT(aAggs[functId].status) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM && functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6278,7 +6264,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { char tmpBuf[1024] = {0}; int32_t tmpLen = 0; tmpLen = - sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].aName, pExpr->uid, pExpr->colInfo.colId); + sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].name, pExpr->uid, pExpr->colInfo.colId); if (tmpLen + offset >= totalBufSize - 1) break; @@ -6979,3 +6965,4 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { return false; } + diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index bcffbde658..6328c67940 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -451,7 +451,7 @@ int doProcessSql(SSqlObj *pSql) { if (pRes->code != TSDB_CODE_SUCCESS) { tscAsyncResultOnError(pSql); - return pRes->code; + return TSDB_CODE_SUCCESS; } int32_t code = tscSendMsgToServer(pSql); @@ -460,7 +460,7 @@ int doProcessSql(SSqlObj *pSql) { if (code != TSDB_CODE_SUCCESS) { pRes->code = code; tscAsyncResultOnError(pSql); - return code; + return TSDB_CODE_SUCCESS; } return TSDB_CODE_SUCCESS; @@ -609,7 +609,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) { } return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize + - tableSerialize + sqlLen + 4096; + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) { @@ -752,6 +752,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->queryType = htonl(pQueryInfo->type); pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit); pQueryMsg->sqlstrLen = htonl(sqlLen); + pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen); size_t numOfOutput = 
tscSqlExprNumOfExprs(pQueryInfo); pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number @@ -769,6 +770,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char n[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, n); + tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s", pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex, pColSchema->name); @@ -812,6 +814,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); + // the queried table has been removed and a new table with the same name has already been created; + // return error msg + if (pExpr->uid != pTableMeta->id.uid) { + tscError("%p table has already been destroyed", pSql); + return TSDB_CODE_TSC_INVALID_TABLE_NAME; + } + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { tscError("%p table schema is not matched with parsed sql", pSql); return TSDB_CODE_TSC_INVALID_SQL; @@ -855,6 +864,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); SSqlExpr *pExpr = pField->pSqlExpr; if (pExpr != NULL) { + // the queried table has been removed and a new table with the same name has already been created; + // return error msg + if (pExpr->uid != pTableMeta->id.uid) { + tscError("%p table has already been destroyed", pSql); + return TSDB_CODE_TSC_INVALID_TABLE_NAME; + } + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { tscError("%p table schema is not matched with parsed sql", pSql); return TSDB_CODE_TSC_INVALID_SQL; @@ -989,6 +1005,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } } + if (pQueryInfo->bufLen > 0) { + memcpy(pMsg, pQueryInfo->buf, pQueryInfo->bufLen); + pMsg += pQueryInfo->bufLen; + } + SCond* pCond = &pQueryInfo->tagCond.tbnameCond; if (pCond->len > 0) { strncpy(pMsg, pCond->cond, pCond->len); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 448eea16bf..bc2c42cf79 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -443,24 +443,6 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { return pFieldInfo->final; } -int taos_retrieve(TAOS_RES *res) { - if (res == NULL) return 0; - SSqlObj *pSql = (SSqlObj *)res; - SSqlCmd *pCmd = &pSql->cmd; - SSqlRes *pRes = &pSql->res; - if (pSql == NULL || pSql->signature != pSql) return 0; - if (pRes->qhandle == 0) return 0; - - tscResetForNextRetrieve(pRes); - - if (pCmd->command < TSDB_SQL_LOCAL) { - pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; - } - - tscProcessSql(pSql); - return pRes->numOfRows; -} - static bool needToFetchNewBlock(SSqlObj* pSql) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; @@ -714,7 +696,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) { } tscAsyncResultOnError(pSubObj); - taosReleaseRef(tscObjRef, pSubObj->self); + // taosReleaseRef(tscObjRef, pSubObj->self); } if (pSql->subState.numOfSub <= 0) { diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index d787f5b515..a9cd1965e8 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -103,7 +103,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) { // failed to get table Meta or vgroup list, retry in 10sec. if (code == TSDB_CODE_SUCCESS) { - tscTansformSQLFuncForSTableQuery(pQueryInfo); + tscTansformFuncForSTableQuery(pQueryInfo); tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name)); pSql->fp = tscProcessStreamQueryCallback; diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index 527531b31a..f3d7ef28c0 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -61,7 +61,7 @@ TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt) { SSub* pSub = (SSub*)sub; SSubscriptionProgress target = {.uid = uid, .key = 0}; - SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress); + SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress, TD_EQ); if (p == NULL) { return dflt; } @@ -76,7 +76,7 @@ void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) { SSub* pSub = (SSub*)sub; SSubscriptionProgress target = {.uid = uid, .key = ts}; - SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress); + SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress, TD_EQ); if (p != NULL) { p->key = ts; tscDebug("subscribe:%s, uid:%"PRIu64" update sub start ts:%"PRId64, pSub->topic, p->uid, p->key); @@ -270,7 +270,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) { if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; SSubscriptionProgress target = {.uid = pTableMeta->id.uid, .key = 0}; - SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress); + SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress, TD_EQ); if (p == NULL) { taosArrayClear(pSub->progress); taosArrayPush(pSub->progress, &target); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index d1136ca4de..41cfb81442 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -13,7 +13,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #define _GNU_SOURCE - + #include "os.h" #include "texpr.h" @@ -23,6 +23,7 @@ #include "tscSubquery.h" #include "tschemautil.h" #include "tsclient.h" +#include "qUtil.h" typedef struct SInsertSupporter { SSqlObj* pSql; @@ -94,11 +95,21 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { pthread_mutex_lock(&subState->mutex); + bool done = allSubqueryDone(pParentSql); + + if (done) { + tscDebug("%p subquery:%p,%d all subs already done", pParentSql, pSql, idx); + + pthread_mutex_unlock(&subState->mutex); + + return false; + } + tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx); subState->states[idx] = 1; - bool done = allSubqueryDone(pParentSql); + done = allSubqueryDone(pParentSql); pthread_mutex_unlock(&subState->mutex); @@ -501,7 +512,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t functionId = tscIsProjectionQuery(pQueryInfo)? TSDB_FUNC_PRJ : TSDB_FUNC_TS; - tscAddSpecialColumnForSelect(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL); + tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL); tscPrintSelectClause(pNew, 0); tscFieldInfoUpdateOffset(pQueryInfo); @@ -681,7 +692,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr } } -static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* pParent) { +static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* pParent) { SSqlCmd* pCmd = &pSql->cmd; tscClearSubqueryInfo(pCmd); tscFreeSqlResult(pSql); @@ -701,7 +712,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL); + tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL); // set the tags value for ts_comp function if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -970,7 +981,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) { SSqlObj* sub = pParentSql->pSubs[m]; - issueTSCompQuery(sub, sub->param, pParentSql); + issueTsCompQuery(sub, sub->param, pParentSql); } } @@ -1470,7 +1481,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { } // restore the offset value for super table query in case of final result. 
- tscRestoreSQLFuncForSTableQuery(pQueryInfo); + tscRestoreFuncForSTableQuery(pQueryInfo); tscFieldInfoUpdateOffset(pQueryInfo); } @@ -1651,7 +1662,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter // set get tags query type TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); - tscAddSpecialColumnForSelect(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG); + tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG); size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList); tscDebug( @@ -1662,7 +1673,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter } else { SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddSpecialColumnForSelect(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL); + tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL); // set the tags value for ts_comp function SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0); @@ -1821,7 +1832,316 @@ void tscUnlockByThread(int64_t *lockedBy) { } } +typedef struct SFirstRoundQuerySup { + SSqlObj *pParent; + int32_t numOfRows; + SArray *pColsInfo; + int32_t tagLen; + STColumn *pTagCols; + SArray *pResult; // SArray + int64_t interval; + char* buf; + int32_t bufLen; +} SFirstRoundQuerySup; +void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, SQueryInfo* pQueryInfo) { + TSKEY key = INT64_MIN; + for(int32_t i = 0; i < numOfCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { + continue; + } + + if (pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + key = *(TSKEY*) row[i]; + continue; + } + + double v = 0; + if (row[i] != NULL) { + v = *(double*) row[i]; + } else { + SET_DOUBLE_NULL(&v); + } + + int32_t id = pExpr->colInfo.colId; + int32_t numOfQueriedCols = (int32_t) taosArrayGetSize(pInterResult->pResult); + + SArray* p = NULL; + for(int32_t j = 0; j < numOfQueriedCols; ++j) { + SStddevInterResult* pColRes = taosArrayGet(pInterResult->pResult, j); + if (pColRes->colId == id) { + p = pColRes->pResult; + break; + } + } + + //append a new column + if (p == NULL) { + SStddevInterResult t = {.colId = id, .pResult = taosArrayInit(10, sizeof(SResPair)),}; + taosArrayPush(pInterResult->pResult, &t); + p = t.pResult; + } + + SResPair pair = {.avg = v, .key = key}; + taosArrayPush(p, &pair); + } +} + +static void destroySup(SFirstRoundQuerySup* pSup) { + taosArrayDestroyEx(pSup->pResult, freeInterResult); + taosArrayDestroy(pSup->pColsInfo); + tfree(pSup); +} + +void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { + SSqlObj* pSql = (SSqlObj*)tres; + SSqlRes* pRes = &pSql->res; + + SFirstRoundQuerySup* pSup = param; + + SSqlObj* pParent = pSup->pParent; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + int32_t code = taos_errno(pSql); + if (code != TSDB_CODE_SUCCESS) { + destroySup(pSup); + taos_free_result(pSql); + pParent->res.code = code; + tscAsyncResultOnError(pParent); + return; + } + + if (numOfRows > 0) { // the number is not correct for group by column in super table query + TAOS_ROW row = NULL; + int32_t numOfCols = taos_field_count(tres); + + if (pSup->tagLen == 0) { // no tags, all rows belong to one group + SInterResult interResult = {.tags = NULL, .pResult = taosArrayInit(4, 
sizeof(SStddevInterResult))}; + taosArrayPush(pSup->pResult, &interResult); + + while ((row = taos_fetch_row(tres)) != NULL) { + doAppendData(&interResult, row, numOfCols, pQueryInfo); + pSup->numOfRows += 1; + } + } else { // tagLen > 0 + char* p = calloc(1, pSup->tagLen); + + while ((row = taos_fetch_row(tres)) != NULL) { + int32_t* length = taos_fetch_lengths(tres); + memset(p, 0, pSup->tagLen); + + int32_t offset = 0; + for (int32_t i = 0; i < numOfCols && offset < pSup->tagLen; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + + // tag or group by column + if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) { + memcpy(p + offset, row[i], length[i]); + offset += pExpr->resBytes; + } + } + + assert(offset == pSup->tagLen); + size_t size = taosArrayGetSize(pSup->pResult); + + if (size > 0) { + SInterResult* pInterResult = taosArrayGetLast(pSup->pResult); + if (memcmp(pInterResult->tags, p, pSup->tagLen) == 0) { // belongs to the same group + doAppendData(pInterResult, row, numOfCols, pQueryInfo); + } else { + char* tags = malloc( pSup->tagLen); + memcpy(tags, p, pSup->tagLen); + + SInterResult interResult = {.tags = tags, .pResult = taosArrayInit(4, sizeof(SStddevInterResult))}; + taosArrayPush(pSup->pResult, &interResult); + doAppendData(&interResult, row, numOfCols, pQueryInfo); + } + } else { + char* tags = malloc(pSup->tagLen); + memcpy(tags, p, pSup->tagLen); + + SInterResult interResult = {.tags = tags, .pResult = taosArrayInit(4, sizeof(SStddevInterResult))}; + taosArrayPush(pSup->pResult, &interResult); + doAppendData(&interResult, row, numOfCols, pQueryInfo); + } + + pSup->numOfRows += 1; + } + + tfree(p); + } + } + + if (!pRes->completed) { + taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param); + return; + } + + // set the parameters for the second round query process + SSqlCmd *pPCmd = &pParent->cmd; + SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pPCmd, 0); + + if (pSup->numOfRows > 0) { + SBufferWriter bw = tbufInitWriter(NULL, false); + interResToBinary(&bw, pSup->pResult, pSup->tagLen); + + pQueryInfo1->bufLen = (int32_t) tbufTell(&bw); + pQueryInfo1->buf = tbufGetData(&bw, true); + + // set the serialized binary string as the parameter of arithmetic expression + tbufCloseWriter(&bw); + } + + taosArrayDestroyEx(pSup->pResult, freeInterResult); + taosArrayDestroy(pSup->pColsInfo); + tfree(pSup); + + taos_free_result(pSql); + + pQueryInfo1->round = 1; + tscDoQuery(pParent); +} + +void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) { + SFirstRoundQuerySup* pSup = (SFirstRoundQuerySup*) param; + + SSqlObj* pSql = (SSqlObj*) tres; + int32_t c = taos_errno(pSql); + + if (c != TSDB_CODE_SUCCESS) { + SSqlObj* parent = pSup->pParent; + + destroySup(pSup); + taos_free_result(pSql); + parent->res.code = code; + tscAsyncResultOnError(parent); + return; + } + + taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param); +} + +int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + STableMetaInfo* pTableMetaInfo1 = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); + + SFirstRoundQuerySup *pSup = calloc(1, sizeof(SFirstRoundQuerySup)); + + pSup->pParent = pSql; + pSup->interval = pQueryInfo->interval.interval; + pSup->pResult = taosArrayInit(6, sizeof(SStddevInterResult)); + pSup->pColsInfo = taosArrayInit(6, sizeof(int16_t)); // result column id + + SSqlObj *pNew = createSubqueryObj(pSql, 0, tscFirstRoundCallback, pSup, TSDB_SQL_SELECT, NULL); + SSqlCmd *pCmd 
= &pNew->cmd; + + tscClearSubqueryInfo(pCmd); + tscFreeSqlResult(pSql); + + SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + assert(pQueryInfo->numOfTables == 1); + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0); + + tscInitQueryInfo(pNewQueryInfo); + pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr; + if (pQueryInfo->groupbyExpr.columnInfo != NULL) { + pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo); + if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) { + terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto _error; + } + } + + if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) { + terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto _error; + } + + pNewQueryInfo->interval = pQueryInfo->interval; + + pCmd->command = TSDB_SQL_SELECT; + pNew->fp = tscFirstRoundCallback; + + int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + + int32_t index = 0; + for(int32_t i = 0; i < numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) { + taosArrayPush(pSup->pColsInfo, &pExpr->resColId); + + SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; + SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId); + + SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL); + p->resColId = pExpr->resColId; // update the result column id + } else if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) { + taosArrayPush(pSup->pColsInfo, &pExpr->resColId); + + SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex}; + SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)}; + tstrncpy(schema.name, pExpr->aliasName, tListLen(schema.name)); + + SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL); + p->resColId = pExpr->resColId; // update the result column id + } else if (pExpr->functionId == TSDB_FUNC_TAG) { + pSup->tagLen += pExpr->resBytes; + SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex}; + + SSchema* schema = NULL; + if (pExpr->colInfo.colId != TSDB_TBNAME_COLUMN_INDEX) { + schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId); + } else { + schema = tGetTbnameColumnSchema(); + } + + SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG); + p->resColId = pExpr->resColId; + } else if (pExpr->functionId == TSDB_FUNC_PRJ) { + int32_t num = (int32_t) taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo); + for(int32_t k = 0; k < num; ++k) { + SColIndex* pIndex = taosArrayGet(pNewQueryInfo->groupbyExpr.columnInfo, k); + if (pExpr->colInfo.colId == pIndex->colId) { + pSup->tagLen += pExpr->resBytes; + taosArrayPush(pSup->pColsInfo, &pExpr->resColId); + + SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pIndex->colIndex}; + SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId); + + //doLimitOutputNormalColOfGroupby + SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL); + p->numOfParams = 1; + p->param[0].i64 = 1; + p->param[0].nType = TSDB_DATA_TYPE_INT; + p->resColId = pExpr->resColId; // update the result column id + } + } + } + } + + SColumnIndex columnIndex = {.tableIndex = 0, 
.columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; + tscInsertPrimaryTsSourceColumn(pNewQueryInfo, &columnIndex); + + tscTansformFuncForSTableQuery(pNewQueryInfo); + + tscDebug( + "%p first round subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, " + "numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s", + pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type, + tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name)); + + tscHandleMasterSTableQuery(pNew); + return TSDB_CODE_SUCCESS; + + _error: + destroySup(pSup); + taos_free_result(pNew); + pSql->res.code = terrno; + tscAsyncResultOnError(pSql); + return terrno; +} int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; @@ -1833,7 +2153,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return pRes->code; } - tExtMemBuffer ** pMemoryBuf = NULL; + tExtMemBuffer **pMemoryBuf = NULL; tOrderDescriptor *pDesc = NULL; SColumnModel *pModel = NULL; SColumnModel *pFinalModel = NULL; @@ -1862,11 +2182,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { tfree(pMemoryBuf); return ret; } - - pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES); tscDebug("%p retrieved query data from %d vnode(s)", pSql, pState->numOfSub); - + pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES); if (pSql->pSubs == NULL) { tfree(pSql->pSubs); pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -1991,7 +2309,9 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES * current query failed, and the retry count is less than the available * count, retry query clear previous retrieved data, then launch a new sub query */ -static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code) { +static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code, int32_t *sent) { + *sent = 0; + SRetrieveSupport *trsupport = malloc(sizeof(SRetrieveSupport)); if (trsupport == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2023,21 +2343,28 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d", - trsupport->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, trsupport->subqueryIndex); + oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex); pParentSql->res.code = terrno; - trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; + oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; + tfree(trsupport); return pParentSql->res.code; } int32_t ret = tscProcessSql(pNew); + *sent = 1; + // if failed to process sql, let following code handle the pSql if (ret == TSDB_CODE_SUCCESS) { + tscFreeRetrieveSup(pSql); taos_free_result(pSql); return ret; - } else { + } else { + pParentSql->pSubs[trsupport->subqueryIndex] = pSql; + tscFreeRetrieveSup(pNew); + taos_free_result(pNew); return ret; } } @@ -2074,7 +2401,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO subqueryIndex, tstrerror(pParentSql->res.code)); } else { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) { - if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) { + int32_t sent = 0; + + 
tscReissueSubquery(trsupport, pSql, numOfRows, &sent); + if (sent) { return; } } else { // reach the maximum retry count, abort @@ -2196,7 +2526,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR SRetrieveSupport *trsupport = (SRetrieveSupport *)param; if (pSql->param == NULL || param == NULL) { tscDebug("%p already freed in dnodecallback", pSql); - assert(pSql->res.code == TSDB_CODE_TSC_QUERY_CANCELLED); return; } @@ -2228,7 +2557,10 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry); - if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) { + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, numOfRows, &sent); + if (sent) { return; } } else { @@ -2350,7 +2682,11 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry); - if (tscReissueSubquery(trsupport, pSql, code) == TSDB_CODE_SUCCESS) { + + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, code, &sent); + if (sent) { return; } } else { @@ -2410,7 +2746,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) // record the total inserted rows if (numOfRows > 0) { - pParentObj->res.numOfRows += numOfRows; + atomic_add_fetch_32(&pParentObj->res.numOfRows, numOfRows); } if (taos_errno(tres) != TSDB_CODE_SUCCESS) { @@ -2739,7 +3075,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { return; } - tscRestoreSQLFuncForSTableQuery(pQueryInfo); + tscRestoreFuncForSTableQuery(pQueryInfo); } assert (pRes->row >= pRes->numOfRows); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index fe1c45bd39..92736b005f 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -18,7 +18,6 @@ #include "tref.h" #include "trpc.h" #include "tnote.h" -#include "tsystem.h" #include "ttimer.h" #include "tutil.h" #include "tsched.h" @@ -49,7 +48,7 @@ int32_t tscNumOfThreads = 1; // num of rpc threads static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently static pthread_once_t tscinit = PTHREAD_ONCE_INIT; -void tscCheckDiskUsage(void *UNUSED_PARAM(para), void* UNUSED_PARAM(param)) { +void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) { taosGetDisk(); taosTmrReset(tscCheckDiskUsage, 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } @@ -88,7 +87,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry rpcInit.sessions = tsMaxConnections; rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.user = (char *)user; - rpcInit.idleTime = 2000; + rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.ckey = "key"; rpcInit.spi = 1; rpcInit.secret = (char *)secretEncrypt; @@ -102,7 +101,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry tscError("failed to init connection to TDengine"); return -1; } - pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*10); + pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*5); if (pRpcObj == NULL) { rpcClose(rpcObj.pDnodeConn); pthread_mutex_unlock(&rpcObjMutex); @@ -155,6 +154,7 @@ void taos_init_imp(void) { if (tscNumOfThreads < 2) { 
tscNumOfThreads = 2; } + taosTmrThreads = tscNumOfThreads; tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc"); if (NULL == tscQhandle) { @@ -216,7 +216,6 @@ void taos_cleanup(void) { taosCloseRef(id); taosCleanupKeywordsTable(); - taosCloseLog(); p = tscRpcCache; tscRpcCache = NULL; @@ -226,7 +225,10 @@ void taos_cleanup(void) { pthread_mutex_destroy(&rpcObjMutex); } - if (tscEmbedded == 0) rpcCleanup(); + if (tscEmbedded == 0) { + rpcCleanup(); + taosCloseLog(); + }; p = tscTmr; tscTmr = NULL; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 83d9dd805c..95cf28ec49 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -107,11 +107,6 @@ bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) { return false; } - // for select query super table, the super table vgroup list can not be null in any cases. - // if (pQueryInfo->command == TSDB_SQL_SELECT && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - // assert(pTableMetaInfo->vgroupList != NULL); - // } - if ((pQueryInfo->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE) { return false; } @@ -1074,7 +1069,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { memset(pFieldInfo, 0, sizeof(SFieldInfo)); } -static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +static SSqlExpr* doCreateSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, int32_t colType) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); @@ -1127,14 +1122,14 @@ SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functi return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); } - SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); + SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayInsert(pQueryInfo->exprList, index, &pExpr); return pExpr; } SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { - SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); + SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayPush(pQueryInfo->exprList, &pExpr); return pExpr; } @@ -1158,6 +1153,22 @@ SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functi return pExpr; } +bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) { + if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[index])) { + return false; + } + + int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + for(int32_t i = 0; i < numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) { + return true; + } + } + + return false; +} + size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) { return taosArrayGetSize(pQueryInfo->exprList); } @@ -1422,9 +1433,9 @@ int32_t tscValidateName(SStrToken* pToken) { if (sep == NULL) { // single part if (pToken->type == TK_STRING) { - strdequote(pToken->z); + tscDequoteAndTrimToken(pToken); tscStrToLower(pToken->z, pToken->n); - pToken->n = (uint32_t)strtrim(pToken->z); + //pToken->n = 
(uint32_t)strtrim(pToken->z); int len = tSQLGetToken(pToken->z, &pToken->type); @@ -1762,6 +1773,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) { pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf); tfree(pQueryInfo->fillVal); + tfree(pQueryInfo->buf); } void tscClearSubqueryInfo(SSqlCmd* pCmd) { @@ -2029,7 +2041,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNew->signature = pNew; pNew->sqlstr = strdup(pSql->sqlstr); - SSqlCmd* pnCmd = &pNew->cmd; + SSqlCmd* pnCmd = &pNew->cmd; memcpy(pnCmd, pCmd, sizeof(SSqlCmd)); pnCmd->command = cmd; @@ -2068,7 +2080,18 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit; pNewQueryInfo->numOfTables = 0; pNewQueryInfo->pTableMetaInfo = NULL; - + pNewQueryInfo->bufLen = pQueryInfo->bufLen; + + pNewQueryInfo->buf = malloc(pQueryInfo->bufLen); + if (pNewQueryInfo->buf == NULL) { + terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto _error; + } + + if (pQueryInfo->bufLen > 0) { + memcpy(pNewQueryInfo->buf, pQueryInfo->buf, pQueryInfo->bufLen); + } + pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr; if (pQueryInfo->groupbyExpr.columnInfo != NULL) { pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo); @@ -2234,6 +2257,9 @@ void tscDoQuery(SSqlObj* pSql) { } } + return; + } else if (tscMultiRoundQuery(pQueryInfo, 0) && pQueryInfo->round == 0) { + tscHandleFirstRoundStableQuery(pSql); // todo lock? return; } else if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query tscLockByThread(&pSql->squeryLock); diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt index 4ea0b80bf7..f07af85e25 100644 --- a/src/client/tests/CMakeLists.txt +++ b/src/client/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/client/tests/cliTest.cpp b/src/client/tests/cliTest.cpp index 5cfe61d92a..30f248b541 100644 --- a/src/client/tests/cliTest.cpp +++ b/src/client/tests/cliTest.cpp @@ -57,7 +57,7 @@ void stmtInsertTest() { v.ts = start_ts + 20; v.k = 123; - char* str = "abc"; + char str[] = "abc"; uintptr_t len = strlen(str); v.a = str; @@ -65,7 +65,7 @@ void stmtInsertTest() { params[2].buffer_length = len; params[2].buffer = str; - char* nstr = "999"; + char nstr[] = "999"; uintptr_t len1 = strlen(nstr); v.b = nstr; @@ -84,18 +84,18 @@ void stmtInsertTest() { v.ts = start_ts + 30; v.k = 911; - str = "92"; - len = strlen(str); + char str1[] = "92"; + len = strlen(str1); params[2].length = &len; params[2].buffer_length = len; - params[2].buffer = str; + params[2].buffer = str1; - nstr = "1920"; - len1 = strlen(nstr); + char nstr1[] = "1920"; + len1 = strlen(nstr1); params[3].buffer_length = len1; - params[3].buffer = nstr; + params[3].buffer = nstr1; params[3].length = &len1; taos_stmt_bind_param(stmt, params); @@ -103,7 +103,7 @@ void stmtInsertTest() { ret = taos_stmt_execute(stmt); if (ret != 0) { - printf("%p\n", ret); + printf("%d\n", ret); printf("\033[31mfailed to execute insert statement.\033[0m\n"); return; } diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index df0ac79865..0da7bda994 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) 
INCLUDE_DIRECTORIES(inc) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index e842030b4c..959654d158 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -68,9 +68,9 @@ typedef struct { typedef struct { int version; // version int numOfCols; // Number of columns appended - int tlen; // maximum length of a SDataRow without the header part + int tlen; // maximum length of a SDataRow without the header part (sizeof(VarDataOffsetT) + sizeof(VarDataLenT) + (bytes)) uint16_t flen; // First part length in a SDataRow after the header part - uint16_t vlen; // pure value part length, excluded the overhead + uint16_t vlen; // pure value part length, excluded the overhead (bytes only) STColumn columns[]; } STSchema; @@ -134,6 +134,22 @@ typedef uint64_t TKEY; #define tdGetTKEY(key) (((TKEY)ABS(key)) | (TKEY_NEGATIVE_FLAG & (TKEY)(key))) #define tdGetKey(tkey) (((TSKEY)((tkey)&TKEY_VALUE_FILTER)) * (TKEY_IS_NEGATIVE(tkey) ? -1 : 1)) +#define MIN_TS_KEY ((TSKEY)0x8000000000000001) +#define MAX_TS_KEY ((TSKEY)0x3fffffffffffffff) + +#define TD_TO_TKEY(key) tdGetTKEY(((key) < MIN_TS_KEY) ? MIN_TS_KEY : (((key) > MAX_TS_KEY) ? MAX_TS_KEY : key)) + +static FORCE_INLINE TKEY keyToTkey(TSKEY key) { + TSKEY lkey = key; + if (key > MAX_TS_KEY) { + lkey = MAX_TS_KEY; + } else if (key < MIN_TS_KEY) { + lkey = MIN_TS_KEY; + } + + return tdGetTKEY(lkey); +} + static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) { TSKEY key1 = tdGetKey(*(TKEY *)tkey1); TSKEY key2 = tdGetKey(*(TKEY *)tkey2); @@ -278,7 +294,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows); void tdResetDataCols(SDataCols *pCols); int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); -void tdFreeDataCols(SDataCols *pCols); +SDataCols *tdFreeDataCols(SDataCols *pCols); void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols); int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 9b498e8bd2..c6d0226244 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -16,6 +16,8 @@ #ifndef TDENGINE_COMMON_GLOBAL_H #define TDENGINE_COMMON_GLOBAL_H +#include "taosdef.h" + #ifdef __cplusplus extern "C" { #endif @@ -147,7 +149,6 @@ extern char tsDataDir[]; extern char tsLogDir[]; extern char tsScriptDir[]; extern int64_t tsMsPerDay[3]; -extern char tsVnodeBakDir[]; // system info extern char tsOsName[]; @@ -196,6 +197,14 @@ extern int32_t wDebugFlag; extern int32_t cqDebugFlag; extern int32_t debugFlag; +typedef struct { + char dir[TSDB_FILENAME_LEN]; + int level; + int primary; +} SDiskCfg; +extern int32_t tsDiskCfgNum; +extern SDiskCfg tsDiskCfg[]; + #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) void taosInitGlobalCfg(); @@ -204,6 +213,9 @@ void taosSetAllDebugFlag(); bool taosCfgDynamicOptions(char *msg); int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port); bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId); +void taosAddDataDir(int index, char *v1, int level, int primary); +void taosReadDataDirCfg(char *v1, char *v2, char *v3); +void taosPrintDataDirCfg(); #ifdef __cplusplus } diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 892d682756..b651913d73 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -36,6 +36,11 @@ typedef struct SColumnInfoData { 
void* pData; // the corresponding block data in memory } SColumnInfoData; +typedef struct SResPair { + TSKEY key; + double avg; +} SResPair; + #define TSDB_DB_NAME_T 1 #define TSDB_TABLE_NAME_T 2 @@ -58,7 +63,7 @@ size_t tableIdPrefix(const char* name, char* prefix, int32_t len); void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable); -SSchema tGetTableNameColumnSchema(); +//SSchema tGetTbnameColumnSchema(); SSchema tGetBlockDistColumnSchema(); @@ -68,7 +73,7 @@ bool tscValidateTableNameLength(size_t len); SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters); -SSchema tGetTbnameColumnSchema(); +SSchema* tGetTbnameColumnSchema(); /** * check if the schema is valid or not, including following aspects: diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index f212054793..f5b84e4c9a 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -289,23 +289,31 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { return NULL; } - pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol)); - if (pCols->cols == NULL) { - uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); - tdFreeDataCols(pCols); - return NULL; + pCols->maxPoints = maxRows; + + if (maxCols > 0) { + pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol)); + if (pCols->cols == NULL) { + uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, + strerror(errno)); + tdFreeDataCols(pCols); + return NULL; + } + + pCols->maxCols = maxCols; } pCols->maxRowSize = maxRowSize; - pCols->maxCols = maxCols; - pCols->maxPoints = maxRows; pCols->bufSize = maxRowSize * maxRows; - pCols->buf = malloc(pCols->bufSize); - if (pCols->buf == NULL) { - uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); - tdFreeDataCols(pCols); - return NULL; + if (pCols->bufSize > 0) { + pCols->buf = malloc(pCols->bufSize); + if (pCols->buf == NULL) { + uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, + strerror(errno)); + tdFreeDataCols(pCols); + return NULL; + } } return pCols; @@ -337,12 +345,13 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { return 0; } -void tdFreeDataCols(SDataCols *pCols) { +SDataCols *tdFreeDataCols(SDataCols *pCols) { if (pCols) { tfree(pCols->buf); tfree(pCols->cols); free(pCols); } + return NULL; } SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index f941fc4501..f50b829baa 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -407,7 +407,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) { SSchema* pSchema = exception_calloc(1, sizeof(SSchema)); left->pSchema = pSchema; - *pSchema = tGetTbnameColumnSchema(); + *pSchema = *tGetTbnameColumnSchema(); tExprNode* right = exception_calloc(1, sizeof(tExprNode)); expr->_node.pRight = right; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a2d02be683..fb6d745931 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -59,7 +59,6 @@ char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string int8_t tsEnableCoreFile = 0; int32_t tsMaxBinaryDisplayWidth = 30; -char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/"; /* * denote if the server needs to compress response message at the application layer 
to client, including query rsp, @@ -182,7 +181,15 @@ char tsDnodeDir[TSDB_FILENAME_LEN] = {0}; char tsMnodeDir[TSDB_FILENAME_LEN] = {0}; char tsDataDir[TSDB_FILENAME_LEN] = {0}; char tsScriptDir[TSDB_FILENAME_LEN] = {0}; -char tsVnodeBakDir[TSDB_FILENAME_LEN] = {0}; +char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/"; + +int32_t tsDiskCfgNum = 0; + +#ifndef _STORAGE +SDiskCfg tsDiskCfg[1]; +#else +SDiskCfg tsDiskCfg[TSDB_MAX_DISKS]; +#endif /* * minimum scale for whole system, millisecond by default @@ -227,6 +234,7 @@ int32_t sDebugFlag = 135; int32_t wDebugFlag = 135; int32_t tsdbDebugFlag = 131; int32_t cqDebugFlag = 131; +int32_t fsDebugFlag = 135; int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; @@ -265,7 +273,7 @@ bool taosCfgDynamicOptions(char *msg) { int32_t vint = 0; paGetToken(msg, &option, &olen); - if (olen == 0) return TSDB_CODE_COM_INVALID_CFG_MSG; + if (olen == 0) return false;; paGetToken(option + olen + 1, &value, &vlen); if (vlen == 0) @@ -308,11 +316,9 @@ bool taosCfgDynamicOptions(char *msg) { } return true; } - if (strncasecmp(cfg->option, "debugFlag", olen) == 0) { - taosSetAllDebugFlag(); + taosSetAllDebugFlag(); } - return true; } @@ -334,6 +340,39 @@ bool taosCfgDynamicOptions(char *msg) { return false; } +void taosAddDataDir(int index, char *v1, int level, int primary) { + tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN); + tsDiskCfg[index].level = level; + tsDiskCfg[index].primary = primary; + uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary); +} + +#ifndef _STORAGE +void taosReadDataDirCfg(char *v1, char *v2, char *v3) { + if (tsDiskCfgNum == 1) { + SDiskCfg *cfg = &tsDiskCfg[0]; + uInfo("dataDir:%s, level:%d primary:%d is replaced by %s", cfg->dir, cfg->level, cfg->primary, v1); + } + taosAddDataDir(0, v1, 0, 1); + tsDiskCfgNum = 1; +} + +void taosPrintDataDirCfg() { + for (int i = 0; i < tsDiskCfgNum; ++i) { + SDiskCfg *cfg = &tsDiskCfg[i]; + uInfo(" dataDir: %s", cfg->dir); + } +} +#endif + +static void taosCheckDataDirCfg() { + if (tsDiskCfgNum <= 0) { + taosAddDataDir(0, tsDataDir, 0, 1); + tsDiskCfgNum = 1; + uTrace("dataDir:%s, level:0 primary:1 is configured by default", tsDataDir); + } +} + static void doInitGlobalConfig(void) { osInit(); srand(taosSafeRand()); @@ -415,7 +454,7 @@ static void doInitGlobalConfig(void) { cfg.option = "dataDir"; cfg.ptr = tsDataDir; - cfg.valType = TAOS_CFG_VTYPE_DIRECTORY; + cfg.valType = TAOS_CFG_VTYPE_DATA_DIRCTORY; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; cfg.minValue = 0; cfg.maxValue = 0; @@ -1448,6 +1487,7 @@ int32_t taosCheckGlobalCfg() { snprintf(tsSecond, sizeof(tsSecond), "%s:%u", fqdn, port); } + taosCheckDataDirCfg(); taosGetSystemInfo(); tsSetLocale(); diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 178ed09123..787aa1e95b 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -10,6 +10,7 @@ #define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T) +//TODO remove it void extractTableName(const char* tableId, char* name) { size_t s1 = strcspn(tableId, &TS_PATH_DELIMITER[0]); size_t s2 = strcspn(&tableId[s1 + 1], &TS_PATH_DELIMITER[0]); @@ -24,6 +25,7 @@ char* extractDBName(const char* tableId, char* name) { return strncpy(name, &tableId[offset1 + 1], len); } +// todo remove it size_t tableIdPrefix(const char* name, char* prefix, int32_t len) { tstrncpy(prefix, name, len); strcat(prefix, TS_PATH_DELIMITER); @@ -31,14 +33,6 @@ size_t tableIdPrefix(const char* name, char* prefix, int32_t len) { return strlen(prefix); 
} -SSchema tGetTableNameColumnSchema() { - SSchema s = {0}; - s.bytes = TSDB_TABLE_NAME_LEN - 1 + VARSTR_HEADER_SIZE; - s.type = TSDB_DATA_TYPE_BINARY; - s.colId = TSDB_TBNAME_COLUMN_INDEX; - tstrncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN); - return s; -} SSchema tGetBlockDistColumnSchema() { SSchema s = {0}; s.bytes = TSDB_MAX_BINARY_LEN;; @@ -189,15 +183,15 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) { } } -SSchema tGetTbnameColumnSchema() { - struct SSchema s = { - .colId = TSDB_TBNAME_COLUMN_INDEX, - .type = TSDB_DATA_TYPE_BINARY, - .bytes = TSDB_TABLE_NAME_LEN - }; +static struct SSchema _s = { + .colId = TSDB_TBNAME_COLUMN_INDEX, + .type = TSDB_DATA_TYPE_BINARY, + .bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE, + .name = TSQL_TBNAME_L, +}; - strcpy(s.name, TSQL_TBNAME_L); - return s; +SSchema* tGetTbnameColumnSchema() { + return &_s; } static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) { diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index cfad85be60..7009c4d5c3 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -86,43 +86,53 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32 switch (type) { case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: { + pVar->nLen = tDataTypes[type].bytes; pVar->i64 = GET_INT8_VAL(pz); break; } case TSDB_DATA_TYPE_UTINYINT: { + pVar->nLen = tDataTypes[type].bytes; pVar->u64 = GET_UINT8_VAL(pz); break; } case TSDB_DATA_TYPE_SMALLINT: { + pVar->nLen = tDataTypes[type].bytes; pVar->i64 = GET_INT16_VAL(pz); break; } case TSDB_DATA_TYPE_USMALLINT: { + pVar->nLen = tDataTypes[type].bytes; pVar->u64 = GET_UINT16_VAL(pz); break; } case TSDB_DATA_TYPE_INT: { + pVar->nLen = tDataTypes[type].bytes; pVar->i64 = GET_INT32_VAL(pz); break; } case TSDB_DATA_TYPE_UINT: { + pVar->nLen = tDataTypes[type].bytes; pVar->u64 = GET_UINT32_VAL(pz); break; } case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: { + pVar->nLen = tDataTypes[type].bytes; pVar->i64 = GET_INT64_VAL(pz); break; } case TSDB_DATA_TYPE_UBIGINT: { + pVar->nLen = tDataTypes[type].bytes; pVar->u64 = GET_UINT64_VAL(pz); break; } case TSDB_DATA_TYPE_DOUBLE: { + pVar->nLen = tDataTypes[type].bytes; pVar->dKey = GET_DOUBLE_VAL(pz); break; } case TSDB_DATA_TYPE_FLOAT: { + pVar->nLen = tDataTypes[type].bytes; pVar->dKey = GET_FLOAT_VAL(pz); break; } @@ -144,6 +154,7 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32 default: pVar->i64 = GET_INT32_VAL(pz); + pVar->nLen = tDataTypes[TSDB_DATA_TYPE_INT].bytes; } pVar->nType = type; diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 59b09c8695..47d6b90e91 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.18-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.19-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} 
DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 34b0a3c6d3..f6221aca89 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.19 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 4756ac555f..fb97c1455d 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.19 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -36,6 +36,7 @@ 3.6.0 1.1.2 3.5 + @@ -80,8 +81,6 @@ commons-dbcp2 2.7.0 - - @@ -122,12 +121,16 @@ maven-surefire-plugin 2.12.4 + pertest + ${maven.test.jvmargs} **/*Test.java **/AppMemoryLeakTest.java + **/TaosInfoMonitorTest.java **/FailOverTest.java + **/InvalidResultSetPointerTest.java true diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java similarity index 99% rename from src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java rename to src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java index f864788bff..21bf8e7a93 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java @@ -8,7 +8,7 @@ import java.util.List; import java.util.Properties; import java.util.StringTokenizer; -public abstract class AbstractTaosDriver implements Driver { +public abstract class AbstractDriver implements Driver { private static final String TAOS_CFG_FILENAME = "taos.cfg"; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java new file mode 100644 index 0000000000..14bd2929f1 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -0,0 +1,1210 @@ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.Calendar; +import java.util.Map; + +public abstract class AbstractResultSet extends WrapperImpl implements ResultSet { + private int fetchSize; + + @Override + public abstract boolean next() throws SQLException; + + @Override + public abstract void close() throws SQLException; + + @Override + public boolean wasNull() throws SQLException { + return false; + } + + @Override + public abstract String getString(int columnIndex) throws SQLException; + + @Override + public abstract boolean getBoolean(int columnIndex) throws SQLException; + + @Override + public byte getByte(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract short getShort(int columnIndex) throws SQLException; + + @Override + public abstract int getInt(int columnIndex) throws SQLException; + + @Override + public abstract long getLong(int columnIndex) throws SQLException; + + @Override + public abstract float getFloat(int columnIndex) throws SQLException; + + @Override + public abstract double getDouble(int columnIndex) throws SQLException; + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) 
throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract Timestamp getTimestamp(int columnIndex) throws SQLException; + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(findColumn(columnLabel)); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return getBoolean(findColumn(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return getByte(findColumn(columnLabel)); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return getShort(findColumn(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return getInt(findColumn(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return getLong(findColumn(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return getFloat(findColumn(columnLabel)); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return getDouble(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return getBytes(findColumn(columnLabel)); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return getDate(findColumn(columnLabel)); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return getTime(findColumn(columnLabel)); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + 
return getTimestamp(findColumn(columnLabel)); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + return getAsciiStream(findColumn(columnLabel)); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + return getUnicodeStream(findColumn(columnLabel)); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + return getBinaryStream(findColumn(columnLabel)); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + return null; + } + + @Override + public void clearWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + } + + @Override + public String getCursorName() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract ResultSetMetaData getMetaData() throws SQLException; + + @Override + public Object getObject(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return getObject(findColumn(columnLabel)); + } + + @Override + public abstract int findColumn(String columnLabel) throws SQLException; + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + return getCharacterStream(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public abstract boolean isBeforeFirst() throws SQLException; + + @Override + public abstract boolean isAfterLast() throws SQLException; + + @Override + public abstract boolean isFirst() throws SQLException; + + @Override + public abstract boolean isLast() throws SQLException; + + @Override + public abstract void beforeFirst() throws SQLException; + + @Override + public abstract void afterLast() throws SQLException; + + @Override + public abstract boolean first() throws SQLException; + + @Override + public abstract boolean last() throws SQLException; + + @Override + public abstract int getRow() throws SQLException; + + @Override + public abstract boolean absolute(int row) throws SQLException; + + @Override + public abstract boolean relative(int rows) throws SQLException; + + @Override + public abstract boolean previous() throws SQLException; + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); 
+ //nothing to do + } + + @Override + public int getFetchDirection() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (rows < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + //nothing to do + this.fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return this.fetchSize; + } + + @Override + public int getType() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public int getConcurrency() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public boolean rowUpdated() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean rowInserted() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean rowDeleted() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateFloat(int columnIndex, 
float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void insertRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void deleteRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void refreshRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void cancelRowUpdates() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void moveToInsertRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void moveToCurrentRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + 
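+    // The methods in this base class follow one pattern: first reject the call if the result set is
+    // already closed, then throw ERROR_UNSUPPORTED_METHOD for JDBC features the driver does not
+    // implement (row updates, cursor repositioning helpers, streams, BLOB/CLOB access). Label-based
+    // getters delegate to their index-based counterparts through findColumn(columnLabel); the
+    // abstract typed getters, the navigation methods and getMetaData() are left to concrete
+    // result set implementations.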
+ @Override + public abstract Statement getStatement() throws SQLException; + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Time getTime(String 
columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + if 
(isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int getHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract String getNString(int columnIndex) throws SQLException; + + @Override + public String getNString(String columnLabel) throws SQLException { + return getNString(findColumn(columnLabel)); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + 
public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java new file mode 100644 index 0000000000..aac97c530d --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java @@ -0,0 +1,249 @@ +package com.taosdata.jdbc; + +import java.sql.*; + +public abstract class AbstractStatement extends WrapperImpl implements Statement { + + private volatile boolean closeOnCompletion; + private int fetchSize; + + @Override + public abstract ResultSet executeQuery(String sql) throws SQLException; + + @Override + public abstract int executeUpdate(String sql) throws SQLException; + + @Override + public abstract void close() throws SQLException; + 
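+    // The defaults below supply the boilerplate half of the JDBC Statement contract: closed-state
+    // checks, rejection of negative limits with ERROR_INVALID_VARIABLE, forward-only/read-only
+    // result set characteristics, and SQLFeatureNotSupportedException for the generated-key
+    // variants of execute/executeUpdate. Query execution, batching and connection access remain
+    // abstract and are implemented by concrete subclasses.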
+ @Override + public int getMaxFieldSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return TSDBConstants.maxFieldSize; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (max < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + // nothing to do + } + + @Override + public int getMaxRows() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return 0; + } + + @Override + public void setMaxRows(int max) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (max < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + // nothing to do + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + + @Override + public int getQueryTimeout() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return 0; + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (seconds < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + @Override + public void cancel() throws SQLException { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + // nothing to do + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + + @Override + public void setCursorName(String name) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract boolean execute(String sql) throws SQLException; + + @Override + public abstract ResultSet getResultSet() throws SQLException; + + @Override + public abstract int getUpdateCount() throws SQLException; + + @Override + public boolean getMoreResults() throws SQLException { + return getMoreResults(CLOSE_CURRENT_RESULT); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + //nothing to do + } + + @Override + public int getFetchDirection() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (rows < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + //nothing to do + this.fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return this.fetchSize; + } + + @Override + public int getResultSetConcurrency() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public int getResultSetType() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public abstract void addBatch(String sql) throws SQLException; + + @Override + public abstract void clearBatch() throws SQLException; + + @Override + public abstract int[] executeBatch() throws SQLException; + + @Override + public abstract Connection getConnection() throws SQLException; + + @Override + public boolean getMoreResults(int current) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return false; + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int getResultSetHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public void setPoolable(boolean poolable) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + //nothing to do + } + + @Override + public boolean isPoolable() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + this.closeOnCompletion = true; + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return this.closeOnCompletion; + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java index f82c064e75..499c656c9d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java @@ -160,12 +160,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Date getDate(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Time getTime(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -176,17 +176,17 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -256,22 +256,22 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public InputStream getAsciiStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getBinaryStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -281,7 +281,7 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public String getCursorName() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -313,12 +313,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Reader getCharacterStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -353,22 +353,22 @@ public class 
DatabaseMetaDataResultSet implements ResultSet { @Override public void beforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void afterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean first() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean last() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -383,17 +383,17 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public boolean absolute(int row) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean relative(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean previous() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -443,227 +443,227 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public void updateNull(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void 
updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateNull(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new 
SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(String columnLabel, Object x) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void insertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void deleteRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void refreshRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void cancelRowUpdates() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void moveToInsertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void moveToCurrentRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -673,12 +673,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Object getObject(int columnIndex, Map> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Ref getRef(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -1043,12 +1043,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public T getObject(String columnLabel, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 94abe39655..547fe6a9e9 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -34,44 +34,37 @@ import java.util.*; import java.util.concurrent.Executor; public class TSDBConnection implements Connection { - protected Properties props = null; private TSDBJNIConnector connector = null; private String catalog = null; - private TSDBDatabaseMetaData dbMetaData = null; + private TSDBDatabaseMetaData dbMetaData; private Properties clientInfoProps = new Properties(); private int timeoutMilliseconds = 0; - + private boolean batchFetch = false; public TSDBConnection(Properties info, 
TSDBDatabaseMetaData meta) throws SQLException { this.dbMetaData = meta; connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), - info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), + info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER), info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); - + String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD); if (batchLoad != null) { - this.batchFetch = Boolean.parseBoolean(batchLoad); + this.batchFetch = Boolean.parseBoolean(batchLoad); } } private void connect(String host, int port, String dbName, String user, String password) throws SQLException { this.connector = new TSDBJNIConnector(); this.connector.connect(host, port, dbName, user, password); - - try { - this.setCatalog(dbName); - } catch (SQLException e) { - e.printStackTrace(); - } - + this.setCatalog(dbName); this.dbMetaData.setConnection(this); } @@ -80,68 +73,86 @@ public class TSDBConnection implements Connection { } public Statement createStatement() throws SQLException { - if (!this.connector.isClosed()) { - TSDBStatement statement = new TSDBStatement(this, this.connector); - statement.setConnection(this); - return statement; - } else { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } + + TSDBStatement statement = new TSDBStatement(this, this.connector); + statement.setConnection(this); + return statement; } public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException { - if (this.connector.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } long id = this.connector.subscribe(topic, sql, restart, 0); if (id == 0) { - throw new SQLException(TSDBConstants.WrapErrMsg("failed to create subscription")); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED); } - return new TSDBSubscribe(this.connector, id); } public PreparedStatement prepareStatement(String sql) throws SQLException { - if (!this.connector.isClosed()) { - return new TSDBPreparedStatement(this, this.connector, sql); - } else { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } + + return new TSDBPreparedStatement(this, this.connector, sql); } public CallableStatement prepareCall(String sql) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public String nativeSQL(String sql) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void setAutoCommit(boolean autoCommit) throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + } public boolean getAutoCommit() throws SQLException { + if (isClosed()) { + 
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + return true; } public void commit() throws SQLException { - } - - public void rollback() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void close() throws SQLException { - if (this.connector != null && !this.connector.isClosed()) { - this.connector.closeConnection(); - } else { - throw new SQLException(TSDBConstants.WrapErrMsg("connection is already closed!")); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } } + public void rollback() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + public void close() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + this.connector.closeConnection(); + } + public boolean isClosed() throws SQLException { - return this.connector.isClosed(); + return this.connector != null && this.connector.isClosed(); } /** @@ -154,6 +165,9 @@ public class TSDBConnection implements Connection { * @throws SQLException if a database access error occurs */ public DatabaseMetaData getMetaData() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return this.dbMetaData; } @@ -165,17 +179,29 @@ public class TSDBConnection implements Connection { * @throws SQLException */ public void setReadOnly(boolean readOnly) throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } } public boolean isReadOnly() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return true; } public void setCatalog(String catalog) throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } this.catalog = catalog; } public String getCatalog() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return this.catalog; } @@ -187,6 +213,19 @@ public class TSDBConnection implements Connection { * @throws SQLException */ public void setTransactionIsolation(int level) throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + switch (level) { + case Connection.TRANSACTION_NONE: + case Connection.TRANSACTION_READ_COMMITTED: + case Connection.TRANSACTION_READ_UNCOMMITTED: + case Connection.TRANSACTION_REPEATABLE_READ: + case Connection.TRANSACTION_SERIALIZABLE: + break; + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } } /** @@ -196,60 +235,81 @@ public class TSDBConnection implements Connection { * @throws SQLException */ public int getTransactionIsolation() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return Connection.TRANSACTION_NONE; } public SQLWarning getWarnings() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } //todo: implement getWarnings according to the warning messages returned from TDengine return null; -// throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } public void clearWarnings() throws SQLException { - // left blank to support HikariCP connection + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } //todo: implement clearWarnings according to the warning messages returned from TDengine } public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { // This method is implemented in the current way to support Spark if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { - throw new SQLException(TSDBConstants.INVALID_VARIABLES); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) { - throw new SQLException(TSDBConstants.INVALID_VARIABLES); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } return this.prepareStatement(sql); } - + public Boolean getBatchFetch() { - return this.batchFetch; + return this.batchFetch; } - + public void setBatchFetch(Boolean batchFetch) { - this.batchFetch = batchFetch; + this.batchFetch = batchFetch; } public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Map> getTypeMap() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void setTypeMap(Map> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void setHoldability(int holdability) throws SQLException { - // intentionally left empty to support druid connection pool. + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } } /** @@ -259,67 +319,111 @@ public class TSDBConnection implements Connection { * @throws SQLException */ public int getHoldability() throws SQLException { - //intentionally left empty to support HikariCP connection. 
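[Editorial aside before the getHoldability() hunk resumes below.] The recurring shape of these TSDBConnection rewrites is a closed-connection guard followed by either a harmless default value or the dedicated unsupported-operation error, instead of a silent no-op or a bare SQLException. A minimal, self-contained sketch of that pattern; the class and method names here are illustrative and not part of this patch:

    import java.sql.SQLException;
    import java.sql.SQLFeatureNotSupportedException;
    import java.sql.Savepoint;

    // Illustrative base class; the real driver repeats this logic inside each method body.
    abstract class ClosableJdbcHandle {
        private volatile boolean closed;

        public void close() {
            closed = true;
        }

        public boolean isClosed() {
            return closed;
        }

        // Step 1: every call first verifies the handle is still open.
        protected void checkOpen() throws SQLException {
            if (isClosed())
                throw new SQLException("connection already closed"); // cf. ERROR_CONNECTION_CLOSED
        }

        // Step 2: operations the driver does not implement fail with the JDBC-specific subtype.
        protected <T> T unsupported() throws SQLException {
            throw new SQLFeatureNotSupportedException("this operation is NOT supported currently!");
        }
    }

    class DemoConnectionLike extends ClosableJdbcHandle {
        public boolean isReadOnly() throws SQLException {
            checkOpen();
            return true;          // harmless default, mirroring the patch
        }

        public Savepoint setSavepoint() throws SQLException {
            checkOpen();
            return unsupported(); // explicit "not supported" outcome
        }
    }
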
+ if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return ResultSet.HOLD_CURSORS_OVER_COMMIT; } public Savepoint setSavepoint() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Savepoint setSavepoint(String name) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void rollback(Savepoint savepoint) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void releaseSavepoint(Savepoint savepoint) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - return this.prepareStatement(sql, resultSetType, resultSetConcurrency); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Clob createClob() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Blob createBlob() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public NClob createNClob() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public SQLXML createSQLXML() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public boolean isValid(int timeout) throws SQLException { @@ -338,31 +442,52 @@ public class TSDBConnection implements Connection { } public String getClientInfo(String name) throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return clientInfoProps.getProperty(name); } public Properties getClientInfo() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return clientInfoProps; } public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void setSchema(String schema) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public String getSchema() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void abort(Executor executor) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { @@ -370,14 +495,21 @@ public class TSDBConnection implements Connection { } public int getNetworkTimeout() throws SQLException { + if (isClosed()) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + } return this.timeoutMilliseconds; } public T unwrap(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + try { + return iface.cast(this); + } catch (ClassCastException cce) { + throw new SQLException("Unable to unwrap to " + iface.toString()); + } } public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return iface.isInstance(this); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java index 4fb172ceb5..043db9bbd7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -19,12 +19,12 @@ import java.util.Map; public abstract class TSDBConstants { - public static final String STATEMENT_CLOSED = "Statement already closed."; - public static final String DEFAULT_PORT = "6200"; - public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!"; + public static final String STATEMENT_CLOSED = "statement is closed"; + public static final String UNSUPPORTED_METHOD_EXCEPTION_MSG = "this operation is NOT supported currently!"; public static final String INVALID_VARIABLES = "invalid variables"; - public static final String RESULT_SET_IS_CLOSED = "resultSet is closed."; + public static final String RESULT_SET_IS_CLOSED = "resultSet is closed"; + public static final String DEFAULT_PORT = "6200"; public static Map DATATYPE_MAP = null; public static final long JNI_NULL_POINTER = 0L; @@ -36,6 +36,7 @@ public abstract class TSDBConstants { public static final int JNI_NUM_OF_FIELDS_0 = -4; public static final int JNI_SQL_NULL = -5; public static final int JNI_FETCH_END = -6; + public static final int JNI_OUT_OF_MEMORY = -7; public static final int TSDB_DATA_TYPE_NULL = 0; public static final int TSDB_DATA_TYPE_BOOL = 1; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index c171ca2a36..999ce0645b 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -37,7 +37,7 @@ import java.util.logging.Logger; * register it with the DriverManager. 
This means that a user can load and
 * register a driver by doing Class.forName("foo.bah.Driver")
 */
-public class TSDBDriver extends AbstractTaosDriver {
+public class TSDBDriver extends AbstractDriver {

     @Deprecated
     private static final String URL_PREFIX1 = "jdbc:TSDB://";
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
new file mode 100644
index 0000000000..c32a32618a
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
@@ -0,0 +1,61 @@
+package com.taosdata.jdbc;
+
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TSDBError {
+    private static Map<Integer, String> TSDBErrorMap = new HashMap<>();
+
+    static {
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED, "connection already closed");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD, "this operation is NOT supported currently!");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variables");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED, "statement is closed");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED, "resultSet is closed");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY, "Batch is empty!");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY, "Can not issue data manipulation statements with executeQuery()");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE, "Can not issue SELECT via executeUpdate()");
+
+        /**************************************************/
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
+        /**************************************************/
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding");
+
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database!");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "JNI connection already closed!");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL, "invalid JNI result set!");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0, "invalid num of fields!");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_SQL_NULL, "empty sql string!");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_FETCH_END, "fetch to the end of resultset");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed!");
+    }
+
+    public static String wrapErrMsg(String msg) {
+        return "TDengine Error: " + msg;
+    }
+
+    public static SQLException createSQLException(int errorNumber) {
+        return createSQLException(errorNumber, null);
+    }
+
+    public static SQLException createSQLException(int errorNumber, String message) {
+        if (message == null || message.isEmpty()) {
+            if (TSDBErrorNumbers.contains(errorNumber))
+                message = TSDBErrorMap.get(errorNumber);
+            else
+                message = TSDBErrorMap.get(TSDBErrorNumbers.ERROR_UNKNOWN);
+        }
+
+        if (errorNumber == TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD)
+            return new SQLFeatureNotSupportedException(message);
+
+        if (errorNumber < TSDBErrorNumbers.ERROR_UNKNOWN)
+            // JDBC exception's error number is less than 0x2350
+            return new SQLException("ERROR (" + Integer.toHexString(errorNumber) + "): " + message);
+        // JNI exception's error number is greater than 0x2350
+        return new SQLException("TDengine ERROR (" + Integer.toHexString(errorNumber) + "): " + message);
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
new file mode 100644
index 0000000000..51a3f908dc
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
@@ -0,0 +1,60 @@
+package com.taosdata.jdbc;
+
+import java.util.HashSet;
+
+public class TSDBErrorNumbers {
+
+    public static final int ERROR_CONNECTION_CLOSED = 0x2301;          //connection already closed
+    public static final int ERROR_UNSUPPORTED_METHOD = 0x2302;         //this operation is NOT supported currently!
+    public static final int ERROR_INVALID_VARIABLE = 0x2303;           //invalid variables
+    public static final int ERROR_STATEMENT_CLOSED = 0x2304;           //statement already closed
+    public static final int ERROR_RESULTSET_CLOSED = 0x2305;           //resultSet is closed
+    public static final int ERROR_BATCH_IS_EMPTY = 0x2306;             //Batch is empty!
+    public static final int ERROR_INVALID_WITH_EXECUTEQUERY = 0x2307;  //Can not issue data manipulation statements with executeQuery()
+    public static final int ERROR_INVALID_WITH_EXECUTEUPDATE = 0x2308; //Can not issue SELECT via executeUpdate()
+
+    public static final int ERROR_UNKNOWN = 0x2350;                    //unknown error
+
+    public static final int ERROR_SUBSCRIBE_FAILED = 0x2351;           //failed to create subscription
+    public static final int ERROR_UNSUPPORTED_ENCODING = 0x2352;       //Unsupported encoding
+
+    public static final int ERROR_JNI_TDENGINE_ERROR = 0x2353;
+    public static final int ERROR_JNI_CONNECTION_NULL = 0x2354;        //invalid tdengine connection!
+    public static final int ERROR_JNI_RESULT_SET_NULL = 0x2355;
+    public static final int ERROR_JNI_NUM_OF_FIELDS_0 = 0x2356;
+    public static final int ERROR_JNI_SQL_NULL = 0x2357;
+    public static final int ERROR_JNI_FETCH_END = 0x2358;
+    public static final int ERROR_JNI_OUT_OF_MEMORY = 0x2359;
+
+    private static final HashSet<Integer> errorNumbers;
+
+    static {
+        errorNumbers = new HashSet<>();
+        errorNumbers.add(ERROR_CONNECTION_CLOSED);
+        errorNumbers.add(ERROR_UNSUPPORTED_METHOD);
+        errorNumbers.add(ERROR_INVALID_VARIABLE);
+        errorNumbers.add(ERROR_STATEMENT_CLOSED);
+        errorNumbers.add(ERROR_RESULTSET_CLOSED);
+        errorNumbers.add(ERROR_INVALID_WITH_EXECUTEQUERY);
+        errorNumbers.add(ERROR_INVALID_WITH_EXECUTEUPDATE);
+        /*****************************************************/
+        errorNumbers.add(ERROR_SUBSCRIBE_FAILED);
+        errorNumbers.add(ERROR_UNSUPPORTED_ENCODING);
+
+        errorNumbers.add(ERROR_JNI_TDENGINE_ERROR);
+        errorNumbers.add(ERROR_JNI_CONNECTION_NULL);
+        errorNumbers.add(ERROR_JNI_RESULT_SET_NULL);
+        errorNumbers.add(ERROR_JNI_NUM_OF_FIELDS_0);
+        errorNumbers.add(ERROR_JNI_SQL_NULL);
+        errorNumbers.add(ERROR_JNI_FETCH_END);
+        errorNumbers.add(ERROR_JNI_OUT_OF_MEMORY);
+
+    }
+
+    private TSDBErrorNumbers() {
+    }
+
+    public static boolean contains(int errorNumber) {
+        return errorNumbers.contains(errorNumber);
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index f918463439..b0f016cd72 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -14,6 +14,8 @@
  *****************************************************************************/
 package com.taosdata.jdbc;
 
+import com.taosdata.jdbc.utils.TaosInfo;
+
 import java.sql.SQLException;
 import java.sql.SQLWarning;
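[Editorial aside; the TSDBJNIConnector import hunk resumes right after.] A quick usage sketch of the two classes added above, showing how the error numbers map to messages and exception types. The demo class name is hypothetical, and the expected output comments are inferred from the code in this patch rather than taken from its tests:

    import com.taosdata.jdbc.TSDBError;
    import com.taosdata.jdbc.TSDBErrorNumbers;

    import java.sql.SQLException;
    import java.sql.SQLFeatureNotSupportedException;

    public class TSDBErrorDemo {
        public static void main(String[] args) {
            // Known error numbers resolve to their mapped message; JDBC-level numbers
            // (below 0x2350) are prefixed with "ERROR".
            SQLException closed = TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
            System.out.println(closed.getMessage());        // ERROR (2301): connection already closed

            // ERROR_UNSUPPORTED_METHOD is special-cased to the feature-not-supported subtype.
            SQLException unsupported = TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
            System.out.println(unsupported instanceof SQLFeatureNotSupportedException); // true

            // Numbers at or above 0x2350 are reported as TDengine/JNI-level errors.
            SQLException jniError = TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
            System.out.println(jniError.getMessage());      // TDengine ERROR (2357): empty sql string!
        }
    }
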
import java.util.List; @@ -21,6 +23,8 @@ import java.util.List; public class TSDBJNIConnector { private static volatile Boolean isInitialized = false; + private TaosInfo taosInfo = TaosInfo.getInstance(); + static { System.loadLibrary("taos"); System.out.println("java.library.path:" + System.getProperty("java.library.path")); @@ -34,7 +38,7 @@ public class TSDBJNIConnector { /** * Result set pointer for the current connection */ - private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; /** * result set status in current connection @@ -91,7 +95,8 @@ public class TSDBJNIConnector { */ public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException { if (this.taos != TSDBConstants.JNI_NULL_POINTER) { - this.closeConnectionImp(this.taos); +// this.closeConnectionImp(this.taos); + closeConnection(); this.taos = TSDBConstants.JNI_NULL_POINTER; } @@ -99,7 +104,8 @@ public class TSDBJNIConnector { if (this.taos == TSDBConstants.JNI_NULL_POINTER) { throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg(0L)), "", this.getErrCode(0l)); } - + // invoke connectImp only here + taosInfo.conn_open_increment(); return true; } @@ -113,31 +119,43 @@ public class TSDBJNIConnector { public long executeQuery(String sql) throws SQLException { // close previous result set if the user forgets to invoke the // free method to close previous result set. - if (!this.isResultsetClosed) { - freeResultSet(taosResultSetPointer); - } +// if (!this.isResultsetClosed) { +// freeResultSet(taosResultSetPointer); +// } Long pSql = 0l; try { pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos); + taosInfo.stmt_count_increment(); } catch (Exception e) { e.printStackTrace(); this.freeResultSetImp(this.taos, pSql); - throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding")); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING); + } + if (pSql == TSDBConstants.JNI_CONNECTION_NULL) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + } + if (pSql == TSDBConstants.JNI_SQL_NULL) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL); + } + if (pSql == TSDBConstants.JNI_OUT_OF_MEMORY) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY); } int code = this.getErrCode(pSql); - if (code != 0) { + if (code != TSDBConstants.JNI_SUCCESS) { affectedRows = -1; String msg = this.getErrMsg(pSql); - this.freeResultSetImp(this.taos, pSql); - throw new SQLException(TSDBConstants.WrapErrMsg(msg), "", code); + throw TSDBError.createSQLException(code, msg); } // Try retrieving result set for the executed SQL using the current connection pointer. 
- taosResultSetPointer = this.getResultSetImp(this.taos, pSql); - isResultsetClosed = (taosResultSetPointer == TSDBConstants.JNI_NULL_POINTER); + pSql = this.getResultSetImp(this.taos, pSql); + isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER); return pSql; } @@ -166,9 +184,9 @@ public class TSDBJNIConnector { * Get resultset pointer * Each connection should have a single open result set at a time */ - public long getResultSet() { - return taosResultSetPointer; - } +// public long getResultSet() { +// return taosResultSetPointer; +// } private native long getResultSetImp(long connection, long pSql); @@ -181,16 +199,16 @@ public class TSDBJNIConnector { /** * Free resultset operation from C to release resultset pointer by JNI */ - public int freeResultSet(long result) { + public int freeResultSet(long pSql) { int res = TSDBConstants.JNI_SUCCESS; - if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - throw new RuntimeException("Invalid result set pointer"); - } +// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { +// throw new RuntimeException("Invalid result set pointer"); +// } - if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - res = this.freeResultSetImp(this.taos, result); - taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - } +// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { + res = this.freeResultSetImp(this.taos, pSql); +// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// } isResultsetClosed = true; return res; @@ -200,15 +218,15 @@ public class TSDBJNIConnector { * Close the open result set which is associated to the current connection. If the result set is already * closed, return 0 for success. 
*/ - public int freeResultSet() { - int resCode = TSDBConstants.JNI_SUCCESS; - if (!isResultsetClosed) { - resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer); - taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - isResultsetClosed = true; - } - return resCode; - } +// public int freeResultSet() { +// int resCode = TSDBConstants.JNI_SUCCESS; +// if (!isResultsetClosed) { +// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer); +// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// isResultsetClosed = true; +// } +// return resCode; +// } private native int freeResultSetImp(long connection, long result); @@ -244,10 +262,11 @@ public class TSDBJNIConnector { private native int fetchRowImp(long connection, long resultSet, TSDBResultSetRowData rowData); public int fetchBlock(long resultSet, TSDBResultSetBlockData blockData) { - return this.fetchBlockImp(this.taos, resultSet, blockData); + return this.fetchBlockImp(this.taos, resultSet, blockData); } - + private native int fetchBlockImp(long connection, long resultSet, TSDBResultSetBlockData blockData); + /** * Execute close operation from C to release connection pointer by JNI * @@ -262,6 +281,8 @@ public class TSDBJNIConnector { } else { throw new SQLException("Undefined error code returned by TDengine when closing a connection"); } + // invoke closeConnectionImpl only here + taosInfo.connect_close_increment(); } private native int closeConnectionImp(long connection); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index c6b41ce004..decf14434e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -264,17 +264,17 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -284,7 +284,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -321,156 +321,156 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setRef(int parameterIndex, Ref x) throws SQLException { - 
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBlob(int parameterIndex, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setClob(int parameterIndex, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setArray(int parameterIndex, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public ResultSetMetaData getMetaData() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setURL(int parameterIndex, URL x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public ParameterMetaData getParameterMetaData() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setRowId(int parameterIndex, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNString(int parameterIndex, String value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNClob(int parameterIndex, NClob value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setClob(int parameterIndex, Reader reader, long length) throws 
SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setClob(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNClob(int parameterIndex, Reader reader) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index d06922c680..80ff492530 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -14,36 +14,17 @@ *****************************************************************************/ package com.taosdata.jdbc; -import java.io.InputStream; -import java.io.Reader; import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.Clob; -import java.sql.Date; -import java.sql.NClob; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Statement; -import java.sql.Time; -import java.sql.Timestamp; +import java.sql.*; import java.util.ArrayList; -import java.util.Calendar; -import java.util.Iterator; import java.util.List; -import java.util.Map; -public class TSDBResultSet implements ResultSet { - private TSDBJNIConnector jniConnector = null; +public class TSDBResultSet extends AbstractResultSet implements ResultSet { + private TSDBJNIConnector jniConnector; + private final TSDBStatement statement; private long resultSetPointer = 0L; - private List columnMetaDataList = new ArrayList(); + private List columnMetaDataList = new ArrayList<>(); private TSDBResultSetRowData rowData; private TSDBResultSetBlockData blockData; @@ -52,24 +33,6 @@ public class TSDBResultSet implements ResultSet { private boolean lastWasNull = false; private final int COLUMN_INDEX_START_VALUE = 1; - private int rowIndex = 0; - - public TSDBJNIConnector getJniConnector() { - return jniConnector; - } - - public void setJniConnector(TSDBJNIConnector jniConnector) { - this.jniConnector = jniConnector; - } - - public long getResultSetPointer() { - return resultSetPointer; - } - - public void setResultSetPointer(long resultSetPointer) { - this.resultSetPointer = resultSetPointer; - } - public void setBatchFetch(boolean batchFetch) { this.batchFetch = batchFetch; } @@ -78,10 +41,6 @@ public class TSDBResultSet implements ResultSet { return this.batchFetch; } - public List getColumnMetaDataList() { - return columnMetaDataList; - } - public void setColumnMetaDataList(List columnMetaDataList) { this.columnMetaDataList = columnMetaDataList; } @@ -90,56 +49,25 @@ public class TSDBResultSet implements ResultSet { return rowData; } - public void setRowData(TSDBResultSetRowData rowData) { - this.rowData = rowData; - } - - public boolean isLastWasNull() { - return lastWasNull; - } - - public void setLastWasNull(boolean lastWasNull) { - this.lastWasNull = lastWasNull; - } - - public TSDBResultSet() { - - } - - public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException { + public TSDBResultSet(TSDBStatement statement, TSDBJNIConnector connector, long resultSetPointer) throws SQLException { + this.statement = statement; this.jniConnector = connector; this.resultSetPointer = resultSetPointer; + int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList); if (code == TSDBConstants.JNI_CONNECTION_NULL) { throw new 
SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + } + if (code == TSDBConstants.JNI_RESULT_SET_NULL) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); - } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { + } + if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); } - this.rowData = new TSDBResultSetRowData(this.columnMetaDataList.size()); this.blockData = new TSDBResultSetBlockData(this.columnMetaDataList, this.columnMetaDataList.size()); } - public T unwrap(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - public boolean isWrapperFor(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return iface.isInstance(this); - } - public boolean next() throws SQLException { if (this.getBatchFetch()) { if (this.blockData.forward()) { @@ -273,7 +201,7 @@ public class TSDBResultSet implements ResultSet { } public long getLong(int columnIndex) throws SQLException { - long res = 0l; + long res = 0L; int colIndex = getTrueColumnIndex(columnIndex); if (!this.getBatchFetch()) { @@ -317,14 +245,6 @@ public class TSDBResultSet implements ResultSet { } } - /* - * (non-Javadoc) - * - * @see java.sql.ResultSet#getBigDecimal(int, int) - * - * @deprecated Use {@code getBigDecimal(int columnIndex)} or {@code - * getBigDecimal(String columnLabel)} - */ @Deprecated public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { return new BigDecimal(getLong(columnIndex)); @@ -334,16 +254,6 @@ public class TSDBResultSet implements ResultSet { return getString(columnIndex).getBytes(); } - public Date getDate(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public Timestamp getTimestamp(int columnIndex) throws SQLException { Timestamp res = null; int colIndex = getTrueColumnIndex(columnIndex); @@ -359,112 +269,11 @@ public class TSDBResultSet implements ResultSet { } } - public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * (non-Javadoc) - * - * @see java.sql.ResultSet#getUnicodeStream(int) - * - * * @deprecated use getCharacterStream in place of - * getUnicodeStream - */ - @Deprecated - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public String getString(String columnLabel) throws SQLException { - return this.getString(this.findColumn(columnLabel)); - } - - public boolean getBoolean(String columnLabel) throws SQLException { - return this.getBoolean(this.findColumn(columnLabel)); - } - - public byte 
getByte(String columnLabel) throws SQLException { - return this.getByte(this.findColumn(columnLabel)); - } - - public short getShort(String columnLabel) throws SQLException { - return this.getShort(this.findColumn(columnLabel)); - } - - public int getInt(String columnLabel) throws SQLException { - return this.getInt(this.findColumn(columnLabel)); - } - - public long getLong(String columnLabel) throws SQLException { - return this.getLong(this.findColumn(columnLabel)); - } - - public float getFloat(String columnLabel) throws SQLException { - return this.getFloat(this.findColumn(columnLabel)); - } - - public double getDouble(String columnLabel) throws SQLException { - return this.getDouble(this.findColumn(columnLabel)); - } - - /* - * used by spark - */ - @Deprecated - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return this.getBigDecimal(this.findColumn(columnLabel), scale); - } - - public byte[] getBytes(String columnLabel) throws SQLException { - return this.getBytes(this.findColumn(columnLabel)); - } - - public Date getDate(String columnLabel) throws SQLException { - return this.getDate(this.findColumn(columnLabel)); - } - - public Time getTime(String columnLabel) throws SQLException { - return this.getTime(this.findColumn(columnLabel)); - } - - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return this.getTimestamp(this.findColumn(columnLabel)); - } - - public InputStream getAsciiStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Deprecated - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public InputStream getBinaryStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void clearWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public String getCursorName() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public ResultSetMetaData getMetaData() throws SQLException { return new TSDBResultSetMetaData(this.columnMetaDataList); } + @Override public Object getObject(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); @@ -476,32 +285,21 @@ public class TSDBResultSet implements ResultSet { } } + @Override public Object getObject(String columnLabel) throws SQLException { return this.getObject(this.findColumn(columnLabel)); } public int findColumn(String columnLabel) throws SQLException { - Iterator colMetaDataIt = this.columnMetaDataList.iterator(); - while (colMetaDataIt.hasNext()) { - ColumnMetaData colMetaData = colMetaDataIt.next(); + for (ColumnMetaData colMetaData : this.columnMetaDataList) { if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) { return colMetaData.getColIndex() + 1; } } - throw new SQLException(TSDBConstants.INVALID_VARIABLES); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } - public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Reader getCharacterStream(String columnLabel) 
throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * used by spark - */ + @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); @@ -513,403 +311,111 @@ public class TSDBResultSet implements ResultSet { } } - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - return this.getBigDecimal(this.findColumn(columnLabel)); - } - + @Override public boolean isBeforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean isAfterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean isFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean isLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public void beforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public void afterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean first() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean last() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public int getRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean absolute(int row) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean relative(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + @Override public boolean previous() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchDirection() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setFetchSize(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchSize() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - public int getConcurrency() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowUpdated() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowInserted() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowDeleted() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNull(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDate(int columnIndex, Date x) throws SQLException { - 
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNull(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void insertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void deleteRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void refreshRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void cancelRowUpdates() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void moveToInsertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void moveToCurrentRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Statement getStatement() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public Object getObject(int columnIndex, Map> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Ref getRef(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Blob getBlob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Clob getClob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Array getArray(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Object getObject(String columnLabel, Map> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Ref getRef(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Blob getBlob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Clob getClob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Array getArray(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public URL getURL(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public URL getURL(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateArray(String columnLabel, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public RowId getRowId(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getHoldability() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return this.statement; } public boolean isClosed() throws SQLException { + //TODO: check if need release resources boolean isClosed = true; if (jniConnector != null) { isClosed = jniConnector.isResultsetClosed(); @@ -917,183 +423,11 @@ public class TSDBResultSet implements ResultSet { return isClosed; } - public void updateNString(int columnIndex, String nString) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - 
- public void updateNString(String columnLabel, String nString) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public NClob getNClob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public NClob getNClob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public String getNString(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); return (String) rowData.get(colIndex); } - public String getNString(String columnLabel) throws SQLException { - return (String) this.getString(columnLabel); - } - - public Reader getNCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Reader getNCharacterStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public T getObject(String columnLabel, Class type) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - private int getTrueColumnIndex(int columnIndex) throws SQLException { if (columnIndex < this.COLUMN_INDEX_START_VALUE) { throw new SQLException("Column Index out of range, " + columnIndex + " < " + this.COLUMN_INDEX_START_VALUE); @@ -1103,7 +437,6 @@ public class TSDBResultSet implements ResultSet { if (columnIndex > numOfCols) { throw new SQLException("Column Index out of range, " + columnIndex + " > " + numOfCols); } - return columnIndex - 1; } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java index d6d69bd8b0..0c0071a949 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java @@ -29,11 +29,11 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public T unwrap(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public int getColumnCount() throws SQLException { @@ -94,7 +94,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public String getSchemaName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public int getPrecision(int column) throws SQLException { @@ -125,11 +125,11 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public String getTableName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public String getCatalogName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public int getColumnType(int column) throws SQLException { @@ -173,7 +173,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public boolean isDefinitelyWritable(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public String getColumnClassName(int column) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java index 059962a7a1..98b823a3c1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java @@ -1153,11 +1153,11 @@ public class TSDBResultSetWrapper implements ResultSet { } public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public T getObject(String columnLabel, Class type) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index e7317b8e1d..c4c1904629 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -17,30 +17,21 @@ package com.taosdata.jdbc; import java.sql.*; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.TimeUnit; -public class TSDBStatement implements Statement { - private TSDBJNIConnector connector = null; +public class TSDBStatement extends AbstractStatement { + private TSDBJNIConnector connector; /** * To store batched commands */ protected List batchedArgs; - - /** - * Timeout for a query - */ - protected int queryTimeout = 0; - - private Long pSql = 0l; - /** * Status of current statement */ - private boolean isClosed = true; - private int affectedRows = 0; - + private boolean isClosed; + private int affectedRows = -1; private TSDBConnection connection; + private TSDBResultSet resultSet; public void setConnection(TSDBConnection connection) { this.connection = connection; @@ -49,213 +40,89 @@ public class TSDBStatement implements Statement { TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) { this.connection = connection; this.connector = connector; - this.isClosed = false; - } - - @Override - public T unwrap(Class iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); } public ResultSet executeQuery(String sql) throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } + // check if closed + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + //TODO: if an insert statement is executed through executeQuery, the SQL runs first and pSql is only checked afterwards to tell whether it was an insert, so by that point the insert SQL has already been executed successfully - // TODO make sure it is not a update query - pSql = this.connector.executeQuery(sql); - - long resultSetPointer = this.connector.getResultSet(); - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + // execute query + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (this.connector.isUpdateQuery(pSql)) { this.connector.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } - - // create/insert/update/delete/alter - if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { - this.connector.freeResultSet(pSql); - return null; - } - - if (!this.connector.isUpdateQuery(pSql)) { - TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer); - res.setBatchFetch(this.connection.getBatchFetch()); - return res; - } else { - this.connector.freeResultSet(pSql); - return null; + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY); } + TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql); + res.setBatchFetch(this.connection.getBatchFetch()); + return res; } public int executeUpdate(String sql) throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } + if 
(isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - // TODO check if current query is update query - pSql = this.connector.executeQuery(sql); - long resultSetPointer = this.connector.getResultSet(); - - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (!this.connector.isUpdateQuery(pSql)) { this.connector.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE); } - - this.affectedRows = this.connector.getAffectedRows(pSql); + int affectedRows = this.connector.getAffectedRows(pSql); this.connector.freeResultSet(pSql); - - return this.affectedRows; - } - - public String getErrorMsg(long pSql) { - return this.connector.getErrMsg(pSql); + return affectedRows; } public void close() throws SQLException { if (!isClosed) { - if (!this.connector.isResultsetClosed()) { - this.connector.freeResultSet(); - } + if (this.resultSet != null) + this.resultSet.close(); isClosed = true; } } - public int getMaxFieldSize() throws SQLException { - return 0; -// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setMaxFieldSize(int max) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getMaxRows() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - // always set maxRows to zero, meaning unlimitted rows in a resultSet - return 0; - } - - public void setMaxRows(int max) throws SQLException { - // always set maxRows to zero, meaning unlimited rows in a resultSet - } - - public void setEscapeProcessing(boolean enable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getQueryTimeout() throws SQLException { - return queryTimeout; - } - - public void setQueryTimeout(int seconds) throws SQLException { - this.queryTimeout = seconds; - } - - public void cancel() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void clearWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setCursorName(String name) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public boolean execute(String sql) throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - boolean res = true; - pSql = this.connector.executeQuery(sql); - long resultSetPointer = this.connector.getResultSet(); - - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + // check if closed + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + // execute query + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (this.connector.isUpdateQuery(pSql)) { + this.affectedRows = this.connector.getAffectedRows(pSql); this.connector.freeResultSet(pSql); - throw new 
SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { - // no result set is retrieved - this.connector.freeResultSet(pSql); - res = false; + return false; } - return res; + this.resultSet = new TSDBResultSet(this, this.connector, pSql); + this.resultSet.setBatchFetch(this.connection.getBatchFetch()); + return true; } public ResultSet getResultSet() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - long resultSetPointer = connector.getResultSet(); - TSDBResultSet resSet = null; - if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - resSet = new TSDBResultSet(connector, resultSetPointer); - } - return resSet; + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); +// long resultSetPointer = connector.getResultSet(); +// TSDBResultSet resSet = null; +// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { +// resSet = new TSDBResultSet(connector, resultSetPointer); +// } + return this.resultSet; } public int getUpdateCount() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); return this.affectedRows; } - public boolean getMoreResults() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchDirection() throws SQLException { - return ResultSet.FETCH_FORWARD; -// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * used by spark - */ - public void setFetchSize(int rows) throws SQLException { - } - - /* - * used by spark - */ - public int getFetchSize() throws SQLException { - return 4096; - } - - public int getResultSetConcurrency() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getResultSetType() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public void addBatch(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs == null) { batchedArgs = new ArrayList<>(); } @@ -263,83 +130,41 @@ public class TSDBStatement implements Statement { } public void clearBatch() throws SQLException { - batchedArgs.clear(); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs != null) + batchedArgs.clear(); } public int[] executeBatch() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - if (batchedArgs == null) { - throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!")); - } else { - int[] res = new int[batchedArgs.size()]; - for (int i = 0; i < batchedArgs.size(); i++) { - res[i] = executeUpdate(batchedArgs.get(i)); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs == null || batchedArgs.isEmpty()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY); + + int[] res = new int[batchedArgs.size()]; + for (int i = 0; i < batchedArgs.size(); i++) { + boolean isSelect = 
execute(batchedArgs.get(i)); + if (isSelect) { + res[i] = SUCCESS_NO_INFO; + } else { + res[i] = getUpdateCount(); } - return res; } + return res; } public Connection getConnection() throws SQLException { - if (this.connector != null) - return this.connection; - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean getMoreResults(int current) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getResultSetHoldability() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (this.connector == null) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + return this.connection; } public boolean isClosed() throws SQLException { return isClosed; } - public void setPoolable(boolean poolable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public boolean isPoolable() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void closeOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isCloseOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java index deffd9aa2a..84d23ad45c 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java @@ -50,7 +50,7 @@ public class TSDBSubscribe { } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { return null; } else { - return new TSDBResultSet(this.connecter, resultSetPointer); + return new TSDBResultSet(null, this.connecter, resultSetPointer); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java new file mode 100644 index 0000000000..5b7539d434 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java @@ -0,0 +1,21 @@ +package com.taosdata.jdbc; + +import 
java.sql.SQLException; +import java.sql.Wrapper; + +public class WrapperImpl implements Wrapper { + + @Override + public T unwrap(Class iface) throws SQLException { + try { + return iface.cast(this); + } catch (ClassCastException cce) { + throw new SQLException("Unable to unwrap to " + iface.toString()); + } + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index 5260b780bd..4449847431 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -47,7 +47,7 @@ public class RestfulConnection implements Connection { if (isClosed()) throw new SQLException(CONNECTION_IS_CLOSED); //TODO: prepareStatement - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -55,7 +55,7 @@ public class RestfulConnection implements Connection { if (isClosed()) throw new SQLException(CONNECTION_IS_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -72,7 +72,7 @@ public class RestfulConnection implements Connection { if (isClosed()) throw new SQLException(CONNECTION_IS_CLOSED); if (!autoCommit) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -162,7 +162,7 @@ public class RestfulConnection implements Connection { case Connection.TRANSACTION_READ_COMMITTED: case Connection.TRANSACTION_REPEATABLE_READ: case Connection.TRANSACTION_SERIALIZABLE: - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); default: throw new SQLException(TSDBConstants.INVALID_VARIABLES); } @@ -197,10 +197,10 @@ public class RestfulConnection implements Connection { throw new SQLException(CONNECTION_IS_CLOSED); if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); return createStatement(); } @@ -221,7 +221,7 @@ public class RestfulConnection implements Connection { if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -252,7 +252,7 @@ public class RestfulConnection implements Connection { if (isClosed()) throw new SQLException(CONNECTION_IS_CLOSED); if (holdability != 
ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -269,7 +269,7 @@ public class RestfulConnection implements Connection { if (getAutoCommit()) throw new SQLException(TSDBConstants.INVALID_VARIABLES); //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -279,7 +279,7 @@ public class RestfulConnection implements Connection { if (getAutoCommit()) throw new SQLException(TSDBConstants.INVALID_VARIABLES); //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -289,68 +289,68 @@ public class RestfulConnection implements Connection { if (getAutoCommit()) throw new SQLException(TSDBConstants.INVALID_VARIABLES); //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { if (isClosed()) throw new SQLException(CONNECTION_IS_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); return createStatement(resultSetType, resultSetConcurrency); } @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); return prepareStatement(sql, resultSetType, resultSetConcurrency); } @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public PreparedStatement 
prepareStatement(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Clob createClob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Blob createBlob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public NClob createNClob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public SQLXML createSQLXML() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -399,12 +399,12 @@ public class RestfulConnection implements Connection { @Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -442,7 +442,7 @@ public class RestfulConnection implements Connection { public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { if (isClosed()) throw new SQLException(CONNECTION_IS_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index fca8847114..a8a92e4123 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -2,7 +2,7 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; -import com.taosdata.jdbc.AbstractTaosDriver; +import com.taosdata.jdbc.AbstractDriver; import com.taosdata.jdbc.TSDBConstants; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.rs.util.HttpClientPoolUtil; @@ -11,7 +11,7 @@ import java.sql.*; import java.util.Properties; import java.util.logging.Logger; -public class RestfulDriver extends AbstractTaosDriver { +public class RestfulDriver extends AbstractDriver { private static final String URL_PREFIX = "jdbc:TAOS-RS://"; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 38d3f2b6aa..ebeeded5b0 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -2,19 +2,16 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.AbstractResultSet; import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; import java.sql.*; import java.util.ArrayList; -import java.util.Calendar; import java.util.List; -import java.util.Map; -public class RestfulResultSet implements ResultSet { +public class RestfulResultSet extends AbstractResultSet implements ResultSet { private volatile boolean isClosed; private int pos = -1; @@ -144,7 +141,6 @@ public class RestfulResultSet implements ResultSet { return resultSet.get(pos).get(columnIndex).toString(); } - @Override public boolean getBoolean(int columnIndex) throws SQLException { if (isClosed()) @@ -155,14 +151,6 @@ public class RestfulResultSet implements ResultSet { return result == 0 ? false : true; } - @Override - public byte getByte(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public short getShort(int columnIndex) throws SQLException { if (isClosed()) @@ -217,40 +205,6 @@ public class RestfulResultSet implements ResultSet { return columnIndex - 1; } - /*******************************************************************************************************************/ - - @Override - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public byte[] getBytes(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { if (isClosed()) @@ -262,136 +216,7 @@ public class RestfulResultSet implements ResultSet { return Timestamp.valueOf(strDate); } - @Override - public InputStream getAsciiStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new 
SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public InputStream getBinaryStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - /*************************************************************************************************************/ - - @Override - public String getString(String columnLabel) throws SQLException { - return getString(findColumn(columnLabel)); - } - - @Override - public boolean getBoolean(String columnLabel) throws SQLException { - return getBoolean(findColumn(columnLabel)); - } - - @Override - public byte getByte(String columnLabel) throws SQLException { - return getByte(findColumn(columnLabel)); - } - - @Override - public short getShort(String columnLabel) throws SQLException { - return getShort(findColumn(columnLabel)); - } - - @Override - public int getInt(String columnLabel) throws SQLException { - return getInt(findColumn(columnLabel)); - } - - @Override - public long getLong(String columnLabel) throws SQLException { - return getLong(findColumn(columnLabel)); - } - - @Override - public float getFloat(String columnLabel) throws SQLException { - return getFloat(findColumn(columnLabel)); - } - - @Override - public double getDouble(String columnLabel) throws SQLException { - return getDouble(findColumn(columnLabel)); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return getBigDecimal(findColumn(columnLabel)); - } - - @Override - public byte[] getBytes(String columnLabel) throws SQLException { - return getBytes(findColumn(columnLabel)); - } - - @Override - public Date getDate(String columnLabel) throws SQLException { - return getDate(findColumn(columnLabel)); - } - - @Override - public Time getTime(String columnLabel) throws SQLException { - return getTime(findColumn(columnLabel)); - } - - @Override - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return getTimestamp(findColumn(columnLabel)); - } - - @Override - public InputStream getAsciiStream(String columnLabel) throws SQLException { - return getAsciiStream(findColumn(columnLabel)); - } - - @Override - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - return getUnicodeStream(findColumn(columnLabel)); - } - - @Override - public InputStream getBinaryStream(String columnLabel) throws SQLException { - return getBinaryStream(findColumn(columnLabel)); - } - - /*************************************************************************************************************/ - - @Override - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return null; - } - - @Override - public void clearWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return; - } - - @Override - public String getCursorName() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public ResultSetMetaData getMetaData() throws 
SQLException { if (isClosed()) @@ -400,14 +225,6 @@ public class RestfulResultSet implements ResultSet { return this.metaData; } - @Override - public Object getObject(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public Object getObject(String columnLabel) throws SQLException { return getObject(findColumn(columnLabel)); @@ -424,38 +241,6 @@ public class RestfulResultSet implements ResultSet { return columnIndex + 1; } - @Override - public Reader getCharacterStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getCharacterStream(String columnLabel) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public boolean isBeforeFirst() throws SQLException { if (isClosed()) @@ -475,7 +260,6 @@ public class RestfulResultSet implements ResultSet { public boolean isFirst() throws SQLException { if (isClosed()) throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return this.pos == 0; } @@ -509,7 +293,6 @@ public class RestfulResultSet implements ResultSet { this.pos = this.resultSet.size(); } } - } @Override @@ -554,7 +337,7 @@ public class RestfulResultSet implements ResultSet { @Override public boolean absolute(int row) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); // if (this.resultSet.size() == 0) // return false; @@ -586,310 +369,28 @@ public class RestfulResultSet implements ResultSet { // } // } - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean relative(int rows) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean previous() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); 
- throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override - public void setFetchDirection(int direction) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - if ((direction != ResultSet.FETCH_FORWARD) && (direction != ResultSet.FETCH_REVERSE) && (direction != ResultSet.FETCH_UNKNOWN)) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - - if (!(getType() == ResultSet.TYPE_FORWARD_ONLY && direction == ResultSet.FETCH_FORWARD)) - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getFetchDirection() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return ResultSet.FETCH_FORWARD; - } - - @Override - public void setFetchSize(int rows) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - if (rows < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getFetchSize() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return this.resultSet.size(); - } - - @Override - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - @Override - public int getConcurrency() throws SQLException { - return ResultSet.CONCUR_READ_ONLY; - } - - @Override - public boolean rowUpdated() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean rowInserted() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean rowDeleted() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNull(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNull(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void 
updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void insertRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void deleteRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void refreshRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void cancelRowUpdates() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void moveToInsertRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void moveToCurrentRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + public String getNString(int columnIndex) throws SQLException { + return 
getString(columnIndex); } @Override @@ -900,407 +401,11 @@ public class RestfulResultSet implements ResultSet { return this.statement; } - @Override - public Object getObject(int columnIndex, Map> map) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Ref getRef(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob getBlob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob getClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Array getArray(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /******************************************************************************************************************/ - @Override - public Object getObject(String columnLabel, Map> map) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Ref getRef(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob getBlob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob getClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Array getArray(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public URL getURL(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public URL getURL(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRef(int columnIndex, Ref x) 
throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateArray(String columnLabel, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public RowId getRowId(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getHoldability() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return ResultSet.HOLD_CURSORS_OVER_COMMIT; - } - @Override public boolean isClosed() throws SQLException { return isClosed; } - @Override - public void updateNString(int columnIndex, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override - public void updateNString(String columnLabel, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob getNClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob getNClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - 
public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String getNString(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String getNString(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getNCharacterStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getNCharacterStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, Reader reader) throws 
SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public T getObject(String columnLabel, Class type) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public T unwrap(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return iface.isInstance(this); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index 14ea318092..8a38690d60 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -2,6 +2,7 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.AbstractStatement; import com.taosdata.jdbc.TSDBConstants; import com.taosdata.jdbc.rs.util.HttpClientPoolUtil; import com.taosdata.jdbc.utils.SqlSyntaxValidator; @@ -12,7 +13,7 @@ import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; -public class RestfulStatement implements Statement { +public class RestfulStatement extends AbstractStatement { private boolean closed; private String database; @@ -20,7 +21,6 @@ public class RestfulStatement implements Statement { private volatile RestfulResultSet resultSet; private volatile int affectedRows; - private volatile boolean closeOnCompletion; public RestfulStatement(RestfulConnection conn, String database) { this.conn = conn; @@ -104,85 +104,6 @@ public class RestfulStatement implements Statement { } } - @Override - public int getMaxFieldSize() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return TSDBConstants.maxFieldSize; - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (max < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - // nothing to do - } - - @Override - public int getMaxRows() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return 0; - } - - @Override - public void setMaxRows(int max) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (max < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - // nothing to do - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - } - - @Override - public int getQueryTimeout() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return 0; - } - - @Override - public void setQueryTimeout(int seconds) throws 
SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (seconds < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - } - - @Override - public void cancel() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return null; - } - - @Override - public void clearWarnings() throws SQLException { - // nothing to do - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - } - - @Override - public void setCursorName(String name) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public boolean execute(String sql) throws SQLException { if (isClosed()) @@ -271,53 +192,6 @@ public class RestfulStatement implements Statement { return this.affectedRows; } - @Override - public boolean getMoreResults() throws SQLException { - return getMoreResults(CLOSE_CURRENT_RESULT); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - if (direction != ResultSet.FETCH_FORWARD && direction != ResultSet.FETCH_REVERSE && direction != ResultSet.FETCH_UNKNOWN) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - this.resultSet.setFetchDirection(direction); - } - - @Override - public int getFetchDirection() throws SQLException { - return this.resultSet.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (rows < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - } - - @Override - public int getFetchSize() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return 0; - } - - @Override - public int getResultSetConcurrency() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.resultSet.getConcurrency(); - } - - @Override - public int getResultSetType() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.resultSet.getType(); - } - @Override public void addBatch(String sql) throws SQLException { if (isClosed()) @@ -343,115 +217,10 @@ public class RestfulStatement implements Statement { return this.conn; } - @Override - public boolean getMoreResults(int current) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (resultSet == null) - return false; - -// switch (current) { -// case CLOSE_CURRENT_RESULT: -// resultSet.close(); -// break; -// case KEEP_CURRENT_RESULT: -// break; -// case CLOSE_ALL_RESULTS: -// resultSet.close(); -// break; -// default: -// throw new SQLException(TSDBConstants.INVALID_VARIABLES); -// } -// return next; - return false; - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getResultSetHoldability() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.resultSet.getHoldability(); - } - @Override public boolean isClosed() throws SQLException { return closed; } - @Override - public void setPoolable(boolean poolable) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - //nothing to do - } - @Override - public boolean isPoolable() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return false; - } - - @Override - public void closeOnCompletion() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - this.closeOnCompletion = true; - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.closeOnCompletion; - } - - @Override - public T unwrap(Class iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java deleted file mode 100644 index 800265868d..0000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNode.java +++ /dev/null @@ -1,272 +0,0 @@ -package com.taosdata.jdbc.utils; - -import java.io.BufferedReader; -import java.io.File; -import java.io.InputStreamReader; -import java.util.*; -import java.util.concurrent.TimeUnit; - -public class TDNode { - - private int index; - private int running; - private int deployed; - private boolean testCluster; - private String path; - private String cfgDir; - private String dataDir; - private String logDir; - private String cfgPath; - - public TDNode(int index) { - this.index = index; - running = 0; - deployed = 0; - testCluster = false; - } - - public void setPath(String path) { - this.path = path; - } - - public void setTestCluster(boolean testCluster) { - this.testCluster = testCluster; - } - - public void setRunning(int running) { - this.running = running; - } - - public void searchTaosd(File dir, ArrayList taosdPath) { - File[] fileList = dir.listFiles(); - - if(fileList == null || fileList.length == 0) { - return; - } - - for(File file : fileList) { - if(file.isFile()) { - if(file.getName().equals("taosd")) { - taosdPath.add(file.getAbsolutePath()); - } 
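The two REST classes above (RestfulResultSet and RestfulStatement) now extend the shared AbstractResultSet and AbstractStatement bases, so the long runs of per-method boilerplate removed in these hunks live in one place, and unsupported JDBC calls are reported through TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD) instead of string-keyed SQLFeatureNotSupportedException messages. The base classes themselves are not part of this diff, so the following is only a minimal sketch of the shape such a base could take: the class name AbstractResultSetSketch and the checkOpen() helper are illustrative, while TSDBError and TSDBErrorNumbers are the real classes imported in the hunks above.

import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;

import java.sql.ResultSet;
import java.sql.SQLException;

// Hypothetical sketch only; not the actual AbstractResultSet shipped in this patch.
public abstract class AbstractResultSetSketch implements ResultSet {

    // One shared closed-check instead of repeating it in every getter.
    protected void checkOpen() throws SQLException {
        if (isClosed())
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
    }

    // Default for the many JDBC methods the REST result set does not support;
    // subclasses such as RestfulResultSet override only what they actually implement.
    @Override
    public byte getByte(int columnIndex) throws SQLException {
        checkOpen();
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }

    // ...similar one-line defaults for getBytes, getDate, getTime, the updateXxx
    // family, and the other methods deleted from RestfulResultSet above.
}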
- } else { - searchTaosd(file, taosdPath); - } - } - } - - public void start() { - String selfPath = System.getProperty("user.dir"); - String binPath = ""; - String projDir = selfPath + "/../../../"; - - try { - ArrayList taosdPath = new ArrayList<>(); - - File dir = new File(projDir); - String realProjDir = dir.getCanonicalPath(); - dir = new File(realProjDir); - System.out.println("project Dir: " + projDir); - searchTaosd(dir, taosdPath); - - if(taosdPath.size() == 0) { - System.out.println("The project path doens't exist"); - return; - } else { - for(String p : taosdPath) { - if(!p.contains("packaging")) { - binPath = p; - } - } - } - } catch (Exception e) { - e.printStackTrace(); - } - - if(binPath.isEmpty()) { - System.out.println("taosd not found"); - return; - } else { - System.out.println("taosd found in " + binPath); - } - - if(this.deployed == 0) { - System.out.println("dnode" + index + "is not deployed"); - return; - } - - String cmd = "nohup " + binPath + " -c " + cfgDir + " > /dev/null 2>&1 & "; - System.out.println("start taosd cmd: " + cmd); - - try{ - Runtime.getRuntime().exec(cmd); - TimeUnit.SECONDS.sleep(5); - } catch (Exception e) { - e.printStackTrace(); - } - - this.running = 1; - } - - public Integer getTaosdPid() { - String cmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"; - String[] cmds = {"sh", "-c", cmd}; - try { - Process process = Runtime.getRuntime().exec(cmds); - BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); - String line = null; - Integer res = null; - while((line = reader.readLine()) != null) { - if(!line.isEmpty()) { - res = Integer.valueOf(line); - break; - } - } - - return res; - } catch (Exception e) { - e.printStackTrace(); - } - return null; - } - - public void stop() { - - if (this.running != 0) { - Integer pid = null; - while((pid = getTaosdPid()) != null) { - - String killCmd = "kill -term " + pid; - String[] killCmds = {"sh", "-c", killCmd}; - try { - Runtime.getRuntime().exec(killCmds).waitFor(); - - TimeUnit.SECONDS.sleep(2); - } catch (Exception e) { - e.printStackTrace(); - } - } - - try { - for(int port = 6030; port < 6041; port ++) { - String fuserCmd = "fuser -k -n tcp " + port; - Runtime.getRuntime().exec(fuserCmd).waitFor(); - } - } catch (Exception e) { - e.printStackTrace(); - } - - this.running = 0; - System.out.println("dnode:" + this.index + " is stopped by kill -term"); - } - } - - public void startIP() { - try{ - String cmd = "sudo ifconfig lo:" + index + "192.168.0." + index + " up"; - Runtime.getRuntime().exec(cmd).waitFor(); - } catch (Exception e) { - e.printStackTrace(); - } - } - - public void stopIP() { - try{ - String cmd = "sudo ifconfig lo:" + index + "192.168.0." 
+ index + " down"; - Runtime.getRuntime().exec(cmd).waitFor(); - } catch (Exception e) { - e.printStackTrace(); - } - } - - public void setCfgConfig(String option, String value) { - try{ - String cmd = "echo " + option + " " + value + " >> " + this.cfgPath; - String[] cmdLine = {"sh", "-c", cmd}; - Process ps = Runtime.getRuntime().exec(cmdLine); - ps.waitFor(); - } catch (Exception e) { - e.printStackTrace(); - } - } - - public String getDnodeRootDir() { - String dnodeRootDir = this.path + "/sim/psim/dnode" + this.index; - return dnodeRootDir; - } - - public String getDnodesRootDir() { - String dnodesRootDir = this.path + "/sim/psim" + this.index; - return dnodesRootDir; - } - - public void deploy() { - this.logDir = this.path + "/sim/dnode" + this.index + "/log"; - this.dataDir = this.path + "/sim/dnode" + this.index + "/data"; - this.cfgDir = this.path + "/sim/dnode" + this.index + "/cfg"; - this.cfgPath = this.path + "/sim/dnode" + this.index + "/cfg/taos.cfg"; - - try { - String cmd = "rm -rf " + this.logDir; - Runtime.getRuntime().exec(cmd).waitFor(); - - cmd = "rm -rf " + this.cfgDir; - Runtime.getRuntime().exec(cmd).waitFor(); - - cmd = "rm -rf " + this.dataDir; - Runtime.getRuntime().exec(cmd).waitFor(); - - cmd = "mkdir -p " + this.logDir; - Runtime.getRuntime().exec(cmd).waitFor(); - - cmd = "mkdir -p " + this.cfgDir; - Runtime.getRuntime().exec(cmd).waitFor(); - - cmd = "mkdir -p " + this.dataDir; - Runtime.getRuntime().exec(cmd).waitFor(); - - cmd = "touch " + this.cfgPath; - Runtime.getRuntime().exec(cmd).waitFor(); - } catch (Exception e) { - e.printStackTrace(); - } - - if(this.testCluster) { - startIP(); - setCfgConfig("masterIp", "192.168.0.1"); - setCfgConfig("secondIp", "192.168.0.2"); - setCfgConfig("publicIp", "192.168.0." + this.index); - setCfgConfig("internalIp", "192.168.0." + this.index); - setCfgConfig("privateIp", "192.168.0." 
+ this.index); - } - setCfgConfig("dataDir", this.dataDir); - setCfgConfig("logDir", this.logDir); - setCfgConfig("numOfLogLines", "1000000/00"); - setCfgConfig("mnodeEqualVnodeNum", "0"); - setCfgConfig("walLevel", "1"); - setCfgConfig("statusInterval", "1"); - setCfgConfig("numOfMnodes", "3"); - setCfgConfig("numOfThreadsPerCore", "2.0"); - setCfgConfig("monitor", "0"); - setCfgConfig("maxVnodeConnections", "30000"); - setCfgConfig("maxMgmtConnections", "30000"); - setCfgConfig("maxMeterConnections", "30000"); - setCfgConfig("maxShellConns", "30000"); - setCfgConfig("locale", "en_US.UTF-8"); - setCfgConfig("charset", "UTF-8"); - setCfgConfig("asyncLog", "0"); - setCfgConfig("anyIp", "0"); - setCfgConfig("dDebugFlag", "135"); - setCfgConfig("mDebugFlag", "135"); - setCfgConfig("sdbDebugFlag", "135"); - setCfgConfig("rpcDebugFlag", "135"); - setCfgConfig("tmrDebugFlag", "131"); - setCfgConfig("cDebugFlag", "135"); - setCfgConfig("httpDebugFlag", "135"); - setCfgConfig("monitorDebugFlag", "135"); - setCfgConfig("udebugFlag", "135"); - setCfgConfig("jnidebugFlag", "135"); - setCfgConfig("qdebugFlag", "135"); - this.deployed = 1; - } -} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java deleted file mode 100644 index efc4c53e28..0000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.taosdata.jdbc.utils; - -import java.io.File; -import java.util.*; - -public class TDNodes { - private ArrayList tdNodes; - private boolean testCluster; - - public TDNodes () { - tdNodes = new ArrayList<>(); - for(int i = 1; i < 11; i ++) { - tdNodes.add(new TDNode(i)); - } - } - - public void setTestCluster(boolean testCluster) { - this.testCluster = testCluster; - } - - public void check(int index) { - if(index < 1 || index > 10) { - System.out.println("index: " + index + " should on a scale of [1, 10]"); - return; - } - } - - public void deploy(int index) { - try { - File file = new File(System.getProperty("user.dir") + "/../../../"); - String projectRealPath = file.getCanonicalPath(); - check(index); - tdNodes.get(index - 1).setTestCluster(this.testCluster); - tdNodes.get(index - 1).setPath(projectRealPath); - tdNodes.get(index - 1).deploy(); - } catch (Exception e) { - e.printStackTrace(); - System.out.println("deploy Test Exception"); - } - } - - public void cfg(int index, String option, String value) { - check(index); - tdNodes.get(index - 1).setCfgConfig(option, value); - } - - public TDNode getTDNode(int index) { - check(index); - return tdNodes.get(index - 1); - } - - public void start(int index) { - check(index); - tdNodes.get(index - 1).start(); - } - - public void stop(int index) { - check(index); - tdNodes.get(index - 1).stop(); - } - - public void startIP(int index) { - check(index); - tdNodes.get(index - 1).startIP(); - } - - public void stopIP(int index) { - check(index); - tdNodes.get(index - 1).stopIP(); - } - -} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java new file mode 100644 index 0000000000..ee1364ce21 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java @@ -0,0 +1,74 @@ +package com.taosdata.jdbc.utils; + +import javax.management.*; +import java.lang.management.ManagementFactory; +import 
java.util.concurrent.atomic.AtomicLong; + +public class TaosInfo implements TaosInfoMBean { + + private static volatile TaosInfo instance; + private AtomicLong connect_open = new AtomicLong(); + private AtomicLong connect_close = new AtomicLong(); + private AtomicLong statement_count = new AtomicLong(); + + static { + try { + MBeanServer server = ManagementFactory.getPlatformMBeanServer(); + ObjectName name = new ObjectName("TaosInfoMBean:name=TaosInfo"); + server.registerMBean(TaosInfo.getInstance(), name); + + } catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) { + e.printStackTrace(); + } + } + + @Override + public long getConnect_open() { + return connect_open.get(); + } + + @Override + public long getConnect_close() { + return connect_close.get(); + } + + @Override + public long getConnect_active() { + return connect_open.get() - connect_close.get(); + } + + @Override + public long getStatement_count() { + return statement_count.get(); + } + + /*******************************************************/ + + public void conn_open_increment() { + connect_open.incrementAndGet(); + } + + public void connect_close_increment() { + connect_close.incrementAndGet(); + } + + public void stmt_count_increment() { + statement_count.incrementAndGet(); + } + + /********************************************************************************/ + private TaosInfo() { + } + + public static TaosInfo getInstance() { + if (instance == null) { + synchronized (TaosInfo.class) { + if (instance == null) { + instance = new TaosInfo(); + } + } + } + return instance; + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfoMBean.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfoMBean.java new file mode 100644 index 0000000000..e16f41b2f5 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfoMBean.java @@ -0,0 +1,13 @@ +package com.taosdata.jdbc.utils; + +public interface TaosInfoMBean { + + long getConnect_open(); + + long getConnect_close(); + + long getConnect_active(); + + long getStatement_count(); + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java index 3d80ff066c..fb0053cb4b 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java @@ -7,6 +7,7 @@ import org.junit.Test; import javax.sql.rowset.serial.SerialBlob; import javax.sql.rowset.serial.SerialClob; +import java.io.UnsupportedEncodingException; import java.sql.*; import java.util.HashMap; import java.util.Properties; @@ -39,7 +40,6 @@ public class ResultSetTest { } catch (ClassNotFoundException | SQLException e) { return; } - } @Test @@ -54,51 +54,38 @@ public class ResultSetTest { short v6 = 12; boolean v7 = false; String v8 = "TDengine is powerful"; - sql = "insert into " + dbName + "." + tName + " values (" + ts + "," + v1 + "," + v2 + "," + v3 + "," + v4 + ",\"" + v5 + "\"," + v6 + "," + v7 + ",\"" + v8 + "\")"; - try { statement.executeUpdate(sql); assertEquals(1, statement.getUpdateCount()); } catch (SQLException e) { assert false : "insert error " + e.getMessage(); } - try { - statement.executeQuery("select * from " + dbName + "." + tName + " where ts = " + ts); + statement.execute("select * from " + dbName + "." 
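TaosInfo, added above, registers itself on the platform MBeanServer under the object name "TaosInfoMBean:name=TaosInfo" and exposes the connection and statement counters declared in TaosInfoMBean. Once the driver has loaded the class (registration happens in its static initializer, for example after the first TaosInfo.getInstance() call), the counters can be inspected with JConsole or read programmatically through the standard JMX API. The snippet below is an illustration only, using plain java.lang.management and javax.management calls; the attribute names follow the usual MBean convention of dropping the get prefix from the interface getters.

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public class TaosInfoReader {
    public static void main(String[] args) throws Exception {
        // Assumes TaosInfo has already been loaded in this JVM, e.g. by the driver.
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("TaosInfoMBean:name=TaosInfo");

        long open = (Long) server.getAttribute(name, "Connect_open");
        long close = (Long) server.getAttribute(name, "Connect_close");
        long active = (Long) server.getAttribute(name, "Connect_active");
        long statements = (Long) server.getAttribute(name, "Statement_count");

        System.out.println("connections open=" + open + " closed=" + close
                + " active=" + active + " statements=" + statements);
    }
}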
+ tName + " where ts = " + ts); resSet = statement.getResultSet(); System.out.println(((TSDBResultSet) resSet).getRowData()); while (resSet.next()) { assertEquals(ts, resSet.getLong(1)); assertEquals(ts, resSet.getLong("ts")); - System.out.println(resSet.getTimestamp(1)); - assertEquals(v1, resSet.getInt(2)); assertEquals(v1, resSet.getInt("k1")); - assertEquals(v2, resSet.getLong(3)); assertEquals(v2, resSet.getLong("k2")); - assertEquals(v3, resSet.getFloat(4), 7); assertEquals(v3, resSet.getFloat("k3"), 7); - assertEquals(v4, resSet.getDouble(5), 13); assertEquals(v4, resSet.getDouble("k4"), 13); - assertEquals(v5, resSet.getString(6)); assertEquals(v5, resSet.getString("k5")); - assertEquals(v6, resSet.getShort(7)); assertEquals(v6, resSet.getShort("k6")); - assertEquals(v7, resSet.getBoolean(8)); assertEquals(v7, resSet.getBoolean("k7")); - assertEquals(v8, resSet.getString(9)); assertEquals(v8, resSet.getString("k8")); - resSet.getBytes(9); resSet.getObject(6); resSet.getObject("k8"); @@ -111,684 +98,145 @@ public class ResultSetTest { } } - @Test - public void testUnsupport() throws SQLException { - statement.executeQuery("show databases"); + @Test(expected = SQLException.class) + public void testUnsupport() throws SQLException, UnsupportedEncodingException { + statement.execute("show databases"); resSet = statement.getResultSet(); Assert.assertNotNull(resSet.unwrap(TSDBResultSet.class)); Assert.assertTrue(resSet.isWrapperFor(TSDBResultSet.class)); - try { - resSet.getAsciiStream(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getUnicodeStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBinaryStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getAsciiStream(""); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getUnicodeStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBinaryStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getWarnings(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.clearWarnings(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCursorName(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isBeforeFirst(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isAfterLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isFirst(); - } catch (SQLException 
e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.beforeFirst(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.afterLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.first(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.last(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.absolute(1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.relative(1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.previous(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.setFetchDirection(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getFetchDirection(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.setFetchSize(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getFetchSize(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getConcurrency(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowUpdated(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowInserted(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowDeleted(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNull(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBoolean(0, true); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateByte(0, (byte) 2); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateShort(0, (short) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateInt(0, 0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateLong(0, 0l); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported 
currently!")); - } - try { - resSet.updateFloat(0, 3.14f); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDouble(0, 3.1415); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBigDecimal(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBytes(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNull(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBoolean("", false); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateByte("", (byte) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateShort("", (short) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateInt("", 0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateLong("", 0l); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateFloat("", 3.14f); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDouble("", 3.1415); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBigDecimal(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported 
currently!")); - } - try { - resSet.updateString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBytes(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.insertRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.deleteRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.refreshRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.cancelRowUpdates(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.moveToInsertRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.moveToCurrentRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getStatement(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getObject(0, new HashMap<>()); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRef(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBlob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getArray(null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getObject("", new HashMap<>()); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRef(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBlob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getArray(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getURL(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getURL(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRef(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRef(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBlob(0, new SerialBlob("".getBytes("UTF8"))); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBlob("", new SerialBlob("".getBytes("UTF8"))); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateClob("", new SerialClob("".toCharArray())); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateClob(0, new SerialClob("".toCharArray())); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateArray(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateArray(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRowId(null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRowId(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRowId(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRowId(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getHoldability(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - - try { - resSet.getNClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getSQLXML(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getSQLXML(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateSQLXML(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateSQLXML(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } + resSet.getUnicodeStream(null); + resSet.getBinaryStream(null); + resSet.getAsciiStream(""); + resSet.getUnicodeStream(null); + resSet.getBinaryStream(null); + resSet.getWarnings(); + resSet.clearWarnings(); + resSet.getCursorName(); + resSet.getCharacterStream(null); + resSet.getCharacterStream(null); + resSet.isBeforeFirst(); + resSet.isAfterLast(); + resSet.isFirst(); + resSet.isLast(); + resSet.beforeFirst(); + resSet.afterLast(); + resSet.first(); + resSet.last(); + resSet.getRow(); + resSet.absolute(1); + resSet.relative(1); + resSet.previous(); + resSet.setFetchDirection(0); + resSet.getFetchDirection(); + resSet.setFetchSize(0); + resSet.getFetchSize(); + resSet.getConcurrency(); + resSet.rowUpdated(); + resSet.rowInserted(); + resSet.rowDeleted(); + resSet.updateNull(null); + resSet.updateBoolean(0, true); + resSet.updateByte(0, (byte) 2); + resSet.updateShort(0, (short) 1); + resSet.updateInt(0, 0); + resSet.updateLong(0, 0l); + resSet.updateFloat(0, 3.14f); + resSet.updateDouble(0, 3.1415); + resSet.updateBigDecimal(null, null); + resSet.updateString(null, null); + resSet.updateBytes(null, null); + resSet.updateDate(null, null); + resSet.updateTime(null, null); + resSet.updateTimestamp(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateObject(null, null); + resSet.updateObject(null, null); + resSet.updateNull(null); + resSet.updateBoolean("", false); + resSet.updateByte("", (byte) 1); + resSet.updateShort("", (short) 1); + resSet.updateInt("", 0); + resSet.updateLong("", 0l); + resSet.updateFloat("", 3.14f); + resSet.updateDouble("", 3.1415); + resSet.updateBigDecimal(null, null); + resSet.updateString(null, null); + resSet.updateBytes(null, null); + resSet.updateDate(null, null); + resSet.updateTime(null, null); + resSet.updateTimestamp(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateObject(null, null); + resSet.updateObject(null, null); + resSet.insertRow(); + 
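
With @Test(expected = SQLException.class), the rewritten testUnsupport passes as soon as the first call in this list throws, so the remaining calls are never actually exercised. If per-call coverage is wanted, a small helper keeps the test compact; this is only a sketch and assumes JUnit 4.13 or later (for Assert.assertThrows and org.junit.function.ThrowingRunnable) is on the test classpath:

    private void assertUnsupported(ThrowingRunnable call) {
        SQLException e = Assert.assertThrows(SQLException.class, call);
        Assert.assertTrue(e.getMessage().contains("this operation is NOT supported currently!"));
    }

    // usage inside the test body:
    assertUnsupported(() -> resSet.getWarnings());
    assertUnsupported(() -> resSet.beforeFirst());
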
resSet.updateRow(); + resSet.deleteRow(); + resSet.refreshRow(); + resSet.cancelRowUpdates(); + resSet.moveToInsertRow(); + resSet.moveToCurrentRow(); + resSet.getStatement(); + resSet.getObject(0, new HashMap<>()); + resSet.getRef(null); + resSet.getBlob(null); + resSet.getClob(null); + resSet.getArray(null); + resSet.getObject("", new HashMap<>()); + resSet.getRef(null); + resSet.getBlob(null); + resSet.getClob(null); + resSet.getArray(null); + resSet.getDate(null, null); + resSet.getDate(null, null); + resSet.getTime(null, null); + resSet.getTime(null, null); + resSet.getTimestamp(null, null); + resSet.getTimestamp(null, null); + resSet.getURL(null); + resSet.getURL(null); + resSet.updateRef(null, null); + resSet.updateRef(null, null); + resSet.updateBlob(0, new SerialBlob("".getBytes("UTF8"))); + resSet.updateBlob("", new SerialBlob("".getBytes("UTF8"))); + resSet.updateClob("", new SerialClob("".toCharArray())); + resSet.updateClob(0, new SerialClob("".toCharArray())); + resSet.updateArray(null, null); + resSet.updateArray(null, null); + resSet.getRowId(null); + resSet.getRowId(null); + resSet.updateRowId(null, null); + resSet.updateRowId(null, null); + resSet.getHoldability(); + resSet.updateNString(null, null); + resSet.updateNString(null, null); + resSet.getNClob(null); + resSet.getNClob(null); + resSet.getSQLXML(null); + resSet.getSQLXML(null); + resSet.updateSQLXML(null, null); + resSet.updateSQLXML(null, null); + resSet.getNCharacterStream(null); + resSet.getNCharacterStream(null); + resSet.updateNCharacterStream(null, null); + resSet.updateNCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateNCharacterStream(null, null); + resSet.updateNCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); } @Test @@ -816,5 +264,4 @@ public class ResultSetTest { e.printStackTrace(); } } - } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java index b09482fb03..73ceafa729 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java @@ -8,9 +8,6 @@ import org.junit.Test; import java.sql.*; import java.util.Properties; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - public class StatementTest { static Connection connection = null; static Statement statement = null; @@ -58,12 +55,12 @@ public class StatementTest { statement.executeUpdate("create database if not exists " + dbName); statement.executeUpdate("create table if not exists " + dbName + "." + tName + "(ts timestamp, k1 int)"); statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 1)"); - statement.executeQuery("select * from " + dbName + "." + tName); + statement.execute("select * from " + dbName + "." 
+ tName); ResultSet resultSet = statement.getResultSet(); - assertTrue(null != resultSet); + Assert.assertNotNull(resultSet); boolean isClosed = statement.isClosed(); - assertEquals(false, isClosed); + Assert.assertEquals(false, isClosed); } @Test(expected = SQLException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index 1d8ff08db6..685957d60a 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -9,13 +9,14 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; +import java.util.concurrent.TimeUnit; public class SubscribeTest { Connection connection; Statement statement; String dbName = "test"; String tName = "t0"; - String host = "localhost"; + String host = "127.0.0.1"; String topic = "test"; @Before @@ -23,15 +24,15 @@ public class SubscribeTest { try { Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); statement = connection.createStatement(); - statement.executeUpdate("create database if not exists " + dbName); - statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); + statement.execute("drop database if exists " + dbName); + statement.execute("create database if not exists " + dbName); + statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); long ts = System.currentTimeMillis(); for (int i = 0; i < 2; i++) { ts += i; @@ -45,44 +46,40 @@ public class SubscribeTest { } @Test - public void subscribe() throws Exception { - TSDBSubscribe subscribe = null; + public void subscribe() { try { String rawSql = "select * from " + dbName + "." 
+ tName + ";"; System.out.println(rawSql); - subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false); + TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false); int a = 0; while (true) { - Thread.sleep(900); + TimeUnit.MILLISECONDS.sleep(1000); TSDBResultSet resSet = subscribe.consume(); - while (resSet.next()) { for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { System.out.printf(i + ": " + resSet.getString(i) + "\t"); } System.out.println("\n======" + a + "=========="); } - resSet.close(); a++; if (a >= 2) { break; } +// resSet.close(); } + + subscribe.close(true); } catch (Exception e) { e.printStackTrace(); - } finally { - if (null != subscribe) { - subscribe.close(true); - } } } @After public void close() { try { - statement.executeQuery("drop database " + dbName); + statement.execute("drop database " + dbName); if (statement != null) statement.close(); if (connection != null) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java new file mode 100644 index 0000000000..4794fc61f1 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java @@ -0,0 +1,234 @@ +package com.taosdata.jdbc; + +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.util.UUID; + +public class TSDBStatementTest { + private static final String host = "127.0.0.1"; + private static Connection conn; + private static Statement stmt; + + @Test + public void executeQuery() { + try { + ResultSet rs = stmt.executeQuery("show databases"); + Assert.assertNotNull(rs); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void executeUpdate() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + int affectRows = stmt.executeUpdate("create database " + dbName); + Assert.assertEquals(0, affectRows); + affectRows = stmt.executeUpdate("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(0, affectRows); + affectRows = stmt.executeUpdate("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + Assert.assertEquals(1, affectRows); + affectRows = stmt.executeUpdate("drop database " + dbName); + Assert.assertEquals(0, affectRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void close() { + } + + @Test + public void execute() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + boolean isSelect = stmt.execute("create database " + dbName); + Assert.assertEquals(false, isSelect); + int affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + 
Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(1, affectedRows); + + isSelect = stmt.execute("select * from " + dbName + ".weather"); + Assert.assertEquals(true, isSelect); + + isSelect = stmt.execute("drop database " + dbName); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getResultSet() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + boolean isSelect = stmt.execute("create database " + dbName); + Assert.assertEquals(false, isSelect); + int affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(1, affectedRows); + + isSelect = stmt.execute("select * from " + dbName + ".weather"); + Assert.assertEquals(true, isSelect); + ResultSet rs = stmt.getResultSet(); + Assert.assertNotNull(rs); + ResultSetMetaData meta = rs.getMetaData(); + Assert.assertEquals(3, meta.getColumnCount()); + int count = 0; + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + count++; + } + Assert.assertEquals(1, count); + + isSelect = stmt.execute("drop database " + dbName); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getUpdateCount() { + execute(); + } + + @Test + public void addBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void clearBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.clearBatch(); + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + stmt.clearBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void executeBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + 
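
The batch assembled in executeBatch() mixes DDL, an insert, and a query; the assertions that follow expect 0 for the DDL statements, 1 for the insert, and Statement.SUCCESS_NO_INFO for the SELECT. SUCCESS_NO_INFO (-2) and EXECUTE_FAILED (-3) are the JDBC-defined sentinel values, so a generic way to read the returned array looks like this sketch (note that many drivers report failures via BatchUpdateException rather than EXECUTE_FAILED):

    int[] results = stmt.executeBatch();
    for (int i = 0; i < results.length; i++) {
        if (results[i] == Statement.SUCCESS_NO_INFO) {
            System.out.println("statement " + i + ": succeeded, row count unknown");
        } else if (results[i] == Statement.EXECUTE_FAILED) {
            System.out.println("statement " + i + ": failed");
        } else {
            System.out.println("statement " + i + ": " + results[i] + " row(s) affected");
        }
    }
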
stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + int[] results = stmt.executeBatch(); + Assert.assertEquals(0, results[0]); + Assert.assertEquals(0, results[1]); + Assert.assertEquals(1, results[2]); + Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[3]); + Assert.assertEquals(0, results[4]); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getConnection() { + try { + Connection connection = stmt.getConnection(); + Assert.assertNotNull(connection); + Assert.assertTrue(this.conn == connection); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void isClosed() { + try { + Assert.assertEquals(false, stmt.isClosed()); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties); + stmt = conn.createStatement(); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (stmt != null) + stmt.close(); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java new file mode 100644 index 0000000000..f3d79b1df1 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java @@ -0,0 +1,187 @@ +package com.taosdata.jdbc.cases; + +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class InvalidResultSetPointerTest { + + private static String host = "127.0.0.1"; + private static final String dbName = "test"; + private static final String stbName = "stb"; + private static final String tbName = "tb"; + private static Connection connection; + private static int numOfSTb = 30000; + private static int numOfTb = 3; + private static int numOfThreads = 100; + + @Test + public void test() throws SQLException { + execute("drop database if exists " + dbName); + execute("create database if not exists " + dbName); + execute("use " + dbName); + createSTable(); + createTable(); + insert(); + selectMultiThreading(); + close(); + } + + private void insert() { + for (int i = 0; i < numOfSTb; i++) { + for (int j = 0; j < numOfTb; j++) { + final String sql = "INSERT INTO " + dbName + "." + tbName + i + "_" + j + " (ts, temperature, humidity, name) values(now, 20.5, 34, \"" + i + "\")"; + System.out.println(sql); + execute(sql); + } + } + } + + private void createSTable() { + for (int i = 0; i < numOfSTb; i++) { + final String sql = "create table if not exists " + dbName + "." 
+ stbName + i + " (ts timestamp, temperature float, humidity int, name BINARY(" + (i % 73 + 10) + ")) TAGS (tag1 INT)"; + execute(sql); + } + } + + private void createTable() { + for (int i = 0; i < numOfSTb; i++) { + for (int j = 0; j < numOfTb; j++) { + final String sql = "create table if not exists " + dbName + "." + tbName + i + "_" + j + " USING " + stbName + i + " TAGS(" + j + ")"; + execute(sql); + } + } + } + + private void close() throws SQLException { + if (connection != null) { + this.connection.close(); + System.out.println("connection closed."); + } + } + + private void selectMultiThreading() { + int a = numOfSTb / numOfThreads; + if (a < 1) { + numOfThreads = numOfSTb; + a = 1; + } + + int b = 0; + if (numOfThreads != 0) { + b = numOfSTb % numOfThreads; + } + + multiThreadingClass instance[] = new multiThreadingClass[numOfThreads]; + + int last = 0; + for (int i = 0; i < numOfThreads; i++) { + instance[i] = new multiThreadingClass(); + instance[i].id = i; + instance[i].from = last; + if (i < b) { + instance[i].to = last + a; + } else { + instance[i].to = last + a - 1; + } + + last = instance[i].to + 1; + instance[i].numOfTb = numOfTb; + instance[i].connection = connection; + instance[i].dbName = dbName; + instance[i].tbName = tbName; + instance[i].start(); + } + + for (int i = 0; i < numOfThreads; i++) { + try { + instance[i].join(); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + } + } + + @BeforeClass + public static void beforeClass() { + try { + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + connection = DriverManager.getConnection(url, properties); + if (connection != null) + System.out.println("[ OK ] Connection established."); + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + private void execute(String sql) { + try (Statement statement = connection.createStatement()) { + long start = System.currentTimeMillis(); + statement.execute(sql); + long end = System.currentTimeMillis(); + printSql(sql, (end - start)); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void printSql(String sql, long cost) { + System.out.println("time cost: " + cost + " ms, execute statement ====> " + sql); + } + + private void executeQuery(String sql) { + try (Statement statement = connection.createStatement()) { + long start = System.currentTimeMillis(); + ResultSet resultSet = statement.executeQuery(sql); + long end = System.currentTimeMillis(); + printSql(sql, (end - start)); +// printResult(resultSet); + resultSet.close(); + statement.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + class multiThreadingClass extends Thread { + public int id; + public int from, to; + public int numOfTb; + public Connection connection; + public String dbName; + public String tbName; + + public void run() { + System.out.println("ID: " + id + " from: " + from + " to: " + to); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + System.out.println("Thread " + id + " interrupted."); + } + + for (int i = from; i < to; i++) { + for (int j = 0; j < numOfTb; j++) { + if (j % 1000 == 0) { + try { + System.out.print(id + "s."); + Thread.sleep(1); + } catch (InterruptedException e) { + 
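
selectMultiThreading above divides numOfSTb super tables across numOfThreads workers: a = numOfSTb / numOfThreads tables per thread, with the first b = numOfSTb % numOfThreads threads taking one extra. A worked example with the defaults declared in this test (the 30050 figure is purely illustrative):

    int numOfSTb = 30000, numOfThreads = 100;
    int a = numOfSTb / numOfThreads; // 300 super tables per thread
    int b = numOfSTb % numOfThreads; // 0, so no thread takes an extra table
    // thread i therefore scans super tables [i * 300, i * 300 + 299];
    // with 30050 super tables, a stays 300 and b becomes 50:
    // threads 0..49 cover 301 tables each, threads 50..99 cover 300 each.
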
System.out.println("Thread " + id + " interrupted."); + } + } + final String sql = "select last_row(humidity) from " + dbName + "." + tbName + i + "_" + j; + executeQuery(sql); + } + } + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TaosInfoMonitorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TaosInfoMonitorTest.java new file mode 100644 index 0000000000..e9e36e20c4 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TaosInfoMonitorTest.java @@ -0,0 +1,49 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Test; + +import java.sql.*; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class TaosInfoMonitorTest { + + @Test + public void testCreateTooManyConnection() throws ClassNotFoundException { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + final String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata"; + + List connectionList = IntStream.range(0, 100).mapToObj(i -> { + try { + TimeUnit.MILLISECONDS.sleep(100); + return DriverManager.getConnection(url); + } catch (SQLException | InterruptedException e) { + e.printStackTrace(); + } + return null; + }).collect(Collectors.toList()); + + connectionList.stream().forEach(conn -> { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("show databases"); + while (rs.next()) { + + } + TimeUnit.MILLISECONDS.sleep(100); + } catch (SQLException | InterruptedException e) { + e.printStackTrace(); + } + }); + + connectionList.stream().forEach(conn -> { + try { + conn.close(); + TimeUnit.MILLISECONDS.sleep(100); + } catch (SQLException | InterruptedException e) { + e.printStackTrace(); + } + }); + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index a14d09588d..b199eff1ba 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -259,10 +259,12 @@ public class RestfulResultSetTest { rs.previous(); } - @Test(expected = SQLException.class) + @Test public void setFetchDirection() throws SQLException { rs.setFetchDirection(ResultSet.FETCH_FORWARD); + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); rs.setFetchDirection(ResultSet.FETCH_UNKNOWN); + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); } @Test @@ -270,14 +272,15 @@ public class RestfulResultSetTest { Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); } - @Test(expected = SQLException.class) + @Test public void setFetchSize() throws SQLException { rs.setFetchSize(0); + Assert.assertEquals(0, rs.getFetchSize()); } @Test public void getFetchSize() throws SQLException { - Assert.assertEquals(1, rs.getFetchSize()); + Assert.assertEquals(0, rs.getFetchSize()); } @Test @@ -526,9 +529,12 @@ public class RestfulResultSetTest { rs.updateSQLXML(1, null); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getNString() throws SQLException { - rs.getNString("f1"); + String f10 = rs.getNString("f10"); + Assert.assertEquals("涛思数据", f10); + f10 = rs.getNString(10); + Assert.assertEquals("涛思数据", f10); } @Test(expected = SQLFeatureNotSupportedException.class) diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt index 
2e1e39ef12..0d8c07041a 100644 --- a/src/connector/odbc/CMakeLists.txt +++ b/src/connector/odbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX_64) diff --git a/src/connector/odbc/src/CMakeLists.txt b/src/connector/odbc/src/CMakeLists.txt index 67357cb469..2699e1bc90 100644 --- a/src/connector/odbc/src/CMakeLists.txt +++ b/src/connector/odbc/src/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX_64) diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py index 2cad664307..dba234d7a4 100644 --- a/src/connector/python/linux/python2/setup.py +++ b/src/connector/python/linux/python2/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.4", + version="2.0.5", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py index ada83d72c5..bc3b6c65d8 100644 --- a/src/connector/python/linux/python2/taos/cursor.py +++ b/src/connector/python/linux/python2/taos/cursor.py @@ -1,7 +1,6 @@ from .cinterface import CTaosInterface from .error import * from .constants import FieldType -import threading class TDengineCursor(object): @@ -36,7 +35,6 @@ class TDengineCursor(object): self._block_iter = 0 self._affected_rows = 0 self._logfile = "" - self._threadId = threading.get_ident() if connection is not None: self._connection = connection diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE new file mode 100644 index 0000000000..79a9d73086 --- /dev/null +++ b/src/connector/python/osx/python3/LICENSE @@ -0,0 +1,12 @@ + Copyright (c) 2019 TAOS Data, Inc. + +This program is free software: you can use, redistribute, and/or modify +it under the terms of the GNU Affero General Public License, version 3 +or later ("AGPL"), as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md new file mode 100644 index 0000000000..70db6bba13 --- /dev/null +++ b/src/connector/python/osx/python3/README.md @@ -0,0 +1 @@ +# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py new file mode 100644 index 0000000000..098f786d62 --- /dev/null +++ b/src/connector/python/osx/python3/setup.py @@ -0,0 +1,20 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="taos", + version="2.0.4", + author="Taosdata Inc.", + author_email="support@taosdata.com", + description="TDengine python client package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/pypa/sampleproject", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "Operating System :: MacOS X", + ], +) diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py new file mode 100644 index 0000000000..216214e097 --- /dev/null +++ b/src/connector/python/osx/python3/taos/__init__.py @@ -0,0 +1,24 @@ + +from .connection import TDengineConnection +from .cursor import TDengineCursor + +# Globals +apilevel = '2.0.4' +threadsafety = 0 +paramstyle = 'pyformat' + +__all__ = ['connection', 'cursor'] + +def connect(*args, **kwargs): + """ Function to return a TDengine connector object + + Current supporting keyword parameters: + @dsn: Data source name as string + @user: Username as string(optional) + @password: Password as string(optional) + @host: Hostname(optional) + @database: Database name(optional) + + @rtype: TDengineConnector + """ + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py new file mode 100644 index 0000000000..2cd54d536b --- /dev/null +++ b/src/connector/python/osx/python3/taos/cinterface.py @@ -0,0 +1,477 @@ +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli/1000.0) + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro/1000000.0) + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timestamp_converter = _convert_millisecond_to_datetime + if micro: + _timestamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + else: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + else: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + """ + 
if num_of_rows > 0: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + else: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + else: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + else: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + else: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + assert(nbytes is not None) + if num_of_rows > 0: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + else: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + res=[] + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + tmpstr = ctypes.c_char_p(data) + res.append( tmpstr.value.decode() ) + else: + res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + except ValueError: + res.append(None) + + return res + +def _crow_binary_to_python_block(data, num_of_rows, 
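
Each fixed-width converter above follows the same shape: cast the raw column pointer to the matching ctypes type, slice out num_of_rows elements, and map the column's NULL sentinel (FieldType.C_*_NULL) to Python None; the float/double variants use math.isnan instead of an equality check. A condensed sketch of the integer-type pattern — _convert_fixed_width is an illustrative name, not part of the module:

    def _convert_fixed_width(data, num_of_rows, c_type, null_value):
        values = ctypes.cast(data, ctypes.POINTER(c_type))[:abs(num_of_rows)]
        return [None if v == null_value else v for v in values]

    # the INT case above is equivalent to:
    #   _convert_fixed_width(data, num_of_rows, ctypes.c_int, FieldType.C_INT_NULL)
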
nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + assert(nbytes is not None) + res=[] + if num_of_rows > 0: + for i in range(abs(num_of_rows)): + try: + rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() + tmpstr = ctypes.c_char_p(data+nbytes*i+2) + res.append( tmpstr.value.decode()[0:rbyte] ) + except ValueError: + res.append(None) + else: + for i in range(abs(num_of_rows)): + try: + rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() + tmpstr = ctypes.c_char_p(data+nbytes*i+2) + res.append( tmpstr.value.decode()[0:rbyte] ) + except ValueError: + res.append(None) + return res + +def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + res=[] + if num_of_rows >= 0: + for i in range(abs(num_of_rows)): + try: + tmpstr = ctypes.c_char_p(data+nbytes*i+2) + res.append( tmpstr.value.decode() ) + except ValueError: + res.append(None) + else: + for i in range(abs(num_of_rows)): + try: + res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + except ValueError: + res.append(None) + return res + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT : _crow_tinyint_to_python, + FieldType.C_SMALLINT : _crow_smallint_to_python, + FieldType.C_INT : _crow_int_to_python, + FieldType.C_BIGINT : _crow_bigint_to_python, + FieldType.C_FLOAT : _crow_float_to_python, + FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP : _crow_timestamp_to_python, + FieldType.C_NCHAR : _crow_nchar_to_python +} + +_CONVERT_FUNC_BLOCK = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT : _crow_tinyint_to_python, + FieldType.C_SMALLINT : _crow_smallint_to_python, + FieldType.C_INT : _crow_int_to_python, + FieldType.C_BIGINT : _crow_bigint_to_python, + FieldType.C_FLOAT : _crow_float_to_python, + FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python_block, + FieldType.C_TIMESTAMP : _crow_timestamp_to_python, + FieldType.C_NCHAR : _crow_nchar_to_python_block +} + +# Corresponding TAOS_FIELD structure in C +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 65), + ('type', ctypes.c_char), + ('bytes', ctypes.c_short)] + +# C interface class +class CTaosInterface(object): + + libtaos = ctypes.CDLL('libtaos.dylib') + + libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) + libtaos.taos_init.restype = None + libtaos.taos_connect.restype = ctypes.c_void_p + #libtaos.taos_use_result.restype = ctypes.c_void_p + libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) + libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p + libtaos.taos_fetch_lengths.restype = ctypes.c_void_p + libtaos.taos_free_result.restype = None + libtaos.taos_errno.restype = ctypes.c_int + libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) + + def __init__(self, config=None): + ''' + Function to initialize the class + @host : str, hostname to connect + @user : str, username to connect to server + @password : str, password to connect to server + @db : str, default db to use when log in + @config : str, config directory + + @rtype : None + ''' + if config is None: + self._config = ctypes.c_char_p(None) + else: + try: + self._config = 
ctypes.c_char_p(config.encode('utf-8')) + except AttributeError: + raise AttributeError("config is expected as a str") + + if config != None: + CTaosInterface.libtaos.taos_options(3, self._config) + + CTaosInterface.libtaos.taos_init() + + @property + def config(self): + """ Get current config + """ + return self._config + + def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + ''' + Function to connect to server + + @rtype: c_void_p, TDengine handle + ''' + # host + try: + _host = ctypes.c_char_p(host.encode( + "utf-8")) if host != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("host is expected as a str") + + # user + try: + _user = ctypes.c_char_p(user.encode("utf-8")) + except AttributeError: + raise AttributeError("user is expected as a str") + + # password + try: + _password = ctypes.c_char_p(password.encode("utf-8")) + except AttributeError: + raise AttributeError("password is expected as a str") + + # db + try: + _db = ctypes.c_char_p( + db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("db is expected as a str") + + # port + try: + _port = ctypes.c_int(port) + except TypeError: + raise TypeError("port is expected as an int") + + connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( + _host, _user, _password, _db, _port)) + + if connection.value == None: + print('connect to TDengine failed') + raise ConnectionError("connect to TDengine failed") + # sys.exit(1) + #else: + # print('connect to TDengine success') + + return connection + + @staticmethod + def close(connection): + '''Close the TDengine handle + ''' + CTaosInterface.libtaos.taos_close(connection) + #print('connection is closed') + + @staticmethod + def query(connection, sql): + '''Run SQL + + @sql: str, sql string to run + + @rtype: 0 on success and -1 on failure + ''' + try: + return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + except AttributeError: + raise AttributeError("sql is expected as a string") + # finally: + # CTaosInterface.libtaos.close(connection) + + @staticmethod + def affectedRows(result): + """The affected rows after runing query + """ + return CTaosInterface.libtaos.taos_affected_rows(result) + + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + + @staticmethod + def useResult(result): + '''Use result after calling self.query + ''' + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + if num_of_rows == 0: + return None, 0 + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: + raise DatabaseError("Invalid data type returned from database") + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + + return blocks, abs(num_of_rows) + @staticmethod + def fetchRow(result, fields): + pblock = ctypes.c_void_p(0) + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock : + num_of_rows = 1 + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + if data is None: + blocks[i] = [None] + else: + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + else: + return None, 0 + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(result): + return CTaosInterface.libtaos.taos_field_count(result) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # @staticmethod + # def getDataValue(data, 
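
fetchBlock and fetchRow above return column-major data: one Python list per field, plus the row count. A hypothetical helper (not part of this module) showing how that output maps back to row tuples; result and fields are assumed to come from CTaosInterface.query() and useResult():

    def _block_to_rows(blocks, num_of_rows):
        return [tuple(col[i] for col in blocks) for i in range(num_of_rows)]

    # blocks, n = CTaosInterface.fetchBlock(result, fields)
    # rows = _block_to_rows(blocks, n)
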
dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): + # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): + # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') + + @staticmethod + def errno(result): + """Return the error number. + """ + return CTaosInterface.libtaos.taos_errno(result) + + @staticmethod + def errStr(result): + """Return the error styring + """ + return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + result = cinter.query(conn, 'show databases') + + print('Query Affected rows: {}'.format(cinter.affectedRows(result))) + + fields = CTaosInterface.useResult(result) + + data, num_of_rows = CTaosInterface.fetchBlock(result, fields) + + print(data) + + cinter.freeResult(result) + cinter.close(conn) diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py new file mode 100644 index 0000000000..552250f116 --- /dev/null +++ b/src/connector/python/osx/python3/taos/connection.py @@ -0,0 +1,86 @@ +from .cursor import TDengineCursor +from .subscription import TDengineSubscription +from .cinterface import CTaosInterface + +class TDengineConnection(object): + """ TDengine connection object + """ + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + + def close(self): + """Close current connection. + """ + return CTaosInterface.close(self._conn) + + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. 
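TDengineConnection, whose definition starts above, takes all of its settings as keyword arguments and forwards them through config(**kwargs) to the CTaosInterface.connect() call shown earlier; the values in this sketch are placeholders and the block is illustrative only:

from taos.connection import TDengineConnection

conn = TDengineConnection(host='127.0.0.1',    # placeholder; user/password default to root/taosdata
                          database='demo',     # optional, no database is selected when omitted
                          port=0,              # 0 lets the client library use its default port
                          config='/etc/taos')  # handed to CTaosInterface, i.e. to taos_options
cursor = conn.cursor()                         # DB-API style cursor, defined in cursor.py further down
conn.close()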
+ """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. + """ + pass + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() + print("Hello world") \ No newline at end of file diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py new file mode 100644 index 0000000000..feb7050a40 --- /dev/null +++ b/src/connector/python/osx/python3/taos/constants.py @@ -0,0 +1,33 @@ +"""Constants in TDengine python +""" + +from .dbapi import * + +class FieldType(object): + """TDengine Field Types + """ + # type_code + C_NULL = 0 + C_BOOL = 1 + C_TINYINT = 2 + C_SMALLINT = 3 + C_INT = 4 + C_BIGINT = 5 + C_FLOAT = 6 + C_DOUBLE = 7 + C_BINARY = 8 + C_TIMESTAMP = 9 + C_NCHAR = 10 + # NULL value definition + # NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL = 0x02 + C_TINYINT_NULL = -128 + C_SMALLINT_NULL = -32768 + C_INT_NULL = -2147483648 + C_BIGINT_NULL = -9223372036854775808 + C_FLOAT_NULL = float('nan') + C_DOUBLE_NULL = float('nan') + C_BINARY_NULL = bytearray([int('0xff', 16)]) + # Timestamp precision definition + C_TIMESTAMP_MILLI = 0 + C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py new file mode 100644 index 0000000000..f972d2ff07 --- /dev/null +++ b/src/connector/python/osx/python3/taos/cursor.py @@ -0,0 +1,271 @@ +from .cinterface import CTaosInterface +from .error import * +from .constants import FieldType +import threading + +# querySeqNum = 0 + +class TDengineCursor(object): + """Database cursor which is used to manage the context of a fetch operation. + + Attributes: + .description: Read-only attribute consists of 7-item sequences: + + > name (mondatory) + > type_code (mondatory) + > display_size + > internal_size + > precision + > scale + > null_ok + + This attribute will be None for operations that do not return rows or + if the cursor has not had an operation invoked via the .execute*() method yet. 
+ + .rowcount:This read-only attribute specifies the number of rows that the last + .execute*() produced (for DQL statements like SELECT) or affected + """ + + def __init__(self, connection=None): + self._description = [] + self._rowcount = -1 + self._connection = None + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + self._affected_rows = 0 + self._logfile = "" + self._threadId = threading.get_ident() + + if connection is not None: + self._connection = connection + + def __iter__(self): + return self + + def __next__(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetch iterator") + + if self._block_rows <= self._block_iter: + block, self._block_rows = CTaosInterface.fetchRow( + self._result, self._fields) + if self._block_rows == 0: + raise StopIteration + self._block = list(map(tuple, zip(*block))) + self._block_iter = 0 + + data = self._block[self._block_iter] + self._block_iter += 1 + + return data + + @property + def description(self): + """Return the description of the object. + """ + return self._description + + @property + def rowcount(self): + """Return the rowcount of the object + """ + return self._rowcount + + @property + def affected_rows(self): + """Return the rowcount of insertion + """ + return self._affected_rows + + def callproc(self, procname, *args): + """Call a stored database procedure with the given name. + + Void functionality since no stored procedures. + """ + pass + + def log(self, logfile): + self._logfile = logfile + + def close(self): + """Close the cursor. + """ + if self._connection is None: + return False + + self._reset_result() + self._connection = None + + return True + + def execute(self, operation, params=None): + """Prepare and execute a database operation (query or command). + """ + # if threading.get_ident() != self._threadId: + # info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + + if not operation: + return None + + if not self._connection: + # TODO : change the exception raised here + raise ProgrammingError("Cursor is not connected") + + self._reset_result() + + stmt = operation + if params is not None: + pass + + # global querySeqNum + # querySeqNum += 1 + # localSeqNum = querySeqNum # avoid raice condition + # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt))) + self._result = CTaosInterface.query(self._connection._conn, stmt) + # print(" << Query ({}) Exec Done".format(localSeqNum)) + if (self._logfile): + with open(self._logfile, "a") as logfile: + logfile.write("%s;\n" % operation) + + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno == 0: + if CTaosInterface.fieldsCount(self._result) == 0: + self._affected_rows += CTaosInterface.affectedRows( + self._result ) + return CTaosInterface.affectedRows(self._result ) + else: + self._fields = CTaosInterface.useResult( + self._result) + return self._handle_result() + else: + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + + def executemany(self, operation, seq_of_parameters): + """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. + """ + pass + + def fetchone(self): + """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
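execute() above returns the affected-row count for statements without a result set, keeps a running total in affected_rows, and raises ProgrammingError with the server's message when taos_errno reports a failure. A sketch against a placeholder database, illustrative only:

from taos.connection import TDengineConnection
from taos.error import ProgrammingError

conn = TDengineConnection(host='127.0.0.1')          # placeholder host
cur = conn.cursor()
try:
    cur.execute('create database if not exists demo')
    cur.execute('use demo')
    cur.execute('create table if not exists t (ts timestamp, v int)')
    inserted = cur.execute('insert into t values (now, 42)')
    print('rows inserted:', inserted)                # affected-row count for non-query statements
    cur.execute('select * from t')                   # returns the result handle; rows come from fetch*
    print('inserts on this cursor so far:', cur.affected_rows)
except ProgrammingError as err:
    print('TDengine rejected a statement:', err)
finally:
    cur.close()
    conn.close()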
+ """ + pass + + def fetchmany(self): + pass + + def istype(self, col, dataType): + if (dataType.upper() == "BOOL"): + if (self._description[col][1] == FieldType.C_BOOL): + return True + if (dataType.upper() == "TINYINT"): + if (self._description[col][1] == FieldType.C_TINYINT): + return True + if (dataType.upper() == "INT"): + if (self._description[col][1] == FieldType.C_INT): + return True + if (dataType.upper() == "BIGINT"): + if (self._description[col][1] == FieldType.C_INT): + return True + if (dataType.upper() == "FLOAT"): + if (self._description[col][1] == FieldType.C_FLOAT): + return True + if (dataType.upper() == "DOUBLE"): + if (self._description[col][1] == FieldType.C_DOUBLE): + return True + if (dataType.upper() == "BINARY"): + if (self._description[col][1] == FieldType.C_BINARY): + return True + if (dataType.upper() == "TIMESTAMP"): + if (self._description[col][1] == FieldType.C_TIMESTAMP): + return True + if (dataType.upper() == "NCHAR"): + if (self._description[col][1] == FieldType.C_NCHAR): + return True + + return False + + def fetchall_row(self): + """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. + """ + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + if num_of_fields == 0: + break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + return list(map(tuple, zip(*buffer))) + + def fetchall(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + if num_of_fields == 0: break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + return list(map(tuple, zip(*buffer))) + def nextset(self): + """ + """ + pass + + def setinputsize(self, sizes): + pass + + def setutputsize(self, size, column=None): + pass + + def _reset_result(self): + """Reset the result to unused version. + """ + self._description = [] + self._rowcount = -1 + if self._result is not None: + CTaosInterface.freeResult(self._result) + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + self._affected_rows = 0 + + def _handle_result(self): + """Handle the return result from query. 
+ """ + # if threading.get_ident() != self._threadId: + # info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + + self._description = [] + for ele in self._fields: + self._description.append( + (ele['name'], ele['type'], None, None, None, None, False)) + + return self._result + diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py new file mode 100644 index 0000000000..f1c22bdb51 --- /dev/null +++ b/src/connector/python/osx/python3/taos/dbapi.py @@ -0,0 +1,38 @@ +"""Type Objects and Constructors. +""" + +import time +import datetime + +class DBAPITypeObject(object): + def __init__(self, *values): + self.values = values + + def __com__(self, other): + if other in self.values: + return 0 + if other < self.values: + return 1 + else: + return -1 + +Date = datetime.date +Time = datetime.time +Timestamp = datetime.datetime + +def DataFromTicks(ticks): + return Date(*time.localtime(ticks)[:3]) + +def TimeFromTicks(ticks): + return Time(*time.localtime(ticks)[3:6]) + +def TimestampFromTicks(ticks): + return Timestamp(*time.localtime(ticks)[:6]) + +Binary = bytes + +# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) +# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) +# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) +# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) +# ROWID = DBAPITypeObject() \ No newline at end of file diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py new file mode 100644 index 0000000000..24508a72ed --- /dev/null +++ b/src/connector/python/osx/python3/taos/error.py @@ -0,0 +1,57 @@ +"""Python exceptions +""" + +class Error(Exception): + def __init__(self, msg=None, errno=None): + self.msg = msg + self._full_msg = self.msg + self.errno = errno + + def __str__(self): + return self._full_msg + +class Warning(Exception): + """Exception raised for important warnings like data truncations while inserting. + """ + pass + +class InterfaceError(Error): + """Exception raised for errors that are related to the database interface rather than the database itself. + """ + pass + +class DatabaseError(Error): + """Exception raised for errors that are related to the database. + """ + pass + +class DataError(DatabaseError): + """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. + """ + pass + +class OperationalError(DatabaseError): + """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer + """ + pass + + +class IntegrityError(DatabaseError): + """Exception raised when the relational integrity of the database is affected. + """ + pass + +class InternalError(DatabaseError): + """Exception raised when the database encounters an internal error. + """ + pass + +class ProgrammingError(DatabaseError): + """Exception raised for programming errors. + """ + pass + +class NotSupportedError(DatabaseError): + """Exception raised in case a method or database API was used which is not supported by the database,. 
+ """ + pass \ No newline at end of file diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py new file mode 100644 index 0000000000..d3cf10d5ad --- /dev/null +++ b/src/connector/python/osx/python3/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/cq/CMakeLists.txt b/src/cq/CMakeLists.txt index 73d5eebd6d..e9ed2996c7 100644 --- a/src/cq/CMakeLists.txt +++ b/src/cq/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index a5de27d7fc..fb0c1508cb 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -91,6 +91,20 @@ void cqRmFromList(SCqObj *pObj) { } +static void freeSCqContext(void *handle) { + if (handle == NULL) { + return; + } + SCqContext *pContext = handle; + pthread_mutex_destroy(&pContext->mutex); + + taosTmrCleanUp(pContext->tmrCtrl); + pContext->tmrCtrl = NULL; + cDebug("vgId:%d, CQ is closed", pContext->vgId); + free(pContext); +} + + void cqFree(void *handle) { if (tsEnableStream == 0) { return; @@ -125,15 +139,7 @@ void cqFree(void *handle) { pthread_mutex_unlock(&pContext->mutex); if (delete) { - pthread_mutex_unlock(&pContext->mutex); - - pthread_mutex_destroy(&pContext->mutex); - - taosTmrCleanUp(pContext->tmrCtrl); - pContext->tmrCtrl = NULL; - - cDebug("vgId:%d, CQ is closed", pContext->vgId); - free(pContext); + freeSCqContext(pContext); } } @@ -186,6 +192,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) { return pContext; } + void cqClose(void *handle) { if (tsEnableStream == 0) { return; @@ -194,7 +201,9 @@ void cqClose(void *handle) { if (handle == NULL) return; pContext->delete = 1; - + int32_t hasCq = 0; + int32_t existLoop = 0; + // stop all CQs cqStop(pContext); @@ -208,7 +217,13 @@ void cqClose(void *handle) { cqRmFromList(pObj); rid = pObj->rid; - } else { + + hasCq = 1; + + if (pContext->pHead == NULL) { + existLoop = 1; + } + } else { pthread_mutex_unlock(&pContext->mutex); break; } @@ -216,6 +231,14 @@ void cqClose(void *handle) { pthread_mutex_unlock(&pContext->mutex); taosRemoveRef(cqObjRef, rid); + + if (existLoop) { + break; + } + } + + if (hasCq == 0) { + 
freeSCqContext(pContext); } } @@ -244,6 +267,7 @@ void cqStop(void *handle) { if (tsEnableStream == 0) { return; } + SCqContext *pContext = handle; cDebug("vgId:%d, stop all CQs", pContext->vgId); if (pContext->dbConn == NULL || pContext->master == 0) return; diff --git a/src/cq/test/CMakeLists.txt b/src/cq/test/CMakeLists.txt index fc3a1ea93a..cd124567af 100644 --- a/src/cq/test/CMakeLists.txt +++ b/src/cq/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) LIST(APPEND CQTEST_SRC ./cqtest.c) diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index 14ec98b8f8..644a4e875d 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) @@ -27,7 +27,7 @@ IF (TD_GRANT) TARGET_LINK_LIBRARIES(taosd grant) ENDIF () -IF ((TD_LINUX OR TD_WINDOWS) AND TD_MQTT) +IF (TD_MQTT) TARGET_LINK_LIBRARIES(taosd mqtt) ENDIF () @@ -43,5 +43,6 @@ ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD} COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TD_TESTS_OUTPUT_DIR}/data > ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg COMMAND ${CMAKE_COMMAND} -E echo logDir ${TD_TESTS_OUTPUT_DIR}/log >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg + COMMAND ${CMAKE_COMMAND} -E echo monitor 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg COMMENT "prepare taosd environment") ADD_CUSTOM_TARGET(${PREPARE_ENV_TARGET} ALL WORKING_DIRECTORY ${TD_EXECUTABLE_OUTPUT_PATH} DEPENDS ${PREPARE_ENV_CMD}) diff --git a/src/dnode/inc/dnodeMgmt.h b/src/dnode/inc/dnodeMgmt.h deleted file mode 100644 index 2038ef5286..0000000000 --- a/src/dnode/inc/dnodeMgmt.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#ifndef TDENGINE_DNODE_MGMT_H -#define TDENGINE_DNODE_MGMT_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include "trpc.h" - -int32_t dnodeInitMgmt(); -void dnodeCleanupMgmt(); -int32_t dnodeInitMgmtTimer(); -void dnodeCleanupMgmtTimer(); -void dnodeDispatchToMgmtQueue(SRpcMsg *rpcMsg); - -void* dnodeGetVnode(int32_t vgId); -int32_t dnodeGetVnodeStatus(void *pVnode); -void* dnodeGetVnodeRworker(void *pVnode); -void* dnodeGetVnodeWworker(void *pVnode); -void* dnodeGetVnodeWal(void *pVnode); -void* dnodeGetVnodeTsdb(void *pVnode); -void dnodeReleaseVnode(void *pVnode); - -void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell); -void dnodeGetEpSetForPeer(SRpcEpSet *epSet); -void dnodeGetEpSetForShell(SRpcEpSet *epSet); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 517a9e9bc8..16f97d0eea 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -21,7 +21,7 @@ #include "tconfig.h" #include "tfile.h" #include "twal.h" -// #include "tfs.h" +#include "tfs.h" #include "tsync.h" #include "dnodeStep.h" #include "dnodePeer.h" @@ -189,32 +189,35 @@ static void dnodeCheckDataDirOpenned(char *dir) { } static int32_t dnodeInitStorage() { - if (dnodeCreateDir(tsDataDir) < 0) { - dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno)); - return -1; + if (tfsInit(tsDiskCfg, tsDiskCfgNum) < 0) { + dError("failed to init TFS since %s", tstrerror(terrno)); + return -1; } + strncpy(tsDataDir, TFS_PRIMARY_PATH(), TSDB_FILENAME_LEN); sprintf(tsMnodeDir, "%s/mnode", tsDataDir); sprintf(tsVnodeDir, "%s/vnode", tsDataDir); sprintf(tsDnodeDir, "%s/dnode", tsDataDir); - sprintf(tsVnodeBakDir, "%s/vnode_bak", tsDataDir); + // sprintf(tsVnodeBakDir, "%s/vnode_bak", tsDataDir); //TODO(dengyihao): no need to init here if (dnodeCreateDir(tsMnodeDir) < 0) { dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno)); return -1; } - //TODO(dengyihao): no need to init here - if (dnodeCreateDir(tsVnodeDir) < 0) { - dError("failed to create dir: %s, reason: %s", tsVnodeDir, strerror(errno)); - return -1; - } + if (dnodeCreateDir(tsDnodeDir) < 0) { dError("failed to create dir: %s, reason: %s", tsDnodeDir, strerror(errno)); return -1; - } - if (dnodeCreateDir(tsVnodeBakDir) < 0) { - dError("failed to create dir: %s, reason: %s", tsVnodeBakDir, strerror(errno)); - return -1; + } + + if (tfsMkdir("vnode") < 0) { + dError("failed to create vnode dir since %s", tstrerror(terrno)); + return -1; + } + + if (tfsMkdir("vnode_bak") < 0) { + dError("failed to create vnode_bak dir since %s", tstrerror(terrno)); + return -1; } dnodeCheckDataDirOpenned(tsDnodeDir); @@ -223,7 +226,7 @@ static int32_t dnodeInitStorage() { return 0; } -static void dnodeCleanupStorage() {} +static void dnodeCleanupStorage() { tfsDestroy(); } bool dnodeIsFirstDeploy() { return strcmp(tsFirst, tsLocalEp) == 0; diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index a40df626e2..9a226b81e8 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -36,12 +36,12 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_QUERY] = dnodeDispatchToVReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_FETCH] = dnodeDispatchToVReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = dnodeDispatchToVWriteQueue; - + // the following message shall be treated as mnode write dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = dnodeDispatchToMWriteQueue; 
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT] = dnodeDispatchToMWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = dnodeDispatchToMWriteQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_USER] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_USER] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DNODE]= dnodeDispatchToMWriteQueue; @@ -57,13 +57,13 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE]= dnodeDispatchToMWriteQueue; - - // the following message shall be treated as mnode query + + // the following message shall be treated as mnode query dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONNECT] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_USE_DB] = dnodeDispatchToMReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = dnodeDispatchToMReadQueue; - dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP]= dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP]= dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMReadQueue; @@ -153,7 +153,7 @@ static int32_t dnodeAuthNettestUser(char *user, char *spi, char *encrypt, char * } static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey) { - if (dnodeAuthNettestUser(user, spi, encrypt, secret, ckey) == 0) return 0; + if (dnodeAuthNettestUser(user, spi, encrypt, secret, ckey) == 0) return 0; int code = mnodeRetriveAuth(user, spi, encrypt, secret, ckey); if (code != TSDB_CODE_APP_NOT_READY) return code; @@ -164,7 +164,7 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char rpcMsg.pCont = pMsg; rpcMsg.contLen = sizeof(SAuthMsg); rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH; - + dDebug("user:%s, send auth msg to mnodes", user); SRpcMsg rpcRsp = {0}; dnodeSendMsgToMnodeRecv(&rpcMsg, &rpcRsp); @@ -202,14 +202,14 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) { SRpcMsg rpcRsp = {0}; dnodeSendMsgToMnodeRecv(&rpcMsg, &rpcRsp); terrno = rpcRsp.code; - + if (rpcRsp.code != 0) { rpcFreeCont(rpcRsp.pCont); dError("vgId:%d, tid:%d failed to config table from mnode", vgId, tid); return NULL; } else { dInfo("vgId:%d, tid:%d config table msg is received", vgId, tid); - + // delete this after debug finished SMDCreateTableMsg *pTable = rpcRsp.pCont; int16_t numOfColumns = htons(pTable->numOfColumns); @@ -231,4 +231,4 @@ SStatisInfo dnodeGetStatisInfo() { } return info; -} \ No newline at end of file +} diff --git a/src/dnode/src/dnodeStep.c b/src/dnode/src/dnodeStep.c index 8878299d94..eda6fb227d 100644 --- a/src/dnode/src/dnodeStep.c +++ b/src/dnode/src/dnodeStep.c @@ -69,6 +69,6 @@ int32_t dnodeStepInit(SStep *pSteps, int32_t stepSize) { return 0; } 
-void dnodeStepCleanup(SStep *pSteps, int32_t stepSize) { +void dnodeStepCleanup(SStep *pSteps, int32_t stepSize) { taosStepCleanupImp(pSteps, stepSize - 1); -} \ No newline at end of file +} diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index 33ebc8b64f..e49b3eba99 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -181,4 +181,4 @@ static void sigintHandler(int32_t signum, void *sigInfo, void *context) { #ifdef WINDOWS tsem_wait(&exitSem); #endif -} \ No newline at end of file +} diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c index 4a3d6d9a84..1e428fc8b1 100644 --- a/src/dnode/src/dnodeVMgmt.c +++ b/src/dnode/src/dnodeVMgmt.c @@ -174,7 +174,7 @@ static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { vnodeRelease(pVnode); return code; } else { - dError("vgId:%d, vnode not exist, can't alter it", pAlter->cfg.vgId); + dInfo("vgId:%d, vnode not exist, can't alter it", pAlter->cfg.vgId); return TSDB_CODE_VND_INVALID_VGROUP_ID; } } diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 93d1611ebc..a3ff459396 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -53,7 +53,7 @@ void dnodeCleanupVWrite() { for (int32_t i = 0; i < tsVWriteWP.max; ++i) { SVWriteWorker *pWorker = tsVWriteWP.worker + i; if (taosCheckPthreadValid(pWorker->thread)) { - taosQsetThreadResume(pWorker->qset); + if (pWorker->qset) taosQsetThreadResume(pWorker->qset); } } diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index ebed2caaa6..4f7c2c7955 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -192,7 +192,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_DROPPING, 0, 0x0386, "Database n TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_READY, 0, 0x0387, "Database unsynced") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_DAYS, 0, 0x0390, "Invalid database option: days out of range") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_KEEP, 0, 0x0391, "Invalid database option: keep >= keep2 >= keep1 >= days") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION_KEEP, 0, 0x0391, "Invalid database option: keep >= keep1 >= keep0 >= days") // dnode TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, 0, 0x0400, "Message not processed") @@ -242,6 +242,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "No table d TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "File already exists") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "Need to reconfigure table") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO, 0, 0x0612, "Invalid information to create table") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_AVAIL_DISK, 0, 0x0613, "No available disk") +TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, 0, 0x0614, "TSDB messed message") // query TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "Invalid handle") @@ -294,7 +296,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, 0, 0x1002, "WAL size e // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, 0, 0x1100, "http server is not onlin") TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_UNSUPPORT_URL, 0, 0x1101, "url is not support") -TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVLALID_URL, 0, 0x1102, "invalid url format") +TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_URL, 0, 0x1102, "invalid url format") TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_ENOUGH_MEMORY, 0, 0x1103, "no enough memory") TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUSET_TOO_BIG, 0, 0x1104, "request size is too big") TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_AUTH_INFO, 0, 0x1105, "no auth info 
input") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 5b31fbf292..721b9ca605 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -496,6 +496,7 @@ typedef struct { int32_t tsOrder; // ts comp block order int32_t numOfTags; // number of tags columns involved int32_t sqlstrLen; // sql query string + int32_t prevResultLen; // previous result length SColumnInfo colList[]; } SQueryTableMsg; diff --git a/src/inc/tfs.h b/src/inc/tfs.h new file mode 100644 index 0000000000..76e9b17a62 --- /dev/null +++ b/src/inc/tfs.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TD_TFS_H +#define TD_TFS_H + +#include "tglobal.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + int level; + int id; +} SDiskID; + +#define TFS_UNDECIDED_LEVEL -1 +#define TFS_UNDECIDED_ID -1 +#define TFS_PRIMARY_LEVEL 0 +#define TFS_PRIMARY_ID 0 + +// FS APIs ==================================== +typedef struct { + int64_t tsize; + int64_t avail; +} SFSMeta; + +int tfsInit(SDiskCfg *pDiskCfg, int ndisk); +void tfsDestroy(); +void tfsUpdateInfo(SFSMeta *pFSMeta); +void tfsGetMeta(SFSMeta *pMeta); +void tfsAllocDisk(int expLevel, int *level, int *id); + +const char *TFS_PRIMARY_PATH(); +const char *TFS_DISK_PATH(int level, int id); + +// TFILE APIs ==================================== +typedef struct { + int level; + int id; + char rname[TSDB_FILENAME_LEN]; // REL name + char aname[TSDB_FILENAME_LEN]; // ABS name +} TFILE; + +#define TFILE_LEVEL(pf) ((pf)->level) +#define TFILE_ID(pf) ((pf)->id) +#define TFILE_NAME(pf) ((pf)->aname) +#define TFILE_REL_NAME(pf) ((pf)->rname) + +#define tfsopen(pf, flags) open(TFILE_NAME(pf), flags) +#define tfsclose(fd) close(fd) +#define tfsremove(pf) remove(TFILE_NAME(pf)) +#define tfscopy(sf, df) taosCopy(TFILE_NAME(sf), TFILE_NAME(df)) +#define tfsrename(sf, df) taosRename(TFILE_NAME(sf), TFILE_NAME(df)) + +void tfsInitFile(TFILE *pf, int level, int id, const char *bname); +bool tfsIsSameFile(const TFILE *pf1, const TFILE *pf2); +int tfsEncodeFile(void **buf, TFILE *pf); +void *tfsDecodeFile(void *buf, TFILE *pf); +void tfsbasename(const TFILE *pf, char *dest); +void tfsdirname(const TFILE *pf, char *dest); + +// DIR APIs ==================================== +int tfsMkdirAt(const char *rname, int level, int id); +int tfsMkdirRecurAt(const char *rname, int level, int id); +int tfsMkdir(const char *rname); +int tfsRmdir(const char *rname); +int tfsRename(char *orname, char *nrname); + +typedef struct TDIR TDIR; + +TDIR * tfsOpendir(const char *rname); +const TFILE *tfsReaddir(TDIR *tdir); +void tfsClosedir(TDIR *tdir); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 95e6b7ff4a..78cd2927c7 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -40,7 +40,8 @@ extern "C" { // TSDB STATE DEFINITION #define TSDB_STATE_OK 0x0 -#define TSDB_STATE_BAD_FILE 0x1 +#define TSDB_STATE_BAD_META 0x1 +#define TSDB_STATE_BAD_DATA 0x2 // --------- TSDB 
APPLICATION HANDLE DEFINITION typedef struct { @@ -48,7 +49,7 @@ typedef struct { void *cqH; int (*notifyStatus)(void *, int status, int eno); int (*eventCallBack)(void *); - void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema); + void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema); void (*cqDropFunc)(void *handle); } STsdbAppH; @@ -76,17 +77,17 @@ typedef struct { int64_t pointsWritten; // total data points written } STsdbStat; -typedef void TSDB_REPO_T; // use void to hide implementation details from outside +typedef struct STsdbRepo STsdbRepo; -STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo); +STsdbCfg *tsdbGetCfg(const STsdbRepo *repo); // --------- TSDB REPOSITORY DEFINITION -int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg); -int32_t tsdbDropRepo(char *rootDir); -TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH); -int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit); -int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg); -int tsdbGetState(TSDB_REPO_T *repo); +int32_t tsdbCreateRepo(int repoid); +int32_t tsdbDropRepo(int repoid); +STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH); +int tsdbCloseRepo(STsdbRepo *repo, int toCommit); +int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg); +int tsdbGetState(STsdbRepo *repo); // --------- TSDB TABLE DEFINITION typedef struct { @@ -110,8 +111,8 @@ typedef struct { void tsdbClearTableCfg(STableCfg *config); -void* tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_t bytes); -char* tsdbGetTableName(void *pTable); +void *tsdbGetTableTagVal(const void *pTable, int32_t colId, int16_t type, int16_t bytes); +char *tsdbGetTableName(void *pTable); #define TSDB_TABLEID(_table) ((STableId*) (_table)) #define TSDB_PREV_ROW 0x1 @@ -119,12 +120,11 @@ char* tsdbGetTableName(void *pTable); STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg); -int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg); -int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId); -int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg); -// TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid); +int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg); +int tsdbDropTable(STsdbRepo *pRepo, STableId tableId); +int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg); -uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size); +uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size); // the TSDB repository info typedef struct STsdbRepoInfo { @@ -134,7 +134,7 @@ typedef struct STsdbRepoInfo { int64_t tsdbTotalDiskSize; // the total disk size taken by this TSDB repository // TODO: Other informations to add } STsdbRepoInfo; -STsdbRepoInfo *tsdbGetStatus(TSDB_REPO_T *pRepo); +STsdbRepoInfo *tsdbGetStatus(STsdbRepo *pRepo); // the meter information report structure typedef struct { @@ -152,7 +152,7 @@ typedef struct { * * @return the number of points inserted, -1 for failure and the error number is set */ -int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp); +int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp); // -- FOR QUERY TIME SERIES DATA @@ -168,9 +168,9 @@ typedef struct STsdbQueryCond { } STsdbQueryCond; typedef struct SMemRef { - int32_t ref; - void *mem; - void *imem; + int32_t ref; + void * mem; + void 
* imem; } SMemRef; typedef struct SDataBlockInfo { @@ -182,14 +182,14 @@ typedef struct SDataBlockInfo { } SDataBlockInfo; typedef struct { - void *pTable; - TSKEY lastKey; + void *pTable; + TSKEY lastKey; } STableKeyInfo; typedef struct { size_t numOfTables; - SArray *pGroupList; - SHashObj *map; // speedup acquire the tableQueryInfo by table uid + SArray * pGroupList; + SHashObj *map; // speedup acquire the tableQueryInfo by table uid } STableGroupInfo; /** @@ -202,7 +202,8 @@ typedef struct { * @param qinfo query info handle from query processor * @return */ -TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo, SMemRef* pRef); +TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo, + SMemRef *pRef); /** * Get the last row of the given query time window for all the tables in STableGroupInfo object. @@ -214,14 +215,15 @@ TsdbQueryHandleT *tsdbQueryTables(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STab * @param tableInfo table list. * @return */ -TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo, SMemRef* pRef); +TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo, + SMemRef *pRef); /** * get the queried table object list * @param pHandle * @return */ -SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle); +SArray *tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle); /** * get the group list according to table id from client @@ -231,8 +233,8 @@ SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle); * @param qinfo * @return */ -TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, - void *qinfo, SMemRef* pRef); +TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, + void *qinfo, SMemRef *pRef); /** @@ -268,7 +270,7 @@ SArray* tsdbGetExternalRow(TsdbQueryHandleT *pHandle, SMemRef* pMemRef, int16_t * @param pBlockInfo * @return */ -void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle, SDataBlockInfo* pBlockInfo); +void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT *pQueryHandle, SDataBlockInfo *pBlockInfo); /** * @@ -299,7 +301,7 @@ SArray *tsdbRetrieveDataBlock(TsdbQueryHandleT *pQueryHandle, SArray *pColumnIdL * @param stableid. super table sid * @param pTagCond. 
tag query condition */ -int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T *tsdb, uint64_t uid, TSKEY key, const char *pTagCond, size_t len, +int32_t tsdbQuerySTableByTagCond(STsdbRepo *tsdb, uint64_t uid, TSKEY key, const char *pTagCond, size_t len, int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupList, SColIndex *pColIndex, int32_t numOfCols); @@ -317,7 +319,7 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList); * @param pGroupInfo the generated result * @return */ -int32_t tsdbGetOneTableGroup(TSDB_REPO_T *tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo); +int32_t tsdbGetOneTableGroup(STsdbRepo *tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo); /** * @@ -326,7 +328,7 @@ int32_t tsdbGetOneTableGroup(TSDB_REPO_T *tsdb, uint64_t uid, TSKEY startKey, ST * @param pGroupInfo * @return */ -int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo); +int32_t tsdbGetTableGroupFromIdList(STsdbRepo *tsdb, SArray *pTableIdList, STableGroupInfo *pGroupInfo); /** * clean up the query handle @@ -345,10 +347,14 @@ void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int int tsdbInitCommitQueue(); void tsdbDestroyCommitQueue(); -int tsdbSyncCommit(TSDB_REPO_T *repo); +int tsdbSyncCommit(STsdbRepo *repo); void tsdbIncCommitRef(int vgId); void tsdbDecCommitRef(int vgId); +// For TSDB file sync +int tsdbSyncSend(void *pRepo, SOCKET socketFd); +int tsdbSyncRecv(void *pRepo, SOCKET socketFd); + #ifdef __cplusplus } #endif diff --git a/src/inc/tsync.h b/src/inc/tsync.h index 4dae86bbed..379c877b26 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -56,16 +56,6 @@ typedef struct { int32_t role[TAOS_SYNC_MAX_REPLICA]; } SNodesRole; -/* - if name is empty(name[0] is zero), get the file from index or after, but not larger than eindex. If a file - is found between index and eindex, index shall be updated, name shall be set, size shall be set to - file size, and file magic number shall be returned. - - if name is provided(name[0] is not zero), get the named file at the specified index. If not there, return - zero. If it is there, set the size to file size, and return file magic number. Index shall not be updated. -*/ -typedef uint32_t (*FGetFileInfo)(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion); - // get the wal file from index or after // return value, -1: error, 1:more wal files, 0:last WAL. 
if name[0]==0, no WAL file typedef int32_t (*FGetWalInfo)(int32_t vgId, char *fileName, int64_t *fileId); @@ -83,24 +73,31 @@ typedef void (*FNotifyRole)(int32_t vgId, int8_t role); typedef void (*FNotifyFlowCtrl)(int32_t vgId, int32_t level); // when data file is synced successfully, notity app -typedef int32_t (*FNotifyFileSynced)(int32_t vgId, uint64_t fversion); +typedef void (*FStartSyncFile)(int32_t vgId); +typedef void (*FStopSyncFile)(int32_t vgId, uint64_t fversion); // get file version typedef int32_t (*FGetVersion)(int32_t vgId, uint64_t *fver, uint64_t *vver); +typedef int32_t (*FSendFile)(void *tsdb, SOCKET socketFd); +typedef int32_t (*FRecvFile)(void *tsdb, SOCKET socketFd); + typedef struct { int32_t vgId; // vgroup ID uint64_t version; // initial version SSyncCfg syncCfg; // configuration from mgmt char path[TSDB_FILENAME_LEN]; // path to the file - FGetFileInfo getFileInfo; - FGetWalInfo getWalInfo; - FWriteToCache writeToCache; + void * pTsdb; + FGetWalInfo getWalInfoFp; + FWriteToCache writeToCacheFp; FConfirmForward confirmForward; - FNotifyRole notifyRole; - FNotifyFlowCtrl notifyFlowCtrl; - FNotifyFileSynced notifyFileSynced; - FGetVersion getVersion; + FNotifyRole notifyRoleFp; + FNotifyFlowCtrl notifyFlowCtrlFp; + FStartSyncFile startSyncFileFp; + FStopSyncFile stopSyncFileFp; + FGetVersion getVersionFp; + FSendFile sendFileFp; + FRecvFile recvFileFp; } SSyncInfo; typedef void *tsync_h; diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index bf77d856f9..bf52784300 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) ADD_SUBDIRECTORY(shell) diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index c4f3cc5696..b6babc5bc5 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index a986f2d3cb..716a317fca 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -470,7 +470,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { wordexp_t full_path; - if (wordexp(fname, &full_path, 0) != 0) { + if (wordexp((char *)fname, &full_path, 0) != 0) { fprintf(stderr, "ERROR: invalid file name: %s\n", fname); return -1; } diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index f74dbc2de4..390e10cc26 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 1d77a6bb63..abcadd64b1 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -67,6 +67,7 @@ typedef struct DemoArguments { char * sqlFile; bool use_metric; bool insert_only; + bool answer_yes; char * output_file; int mode; char * datatype[MAX_NUM_DATATYPE + 1]; @@ -114,6 +115,7 @@ typedef struct DemoArguments { {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14}, #endif {0, 'x', 0, 0, "Insert only flag.", 13}, + {0, 'y', 0, 0, "Default input yes for prompt", 13}, {0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. 
Default is in order.", 14}, {0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14}, {0, 'D', "delete table", 0, "Delete data methods——0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database", 14}, @@ -210,6 +212,9 @@ typedef struct DemoArguments { case 'x': arguments->insert_only = true; break; + case 'y': + arguments->answer_yes = true; + break; case 'c': if (wordexp(arg, &full_path, 0) != 0) { fprintf(stderr, "Invalid path %s\n", arg); @@ -328,6 +333,8 @@ typedef struct DemoArguments { #endif printf("%s%s\n", indent, "-x"); printf("%s%s%s\n", indent, indent, "flag, Insert only flag."); + printf("%s%s\n", indent, "-y"); + printf("%s%s%s\n", indent, indent, "flag, Anser Yes for prompt."); printf("%s%s\n", indent, "-O"); printf("%s%s%s\n", indent, indent, "order, Insert mode--0: In order, 1: Out of order. Default is in order."); printf("%s%s\n", indent, "-R"); @@ -409,6 +416,8 @@ typedef struct DemoArguments { arguments->use_metric = true; } else if (strcmp(argv[i], "-x") == 0) { arguments->insert_only = true; + } else if (strcmp(argv[i], "-y") == 0) { + arguments->answer_yes = true; } else if (strcmp(argv[i], "-c") == 0) { strcpy(configDir, argv[++i]); } else if (strcmp(argv[i], "-O") == 0) { @@ -549,6 +558,7 @@ int main(int argc, char *argv[]) { NULL, false, // use_metric false, // insert_only + false, // answer_yes "./output.txt", // output_file 0, // mode { @@ -584,6 +594,7 @@ int main(int argc, char *argv[]) { arguments.num_of_RPR = 1000; arguments.use_metric = true; arguments.insert_only = false; + arguments.answer_yes = false; // end change parse_args(argc, argv, &arguments); @@ -606,6 +617,7 @@ int main(int argc, char *argv[]) { int nrecords_per_request = arguments.num_of_RPR; bool use_metric = arguments.use_metric; bool insert_only = arguments.insert_only; + bool answer_yes = arguments.answer_yes; char **data_type = arguments.datatype; int count_data_type = 0; char dataString[STRING_LEN]; @@ -666,9 +678,12 @@ int main(int argc, char *argv[]) { printf("# Delete method: %d\n", method_of_delete); printf("# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - printf("###################################################################\n\n"); - printf("Press enter key to continue"); - (void)getchar(); + + if (!answer_yes) { + printf("###################################################################\n\n"); + printf("Press enter key to continue"); + (void)getchar(); + } fprintf(fp, "###################################################################\n"); fprintf(fp, "# Server IP: %s:%hu\n", ip_addr == NULL ? 
"localhost" : ip_addr, port); @@ -908,6 +923,7 @@ int main(int argc, char *argv[]) { } pthread_join(read_id, NULL); taos_close(rInfo->taos); + free(rInfo); } taos_cleanup(); diff --git a/src/kit/taosdemox/CMakeLists.txt b/src/kit/taosdemox/CMakeLists.txt index 3993cb0feb..abe4e74710 100644 --- a/src/kit/taosdemox/CMakeLists.txt +++ b/src/kit/taosdemox/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/kit/taosdemox/insert.json b/src/kit/taosdemox/insert.json index aa071c115d..56a64b7b85 100644 --- a/src/kit/taosdemox/insert.json +++ b/src/kit/taosdemox/insert.json @@ -8,6 +8,7 @@ "thread_count": 4, "thread_count_create_tbl": 1, "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", "databases": [{ "dbinfo": { "name": "db", diff --git a/src/kit/taosdemox/query.json b/src/kit/taosdemox/query.json index b7b08edfc9..4a5403a55d 100644 --- a/src/kit/taosdemox/query.json +++ b/src/kit/taosdemox/query.json @@ -5,6 +5,7 @@ "port": 6030, "user": "root", "password": "taosdata", + "confirm_parameter_prompt": "yes", "databases": "db01", "specified_table_query": {"query_interval":1, "concurrent":1, diff --git a/src/kit/taosdemox/taosdemox.c b/src/kit/taosdemox/taosdemox.c index a6d962df55..deb2a47645 100644 --- a/src/kit/taosdemox/taosdemox.c +++ b/src/kit/taosdemox/taosdemox.c @@ -93,6 +93,8 @@ extern char configDir[]; #define MAX_QUERY_SQL_COUNT 10 #define MAX_QUERY_SQL_LENGTH 256 +#define MAX_DATABASE_COUNT 256 + typedef enum CREATE_SUB_TALBE_MOD_EN { PRE_CREATE_SUBTBL, AUTO_CREATE_SUBTBL, @@ -116,7 +118,41 @@ enum QUERY_TYPE { INSERT_TYPE, QUERY_TYPE_BUT } ; - + +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- +enum _show_stables_index { + TSDB_SHOW_STABLES_NAME_INDEX, + TSDB_SHOW_STABLES_CREATED_TIME_INDEX, + TSDB_SHOW_STABLES_COLUMNS_INDEX, + TSDB_SHOW_STABLES_METRIC_INDEX, + TSDB_SHOW_STABLES_UID_INDEX, + TSDB_SHOW_STABLES_TID_INDEX, + TSDB_SHOW_STABLES_VGID_INDEX, + TSDB_MAX_SHOW_STABLES +}; enum _describe_table_index { TSDB_DESCRIBE_METRIC_FIELD_INDEX, TSDB_DESCRIBE_METRIC_TYPE_INDEX, @@ -145,6 +181,7 @@ typedef struct SArguments_S { char * sqlFile; bool use_metric; bool insert_only; + bool answer_yes; char * output_file; int mode; char * datatype[MAX_NUM_DATATYPE + 1]; @@ -173,6 +210,7 @@ typedef struct SSuperTable_S { int childTblCount; bool superTblExists; // 0: no, 1: yes bool childTblExists; // 0: no, 1: yes + int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample @@ -218,6 +256,28 @@ typedef struct SSuperTable_S { int64_t totalAffectedRows; } SSuperTable; +typedef struct { + char 
name[TSDB_DB_NAME_LEN + 1]; + char create_time[32]; + int32_t ntables; + int32_t vgroups; + int16_t replica; + int16_t quorum; + int16_t days; + char keeplist[32]; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + int8_t cachelast; + char precision[8]; // time resolution + int8_t update; + char status[16]; +} SDbInfo; + typedef struct SDbCfg_S { // int maxtablesPerVnode; int minRows; @@ -371,13 +431,14 @@ typedef struct curlMemInfo_S { {0, 'w', "length_of_chartype", 0, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16", 4}, {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 10.", 4}, {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 4}, - // {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 100.", 4}, + // {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 100.", 4}, {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 4}, {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 10000.", 4}, - {0, 'x', 0, 0, "Not insert only flag.", 4}, + {0, 'x', 0, 0, "Not insert only flag.", 4}, + {0, 'y', 0, 0, "Default input yes for prompt.", 4}, {0, 'O', "disorderRatio", 0, "Insert mode--0: In order, > 0: disorder ratio. Default is in order.", 4}, {0, 'R', "disorderRang", 0, "Out of order data's range, ms, default is 1000.", 4}, - //{0, 'D', "delete database", 0, "if elete database if exists. 0: no, 1: yes, default is 1", 5}, + //{0, 'D', "delete database", 0, "if elete database if exists. 0: no, 1: yes, default is 1", 5}, {0}}; /* Parse a single option. */ @@ -470,6 +531,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { break; case 'x': arguments->insert_only = false; + case 'y': + arguments->answer_yes = true; break; case 'c': if (wordexp(arg, &full_path, 0) != 0) { @@ -585,6 +648,7 @@ SArguments g_args = {NULL, NULL, // sqlFile false, // use_metric true, // insert_only + false, // answer_yes; "./output.txt", // output_file 0, // mode : sync or async { @@ -808,13 +872,14 @@ static void init_rand_data() { static void printfInsertMeta() { printf("\033[1m\033[40;32m================ insert.json parse result START ================\033[0m\n"); - printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); - printf("user: \033[33m%s\033[0m\n", g_Dbs.user); - printf("password: \033[33m%s\033[0m\n", g_Dbs.password); - printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); - printf("thread count: \033[33m%d\033[0m\n", g_Dbs.threadCount); + printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); + printf("user: \033[33m%s\033[0m\n", g_Dbs.user); + printf("password: \033[33m%s\033[0m\n", g_Dbs.password); + printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); + printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); + printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); - printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); + printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); for (int i = 0; i < g_Dbs.dbCount; i++) { printf("database[\033[33m%d\033[0m]:\n", i); printf(" database name: \033[33m%s\033[0m\n", g_Dbs.db[i].dbName); @@ -944,11 +1009,12 @@ static void printfInsertMeta() { static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "================ insert.json parse result START================\n"); - fprintf(fp, 
"host: %s:%u\n", g_Dbs.host, g_Dbs.port); - fprintf(fp, "user: %s\n", g_Dbs.user); - fprintf(fp, "password: %s\n", g_Dbs.password); - fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); - fprintf(fp, "thread count: %d\n", g_Dbs.threadCount); + fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); + fprintf(fp, "user: %s\n", g_Dbs.user); + fprintf(fp, "password: %s\n", g_Dbs.password); + fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); + fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); + fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -1123,6 +1189,272 @@ static void printfQueryMeta() { printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n"); } + +static char* xFormatTimestamp(char* buf, int64_t val, int precision) { + time_t tt; + if (precision == TSDB_TIME_PRECISION_MICRO) { + tt = (time_t)(val / 1000000); + } else { + tt = (time_t)(val / 1000); + } + +/* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. + if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + + struct tm* ptm = localtime(&tt); + size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); + + if (precision == TSDB_TIME_PRECISION_MICRO) { + sprintf(buf + pos, ".%06d", (int)(val % 1000000)); + } else { + sprintf(buf + pos, ".%03d", (int)(val % 1000)); + } + + return buf; +} + +static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) { + if (val == NULL) { + fprintf(fp, "%s", TSDB_DATA_NULL_STR); + return; + } + + char buf[TSDB_MAX_BYTES_PER_ROW]; + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: + fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + fprintf(fp, "%d", *((int8_t *)val)); + break; + case TSDB_DATA_TYPE_SMALLINT: + fprintf(fp, "%d", *((int16_t *)val)); + break; + case TSDB_DATA_TYPE_INT: + fprintf(fp, "%d", *((int32_t *)val)); + break; + case TSDB_DATA_TYPE_BIGINT: + fprintf(fp, "%" PRId64, *((int64_t *)val)); + break; + case TSDB_DATA_TYPE_FLOAT: + fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); + break; + case TSDB_DATA_TYPE_DOUBLE: + fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(buf, val, length); + buf[length] = 0; + fprintf(fp, "\'%s\'", buf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + xFormatTimestamp(buf, *(int64_t*)val, precision); + fprintf(fp, "'%s'", buf); + break; + default: + break; + } +} + +static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { + TAOS_ROW row = taos_fetch_row(tres); + if (row == NULL) { + return 0; + } + + FILE* fp = fopen(fname, "at"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to open file: %s\n", fname); + return -1; + } + + int num_fields = taos_num_fields(tres); + TAOS_FIELD *fields = taos_fetch_fields(tres); + int precision = taos_result_precision(tres); + + for (int col = 0; col < num_fields; col++) { + if (col > 0) { + fprintf(fp, ","); + } + fprintf(fp, "%s", fields[col].name); + } + fputc('\n', fp); + + int numOfRows = 0; + do { + int32_t* length = taos_fetch_lengths(tres); + for (int i = 0; i < num_fields; i++) { + if (i > 0) { + fputc(',', fp); + } + xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision); + } + fputc('\n', fp); + + numOfRows++; + row = taos_fetch_row(tres); + } while( row != NULL); + + fclose(fp); + + return numOfRows; +} + +static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; + + res = taos_query(taos, "show databases;"); + int32_t code = taos_errno(res); + + if (code != 0) { + fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res)); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + + while ((row = taos_fetch_row(res)) != NULL) { + // sys database name : 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue; + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (dbInfos[count] == NULL) { + fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count); + return -1; + } + + strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI); + dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->wallevel = *((int8_t 
*)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + + strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); + strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); + + count++; + if (count > MAX_DATABASE_COUNT) { + fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT); + break; + } + } + + return count; +} + +static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) { + FILE *fp = NULL; + if (filename[0] != 0) { + fp = fopen(filename, "at"); + if (fp == NULL) { + fprintf(stderr, "failed to open file: %s\n", filename); + return; + } + } + + fprintf(fp, "================ database[%d] ================\n", index); + fprintf(fp, "name: %s\n", dbInfos->name); + fprintf(fp, "created_time: %s\n", dbInfos->create_time); + fprintf(fp, "ntables: %d\n", dbInfos->ntables); + fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); + fprintf(fp, "replica: %d\n", dbInfos->replica); + fprintf(fp, "quorum: %d\n", dbInfos->quorum); + fprintf(fp, "days: %d\n", dbInfos->days); + fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); + fprintf(fp, "cache(MB): %d\n", dbInfos->cache); + fprintf(fp, "blocks: %d\n", dbInfos->blocks); + fprintf(fp, "minrows: %d\n", dbInfos->minrows); + fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); + fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); + fprintf(fp, "fsync: %d\n", dbInfos->fsync); + fprintf(fp, "comp: %d\n", dbInfos->comp); + fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); + fprintf(fp, "precision: %s\n", dbInfos->precision); + fprintf(fp, "update: %d\n", dbInfos->update); + fprintf(fp, "status: %s\n", dbInfos->status); + fprintf(fp, "\n"); + + fclose(fp); +} + +static void printfQuerySystemInfo(TAOS * taos) { + char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; + char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; + TAOS_RES* res; + + time_t t; + struct tm* lt; + time(&t); + lt = localtime(&t); + snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); + + // show variables + res = taos_query(taos, "show variables;"); + //getResult(res, filename); + xDumpResultToFile(filename, res); + + // show dnodes + res = taos_query(taos, "show dnodes;"); + xDumpResultToFile(filename, res); + //getResult(res, filename); + + // show databases + res = taos_query(taos, "show databases;"); + SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); + if (dbInfos == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + int dbCount = getDbFromServer(taos, dbInfos); + if (dbCount <= 0) return; + + for (int i = 0; i < dbCount; i++) { + // printf database info + printfDbInfoForQueryToFile(filename, dbInfos[i], i); + + // show db.vgroups + snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); + res = taos_query(taos, buffer); + xDumpResultToFile(filename, res); + + // show db.stables + snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); + res = taos_query(taos, buffer); + xDumpResultToFile(filename, res); + + free(dbInfos[i]); + } + + 
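  /* printfQuerySystemInfo() snapshots the server state before the query test:
   * it dumps "show variables", "show dnodes" and "show databases" into a
   * timestamped file, then for every user database writes its summary
   * (printfDbInfoForQueryToFile) plus the "show <db>.vgroups" and
   * "show <db>.stables" output via xDumpResultToFile(). Each SDbInfo
   * allocated in getDbFromServer() was released in the loop above; only the
   * pointer array itself is freed below. */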
free(dbInfos); + +} + + #ifdef TD_LOWA_CURL static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp) { @@ -1730,19 +2062,27 @@ static int createDatabases() { void * createTable(void *sarg) -{ - char command[BUFFER_SIZE] = "\0"; - +{ threadInfo *winfo = (threadInfo *)sarg; SSuperTable* superTblInfo = winfo->superTblInfo; int64_t lastPrintTime = taosGetTimestampMs(); + char* buffer = calloc(superTblInfo->maxSqlLen, 1); + + int len = 0; + int batchNum = 0; //printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { if (0 == g_Dbs.use_metric) { - snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d %s;", winfo->db_name, superTblInfo->childTblPrefix, i, superTblInfo->colsOfCreatChildTable); + snprintf(buffer, BUFFER_SIZE, "create table if not exists %s.%s%d %s;", winfo->db_name, superTblInfo->childTblPrefix, i, superTblInfo->colsOfCreatChildTable); } else { + if (0 == len) { + batchNum = 0; + memset(buffer, 0, superTblInfo->maxSqlLen); + len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "create table "); + } + char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { tagsValBuf = generateTagVaulesForStb(superTblInfo); @@ -1750,13 +2090,22 @@ void * createTable(void *sarg) tagsValBuf = getTagValueFromTagSample(superTblInfo, i % superTblInfo->tagSampleCount); } if (NULL == tagsValBuf) { + free(buffer); return NULL; } - snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.%s tags %s;", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, superTblInfo->sTblName, tagsValBuf); + + len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, superTblInfo->sTblName, tagsValBuf); free(tagsValBuf); + batchNum++; + + if ((batchNum < superTblInfo->batchCreateTableNum) && ((superTblInfo->maxSqlLen - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) { + continue; + } } - - if (0 != queryDbExec(winfo->taos, command, NO_INSERT_TYPE)){ + + len = 0; + if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){ + free(buffer); return NULL; } @@ -1766,7 +2115,12 @@ void * createTable(void *sarg) lastPrintTime = currentPrintTime; } } - + + if (0 != len) { + (void)queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE); + } + + free(buffer); return NULL; } @@ -2186,6 +2540,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { printf("failed to read json, threads2 not found"); goto PARSE_OVER; + } + + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; + } else { + printf("failed to read json, confirm_parameter_prompt not found"); + goto PARSE_OVER; } cJSON* dbs = cJSON_GetObjectItem(root, "databases"); @@ -2422,6 +2792,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("failed to read json, auto_create_table not found"); goto PARSE_OVER; } + + cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); + if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { 
+ g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; + } else if (!batchCreateTbl) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = 2000; + } else { + printf("failed to read json, batch_create_tbl_num not found"); + goto PARSE_OVER; + } cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) { @@ -2693,6 +3073,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);; } + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; + } else { + printf("failed to read json, confirm_parameter_prompt not found"); + goto PARSE_OVER; + } + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE); @@ -3679,14 +4075,14 @@ void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSu b = ntables % threads; } - TAOS* taos; - if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - if (NULL == taos) { - printf("connect to server fail, reason: %s\n", taos_errstr(NULL)); - exit(-1); - } - } + //TAOS* taos; + //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { + // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); + // if (NULL == taos) { + // printf("connect to server fail, reason: %s\n", taos_errstr(NULL)); + // exit(-1); + // } + //} int32_t timePrec = TSDB_TIME_PRECISION_MILLI; if (0 != precision[0]) { @@ -3719,7 +4115,12 @@ void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSu t_info->start_time = start_time; if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - t_info->taos = taos; + //t_info->taos = taos; + t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); + if (NULL == t_info->taos) { + printf("connect to server fail from insert sub thread, reason: %s\n", taos_errstr(NULL)); + exit(-1); + } } else { t_info->taos = NULL; #ifdef TD_LOWA_CURL @@ -3754,6 +4155,7 @@ void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSu threadInfo *t_info = infos + i; tsem_destroy(&(t_info->lock_sem)); + taos_close(t_info->taos); superTblInfo->totalAffectedRows += t_info->totalAffectedRows; superTblInfo->totalRowsInserted += t_info->totalRowsInserted; @@ -3766,7 +4168,7 @@ void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSu double end = getCurrentTime(); - taos_close(taos); + //taos_close(taos); free(pids); free(infos); @@ -3924,9 +4326,11 @@ int insertTestProcess() { printfInsertMeta(); printfInsertMetaToFile(g_fpOfInsertResult); - printf("Press enter key to continue\n\n"); - (void)getchar(); - + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } + init_rand_data(); // create database and super tables @@ -4093,7 +4497,7 @@ void 
*subQueryProcess(void *sarg) { int queryTestProcess() { TAOS * taos = NULL; taos_init(); - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port); + taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port); if (taos == NULL) { fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); exit(-1); @@ -4104,8 +4508,13 @@ int queryTestProcess() { } printfQueryMeta(); - printf("Press enter key to continue\n\n"); - (void)getchar(); + + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } + + printfQuerySystemInfo(taos); pthread_t *pids = NULL; threadInfo *infos = NULL; @@ -4357,8 +4766,10 @@ void *superSubscribeProcess(void *sarg) { int subscribeTestProcess() { printfQueryMeta(); - printf("Press enter key to continue\n\n"); - (void)getchar(); + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } TAOS * taos = NULL; taos_init(); diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index b50ad85c08..58897b89e9 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/mnode/CMakeLists.txt b/src/mnode/CMakeLists.txt index fffc82c6ef..2df4708c23 100644 --- a/src/mnode/CMakeLists.txt +++ b/src/mnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) diff --git a/src/mnode/inc/mnodeDnode.h b/src/mnode/inc/mnodeDnode.h index 56d7455ad2..fa1995254e 100644 --- a/src/mnode/inc/mnodeDnode.h +++ b/src/mnode/inc/mnodeDnode.h @@ -67,6 +67,7 @@ void mnodeCleanupDnodes(); int32_t mnodeGetDnodesNum(); int32_t mnodeGetOnlinDnodesCpuCoreNum(); int32_t mnodeGetOnlineDnodesNum(); +void mnodeGetOnlineAndTotalDnodesNum(int32_t *onlineNum, int32_t *totalNum); void * mnodeGetNextDnode(void *pIter, SDnodeObj **pDnode); void mnodeCancelGetNextDnode(void *pIter); void mnodeIncDnodeRef(SDnodeObj *pDnode); diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index a853342ab3..9fdbaa7965 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -568,7 +568,7 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn pShow->bytes[cols] = 24 + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - strcpy(pSchema[cols].name, "keep1,keep2,keep(D)"); + strcpy(pSchema[cols].name, "keep0,keep1,keep(D)"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; @@ -679,7 +679,7 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void pShow->pIter = mnodeGetNextDb(pShow->pIter, &pDb); if (pDb == NULL) break; - if (pDb->pAcct != pUser->pAcct) { + if (pDb->pAcct != pUser->pAcct || pDb->status != TSDB_DB_STATUS_READY) { mnodeDecDbRef(pDb); continue; } diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index d76ecd9ba0..304096f3ae 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -263,6 +263,28 @@ int32_t mnodeGetOnlineDnodesNum() { return onlineDnodes; } +void mnodeGetOnlineAndTotalDnodesNum(int32_t *onlineNum, int32_t *totalNum) { + SDnodeObj *pDnode = NULL; + void * pIter = NULL; + int32_t onlineDnodes = 0, totalDnodes = 0; + + while (1) { + pIter = 
mnodeGetNextDnode(pIter, &pDnode); + if (pDnode == NULL) break; + if (pDnode->status != TAOS_DN_STATUS_OFFLINE) ++onlineDnodes; + ++totalDnodes; + mnodeDecDnodeRef(pDnode); + } + + if (onlineNum) { + *onlineNum = onlineDnodes; + } + + if (totalNum) { + *totalNum = totalDnodes; + } +} + void *mnodeGetDnode(int32_t dnodeId) { return sdbGetRow(tsDnodeSdb, &dnodeId); } diff --git a/src/mnode/src/mnodePeer.c b/src/mnode/src/mnodePeer.c index aaf8b69427..9bd8d7e4d7 100644 --- a/src/mnode/src/mnodePeer.c +++ b/src/mnode/src/mnodePeer.c @@ -17,7 +17,6 @@ #include "os.h" #include "taoserror.h" #include "tsched.h" -#include "tsystem.h" #include "tutil.h" #include "tgrant.h" #include "tbn.h" diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 89dc32f03a..17a4282d05 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -356,7 +356,7 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC pShow->bytes[cols] = 8; pSchema[cols].type = TSDB_DATA_TYPE_BIGINT; - strcpy(pSchema[cols].name, "time(us)"); + strcpy(pSchema[cols].name, "time"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 17cfd3e9d4..fe1f70cb50 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -242,11 +242,6 @@ void sdbUpdateMnodeRoles() { mnodeUpdateMnodeEpSet(NULL); } -static uint32_t sdbGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion) { - sdbUpdateMnodeRoles(); - return 0; -} - static int32_t sdbGetWalInfo(int32_t vgId, char *fileName, int64_t *fileId) { return walGetWalFile(tsSdbMgmt.wal, fileName, fileId); } @@ -262,7 +257,9 @@ static void sdbNotifyRole(int32_t vgId, int8_t role) { sdbUpdateMnodeRoles(); } -static int32_t sdbNotifyFileSynced(int32_t vgId, uint64_t fversion) { return 0; } +static void sdbStartFileSync(int32_t vgId) {} + +static void sdbStopFileSync(int32_t vgId, uint64_t fversion) {} static void sdbNotifyFlowCtrl(int32_t vgId, int32_t level) {} @@ -396,14 +393,14 @@ int32_t sdbUpdateSync(void *pMnodes) { syncInfo.version = sdbGetVersion(); syncInfo.syncCfg = syncCfg; sprintf(syncInfo.path, "%s", tsMnodeDir); - syncInfo.getFileInfo = sdbGetFileInfo; - syncInfo.getWalInfo = sdbGetWalInfo; - syncInfo.writeToCache = sdbWriteFwdToQueue; + syncInfo.getWalInfoFp = sdbGetWalInfo; + syncInfo.writeToCacheFp = sdbWriteFwdToQueue; syncInfo.confirmForward = sdbConfirmForward; - syncInfo.notifyRole = sdbNotifyRole; - syncInfo.notifyFileSynced = sdbNotifyFileSynced; - syncInfo.notifyFlowCtrl = sdbNotifyFlowCtrl; - syncInfo.getVersion = sdbGetSyncVersion; + syncInfo.notifyRoleFp = sdbNotifyRole; + syncInfo.startSyncFileFp = sdbStartFileSync; + syncInfo.stopSyncFileFp = sdbStopFileSync; + syncInfo.notifyFlowCtrlFp = sdbNotifyFlowCtrl; + syncInfo.getVersionFp = sdbGetSyncVersion; tsSdbMgmt.cfg = syncCfg; if (tsSdbMgmt.sync) { diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 0377df97fd..4ff2a38dff 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -280,8 +280,11 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { } } - pRsp->onlineDnodes = htonl(mnodeGetOnlineDnodesNum()); - pRsp->totalDnodes = htonl(mnodeGetDnodesNum()); + int32_t onlineDnodes = 0, totalDnodes = 0; + mnodeGetOnlineAndTotalDnodesNum(&onlineDnodes, &totalDnodes); + + pRsp->onlineDnodes = htonl(onlineDnodes); + pRsp->totalDnodes = htonl(totalDnodes); 
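  /* Both counters now come from a single pass over the dnode table
   * (mnodeGetOnlineAndTotalDnodesNum) instead of two separate scans via
   * mnodeGetOnlineDnodesNum() and mnodeGetDnodesNum(); they are converted
   * to network byte order with htonl() before being returned to the shell
   * in the heartbeat response. */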
mnodeGetMnodeEpSetForShell(&pRsp->epSet, false); pMsg->rpcRsp.rsp = pRsp; diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 8928bf0aac..7f463899ce 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1081,20 +1081,13 @@ static int32_t mnodeDropSuperTableCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pTable = (SSTableObj *)pMsg->pTable; if (code != TSDB_CODE_SUCCESS) { mError("msg:%p, app:%p stable:%s, failed to drop, sdb error", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); - } else { - mLInfo("msg:%p, app:%p stable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); + + return code; } - return code; -} - -static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { - if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; + mLInfo("msg:%p, app:%p stable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId); SSTableObj *pStable = (SSTableObj *)pMsg->pTable; - mInfo("msg:%p, app:%p stable:%s will be dropped, hash:%p sizeOfVgList:%d", pMsg, pMsg->rpcMsg.ahandle, - pStable->info.tableId, pStable->vgHash, taosHashGetSize(pStable->vgHash)); - if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) { int32_t *pVgId = taosHashIterate(pStable->vgHash, NULL); while (pVgId) { @@ -1122,6 +1115,16 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { mnodeDropAllChildTablesInStable(pStable); } + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { + if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; + + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + mInfo("msg:%p, app:%p stable:%s will be dropped, hash:%p sizeOfVgList:%d", pMsg, pMsg->rpcMsg.ahandle, + pStable->info.tableId, pStable->vgHash, taosHashGetSize(pStable->vgHash)); + SSdbRow row = { .type = SDB_OPER_GLOBAL, .pTable = tsSuperTableSdb, @@ -1461,9 +1464,9 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, int32_t cols = 0; SSchema *pSchema = pMeta->schema; - SSchema tbnameSchema = tGetTableNameColumnSchema(); - pShow->bytes[cols] = tbnameSchema.bytes; - pSchema[cols].type = tbnameSchema.type; + SSchema* tbnameSchema = tGetTbnameColumnSchema(); + pShow->bytes[cols] = tbnameSchema->bytes; + pSchema[cols].type = tbnameSchema->type; strcpy(pSchema[cols].name, "name"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; @@ -2821,9 +2824,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void int32_t cols = 0; SSchema *pSchema = pMeta->schema; - SSchema s = tGetTableNameColumnSchema(); - pShow->bytes[cols] = s.bytes; - pSchema[cols].type = s.type; + SSchema* s = tGetTbnameColumnSchema(); + pShow->bytes[cols] = s->bytes; + pSchema[cols].type = s->type; strcpy(pSchema[cols].name, "table_name"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; @@ -2840,9 +2843,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; - SSchema tbCol = tGetTableNameColumnSchema(); - pShow->bytes[cols] = tbCol.bytes + VARSTR_HEADER_SIZE; - pSchema[cols].type = tbCol.type; + SSchema* tbCol = tGetTbnameColumnSchema(); + pShow->bytes[cols] = tbCol->bytes + VARSTR_HEADER_SIZE; + pSchema[cols].type = tbCol->type; strcpy(pSchema[cols].name, "stable_name"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; @@ -3076,9 +3079,9 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo int32_t cols = 0; SSchema *pSchema = 
pMeta->schema; - SSchema tbnameColSchema = tGetTableNameColumnSchema(); - pShow->bytes[cols] = tbnameColSchema.bytes; - pSchema[cols].type = tbnameColSchema.type; + SSchema* tbnameColSchema = tGetTbnameColumnSchema(); + pShow->bytes[cols] = tbnameColSchema->bytes; + pSchema[cols].type = tbnameColSchema->type; strcpy(pSchema[cols].name, "table_name"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt index f41719f240..ab8b0f7678 100644 --- a/src/os/CMakeLists.txt +++ b/src/os/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/os/inc/osDarwin.h b/src/os/inc/osDarwin.h index 14b8ccf53c..7c206afe7a 100644 --- a/src/os/inc/osDarwin.h +++ b/src/os/inc/osDarwin.h @@ -85,6 +85,7 @@ extern "C" { #define TAOS_OS_FUNC_STRING_STR2INT64 #define TAOS_OS_FUNC_SYSINFO #define TAOS_OS_FUNC_TIMER +#define TAOS_OS_FUNC_SEMPHONE_PTHREAD // specific #define htobe64 htonll @@ -105,6 +106,8 @@ typedef int(*__compar_fn_t)(const void *, const void *); #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE #endif +#define TAOS_OS_FUNC_PTHREAD_RWLOCK + int64_t tsosStr2int64(char *str); #include "eok.h" @@ -112,7 +115,7 @@ int64_t tsosStr2int64(char *str); void taos_block_sigalrm(void); #define TAOS_OS_DEF_EPOLL - #define TAOS_EPOLL_WAIT_TIME 500 + #define TAOS_EPOLL_WAIT_TIME 500 typedef int32_t SOCKET; typedef SOCKET EpollFd; #define EpollClose(pollFd) epoll_close(pollFd) diff --git a/src/os/inc/osDef.h b/src/os/inc/osDef.h index 04cb8b6e74..cb91b0526b 100644 --- a/src/os/inc/osDef.h +++ b/src/os/inc/osDef.h @@ -26,10 +26,6 @@ extern "C" { #endif #endif -#ifndef STDERR_FILENO -#define STDERR_FILENO (2) -#endif - #define FD_VALID(x) ((x) > STDERR_FILENO) #define FD_INITIALIZER ((int32_t)-1) diff --git a/src/os/inc/osDir.h b/src/os/inc/osDir.h index 67cfdb3b53..540efc310c 100644 --- a/src/os/inc/osDir.h +++ b/src/os/inc/osDir.h @@ -25,8 +25,8 @@ extern "C" { // TAOS_OS_FUNC_DIR void taosRemoveDir(char *rootDir); int taosMkDir(const char *pathname, mode_t mode); -void taosRename(char* oldName, char *newName); void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays); +int32_t taosRename(char* oldName, char *newName); int32_t taosCompressFile(char *srcFileName, char *destFileName); #ifdef __cplusplus diff --git a/src/os/inc/osFile.h b/src/os/inc/osFile.h index 19cc78472c..c117ae4039 100644 --- a/src/os/inc/osFile.h +++ b/src/os/inc/osFile.h @@ -26,6 +26,7 @@ int64_t taosReadImp(int32_t fd, void *buf, int64_t count); int64_t taosWriteImp(int32_t fd, void *buf, int64_t count); int64_t taosLSeekImp(int32_t fd, int64_t offset, int32_t whence); int32_t taosRenameFile(char *fullPath, char *suffix, char delimiter, char **dstPath); +int64_t taosCopy(char *from, char *to); #define taosRead(fd, buf, count) taosReadImp(fd, buf, count) #define taosWrite(fd, buf, count) taosWriteImp(fd, buf, count) diff --git a/src/os/inc/osMemory.h b/src/os/inc/osMemory.h index 439e4cab72..2cf7e14d2f 100644 --- a/src/os/inc/osMemory.h +++ b/src/os/inc/osMemory.h @@ -35,7 +35,7 @@ void taosDumpMemoryLeak(); void * taosTMalloc(size_t size); void * taosTCalloc(size_t nmemb, size_t size); void * taosTRealloc(void *ptr, size_t size); -void taosTZfree(void *ptr); +void * taosTZfree(void *ptr); size_t taosTSizeof(void *ptr); void taosTMemset(void *ptr, int c); diff --git a/src/os/inc/osSemphone.h b/src/os/inc/osSemphone.h index 74e1bd4878..3332a9234b 100644 --- 
a/src/os/inc/osSemphone.h +++ b/src/os/inc/osSemphone.h @@ -28,6 +28,21 @@ extern "C" { #define tsem_destroy sem_destroy #endif +#ifdef TAOS_OS_FUNC_PTHREAD_RWLOCK + #define pthread_rwlock_t pthread_mutex_t + #define pthread_rwlock_init(lock, NULL) pthread_mutex_init(lock, NULL) + #define pthread_rwlock_destroy(lock) pthread_mutex_destroy(lock) + #define pthread_rwlock_wrlock(lock) pthread_mutex_lock(lock) + #define pthread_rwlock_rdlock(lock) pthread_mutex_lock(lock) + #define pthread_rwlock_unlock(lock) pthread_mutex_unlock(lock) + + #define pthread_spinlock_t pthread_mutex_t + #define pthread_spin_init(lock, NULL) pthread_mutex_init(lock, NULL) + #define pthread_spin_destroy(lock) pthread_mutex_destroy(lock) + #define pthread_spin_lock(lock) pthread_mutex_lock(lock) + #define pthread_spin_unlock(lock) pthread_mutex_unlock(lock) +#endif + // TAOS_OS_FUNC_SEMPHONE_PTHREAD bool taosCheckPthreadValid(pthread_t thread); int64_t taosGetSelfPthreadId(); diff --git a/src/os/inc/osSysinfo.h b/src/os/inc/osSysinfo.h index b592a6c679..25c9c97b1e 100644 --- a/src/os/inc/osSysinfo.h +++ b/src/os/inc/osSysinfo.h @@ -21,10 +21,16 @@ extern "C" { #endif // TAOS_OS_FUNC_SYSINFO +typedef struct { + int64_t tsize; + int64_t avail; +} SysDiskSize; + +int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize); void taosGetSystemInfo(); bool taosGetProcIO(float *readKB, float *writeKB); bool taosGetBandSpeed(float *bandSpeedKb); -bool taosGetDisk(); +void taosGetDisk(); bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ; bool taosGetProcMemory(float *memoryUsedMB) ; bool taosGetSysMemory(float *memoryUsedMB); diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index 1f3b1b02e3..a058f2cc99 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -46,6 +46,8 @@ #include "msvcFcntl.h" #include "msvcLibgen.h" #include "msvcStdio.h" +#include "msvcUnistd.h" +#include "msvcLibgen.h" #include "sys/msvcStat.h" #include "sys/msvcTypes.h" @@ -67,6 +69,8 @@ extern "C" { #define TAOS_OS_FUNC_FILE_GETTMPFILEPATH #define TAOS_OS_FUNC_FILE_FTRUNCATE +#define TAOS_OS_FUNC_DIR + #define TAOS_OS_FUNC_MATH #define SWAP(a, b, c) \ do { \ @@ -144,7 +148,6 @@ typedef int (*__compar_fn_t)(const void *, const void *); #define in_addr_t unsigned long #define socklen_t int #define htobe64 htonll -#define getpid _getpid struct tm *localtime_r(const time_t *timep, struct tm *result); char * strptime(const char *buf, const char *fmt, struct tm *tm); @@ -153,15 +156,8 @@ char * getpass(const char *prefix); int flock(int fd, int option); int fsync(int filedes); char * strndup(const char *s, size_t n); -char * dirname(char *pszPathname); int gettimeofday(struct timeval *ptv, void *pTimeZone); -// for access function in io.h -#define F_OK 00 //Existence only -#define W_OK 02 //Write - only -#define R_OK 04 //Read - only -#define X_OK 06 //Read and write - // for send function in tsocket.c #define MSG_NOSIGNAL 0 #define SO_NO_CHECK 0x1234 @@ -201,11 +197,11 @@ int gettimeofday(struct timeval *ptv, void *pTimeZone); typedef struct { int we_wordc; - char **we_wordv; + char *we_wordv[1]; int we_offs; - char wordPos[20]; + char wordPos[1025]; } wordexp_t; -int wordexp(const char *words, wordexp_t *pwordexp, int flags); +int wordexp(char *words, wordexp_t *pwordexp, int flags); void wordfree(wordexp_t *pwordexp); #define openlog(a, b, c) diff --git a/src/os/src/alpine/CMakeLists.txt b/src/os/src/alpine/CMakeLists.txt index daa0b3cf43..b5e739c24c 100644 --- a/src/os/src/alpine/CMakeLists.txt +++ 
b/src/os/src/alpine/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/darwin/CMakeLists.txt b/src/os/src/darwin/CMakeLists.txt index 7f05ddd64b..c4cb28aa05 100644 --- a/src/os/src/darwin/CMakeLists.txt +++ b/src/os/src/darwin/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/darwin/darwinEnv.c b/src/os/src/darwin/darwinEnv.c index da4b32139e..8ac97eacfb 100644 --- a/src/os/src/darwin/darwinEnv.c +++ b/src/os/src/darwin/darwinEnv.c @@ -17,17 +17,27 @@ #include "os.h" #include "tglobal.h" +static const char* expand_like_shell(const char *path) { + static __thread char buf[TSDB_FILENAME_LEN]; + buf[0] = '\0'; + wordexp_t we; + if (wordexp(path, &we, 0)) return "/tmp/taosd"; + if (sizeof(buf)<=snprintf(buf, sizeof(buf), "%s", we.we_wordv[0])) return "/tmp/taosd"; + wordfree(&we); + return buf; +} + void osInit() { if (configDir[0] == 0) { - strcpy(configDir, "~/TDengine/cfg"); + strcpy(configDir, expand_like_shell("/usr/local/etc/taos")); } + strcpy(tsDataDir, expand_like_shell("/usr/local/var/lib/taos")); + strcpy(tsLogDir, expand_like_shell("/usr/local/var/log/taos")); + strcpy(tsScriptDir, expand_like_shell("/usr/local/etc/taos")); strcpy(tsVnodeDir, ""); strcpy(tsDnodeDir, ""); strcpy(tsMnodeDir, ""); - strcpy(tsDataDir, "~/TDengine/data"); - strcpy(tsLogDir, "~/TDengine/log"); - strcpy(tsScriptDir, "~/TDengine/cfg"); strcpy(tsOsName, "Darwin"); } diff --git a/src/os/src/darwin/darwinFile.c b/src/os/src/darwin/darwinFile.c index 1e77cd68d8..4236ea1c5f 100644 --- a/src/os/src/darwin/darwinFile.c +++ b/src/os/src/darwin/darwinFile.c @@ -17,71 +17,49 @@ #include "os.h" #include "tulog.h" -#define _SEND_FILE_STEP_ 1000 - int64_t taosFSendFile(FILE *out_file, FILE *in_file, int64_t *offset, int64_t count) { - fseek(in_file, (int32_t)(*offset), 0); - int writeLen = 0; - uint8_t buffer[_SEND_FILE_STEP_] = {0}; - - for (int len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { - size_t rlen = fread(buffer, 1, _SEND_FILE_STEP_, in_file); - if (rlen <= 0) { - return writeLen; - } else if (rlen < _SEND_FILE_STEP_) { - fwrite(buffer, 1, rlen, out_file); - return (int)(writeLen + rlen); - } else { - fwrite(buffer, 1, _SEND_FILE_STEP_, in_file); - writeLen += _SEND_FILE_STEP_; - } + int r = 0; + if (offset) { + r = fseek(in_file, *offset, SEEK_SET); + if (r==-1) return -1; } - - int remain = count - writeLen; - if (remain > 0) { - size_t rlen = fread(buffer, 1, remain, in_file); - if (rlen <= 0) { - return writeLen; - } else { - fwrite(buffer, 1, remain, out_file); - writeLen += remain; + off_t len = count; + while (len>0) { + char buf[1024*16]; + off_t n = sizeof(buf); + if (len 0) { - int32_t rlen = read(sfd, buffer, (int32_t)remain); - if (rlen <= 0) { - return writeLen; - } - else { - taosWriteSocket(sfd, buffer, (int32_t)remain); - writeLen += remain; - } + off_t len = count; + while (len>0) { + char buf[1024*16]; + off_t n = sizeof(buf); + if (len + // #define SEM_USE_PTHREAD // #define SEM_USE_POSIX #define SEM_USE_SEM @@ -279,3 +281,41 @@ int tsem_destroy(tsem_t *sem) { return 0; } +bool taosCheckPthreadValid(pthread_t thread) { + uint64_t id = 0; + int r = pthread_threadid_np(thread, &id); + return r ? 
false : true; +} + +int64_t taosGetSelfPthreadId() { + return (int64_t)pthread_self(); +} + +int64_t taosGetPthreadId(pthread_t thread) { + return (int64_t)thread; +} + +void taosResetPthread(pthread_t* thread) { + *thread = NULL; +} + +bool taosComparePthread(pthread_t first, pthread_t second) { + return pthread_equal(first, second) ? true : false; +} + +int32_t taosGetPId() { + return (int32_t)getpid(); +} + +int32_t taosGetCurrentAPPName(char* name, int32_t* len) { + char buf[PATH_MAX+1]; + buf[0] = '\0'; + proc_name(getpid(), buf, sizeof(buf)-1); + buf[PATH_MAX] = '\0'; + size_t n = strlen(buf); + if (len) *len = n; + if (name) strcpy(name, buf); + return 0; +} + + diff --git a/src/os/src/darwin/darwinSysInfo.c b/src/os/src/darwin/darwinSysInfo.c index 0eb784e9f0..6af6285f56 100644 --- a/src/os/src/darwin/darwinSysInfo.c +++ b/src/os/src/darwin/darwinSysInfo.c @@ -18,44 +18,140 @@ #include "tconfig.h" #include "tglobal.h" #include "tulog.h" +#include "taoserror.h" +#include +#include + static void taosGetSystemTimezone() { - // get and set default timezone SGlobalCfg *cfg_timezone = taosGetConfigOption("timezone"); - if (cfg_timezone && cfg_timezone->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) { - char *tz = getenv("TZ"); - if (tz == NULL || strlen(tz) == 0) { - strcpy(tsTimezone, "not configured"); - } - else { - strcpy(tsTimezone, tz); - } - cfg_timezone->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT; - uInfo("timezone not configured, use default"); + if (cfg_timezone == NULL) return; + if (cfg_timezone->cfgStatus >= TAOS_CFG_CSTATUS_DEFAULT) { + return; } + + /* load time zone string from /etc/localtime */ + char buf[4096]; + char *tz = NULL; { + int n = readlink("/etc/localtime", buf, sizeof(buf)); + if (n<0) { + uError("read /etc/localtime error, reason:%s", strerror(errno)); + return; + } + buf[n] = '\0'; + for(int i=n-1; i>=0; --i) { + if (buf[i]=='/') { + if (tz) { + tz = buf + i + 1; + break; + } + tz = buf + i + 1; + } + } + if (!tz || 0==strchr(tz, '/')) { + uError("parsing /etc/localtime failed"); + return; + } + + setenv("TZ", tz, 1); + tzset(); + } + + /* + * NOTE: do not remove it. + * Enforce set the correct daylight saving time(DST) flag according + * to current time + */ + time_t tx1 = time(NULL); + struct tm tm1; + localtime_r(&tx1, &tm1); + + /* + * format example: + * + * Asia/Shanghai (CST, +0800) + * Europe/London (BST, +0100) + */ + snprintf(tsTimezone, TSDB_TIMEZONE_LEN, "%s (%s, %+03ld00)", + tz, tm1.tm_isdst ? tzname[daylight] : tzname[0], -timezone/3600); + + // cfg_timezone->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT; + uWarn("timezone not configured, set to system default:%s", tsTimezone); } -static void taosGetSystemLocale() { - // get and set default locale +/* + * originally from src/os/src/detail/osSysinfo.c + * POSIX format locale string: + * (Language Strings)_(Country/Region Strings).(code_page) + * + * example: en_US.UTF-8, zh_CN.GB18030, zh_CN.UTF-8, + * + * if user does not specify the locale in taos.cfg the program use default LC_CTYPE as system locale. + * + * In case of some CentOS systems, their default locale is "en_US.utf8", which is not valid code_page + * for libiconv that is employed to convert string in this system. This program will automatically use + * UTF-8 instead as the charset. + * + * In case of windows client, the locale string is not valid POSIX format, user needs to set the + * correct code_page for libiconv. Usually, the code_page of windows system with simple chinese is + * CP936, CP437 for English charset. 
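 *
 * On this Darwin build the charset is derived from the locale string by
 * taking everything after the last '.' separator (see the strrchr(tsLocale,
 * sep) call below, normalised by taosCharsetReplace); e.g. "UTF-8" from
 * "en_US.UTF-8". If the locale carries no '.' at all, the code falls back
 * to UTF-8.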
+ * + */ +static void taosGetSystemLocale() { // get and set default locale + char sep = '.'; + char *locale = NULL; + SGlobalCfg *cfg_locale = taosGetConfigOption("locale"); if (cfg_locale && cfg_locale->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) { - char *locale = setlocale(LC_CTYPE, "chs"); - if (locale != NULL) { + locale = setlocale(LC_CTYPE, ""); + if (locale == NULL) { + uError("can't get locale from system, set it to en_US.UTF-8 since error:%d:%s", errno, strerror(errno)); + strcpy(tsLocale, "en_US.UTF-8"); + } else { tstrncpy(tsLocale, locale, TSDB_LOCALE_LEN); - cfg_locale->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT; - uInfo("locale not configured, set to default:%s", tsLocale); + uWarn("locale not configured, set to system default:%s", tsLocale); } } + /* if user does not specify the charset, extract it from locale */ SGlobalCfg *cfg_charset = taosGetConfigOption("charset"); if (cfg_charset && cfg_charset->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) { - strcpy(tsCharset, "cp936"); - cfg_charset->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT; - uInfo("charset not configured, set to default:%s", tsCharset); + char *str = strrchr(tsLocale, sep); + if (str != NULL) { + str++; + + char *revisedCharset = taosCharsetReplace(str); + tstrncpy(tsCharset, revisedCharset, TSDB_LOCALE_LEN); + + free(revisedCharset); + uWarn("charset not configured, set to system default:%s", tsCharset); + } else { + strcpy(tsCharset, "UTF-8"); + uWarn("can't get locale and charset from system, set it to UTF-8"); + } } } -void taosPrintOsInfo() {} +void taosPrintOsInfo() { + uInfo(" os pageSize: %" PRId64 "(KB)", tsPageSize / 1024); + // uInfo(" os openMax: %" PRId64, tsOpenMax); + // uInfo(" os streamMax: %" PRId64, tsStreamMax); + uInfo(" os numOfCores: %d", tsNumOfCores); + uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); + uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); + + struct utsname buf; + if (uname(&buf)) { + uInfo(" can't fetch os info"); + return; + } + uInfo(" os sysname: %s", buf.sysname); + uInfo(" os nodename: %s", buf.nodename); + uInfo(" os release: %s", buf.release); + uInfo(" os version: %s", buf.version); + uInfo(" os machine: %s", buf.machine); + uInfo("=================================="); +} void taosKillSystem() { uError("function taosKillSystem, exit!"); @@ -63,12 +159,26 @@ void taosKillSystem() { } void taosGetSystemInfo() { + // taosGetProcInfos(); + + tsNumOfCores = sysconf(_SC_NPROCESSORS_ONLN); + long physical_pages = sysconf(_SC_PHYS_PAGES); + long page_size = sysconf(_SC_PAGESIZE); + tsTotalMemoryMB = physical_pages * page_size / (1024 * 1024); + tsPageSize = page_size; + + // float tmp1, tmp2; + // taosGetSysMemory(&tmp1); + // taosGetProcMemory(&tmp2); + // taosGetDisk(); + // taosGetBandSpeed(&tmp1); + // taosGetCpuUsage(&tmp1, &tmp2); + // taosGetProcIO(&tmp1, &tmp2); + taosGetSystemTimezone(); taosGetSystemLocale(); } -bool taosGetDisk() { return true; } - bool taosGetProcIO(float *readKB, float *writeKB) { *readKB = 0; *writeKB = 0; @@ -103,14 +213,37 @@ int taosSystem(const char *cmd) { void taosSetCoreDump() {} +int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { + struct statvfs info; + if (statvfs(tsDataDir, &info)) { + uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } else { + diskSize->tsize = info.f_blocks * info.f_frsize; + diskSize->avail = info.f_bavail * info.f_frsize; + return 0; + } +} + +char cmdline[1024]; + char *taosGetCmdlineByPID(int pid) { - return "[not supported yet]"; + 
errno = 0; + + if (proc_pidpath(pid, cmdline, sizeof(cmdline)) <= 0) { + fprintf(stderr, "PID is %d, %s", pid, strerror(errno)); + return strerror(errno); + } + + return cmdline; } bool taosGetSystemUid(char *uid) { uuid_t uuid = {0}; uuid_generate(uuid); // it's caller's responsibility to make enough space for `uid`, that's 36-char + 1-null - uuid_unparse(uuid, uid); + uuid_unparse_lower(uuid, uid); return true; } + diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index 1c5e55a522..facfbd23af 100644 --- a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(.) diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c index 4f2985548c..144c59fa2d 100644 --- a/src/os/src/detail/osDir.c +++ b/src/os/src/detail/osDir.c @@ -51,19 +51,22 @@ int taosMkDir(const char *path, mode_t mode) { return code; } -void taosRename(char* oldName, char *newName) { - // if newName in not empty, rename return fail. - // the newName must be empty or does not exist -#ifdef WINDOWS - remove(newName); -#endif - if (rename(oldName, newName)) { + +#ifndef TAOS_OS_FUNC_DIR + +int32_t taosRename(char* oldName, char *newName) { + int32_t code = rename(oldName, newName); + if (code < 0) { uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); } else { - uInfo("successfully to rename file %s to %s", oldName, newName); + uTrace("successfully to rename file %s to %s", oldName, newName); } + + return code; } +#endif + void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays) { DIR *dir = opendir(rootDir); if (dir == NULL) return; diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index bb68622731..0b7b5ca487 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -119,6 +119,42 @@ int64_t taosLSeekImp(int32_t fd, int64_t offset, int32_t whence) { return (int64_t)lseek(fd, (long)offset, whence); } +int64_t taosCopy(char *from, char *to) { + char buffer[4096]; + int fidto = -1, fidfrom = -1; + int64_t size = 0; + int64_t bytes; + + fidfrom = open(from, O_RDONLY | O_BINARY); + if (fidfrom < 0) goto _err; + + fidto = open(to, O_WRONLY | O_CREAT | O_EXCL | O_BINARY, 0755); + if (fidto < 0) goto _err; + + while (true) { + bytes = taosRead(fidfrom, buffer, sizeof(buffer)); + if (bytes < 0) goto _err; + if (bytes == 0) break; + + size += bytes; + + if (taosWrite(fidto, (void *)buffer, bytes) < bytes) goto _err; + if (bytes < sizeof(buffer)) break; + } + + fsync(fidto); + + close(fidfrom); + close(fidto); + return size; + +_err: + if (fidfrom >= 0) close(fidfrom); + if (fidto >= 0) close(fidto); + remove(to); + return -1; +} + #ifndef TAOS_OS_FUNC_FILE_SENDIFLE int64_t taosSendFile(SOCKET dfd, int32_t sfd, int64_t *offset, int64_t size) { diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index 53310d179c..291a54b669 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -512,8 +512,9 @@ void * taosTRealloc(void *ptr, size_t size) { return (void *)((char *)tptr + sizeof(size_t)); } -void taosTZfree(void *ptr) { +void* taosTZfree(void* ptr) { if (ptr) { - free((void *)((char *)ptr - sizeof(size_t))); + free((void*)((char*)ptr - sizeof(size_t))); } + return NULL; } \ No newline at end of file diff --git a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemphone.c index d379e56ed8..1d05ce20a5 100644 --- 
a/src/os/src/detail/osSemphone.c +++ b/src/os/src/detail/osSemphone.c @@ -62,4 +62,4 @@ int32_t taosGetCurrentAPPName(char *name, int32_t* len) { return 0; } -#endif \ No newline at end of file +#endif diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index 360e99bb8f..f12ec93bf7 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -18,6 +18,7 @@ #include "tconfig.h" #include "tglobal.h" #include "tulog.h" +#include "taoserror.h" #ifndef TAOS_OS_FUNC_SYSINFO @@ -316,37 +317,17 @@ bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { return true; } -bool taosGetDisk() { +int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { struct statvfs info; - const double unit = 1024 * 1024 * 1024; - - if (tscEmbedded) { - if (statvfs(tsDataDir, &info)) { - uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno)); - return false; - } else { - tsTotalDataDirGB = (float)((double)info.f_blocks * (double)info.f_frsize / unit); - tsAvailDataDirGB = (float)((double)info.f_bavail * (double)info.f_frsize / unit); - } - } - - if (statvfs(tsLogDir, &info)) { - uError("failed to get disk size, logDir:%s errno:%s", tsLogDir, strerror(errno)); - return false; + if (statvfs(tsDataDir, &info)) { + uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; } else { - tsTotalLogDirGB = (float)((double)info.f_blocks * (double)info.f_frsize / unit); - tsAvailLogDirGB = (float)((double)info.f_bavail * (double)info.f_frsize / unit); + diskSize->tsize = info.f_blocks * info.f_frsize; + diskSize->avail = info.f_bavail * info.f_frsize; + return 0; } - - if (statvfs("/tmp", &info)) { - uError("failed to get disk size, tmpDir:/tmp errno:%s", strerror(errno)); - return false; - } else { - tsTotalTmpDirGB = (float)((double)info.f_blocks * (double)info.f_frsize / unit); - tsAvailTmpDirectorySpace = (float)((double)info.f_bavail * (double)info.f_frsize / unit); - } - - return true; } static bool taosGetCardInfo(int64_t *bytes) { @@ -510,7 +491,7 @@ void taosGetSystemInfo() { float tmp1, tmp2; taosGetSysMemory(&tmp1); taosGetProcMemory(&tmp2); - taosGetDisk(); + // taosGetDisk(); taosGetBandSpeed(&tmp1); taosGetCpuUsage(&tmp1, &tmp2); taosGetProcIO(&tmp1, &tmp2); @@ -537,7 +518,6 @@ void taosPrintOsInfo() { uInfo(" os release: %s", buf.release); uInfo(" os version: %s", buf.version); uInfo(" os machine: %s", buf.machine); - uInfo("=================================="); } void taosKillSystem() { diff --git a/src/os/src/linux/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt index 8ab8f55467..b1a7ebf54e 100644 --- a/src/os/src/linux/CMakeLists.txt +++ b/src/os/src/linux/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/windows/CMakeLists.txt b/src/os/src/windows/CMakeLists.txt index a430dd3b3f..9dcc9e7e6d 100644 --- a/src/os/src/windows/CMakeLists.txt +++ b/src/os/src/windows/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. 
SRC) diff --git a/src/os/src/windows/wFile.c b/src/os/src/windows/wFile.c index 4ad195dc6f..3bfe1c1b5d 100644 --- a/src/os/src/windows/wFile.c +++ b/src/os/src/windows/wFile.c @@ -160,7 +160,6 @@ int32_t taosFtruncate(int32_t fd, int64_t l_size) { return 0; } - int fsync(int filedes) { if (filedes < 0) { errno = EBADF; @@ -172,3 +171,14 @@ int fsync(int filedes) { return FlushFileBuffers(h); } + +int32_t taosRename(char* oldName, char *newName) { + int32_t code = MoveFileEx(oldName, newName, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED); + if (code < 0) { + uError("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); + } else { + uTrace("successfully to rename file %s to %s", oldName, newName); + } + + return code; +} \ No newline at end of file diff --git a/src/os/src/windows/wString.c b/src/os/src/windows/wString.c index 1fb235a005..67237e655c 100644 --- a/src/os/src/windows/wString.c +++ b/src/os/src/windows/wString.c @@ -75,18 +75,6 @@ char *getpass(const char *prefix) { return passwd; } -char *strndup(const char *s, size_t n) { - size_t len = strlen(s); - if (len >= n) { - len = n; - } - - char *r = calloc(len + 1, 1); - memcpy(r, s, len); - r[len] = 0; - return r; -} - int twcslen(const wchar_t *wcs) { int *wstr = (int *)wcs; if (NULL == wstr) { diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c index 082aaaf5d8..48fb3c13a8 100644 --- a/src/os/src/windows/wSysinfo.c +++ b/src/os/src/windows/wSysinfo.c @@ -21,6 +21,7 @@ #include "ttimer.h" #include "tulog.h" #include "tutil.h" +#include "taoserror.h" #if (_WIN64) #include #include @@ -126,37 +127,22 @@ bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { return true; } -bool taosGetDisk() { - const double unit = 1024 * 1024 * 1024; - BOOL fResult; +int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { unsigned _int64 i64FreeBytesToCaller; unsigned _int64 i64TotalBytes; unsigned _int64 i64FreeBytes; - if (tscEmbedded) { - fResult = GetDiskFreeSpaceExA(tsDataDir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, - (PULARGE_INTEGER)&i64FreeBytes); - if (fResult) { - tsTotalDataDirGB = (float)(i64TotalBytes / unit); - tsAvailDataDirGB = (float)(i64FreeBytes / unit); - } - } - - fResult = GetDiskFreeSpaceExA(tsLogDir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, - (PULARGE_INTEGER)&i64FreeBytes); + BOOL fResult = GetDiskFreeSpaceExA(dataDir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, + (PULARGE_INTEGER)&i64FreeBytes); if (fResult) { - tsTotalLogDirGB = (float)(i64TotalBytes / unit); - tsAvailLogDirGB = (float)(i64FreeBytes / unit); + diskSize->tsize = (int64_t)(i64TotalBytes); + diskSize->avail = (int64_t)(i64FreeBytes); + return 0; + } else { + uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; } - - fResult = GetDiskFreeSpaceExA(tsTempDir, (PULARGE_INTEGER)&i64FreeBytesToCaller, (PULARGE_INTEGER)&i64TotalBytes, - (PULARGE_INTEGER)&i64FreeBytes); - if (fResult) { - tsTotalTmpDirGB = (float)(i64TotalBytes / unit); - tsAvailTmpDirectorySpace = (float)(i64FreeBytes / unit); - } - - return true; } bool taosGetBandSpeed(float *bandSpeedKb) { @@ -207,7 +193,7 @@ void taosGetSystemInfo() { tsTotalMemoryMB = taosGetTotalMemory(); float tmp1, tmp2; - taosGetDisk(); + // taosGetDisk(); taosGetBandSpeed(&tmp1); taosGetCpuUsage(&tmp1, &tmp2); taosGetProcIO(&tmp1, &tmp2); diff --git a/src/os/src/windows/wWordexp.c 
b/src/os/src/windows/wWordexp.c index bb9acde25a..febe22ac8f 100644 --- a/src/os/src/windows/wWordexp.c +++ b/src/os/src/windows/wWordexp.c @@ -21,13 +21,20 @@ #include "tulog.h" #include "tutil.h" -int wordexp(const char *words, wordexp_t *pwordexp, int flags) { +int wordexp(char *words, wordexp_t *pwordexp, int flags) { pwordexp->we_offs = 0; pwordexp->we_wordc = 1; - pwordexp->we_wordv = (char **)(pwordexp->wordPos); - pwordexp->we_wordv[0] = (char *)words; + pwordexp->we_wordv[0] = pwordexp->wordPos; + + memset(pwordexp->wordPos, 0, 1025); + if (_fullpath(pwordexp->wordPos, words, 1024) == NULL) { + pwordexp->we_wordv[0] = words; + uError("failed to parse relative path:%s to abs path", words); + return -1; + } + + uTrace("parse relative path:%s to abs path:%s", words, pwordexp->wordPos); return 0; } void wordfree(wordexp_t *pwordexp) {} - diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index e66997dc8e..7dcaaf27e6 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) ADD_SUBDIRECTORY(monitor) diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt index 42016b8645..bfb47ad12e 100644 --- a/src/plugins/http/CMakeLists.txt +++ b/src/plugins/http/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c index 37eef2bfad..063f2bb04e 100644 --- a/src/plugins/http/src/httpResp.c +++ b/src/plugins/http/src/httpResp.c @@ -74,7 +74,7 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) { httpCode = 404; else if (errNo == TSDB_CODE_HTTP_UNSUPPORT_URL) httpCode = 404; - else if (errNo == TSDB_CODE_HTTP_INVLALID_URL) + else if (errNo == TSDB_CODE_HTTP_INVALID_URL) httpCode = 404; else if (errNo == TSDB_CODE_HTTP_NO_ENOUGH_MEMORY) httpCode = 507; diff --git a/src/plugins/http/src/httpRestHandle.c b/src/plugins/http/src/httpRestHandle.c index 8999fb879e..8728b9e37a 100644 --- a/src/plugins/http/src/httpRestHandle.c +++ b/src/plugins/http/src/httpRestHandle.c @@ -141,6 +141,6 @@ bool restProcessRequest(struct HttpContext* pContext) { } else { } - httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVLALID_URL); + httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVALID_URL); return false; } diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c index baa61117be..a620625d25 100644 --- a/src/plugins/http/src/httpRestJson.c +++ b/src/plugins/http/src/httpRestJson.c @@ -35,7 +35,7 @@ void restBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int32_t // data row array end httpJsonToken(jsonBuf, JsonArrEnd); - cmd->numOfRows = affect_rows; + cmd->numOfRows = 1; } void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) { diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c index 4721451529..203db21895 100644 --- a/src/plugins/http/src/httpSystem.c +++ b/src/plugins/http/src/httpSystem.c @@ -92,6 +92,11 @@ void httpStopSystem() { tsHttpServer.stop = 1; #ifdef WINDOWS closesocket(tsHttpServer.fd); +#elif __APPLE__ + if (tsHttpServer.fd!=-1) { + close(tsHttpServer.fd); + tsHttpServer.fd = -1; + } #else shutdown(tsHttpServer.fd, SHUT_RD); #endif diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index 
a84ae9f617..399a33954d 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -239,6 +239,10 @@ JsonBuf *httpMallocJsonBuf(HttpContext *pContext) { pContext->jsonBuf = (JsonBuf *)malloc(sizeof(JsonBuf)); } + if (!pContext->jsonBuf->pContext) { + pContext->jsonBuf->pContext = pContext; + } + return pContext->jsonBuf; } diff --git a/src/plugins/monitor/CMakeLists.txt b/src/plugins/monitor/CMakeLists.txt index abab07e0cd..28c62a099c 100644 --- a/src/plugins/monitor/CMakeLists.txt +++ b/src/plugins/monitor/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index f61298fb83..424ab0f216 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -20,7 +20,6 @@ #include "tlog.h" #include "ttimer.h" #include "tutil.h" -#include "tsystem.h" #include "tscUtil.h" #include "tsclient.h" #include "dnode.h" diff --git a/src/plugins/mqtt/CMakeLists.txt b/src/plugins/mqtt/CMakeLists.txt index b6de421517..50b0bbe8af 100644 --- a/src/plugins/mqtt/CMakeLists.txt +++ b/src/plugins/mqtt/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt index 967e86de3c..f23ac7dd86 100644 --- a/src/query/CMakeLists.txt +++ b/src/query/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 53af502e27..7122f63593 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -59,30 +59,32 @@ extern "C" { #define TSDB_FUNC_FIRST_DST 25 #define TSDB_FUNC_LAST_DST 26 -#define TSDB_FUNC_INTERP 27 +#define TSDB_FUNC_STDDEV_DST 27 +#define TSDB_FUNC_INTERP 28 -#define TSDB_FUNC_RATE 28 -#define TSDB_FUNC_IRATE 29 -#define TSDB_FUNC_SUM_RATE 30 -#define TSDB_FUNC_SUM_IRATE 31 -#define TSDB_FUNC_AVG_RATE 32 -#define TSDB_FUNC_AVG_IRATE 33 +#define TSDB_FUNC_RATE 29 +#define TSDB_FUNC_IRATE 30 +#define TSDB_FUNC_SUM_RATE 31 +#define TSDB_FUNC_SUM_IRATE 32 +#define TSDB_FUNC_AVG_RATE 33 +#define TSDB_FUNC_AVG_IRATE 34 + +#define TSDB_FUNC_TID_TAG 35 +#define TSDB_FUNC_HISTOGRAM 36 +#define TSDB_FUNC_HLL 37 +#define TSDB_FUNC_MODE 38 +#define TSDB_FUNC_SAMPLE 39 +#define TSDB_FUNC_CEIL 40 +#define TSDB_FUNC_FLOOR 41 +#define TSDB_FUNC_ROUND 42 +#define TSDB_FUNC_MAVG 43 +#define TSDB_FUNC_CSUM 44 -#define TSDB_FUNC_TID_TAG 34 -#define TSDB_FUNC_HISTOGRAM 35 -#define TSDB_FUNC_HLL 36 -#define TSDB_FUNC_MODE 37 -#define TSDB_FUNC_SAMPLE 38 -#define TSDB_FUNC_CEIL 39 -#define TSDB_FUNC_FLOOR 40 -#define TSDB_FUNC_ROUND 41 -#define TSDB_FUNC_MAVG 42 -#define TSDB_FUNC_CSUM 43 #define TSDB_FUNCSTATE_SO 0x1u // single output #define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM #define TSDB_FUNCSTATE_STREAM 0x4u // function avail for stream -#define TSDB_FUNCSTATE_STABLE 0x8u // function avail for metric +#define TSDB_FUNCSTATE_STABLE 0x8u // function avail for super table #define TSDB_FUNCSTATE_OF 0x10u // outer forward #define TSDB_FUNCSTATE_NEED_TS 0x20u // timestamp is required during query processing #define TSDB_FUNCSTATE_SELECTIVITY 0x40u // selectivity functions, can exists along with tag columns 
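Note on the qAggMain.h hunk above: it only renumbers the function ids to make room for the new TSDB_FUNC_STDDEV_DST (27) and rewords the TSDB_FUNCSTATE_* comments; the bit semantics are unchanged. As a reminder of how those status bits are combined and tested, here is a minimal standalone sketch. The constants are copied from the header, the check macros mirror IS_SINGLEOUTPUT / IS_OUTER_FORWARD defined a few hunks below, and the status value itself is a made-up example, not taken from aAggs[].

#include <stdio.h>
#include <stdint.h>

/* values copied from qAggMain.h */
#define TSDB_FUNCSTATE_SO          0x1u   /* single output */
#define TSDB_FUNCSTATE_MO          0x2u   /* dynamic number of output rows */
#define TSDB_FUNCSTATE_STREAM      0x4u   /* usable in stream computing */
#define TSDB_FUNCSTATE_STABLE      0x8u   /* usable in super table query */
#define TSDB_FUNCSTATE_OF          0x10u  /* outer forward */
#define TSDB_FUNCSTATE_NEED_TS     0x20u  /* timestamps required during processing */
#define TSDB_FUNCSTATE_SELECTIVITY 0x40u  /* selectivity function */

#define TSDB_BASE_FUNC_SO (TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF)

#define IS_SINGLEOUTPUT(x)  (((x) & TSDB_FUNCSTATE_SO) != 0)
#define IS_OUTER_FORWARD(x) (((x) & TSDB_FUNCSTATE_OF) != 0)

int main(void) {
  /* hypothetical status word of a count-like aggregate */
  uint16_t status = TSDB_BASE_FUNC_SO;

  printf("single output : %d\n", IS_SINGLEOUTPUT(status));                 /* 1 */
  printf("outer forward : %d\n", IS_OUTER_FORWARD(status));                /* 1 */
  printf("needs ts      : %d\n", (status & TSDB_FUNCSTATE_NEED_TS) != 0);  /* 0 */
  return 0;
}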
@@ -90,15 +92,12 @@ extern "C" { #define TSDB_BASE_FUNC_SO TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF #define TSDB_BASE_FUNC_MO TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF - #define TSDB_FUNCTIONS_NAME_MAX_LENGTH 16 #define TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE 50 #define DATA_SET_FLAG ',' // to denote the output area has data, not null value #define DATA_SET_FLAG_SIZE sizeof(DATA_SET_FLAG) - - #define QUERY_ASC_FORWARD_STEP 1 #define QUERY_DESC_FORWARD_STEP -1 @@ -167,8 +166,8 @@ typedef struct SExtTagsInfo { // sql function runtime context typedef struct SQLFunctionCtx { - int32_t startOffset; int32_t size; // number of rows + void * pInput; // input data buffer uint32_t order; // asc|desc int16_t inputType; int16_t inputBytes; @@ -177,15 +176,14 @@ typedef struct SQLFunctionCtx { int16_t outputBytes; // size of results, determined by function and input column data type int32_t interBufBytes; // internal buffer size bool hasNull; // null value exist in current block - bool requireNull; // require null in some function + bool requireNull; // require null in some function bool stableQuery; - int16_t functionId; // function id - void * aInputElemBuf; - char * aOutputBuf; // final result output buffer, point to sdata->data - uint8_t currentStage; // record current running step, default: 0 - int64_t nStartQueryTimestamp; // timestamp range of current query when function is executed on a specific data block + int16_t functionId; // function id + char * pOutput; // final result output buffer, point to sdata->data + uint8_t currentStage; // record current running step, default: 0 + int64_t startTs; // timestamp range of current query when function is executed on a specific data block int32_t numOfParams; - tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param */ + tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param int64_t *ptsList; // corresponding timestamp array list void *ptsOutputBuf; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ SQLPreAggVal preAggVals; @@ -198,17 +196,16 @@ typedef struct SQLFunctionCtx { SPoint1 end; } SQLFunctionCtx; -typedef struct SQLAggFuncElem { - char aName[TSDB_FUNCTIONS_NAME_MAX_LENGTH]; - - uint8_t nAggIdx; // index of function in aAggs +typedef struct SAggFunctionInfo { + char name[TSDB_FUNCTIONS_NAME_MAX_LENGTH]; + uint8_t index; // index of function in aAggs int8_t stableFuncId; // transfer function for super table query - uint16_t nStatus; + uint16_t status; bool (*init)(SQLFunctionCtx *pCtx); // setup the execute environment void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function - void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version + void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function // some sql function require scan data twice or more, e.g.,stddev, percentile void (*xNextStep)(SQLFunctionCtx *pCtx); @@ -218,7 +215,7 @@ typedef struct SQLAggFuncElem { void (*mergeFunc)(SQLFunctionCtx *pCtx); int32_t (*dataReqFunc)(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId); -} SQLAggFuncElem; +} SAggFunctionInfo; #define GET_RES_INFO(ctx) ((ctx)->resultInfo) @@ -230,7 +227,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI #define IS_SINGLEOUTPUT(x) 
(((x)&TSDB_FUNCSTATE_SO) != 0) #define IS_OUTER_FORWARD(x) (((x)&TSDB_FUNCSTATE_OF) != 0) -/* determine the real data need to calculated the result */ +// determine the real data need to calculated the result enum { BLK_DATA_NO_NEEDED = 0x0, BLK_DATA_STATIS_NEEDED = 0x1, @@ -246,7 +243,7 @@ typedef struct STwaInfo { } STwaInfo; /* global sql function array */ -extern struct SQLAggFuncElem aAggs[]; +extern struct SAggFunctionInfo aAggs[]; extern int32_t functionCompatList[]; // compatible check array list diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 87c16f2ee4..ec1261da0a 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -33,6 +33,36 @@ struct SColumnFilterElem; typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, const char* val1, const char* val2, int16_t type); typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order); +#define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) +#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u) +#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP) + +#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) +#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) + +#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) + +enum { + // when query starts to execute, this status will set + QUERY_NOT_COMPLETED = 0x1u, + + /* result output buffer is full, current query is paused. + * this status is only exist in group-by clause and diff/add/division/multiply/ query. + */ + QUERY_RESBUF_FULL = 0x2u, + + /* query is over + * 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc. + * 2. 
when all data within queried time window, it is also denoted as query_completed + */ + QUERY_COMPLETED = 0x4u, + + /* when the result is not completed return to client, this status will be + * usually used in case of interval query with interpolation option + */ + QUERY_OVER = 0x8u, +}; + typedef struct SResultRowPool { int32_t elemSize; int32_t blockSize; @@ -66,7 +96,8 @@ typedef struct SResultRow { } SResultRow; typedef struct SGroupResInfo { - int32_t rowId; + int32_t totalGroup; + int32_t currentGroup; int32_t index; SArray* pRows; // SArray } SGroupResInfo; @@ -112,7 +143,7 @@ typedef struct STableQueryInfo { STimeWindow win; STSCursor cur; void* pTable; // for retrieve the page id list - SResultRowInfo windowResInfo; + SResultRowInfo resInfo; } STableQueryInfo; typedef struct SQueryCostInfo { @@ -143,6 +174,11 @@ typedef struct { int64_t ts; } SOrderedPrjQueryInfo; +typedef struct { + char* tags; + SArray* pResult; // SArray +} SInterResult; + typedef struct SQuery { int16_t numOfCols; int16_t numOfTags; @@ -152,9 +188,14 @@ typedef struct SQuery { int16_t precision; int16_t numOfOutput; int16_t fillType; - int16_t checkResultBuf; // check if the buffer is full during scan each block + int16_t checkResultBuf; // check if the buffer is full during scan each block SLimitVal limit; - int32_t rowSize; + + int32_t srcRowSize; // todo extract struct + int32_t resultRowSize; + int32_t maxSrcColumnSize; + int32_t tagLen; // tag value length of current query + SSqlGroupbyExpr* pGroupbyExpr; SExprInfo* pExpr1; SExprInfo* pExpr2; @@ -183,19 +224,19 @@ typedef struct SQueryRuntimeEnv { uint16_t* offset; uint16_t scanFlag; // denotes reversed scan of data or not SFillInfo* pFillInfo; - SResultRowInfo windowResInfo; - STSBuf* pTsBuf; - STSCursor cur; + SResultRowInfo resultRowInfo; + SQueryCostInfo summary; void* pQueryHandle; void* pSecQueryHandle; // another thread for bool stableQuery; // super table query or not bool topBotQuery; // TODO used bitwise flag - bool groupbyColumn; // denote if this is a groupby normal column query + bool groupbyColumn; // denote if this is a groupby normal column query bool hasTagResults; // if there are tag values in final result or not bool timeWindowInterpo;// if the time window start/end required interpolation bool queryWindowIdentical; // all query time windows are identical for all tables in one group - bool queryBlockDist; // if query data block distribution + bool queryBlockDist; // if query data block distribution + bool stabledev; // super table stddev query int32_t interBufSize; // intermediate buffer sizse int32_t prevGroupId; // previous executed group id SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file @@ -205,8 +246,12 @@ typedef struct SQueryRuntimeEnv { int32_t* rowCellInfoOffset;// offset value for each row result cell info char** prevRow; - char** nextRow; + SArray* prevResult; // intermediate result, SArray + STSBuf* pTsBuf; // timestamp filter list + STSCursor cur; + + char* tagVal; // tag value of current data block SArithmeticSupport *sasArray; } SQueryRuntimeEnv; @@ -244,4 +289,51 @@ typedef struct SQInfo { char* sql; // query sql string } SQInfo; +typedef struct SQueryParam { + char *sql; + char *tagCond; + char *tbnameCond; + char *prevResult; + SArray *pTableIdList; + SSqlFuncMsg **pExprMsg; + SSqlFuncMsg **pSecExprMsg; + SExprInfo *pExprs; + SExprInfo *pSecExprs; + + SColIndex *pGroupColIndex; + SColumnInfo *pTagColumnInfo; + SSqlGroupbyExpr *pGroupbyExpr; +} SQueryParam; + +void 
freeParam(SQueryParam *param); +int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param); +int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, + SColumnInfo* pTagCols); +SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code); +SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, + SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* sql); +int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable); +void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters); + +bool isQueryKilled(SQInfo *pQInfo); +int32_t checkForQueryBuf(size_t numOfTables); +bool doBuildResCheck(SQInfo* pQInfo); +void setQueryStatus(SQuery *pQuery, int8_t status); + +bool onlyQueryTags(SQuery* pQuery); +void buildTagQueryResult(SQInfo *pQInfo); +void stableQueryImpl(SQInfo *pQInfo); +void buildTableBlockDistResult(SQInfo *pQInfo); +void tableQueryImpl(SQInfo *pQInfo); +bool isValidQInfo(void *param); + +int32_t doDumpQueryResult(SQInfo *pQInfo, char *data); + +size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows); +void setQueryKilled(SQInfo *pQInfo); +void queryCostStatis(SQInfo *pQInfo); +void freeQInfo(SQInfo *pQInfo); + +int32_t getMaximumIdleDurationSec(); + #endif // TDENGINE_QUERYEXECUTOR_H diff --git a/src/query/inc/qFill.h b/src/query/inc/qFill.h index 9b7f0fb529..aa6df9279a 100644 --- a/src/query/inc/qFill.h +++ b/src/query/inc/qFill.h @@ -68,7 +68,7 @@ typedef struct SPoint { void * val; } SPoint; -SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, +SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol, void* handle); @@ -78,7 +78,7 @@ void* taosDestroyFillInfo(SFillInfo *pFillInfo); void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey); -void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput); +void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput); void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput); diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 27bf7f2d96..d4a0c25886 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -15,6 +15,8 @@ #ifndef TDENGINE_QUERYUTIL_H #define TDENGINE_QUERYUTIL_H +#include "tbuffer.h" + #define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \ do { \ assert(sizeof(_uid) == sizeof(uint64_t)); \ @@ -74,5 +76,21 @@ int32_t getNumOfUsedResultRows(SResultRowPool* p); bool isPointInterpoQuery(SQuery *pQuery); +typedef struct { + SArray* pResult; // SArray + int32_t colId; +} SStddevInterResult; + +void interResToBinary(SBufferWriter* bw, SArray* pRes, int32_t tagLen); +SArray* interResFromBinary(const char* data, int32_t len); +void freeInterResult(void* param); + +void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo, int32_t offset); +void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo); +bool hasRemainData(SGroupResInfo* pGroupResInfo); +bool incNextGroup(SGroupResInfo* pGroupResInfo); +int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo); + +int32_t 
mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo); #endif // TDENGINE_QUERYUTIL_H diff --git a/src/query/inc/queryLog.h b/src/query/inc/queryLog.h index 26544ab0f9..5c48c43c45 100644 --- a/src/query/inc/queryLog.h +++ b/src/query/inc/queryLog.h @@ -30,6 +30,7 @@ extern uint32_t qDebugFlag; #define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0) #define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0) #ifdef __cplusplus } diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 12c8a16f1f..fb8b2394f2 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -26,10 +26,12 @@ #include "qTsbuf.h" #include "queryLog.h" -#define GET_INPUT_DATA_LIST(x) (((char *)((x)->aInputElemBuf)) + ((x)->startOffset) * ((x)->inputBytes)) +//#define GET_INPUT_DATA_LIST(x) (((char *)((x)->pInput)) + ((x)->startOffset) * ((x)->inputBytes)) +#define GET_INPUT_DATA_LIST(x) ((char *)((x)->pInput)) #define GET_INPUT_DATA(x, y) (GET_INPUT_DATA_LIST(x) + (y) * (x)->inputBytes) -#define GET_TS_LIST(x) ((TSKEY*)&((x)->ptsList[(x)->startOffset])) +//#define GET_TS_LIST(x) ((TSKEY*)&((x)->ptsList[(x)->startOffset])) +#define GET_TS_LIST(x) ((TSKEY*)((x)->ptsList)) #define GET_TS_DATA(x, y) (GET_TS_LIST(x)[(y)]) #define GET_TRUE_DATA_TYPE() \ @@ -109,6 +111,11 @@ typedef struct SStddevInfo { int8_t stage; } SStddevInfo; +typedef struct SStddevdstInfo { + int64_t num; + double res; +} SStddevdstInfo; + typedef struct SFirstLastInfo { int8_t hasResult; TSKEY ts; @@ -335,6 +342,11 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = (int16_t)dataType; *bytes = (int16_t)dataBytes; *interBytes = dataBytes; + } else if (functionId == TSDB_FUNC_STDDEV_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SStddevdstInfo); + *interBytes = (*bytes); + } else { return TSDB_CODE_TSC_INVALID_SQL; } @@ -354,7 +366,7 @@ static bool function_setup(SQLFunctionCtx *pCtx) { return false; } - memset(pCtx->aOutputBuf, 0, (size_t)pCtx->outputBytes); + memset(pCtx->pOutput, 0, (size_t)pCtx->outputBytes); initResultInfo(pResInfo, pCtx->interBufBytes); return true; } @@ -369,11 +381,7 @@ static bool function_setup(SQLFunctionCtx *pCtx) { static void function_finalizer(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult != DATA_SET_FLAG) { - if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pCtx->aOutputBuf, pCtx->outputType); - } else { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); - } + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); } doFinalizer(pCtx); @@ -404,10 +412,7 @@ static void count_function(SQLFunctionCtx *pCtx) { numOfElem += 1; } } else { - /* - * when counting on the primary time stamp column and no statistics data is provided, - * simple use the size value - */ + //when counting on the primary time stamp column and no statistics data is presented, use the size value directly. 
numOfElem = pCtx->size; } } @@ -416,7 +421,7 @@ static void count_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; } - *((int64_t *)pCtx->aOutputBuf) += numOfElem; + *((int64_t *)pCtx->pOutput) += numOfElem; SET_VAL(pCtx, numOfElem, 1); } @@ -427,7 +432,7 @@ static void count_function_f(SQLFunctionCtx *pCtx, int32_t index) { } SET_VAL(pCtx, 1, 1); - *((int64_t *)pCtx->aOutputBuf) += pCtx->size; + *((int64_t *)pCtx->pOutput) += pCtx->size; // do not need it actually SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); @@ -437,7 +442,7 @@ static void count_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void count_func_merge(SQLFunctionCtx *pCtx) { int64_t *pData = (int64_t *)GET_INPUT_DATA_LIST(pCtx); for (int32_t i = 0; i < pCtx->size; ++i) { - *((int64_t *)pCtx->aOutputBuf) += pData[i]; + *((int64_t *)pCtx->pOutput) += pData[i]; } SET_VAL(pCtx, pCtx->size, 1); @@ -519,13 +524,13 @@ static void do_sum(SQLFunctionCtx *pCtx) { assert(pCtx->size >= pCtx->preAggVals.statis.numOfNull); if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { - int64_t *retVal = (int64_t *)pCtx->aOutputBuf; + int64_t *retVal = (int64_t *)pCtx->pOutput; *retVal += pCtx->preAggVals.statis.sum; } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) { - uint64_t *retVal = (uint64_t *)pCtx->aOutputBuf; + uint64_t *retVal = (uint64_t *)pCtx->pOutput; *retVal += (uint64_t)pCtx->preAggVals.statis.sum; } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = (double*) pCtx->aOutputBuf; + double *retVal = (double*) pCtx->pOutput; *retVal += GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum)); } } else { // computing based on the true data block @@ -533,7 +538,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { notNullElems = 0; if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { - int64_t *retVal = (int64_t *)pCtx->aOutputBuf; + int64_t *retVal = (int64_t *)pCtx->pOutput; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { LIST_ADD_N(*retVal, pCtx, pData, int8_t, notNullElems, pCtx->inputType); @@ -545,7 +550,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { LIST_ADD_N(*retVal, pCtx, pData, int64_t, notNullElems, pCtx->inputType); } } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) { - uint64_t *retVal = (uint64_t *)pCtx->aOutputBuf; + uint64_t *retVal = (uint64_t *)pCtx->pOutput; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { LIST_ADD_N(*retVal, pCtx, pData, uint8_t, notNullElems, pCtx->inputType); @@ -557,10 +562,10 @@ static void do_sum(SQLFunctionCtx *pCtx) { LIST_ADD_N(*retVal, pCtx, pData, uint64_t, notNullElems, pCtx->inputType); } } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *retVal = (double *)pCtx->aOutputBuf; + double *retVal = (double *)pCtx->pOutput; LIST_ADD_N(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = (double *)pCtx->aOutputBuf; + double *retVal = (double *)pCtx->pOutput; LIST_ADD_N(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType); } } @@ -580,7 +585,7 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { } SET_VAL(pCtx, 1, 1); - int64_t *res = (int64_t*) pCtx->aOutputBuf; + int64_t *res = (int64_t*) pCtx->pOutput; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { *res += GET_INT8_VAL(pData); @@ -591,10 +596,10 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { *res += GET_INT64_VAL(pData); } else if (pCtx->inputType == 
TSDB_DATA_TYPE_DOUBLE) { - double *retVal = (double*) pCtx->aOutputBuf; + double *retVal = (double*) pCtx->pOutput; *retVal += GET_DOUBLE_VAL(pData); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = (double*) pCtx->aOutputBuf; + double *retVal = (double*) pCtx->pOutput; *retVal += GET_FLOAT_VAL(pData); } @@ -608,7 +613,7 @@ static void sum_function(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { // set the flag for super table query - SSumInfo *pSum = (SSumInfo *)pCtx->aOutputBuf; + SSumInfo *pSum = (SSumInfo *)pCtx->pOutput; pSum->hasResult = DATA_SET_FLAG; } } @@ -619,7 +624,7 @@ static void sum_function_f(SQLFunctionCtx *pCtx, int32_t index) { // keep the result data in output buffer, not in the intermediate buffer SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - SSumInfo *pSum = (SSumInfo *)pCtx->aOutputBuf; + SSumInfo *pSum = (SSumInfo *)pCtx->pOutput; pSum->hasResult = DATA_SET_FLAG; } } @@ -644,12 +649,12 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) { case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_BIGINT: { - *(int64_t *)pCtx->aOutputBuf += pInput->isum; + *(int64_t *)pCtx->pOutput += pInput->isum; break; }; case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_DOUBLE: { - *(double *)pCtx->aOutputBuf += pInput->dsum; + *(double *)pCtx->pOutput += pInput->dsum; } } } @@ -702,13 +707,13 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en } // not initialized yet, it is the first block, load it. - if (pCtx->aOutputBuf == NULL) { + if (pCtx->pOutput == NULL) { return BLK_DATA_ALL_NEEDED; } - // the pCtx should be set to current Ctx and output buffer before call this function. Otherwise, pCtx->aOutputBuf is + // the pCtx should be set to current Ctx and output buffer before call this function. Otherwise, pCtx->pOutput is // the previous windowRes output buffer, not current unloaded block. In this case, the following filter is invalid - SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->pOutput + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { // data in current block is not earlier than current result @@ -722,13 +727,13 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end } // not initialized yet, it is the first block, load it. - if (pCtx->aOutputBuf == NULL) { + if (pCtx->pOutput == NULL) { return BLK_DATA_ALL_NEEDED; } - // the pCtx should be set to current Ctx and output buffer before call this function. Otherwise, pCtx->aOutputBuf is + // the pCtx should be set to current Ctx and output buffer before call this function. Otherwise, pCtx->pOutput is // the previous windowRes output buffer, not current unloaded block. 
In this case, the following filter is invalid - SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->pOutput + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { @@ -801,7 +806,7 @@ static void avg_function(SQLFunctionCtx *pCtx) { // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); } } @@ -848,14 +853,14 @@ static void avg_function_f(SQLFunctionCtx *pCtx, int32_t index) { // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); } } static void avg_func_merge(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - double *sum = (double*) pCtx->aOutputBuf; + double *sum = (double*) pCtx->pOutput; char *input = GET_INPUT_DATA_LIST(pCtx); for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { @@ -881,21 +886,21 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); if (GET_INT64_VAL(GET_ROWCELL_INTERBUF(pResInfo)) <= 0) { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } - *(double *)pCtx->aOutputBuf = (*(double *)pCtx->aOutputBuf) / *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo); + *(double *)pCtx->pOutput = (*(double *)pCtx->pOutput) / *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo); } else { // this is the secondary merge, only in the secondary merge, the input type is TSDB_DATA_TYPE_BINARY assert(IS_NUMERIC_TYPE(pCtx->inputType)); SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pAvgInfo->num == 0) { // all data are NULL or empty table - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } - *(double *)pCtx->aOutputBuf = pAvgInfo->sum / pAvgInfo->num; + *(double *)pCtx->pOutput = pAvgInfo->sum / pAvgInfo->num; } // cannot set the numOfIteratedElems again since it is set during previous iteration @@ -934,9 +939,9 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, * * The following codes of 3 lines will be removed later. 
*/ - if (index < 0 || index >= pCtx->size + pCtx->startOffset) { - index = 0; - } +// if (index < 0 || index >= pCtx->size + pCtx->startOffset) { +// index = 0; +// } // the index is the original position, not the relative position key = pCtx->ptsList[index]; @@ -1065,34 +1070,34 @@ static bool min_func_setup(SQLFunctionCtx *pCtx) { switch (type) { case TSDB_DATA_TYPE_TINYINT: - *((int8_t *)pCtx->aOutputBuf) = INT8_MAX; + *((int8_t *)pCtx->pOutput) = INT8_MAX; break; case TSDB_DATA_TYPE_UTINYINT: - *(uint8_t *) pCtx->aOutputBuf = UINT8_MAX; + *(uint8_t *) pCtx->pOutput = UINT8_MAX; break; case TSDB_DATA_TYPE_SMALLINT: - *((int16_t *)pCtx->aOutputBuf) = INT16_MAX; + *((int16_t *)pCtx->pOutput) = INT16_MAX; break; case TSDB_DATA_TYPE_USMALLINT: - *((uint16_t *)pCtx->aOutputBuf) = UINT16_MAX; + *((uint16_t *)pCtx->pOutput) = UINT16_MAX; break; case TSDB_DATA_TYPE_INT: - *((int32_t *)pCtx->aOutputBuf) = INT32_MAX; + *((int32_t *)pCtx->pOutput) = INT32_MAX; break; case TSDB_DATA_TYPE_UINT: - *((uint32_t *)pCtx->aOutputBuf) = UINT32_MAX; + *((uint32_t *)pCtx->pOutput) = UINT32_MAX; break; case TSDB_DATA_TYPE_BIGINT: - *((int64_t *)pCtx->aOutputBuf) = INT64_MAX; + *((int64_t *)pCtx->pOutput) = INT64_MAX; break; case TSDB_DATA_TYPE_UBIGINT: - *((uint64_t *)pCtx->aOutputBuf) = UINT64_MAX; + *((uint64_t *)pCtx->pOutput) = UINT64_MAX; break; case TSDB_DATA_TYPE_FLOAT: - *((float *)pCtx->aOutputBuf) = FLT_MAX; + *((float *)pCtx->pOutput) = FLT_MAX; break; case TSDB_DATA_TYPE_DOUBLE: - *((double *)pCtx->aOutputBuf) = DBL_MAX; + *((double *)pCtx->pOutput) = DBL_MAX; break; default: qError("illegal data type:%d in min/max query", pCtx->inputType); @@ -1110,34 +1115,34 @@ static bool max_func_setup(SQLFunctionCtx *pCtx) { switch (type) { case TSDB_DATA_TYPE_INT: - *((int32_t *)pCtx->aOutputBuf) = INT32_MIN; + *((int32_t *)pCtx->pOutput) = INT32_MIN; break; case TSDB_DATA_TYPE_UINT: - *((uint32_t *)pCtx->aOutputBuf) = 0; + *((uint32_t *)pCtx->pOutput) = 0; break; case TSDB_DATA_TYPE_FLOAT: - *((float *)pCtx->aOutputBuf) = -FLT_MAX; + *((float *)pCtx->pOutput) = -FLT_MAX; break; case TSDB_DATA_TYPE_DOUBLE: - *((double *)pCtx->aOutputBuf) = -DBL_MAX; + *((double *)pCtx->pOutput) = -DBL_MAX; break; case TSDB_DATA_TYPE_BIGINT: - *((int64_t *)pCtx->aOutputBuf) = INT64_MIN; + *((int64_t *)pCtx->pOutput) = INT64_MIN; break; case TSDB_DATA_TYPE_UBIGINT: - *((uint64_t *)pCtx->aOutputBuf) = 0; + *((uint64_t *)pCtx->pOutput) = 0; break; case TSDB_DATA_TYPE_SMALLINT: - *((int16_t *)pCtx->aOutputBuf) = INT16_MIN; + *((int16_t *)pCtx->pOutput) = INT16_MIN; break; case TSDB_DATA_TYPE_USMALLINT: - *((uint16_t *)pCtx->aOutputBuf) = 0; + *((uint16_t *)pCtx->pOutput) = 0; break; case TSDB_DATA_TYPE_TINYINT: - *((int8_t *)pCtx->aOutputBuf) = INT8_MIN; + *((int8_t *)pCtx->pOutput) = INT8_MIN; break; case TSDB_DATA_TYPE_UTINYINT: - *((uint8_t *)pCtx->aOutputBuf) = 0; + *((uint8_t *)pCtx->pOutput) = 0; break; default: qError("illegal data type:%d in min/max query", pCtx->inputType); @@ -1151,7 +1156,7 @@ static bool max_func_setup(SQLFunctionCtx *pCtx) { */ static void min_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - minMax_function(pCtx, pCtx->aOutputBuf, 1, ¬NullElems); + minMax_function(pCtx, pCtx->pOutput, 1, ¬NullElems); SET_VAL(pCtx, notNullElems, 1); @@ -1161,14 +1166,14 @@ static void min_function(SQLFunctionCtx *pCtx) { // set the flag for super table query if (pCtx->stableQuery) { - *(pCtx->aOutputBuf + pCtx->inputBytes) = DATA_SET_FLAG; + *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; } } } 
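Aside from the mechanical aOutputBuf -> pOutput rename, the min/max hunks keep the existing pattern: the setup call primes the output slot with the type's extreme value, the block function folds the column into that slot while skipping nulls, and for super-table queries a DATA_SET_FLAG byte is written immediately after the value so the merge stage knows the slot carries data. Below is a self-contained sketch of that pattern; MiniMinCtx and the INT32_MIN null marker are simplified stand-ins, not the actual SQLFunctionCtx or isNull() machinery.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>

#define DATA_SET_FLAG ','   /* same sentinel as qAggMain.h */

typedef struct MiniMinCtx {
  int32_t        size;        /* number of rows in the data block */
  bool           hasNull;     /* block may contain null markers */
  const int32_t *pInput;      /* column data (stand-in for pCtx->pInput) */
  char          *pOutput;     /* value slot followed by one flag byte */
  bool           stableQuery; /* super-table query: publish the flag byte */
} MiniMinCtx;

/* mirrors min_func_setup: prime the output slot with the type's maximum */
static void mini_min_setup(MiniMinCtx *ctx) {
  int32_t init = INT32_MAX;
  memcpy(ctx->pOutput, &init, sizeof(init));
  ctx->pOutput[sizeof(int32_t)] = 0;   /* no result yet */
}

/* mirrors min_function: fold the block into the output, skip nulls, set the flag */
static void mini_min_block(MiniMinCtx *ctx) {
  int32_t cur;
  memcpy(&cur, ctx->pOutput, sizeof(cur));

  int32_t notNullElems = 0;
  for (int32_t i = 0; i < ctx->size; ++i) {
    if (ctx->hasNull && ctx->pInput[i] == INT32_MIN) {  /* INT32_MIN stands in for the null marker */
      continue;
    }
    if (ctx->pInput[i] < cur) {
      cur = ctx->pInput[i];
    }
    notNullElems++;
  }
  memcpy(ctx->pOutput, &cur, sizeof(cur));

  if (notNullElems > 0 && ctx->stableQuery) {
    ctx->pOutput[sizeof(int32_t)] = DATA_SET_FLAG;  /* tell the merge stage the slot holds data */
  }
}

int main(void) {
  int32_t col[] = {7, 3, INT32_MIN, 9};
  char    out[sizeof(int32_t) + 1];

  MiniMinCtx ctx = {4, true, col, out, true};
  mini_min_setup(&ctx);
  mini_min_block(&ctx);

  int32_t minVal;
  memcpy(&minVal, out, sizeof(minVal));
  printf("min = %d, flag set = %d\n", (int)minVal, out[sizeof(int32_t)] == DATA_SET_FLAG);
  return 0;
}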
static void max_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - minMax_function(pCtx, pCtx->aOutputBuf, 0, ¬NullElems); + minMax_function(pCtx, pCtx->pOutput, 0, ¬NullElems); SET_VAL(pCtx, notNullElems, 1); @@ -1178,7 +1183,7 @@ static void max_function(SQLFunctionCtx *pCtx) { // set the flag for super table query if (pCtx->stableQuery) { - *(pCtx->aOutputBuf + pCtx->inputBytes) = DATA_SET_FLAG; + *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; } } } @@ -1244,7 +1249,7 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp } static void min_func_merge(SQLFunctionCtx *pCtx) { - int32_t notNullElems = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->aOutputBuf, 1); + int32_t notNullElems = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 1); SET_VAL(pCtx, notNullElems, 1); @@ -1255,7 +1260,7 @@ static void min_func_merge(SQLFunctionCtx *pCtx) { } static void max_func_merge(SQLFunctionCtx *pCtx) { - int32_t numOfElem = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->aOutputBuf, 0); + int32_t numOfElem = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 0); SET_VAL(pCtx, numOfElem, 1); @@ -1271,32 +1276,32 @@ static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin int32_t num = 0; if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - int8_t *output = (int8_t *)pCtx->aOutputBuf; + int8_t *output = (int8_t *)pCtx->pOutput; int8_t i = GET_INT8_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - int16_t *output = (int16_t*) pCtx->aOutputBuf; + int16_t *output = (int16_t*) pCtx->pOutput; int16_t i = GET_INT16_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - int32_t *output = (int32_t*) pCtx->aOutputBuf; + int32_t *output = (int32_t*) pCtx->pOutput; int32_t i = GET_INT32_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - int64_t *output = (int64_t*) pCtx->aOutputBuf; + int64_t *output = (int64_t*) pCtx->pOutput; int64_t i = GET_INT64_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - float *output = (float*) pCtx->aOutputBuf; + float *output = (float*) pCtx->pOutput; float i = GET_FLOAT_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *output = (double*) pCtx->aOutputBuf; + double *output = (double*) pCtx->pOutput; double i = GET_DOUBLE_VAL(pData); UPDATE_DATA(pCtx, *output, i, num, isMin, key); @@ -1316,7 +1321,7 @@ static void max_function_f(SQLFunctionCtx *pCtx, int32_t index) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - char *flag = pCtx->aOutputBuf + pCtx->inputBytes; + char *flag = pCtx->pOutput + pCtx->inputBytes; *flag = DATA_SET_FLAG; } } @@ -1332,17 +1337,18 @@ static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - char *flag = pCtx->aOutputBuf + pCtx->inputBytes; + char *flag = pCtx->pOutput + pCtx->inputBytes; *flag = DATA_SET_FLAG; } } -#define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, tsdbType) \ - for (int32_t i = 0; i < (ctx)->size; ++i) { \ - if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], tsdbType)) { \ - continue; \ - } \ - (r) += POW2(((type *)d)[i] - 
(delta)); \ +#define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, _type, num) \ + for (int32_t i = 0; i < (ctx)->size; ++i) { \ + if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], (_type))) { \ + continue; \ + } \ + (num) += 1; \ + (r) += POW2(((type *)d)[i] - (delta)); \ } static void stddev_function(SQLFunctionCtx *pCtx) { @@ -1358,35 +1364,53 @@ static void stddev_function(SQLFunctionCtx *pCtx) { double avg = pStd->avg; void *pData = GET_INPUT_DATA_LIST(pCtx); - + int32_t num = 0; + switch (pCtx->inputType) { case TSDB_DATA_TYPE_INT: { for (int32_t i = 0; i < pCtx->size; ++i) { if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) { continue; } + num += 1; *retVal += POW2(((int32_t *)pData)[i] - avg); } break; } case TSDB_DATA_TYPE_FLOAT: { - LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType); + LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num); break; } case TSDB_DATA_TYPE_DOUBLE: { - LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType); + LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num); break; } case TSDB_DATA_TYPE_BIGINT: { - LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType); + LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); break; } case TSDB_DATA_TYPE_SMALLINT: { - LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType); + LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); break; } case TSDB_DATA_TYPE_TINYINT: { - LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType); + LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + LOOP_STDDEV_IMPL(uint8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UINT: { + LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); break; } default: @@ -1438,6 +1462,22 @@ static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) { pStd->res += POW2(GET_INT8_VAL(pData) - avg); break; } + case TSDB_DATA_TYPE_UINT: { + pStd->res += POW2(GET_UINT32_VAL(pData) - avg); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + pStd->res += POW2(GET_UINT64_VAL(pData) - avg); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + pStd->res += POW2(GET_UINT16_VAL(pData) - avg); + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + pStd->res += POW2(GET_UINT8_VAL(pData) - avg); + break; + } default: qError("stddev function not support data type:%d", pCtx->inputType); } @@ -1469,7 +1509,7 @@ static void stddev_next_step(SQLFunctionCtx *pCtx) { // save average value into tmpBuf, for second stage scan SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo); - pStd->avg = GET_DOUBLE_VAL(pCtx->aOutputBuf); + pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput); assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum)); } else { pResInfo->complete = true; @@ -1480,9 +1520,9 @@ static void stddev_finalizer(SQLFunctionCtx *pCtx) { SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); if (pStd->num <= 0) { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); } else { - double *retValue = (double 
*)pCtx->aOutputBuf; + double *retValue = (double *)pCtx->pOutput; *retValue = sqrt(pStd->res / pStd->num); SET_VAL(pCtx, 1, 1); } @@ -1490,6 +1530,230 @@ static void stddev_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } +////////////////////////////////////////////////////////////////////////////////////// +int32_t tsCompare(const void* p1, const void* p2) { + TSKEY k = *(TSKEY*)p1; + SResPair* pair = (SResPair*)p2; + + if (k == pair->key) { + return 0; + } else { + return k < pair->key? -1:1; + } +} + +static void stddev_dst_function(SQLFunctionCtx *pCtx) { + SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + + // the second stage to calculate standard deviation + double *retVal = &pStd->res; + + // all data are null, no need to proceed + SArray* resList = (SArray*) pCtx->param[0].pz; + if (resList == NULL) { + return; + } + + // find the correct group average results according to the tag value + int32_t len = (int32_t) taosArrayGetSize(resList); + assert(len > 0); + + double avg = 0; + if (len == 1) { + SResPair* p = taosArrayGet(resList, 0); + avg = p->avg; + } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result + SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare); + if (p == NULL) { + return; + } + + avg = p->avg; + } + + void *pData = GET_INPUT_DATA_LIST(pCtx); + int32_t num = 0; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + for (int32_t i = 0; i < pCtx->size; ++i) { + if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) { + continue; + } + num += 1; + *retVal += POW2(((int32_t *)pData)[i] - avg); + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { + LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UINT: { + LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + default: + qError("stddev function not support data type:%d", pCtx->inputType); + } + + pStd->num += num; + SET_VAL(pCtx, num, 1); + + // copy to the final output buffer for super table + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo)); +} + +static void stddev_dst_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_DATA(pCtx, index); + if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { + return; + } + + // the second stage to calculate standard deviation + SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + double *retVal = &pStd->res; + + // all data are null, no need to proceed + SArray* resList = (SArray*) pCtx->param[0].pz; + if (resList == 
NULL) { + return; + } + + // find the correct group average results according to the tag value + int32_t len = (int32_t) taosArrayGetSize(resList); + assert(len > 0); + + double avg = 0; + if (len == 1) { + SResPair* p = taosArrayGet(resList, 0); + avg = p->avg; + } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result + SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare); + assert(p != NULL); + + avg = p->avg; + } + + int32_t num = 0; + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + for (int32_t i = 0; i < pCtx->size; ++i) { + if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) { + continue; + } + num += 1; + *retVal += POW2(((int32_t *)pData)[i] - avg); + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { + LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UINT: { + LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); + break; + } + default: + qError("stddev function not support data type:%d", pCtx->inputType); + } + + pStd->num += num; + SET_VAL(pCtx, num, 1); + + // copy to the final output buffer for super table + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo)); +} + + +static void stddev_dst_merge(SQLFunctionCtx *pCtx) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SStddevdstInfo* pRes = GET_ROWCELL_INTERBUF(pResInfo); + + char *input = GET_INPUT_DATA_LIST(pCtx); + + for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { + SStddevdstInfo *pInput = (SStddevdstInfo *)input; + if (pInput->num == 0) { // current input is null + continue; + } + + pRes->num += pInput->num; + pRes->res += pInput->res; + } +} + +static void stddev_dst_finalizer(SQLFunctionCtx *pCtx) { + SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + + if (pStd->num <= 0) { + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); + } else { + double *retValue = (double *)pCtx->pOutput; + *retValue = sqrt(pStd->res / pStd->num); + SET_VAL(pCtx, 1, 1); + } + + doFinalizer(pCtx); +} + ////////////////////////////////////////////////////////////////////////////////////// static bool first_last_function_setup(SQLFunctionCtx *pCtx) { if (!function_setup(pCtx)) { @@ -1518,7 +1782,7 @@ static void first_function(SQLFunctionCtx *pCtx) { continue; } - memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes); + memcpy(pCtx->pOutput, data, pCtx->inputBytes); TSKEY k = GET_TS_DATA(pCtx, i); DO_UPDATE_TAG_COLUMNS(pCtx, k); @@ -1545,7 +1809,7 @@ static void first_function_f(SQLFunctionCtx *pCtx, int32_t 
index) { } SET_VAL(pCtx, 1, 1); - memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + memcpy(pCtx->pOutput, pData, pCtx->inputBytes); TSKEY ts = GET_TS_DATA(pCtx, index); DO_UPDATE_TAG_COLUMNS(pCtx, ts); @@ -1558,10 +1822,10 @@ static void first_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { int64_t *timestamp = GET_TS_LIST(pCtx); - SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->aOutputBuf + pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->pOutput + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG || timestamp[index] < pInfo->ts) { - memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + memcpy(pCtx->pOutput, pData, pCtx->inputBytes); pInfo->hasResult = DATA_SET_FLAG; pInfo->ts = timestamp[index]; @@ -1630,7 +1894,7 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) { // The param[1] is used to keep the initial value of max ts value if (pCtx->param[1].nType != pCtx->outputType || pCtx->param[1].i64 > pInput->ts) { - memcpy(pCtx->aOutputBuf, pData, pCtx->outputBytes); + memcpy(pCtx->pOutput, pData, pCtx->outputBytes); pCtx->param[1].i64 = pInput->ts; pCtx->param[1].nType = pCtx->outputType; @@ -1663,7 +1927,7 @@ static void last_function(SQLFunctionCtx *pCtx) { continue; } } - memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes); + memcpy(pCtx->pOutput, data, pCtx->inputBytes); TSKEY ts = GET_TS_DATA(pCtx, i); DO_UPDATE_TAG_COLUMNS(pCtx, ts); @@ -1692,7 +1956,7 @@ static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) { if (pCtx->order == TSDB_ORDER_DESC) { SET_VAL(pCtx, 1, 1); - memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + memcpy(pCtx->pOutput, pData, pCtx->inputBytes); TSKEY ts = GET_TS_DATA(pCtx, index); DO_UPDATE_TAG_COLUMNS(pCtx, ts); @@ -1707,7 +1971,7 @@ static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) { char* buf = GET_ROWCELL_INTERBUF(pResInfo); if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) { pResInfo->hasResult = DATA_SET_FLAG; - memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + memcpy(pCtx->pOutput, pData, pCtx->inputBytes); *(TSKEY*)buf = ts; DO_UPDATE_TAG_COLUMNS(pCtx, ts); @@ -1718,14 +1982,14 @@ static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) { static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { int64_t *timestamp = GET_TS_LIST(pCtx); - SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->aOutputBuf + pCtx->inputBytes); + SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->pOutput + pCtx->inputBytes); if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[index]) { #if defined(_DEBUG_VIEW) qDebug("assign index:%d, ts:%" PRId64 ", val:%d, ", index, timestamp[index], *(int32_t *)pData); #endif - memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + memcpy(pCtx->pOutput, pData, pCtx->inputBytes); pInfo->hasResult = DATA_SET_FLAG; pInfo->ts = timestamp[index]; @@ -1810,7 +2074,7 @@ static void last_dist_func_merge(SQLFunctionCtx *pCtx) { * the true last result */ if (pCtx->param[1].nType != pCtx->outputType || pCtx->param[1].i64 < pInput->ts) { - memcpy(pCtx->aOutputBuf, pData, pCtx->outputBytes); + memcpy(pCtx->pOutput, pData, pCtx->outputBytes); pCtx->param[1].i64 = pInput->ts; pCtx->param[1].nType = pCtx->outputType; @@ -1830,14 +2094,14 @@ static void last_row_function(SQLFunctionCtx *pCtx) { char *pData = GET_INPUT_DATA_LIST(pCtx); // assign the last element in current data block - assignVal(pCtx->aOutputBuf, pData + (pCtx->size - 1) * 
pCtx->inputBytes, pCtx->inputBytes, pCtx->inputType); + assignVal(pCtx->pOutput, pData + (pCtx->size - 1) * pCtx->inputBytes, pCtx->inputBytes, pCtx->inputType); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; // set the result to final result buffer in case of super table query if (pCtx->stableQuery) { - SLastrowInfo *pInfo1 = (SLastrowInfo *)(pCtx->aOutputBuf + pCtx->inputBytes); + SLastrowInfo *pInfo1 = (SLastrowInfo *)(pCtx->pOutput + pCtx->inputBytes); pInfo1->ts = GET_TS_DATA(pCtx, pCtx->size - 1); pInfo1->hasResult = DATA_SET_FLAG; @@ -1855,9 +2119,9 @@ static void last_row_finalizer(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult != DATA_SET_FLAG) { if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(pCtx->aOutputBuf, pCtx->outputType); + setVardataNull(pCtx->pOutput, pCtx->outputType); } else { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); } return; @@ -2055,7 +2319,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { switch (type) { case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: { - int32_t *output = (int32_t *)pCtx->aOutputBuf; + int32_t *output = (int32_t *)pCtx->pOutput; for (int32_t i = 0; i < len; ++i, output += step) { *output = (int32_t)tvp[i]->v.i64; } @@ -2063,21 +2327,21 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { } case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_BIGINT: { - int64_t *output = (int64_t *)pCtx->aOutputBuf; + int64_t *output = (int64_t *)pCtx->pOutput; for (int32_t i = 0; i < len; ++i, output += step) { *output = tvp[i]->v.i64; } break; } case TSDB_DATA_TYPE_DOUBLE: { - double *output = (double *)pCtx->aOutputBuf; + double *output = (double *)pCtx->pOutput; for (int32_t i = 0; i < len; ++i, output += step) { *output = tvp[i]->v.dKey; } break; } case TSDB_DATA_TYPE_FLOAT: { - float *output = (float *)pCtx->aOutputBuf; + float *output = (float *)pCtx->pOutput; for (int32_t i = 0; i < len; ++i, output += step) { *output = (float)tvp[i]->v.dKey; } @@ -2085,7 +2349,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { } case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_SMALLINT: { - int16_t *output = (int16_t *)pCtx->aOutputBuf; + int16_t *output = (int16_t *)pCtx->pOutput; for (int32_t i = 0; i < len; ++i, output += step) { *output = (int16_t)tvp[i]->v.i64; } @@ -2093,7 +2357,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { } case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_TINYINT: { - int8_t *output = (int8_t *)pCtx->aOutputBuf; + int8_t *output = (int8_t *)pCtx->pOutput; for (int32_t i = 0; i < len; ++i, output += step) { *output = (int8_t)tvp[i]->v.i64; } @@ -2115,7 +2379,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { // todo check malloc failure char **pData = calloc(pCtx->tagInfo.numOfTagCols, POINTER_BYTES); for (int32_t i = 0; i < pCtx->tagInfo.numOfTagCols; ++i) { - pData[i] = pCtx->tagInfo.pTagCtxList[i]->aOutputBuf; + pData[i] = pCtx->tagInfo.pTagCtxList[i]->pOutput; } for (int32_t i = 0; i < len; ++i, output += step) { @@ -2143,7 +2407,7 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { // only the first_stage_merge is directly written data into final output buffer if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) { - return (STopBotInfo*) pCtx->aOutputBuf; + return (STopBotInfo*) 
pCtx->pOutput; } else { // during normal table query and super table at the secondary_stage, result is written to intermediate buffer return GET_ROWCELL_INTERBUF(pResInfo); } @@ -2527,9 +2791,9 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { tMemBucket * pMemBucket = ppInfo->pMemBucket; if (pMemBucket == NULL || pMemBucket->total == 0) { // check for null assert(ppInfo->numOfElems == 0); - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); } else { - *(double *)pCtx->aOutputBuf = getPercentile(pMemBucket, v); + *(double *)pCtx->pOutput = getPercentile(pMemBucket, v); } tMemBucketDestroy(pMemBucket); @@ -2565,7 +2829,7 @@ static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) { SAPercentileInfo* pInfo = NULL; if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) { - pInfo = (SAPercentileInfo*) pCtx->aOutputBuf; + pInfo = (SAPercentileInfo*) pCtx->pOutput; } else { pInfo = GET_ROWCELL_INTERBUF(pResInfo); } @@ -2679,10 +2943,10 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) { double ratio[] = {v}; double *res = tHistogramUniform(pOutput->pHisto, ratio, 1); - memcpy(pCtx->aOutputBuf, res, sizeof(double)); + memcpy(pCtx->pOutput, res, sizeof(double)); free(res); } else { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } } else { @@ -2690,10 +2954,10 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) { double ratio[] = {v}; double *res = tHistogramUniform(pOutput->pHisto, ratio, 1); - memcpy(pCtx->aOutputBuf, res, sizeof(double)); + memcpy(pCtx->pOutput, res, sizeof(double)); free(res); } else { // no need to free - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } } @@ -2859,7 +3123,7 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) { SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pInfo->num == 0) { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } @@ -2878,16 +3142,16 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) { param[1][2] /= param[1][1]; int32_t maxOutputSize = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE - VARSTR_HEADER_SIZE; - size_t n = snprintf(varDataVal(pCtx->aOutputBuf), maxOutputSize, "{slop:%.6lf, intercept:%.6lf}", + size_t n = snprintf(varDataVal(pCtx->pOutput), maxOutputSize, "{slop:%.6lf, intercept:%.6lf}", param[0][2], param[1][2]); - varDataSetLen(pCtx->aOutputBuf, n); + varDataSetLen(pCtx->pOutput, n); doFinalizer(pCtx); } static void date_col_output_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, pCtx->size, 1); - *(int64_t *)(pCtx->aOutputBuf) = pCtx->nStartQueryTimestamp; + *(int64_t *)(pCtx->pOutput) = pCtx->startTs; } static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) { @@ -2904,15 +3168,15 @@ static void col_project_function(SQLFunctionCtx *pCtx) { char *pData = GET_INPUT_DATA_LIST(pCtx); if (pCtx->order == TSDB_ORDER_ASC) { - memcpy(pCtx->aOutputBuf, pData, (size_t) pCtx->size * pCtx->inputBytes); + memcpy(pCtx->pOutput, pData, (size_t) pCtx->size * pCtx->inputBytes); } else { for(int32_t i = 0; i < pCtx->size; ++i) { - memcpy(pCtx->aOutputBuf + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes, + memcpy(pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes, 
pCtx->inputBytes); } } - pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes; + pCtx->pOutput += pCtx->size * pCtx->outputBytes; } static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { @@ -2928,9 +3192,9 @@ static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); char *pData = GET_INPUT_DATA(pCtx, index); - memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + memcpy(pCtx->pOutput, pData, pCtx->inputBytes); - pCtx->aOutputBuf += pCtx->inputBytes; + pCtx->pOutput += pCtx->inputBytes; } /** @@ -2943,22 +3207,22 @@ static void tag_project_function(SQLFunctionCtx *pCtx) { assert(pCtx->inputBytes == pCtx->outputBytes); - tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true); - char* data = pCtx->aOutputBuf; - pCtx->aOutputBuf += pCtx->outputBytes; + tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); + char* data = pCtx->pOutput; + pCtx->pOutput += pCtx->outputBytes; // directly copy from the first one for (int32_t i = 1; i < pCtx->size; ++i) { - memmove(pCtx->aOutputBuf, data, pCtx->outputBytes); - pCtx->aOutputBuf += pCtx->outputBytes; + memmove(pCtx->pOutput, data, pCtx->outputBytes); + pCtx->pOutput += pCtx->outputBytes; } } static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { INC_INIT_VAL(pCtx, 1); - tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->tag.nType, true); - pCtx->aOutputBuf += pCtx->outputBytes; + tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->tag.nType, true); + pCtx->pOutput += pCtx->outputBytes; } /** @@ -2970,19 +3234,19 @@ static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { */ static void tag_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, 1, 1); - tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true); + tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); } static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) { SET_VAL(pCtx, 1, 1); - tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType, true); + tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); } static void copy_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, pCtx->size, 1); char *pData = GET_INPUT_DATA_LIST(pCtx); - assignVal(pCtx->aOutputBuf, pData, pCtx->inputBytes, pCtx->inputType); + assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); } enum { @@ -3015,7 +3279,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { switch (pCtx->inputType) { case TSDB_DATA_TYPE_INT: { int32_t *pData = (int32_t *)data; - int32_t *pOutput = (int32_t *)pCtx->aOutputBuf; + int32_t *pOutput = (int32_t *)pCtx->pOutput; for (; i < pCtx->size && i >= 0; i += step) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { @@ -3047,7 +3311,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { }; case TSDB_DATA_TYPE_BIGINT: { int64_t *pData = (int64_t *)data; - int64_t *pOutput = (int64_t *)pCtx->aOutputBuf; + int64_t *pOutput = (int64_t *)pCtx->pOutput; for (; i < pCtx->size && i >= 0; i += step) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { @@ -3079,7 +3343,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { } case TSDB_DATA_TYPE_DOUBLE: { double *pData = (double *)data; - double *pOutput = (double *)pCtx->aOutputBuf; + double *pOutput = (double *)pCtx->pOutput; for (; i < pCtx->size && i >= 0; i += step) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { @@ -3109,7 +3373,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { } case TSDB_DATA_TYPE_FLOAT: { float *pData = 
(float *)data; - float *pOutput = (float *)pCtx->aOutputBuf; + float *pOutput = (float *)pCtx->pOutput; for (; i < pCtx->size && i >= 0; i += step) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { @@ -3142,7 +3406,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { } case TSDB_DATA_TYPE_SMALLINT: { int16_t *pData = (int16_t *)data; - int16_t *pOutput = (int16_t *)pCtx->aOutputBuf; + int16_t *pOutput = (int16_t *)pCtx->pOutput; for (; i < pCtx->size && i >= 0; i += step) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { @@ -3173,7 +3437,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { } case TSDB_DATA_TYPE_TINYINT: { int8_t *pData = (int8_t *)data; - int8_t *pOutput = (int8_t *)pCtx->aOutputBuf; + int8_t *pOutput = (int8_t *)pCtx->pOutput; for (; i < pCtx->size && i >= 0; i += step) { if (pCtx->hasNull && isNull((char *)&pData[i], pCtx->inputType)) { @@ -3219,7 +3483,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += forwardStep; - pCtx->aOutputBuf += forwardStep * pCtx->outputBytes; + pCtx->pOutput += forwardStep * pCtx->outputBytes; pCtx->ptsOutputBuf = (char*)pCtx->ptsOutputBuf + forwardStep * TSDB_KEYSIZE; } } @@ -3230,7 +3494,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { (ctx)->param[1].nType = (ctx)->inputType; \ *(type *)&(ctx)->param[1].i64 = *(type *)(d); \ } else { \ - *(type *)(ctx)->aOutputBuf = *(type *)(d) - (*(type *)(&(ctx)->param[1].i64)); \ + *(type *)(ctx)->pOutput = *(type *)(d) - (*(type *)(&(ctx)->param[1].i64)); \ *(type *)(&(ctx)->param[1].i64) = *(type *)(d); \ *(int64_t *)(ctx)->ptsOutputBuf = GET_TS_DATA(ctx, index); \ } \ @@ -3255,7 +3519,7 @@ static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) { pCtx->param[1].nType = pCtx->inputType; pCtx->param[1].i64 = *(int32_t *)pData; } else { - *(int32_t *)pCtx->aOutputBuf = *(int32_t *)pData - (int32_t)pCtx->param[1].i64; + *(int32_t *)pCtx->pOutput = *(int32_t *)pData - (int32_t)pCtx->param[1].i64; pCtx->param[1].i64 = *(int32_t *)pData; *(int64_t *)pCtx->ptsOutputBuf = GET_TS_DATA(pCtx, index); } @@ -3286,7 +3550,7 @@ static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) { } if (GET_RES_INFO(pCtx)->numOfRes > 0) { - pCtx->aOutputBuf += pCtx->outputBytes * step; + pCtx->pOutput += pCtx->outputBytes * step; pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * step; } } @@ -3310,10 +3574,8 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += pCtx->size; SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; - arithmeticTreeTraverse(sas->pArithExpr->pExpr, pCtx->size, pCtx->aOutputBuf, sas, pCtx->order, getArithColumnData); - - pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size; - pCtx->param[1].pz = NULL; + arithmeticTreeTraverse(sas->pArithExpr->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData); + pCtx->pOutput += pCtx->outputBytes * pCtx->size; } static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { @@ -3321,9 +3583,9 @@ static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; sas->offset = index; - arithmeticTreeTraverse(sas->pArithExpr->pExpr, 1, pCtx->aOutputBuf, sas, pCtx->order, getArithColumnData); + arithmeticTreeTraverse(sas->pArithExpr->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData); - pCtx->aOutputBuf += pCtx->outputBytes; + pCtx->pOutput += pCtx->outputBytes; } #define 
LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ @@ -3432,7 +3694,7 @@ static void spread_function(SQLFunctionCtx *pCtx) { // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); } } @@ -3475,7 +3737,7 @@ static void spread_function_f(SQLFunctionCtx *pCtx, int32_t index) { pInfo->hasResult = DATA_SET_FLAG; if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); } } @@ -3511,21 +3773,21 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); if (pResInfo->hasResult != DATA_SET_FLAG) { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } - *(double *)pCtx->aOutputBuf = pCtx->param[3].dKey - pCtx->param[0].dKey; + *(double *)pCtx->pOutput = pCtx->param[3].dKey - pCtx->param[0].dKey; } else { assert(IS_NUMERIC_TYPE(pCtx->inputType) || (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP)); SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); if (pInfo->hasResult != DATA_SET_FLAG) { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } - *(double *)pCtx->aOutputBuf = pInfo->max - pInfo->min; + *(double *)pCtx->pOutput = pInfo->max - pInfo->min; } GET_RES_INFO(pCtx)->numOfRes = 1; // todo add test case @@ -3715,7 +3977,7 @@ static void twa_function(SQLFunctionCtx *pCtx) { } if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, pInfo, sizeof(STwaInfo)); + memcpy(pCtx->pOutput, pInfo, sizeof(STwaInfo)); } } @@ -3735,7 +3997,7 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { } if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(STwaInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(STwaInfo)); } } @@ -3748,8 +4010,8 @@ void twa_function_copy(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->aInputElemBuf, (size_t)pCtx->inputBytes); - pResInfo->hasResult = ((STwaInfo *)pCtx->aInputElemBuf)->hasResult; + memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->pInput, (size_t)pCtx->inputBytes); + pResInfo->hasResult = ((STwaInfo *)pCtx->pInput)->hasResult; } void twa_function_finalizer(SQLFunctionCtx *pCtx) { @@ -3757,15 +4019,15 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) { STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pInfo->hasResult != DATA_SET_FLAG) { - setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + setNull(pCtx->pOutput, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); return; } assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult); if (pInfo->win.ekey == pInfo->win.skey) { - *(double *)pCtx->aOutputBuf = pInfo->p.val; + *(double *)pCtx->pOutput = pInfo->p.val; } else { - *(double *)pCtx->aOutputBuf = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); + *(double *)pCtx->pOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); } GET_RES_INFO(pCtx)->numOfRes = 1; @@ -3784,7 +4046,7 @@ static void 
interp_function_impl(SQLFunctionCtx *pCtx) { } if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { - *(TSKEY *) pCtx->aOutputBuf = pCtx->nStartQueryTimestamp; + *(TSKEY *) pCtx->pOutput = pCtx->startTs; } else { if (pCtx->start.key == INT64_MIN) { assert(pCtx->end.key == INT64_MIN); @@ -3792,29 +4054,35 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { } if (type == TSDB_FILL_NULL) { - setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); } else if (type == TSDB_FILL_SET_VALUE) { - tVariantDump(&pCtx->param[1], pCtx->aOutputBuf, pCtx->inputType, true); + tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true); } else if (type == TSDB_FILL_PREV) { if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) { - SET_TYPED_DATA(pCtx->aOutputBuf, pCtx->inputType, pCtx->start.val); + SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val); } else { - assignVal(pCtx->aOutputBuf, pCtx->start.ptr, pCtx->outputBytes, pCtx->inputType); + assignVal(pCtx->pOutput, pCtx->start.ptr, pCtx->outputBytes, pCtx->inputType); + } + } else if (type == TSDB_FILL_NEXT) { + if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) { + SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->end.val); + } else { + assignVal(pCtx->pOutput, pCtx->end.ptr, pCtx->outputBytes, pCtx->inputType); } } else if (type == TSDB_FILL_LINEAR) { SPoint point1 = {.key = pCtx->start.key, .val = &pCtx->start.val}; SPoint point2 = {.key = pCtx->end.key, .val = &pCtx->end.val}; - SPoint point = {.key = pCtx->nStartQueryTimestamp, .val = pCtx->aOutputBuf}; + SPoint point = {.key = pCtx->startTs, .val = pCtx->pOutput}; int32_t srcType = pCtx->inputType; if (IS_NUMERIC_TYPE(srcType)) { // TODO should find the not null data? 
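The hunk above extends interp_function_impl() with a TSDB_FILL_NEXT branch: FILL(NEXT) copies the value of the data point after the gap (pCtx->end), mirroring the existing FILL(PREV) branch that copies the point before it (pCtx->start), while FILL(LINEAR) keeps interpolating between the two boundary points. The standalone C sketch below is illustrative only and is not taken from the TDengine source; FILL_*, SamplePoint and fillValue are simplified stand-in names, not the engine's types. It shows the intended dispatch among NULL, PREV, NEXT and LINEAR fills.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the fill modes handled by the patched code (assumed names). */
enum { FILL_NULL, FILL_PREV, FILL_NEXT, FILL_LINEAR };

typedef struct { int64_t key; double val; } SamplePoint;

/* Pick the output value for a missing timestamp `ts` lying between `prev` and `next`,
 * following the same dispatch order as the patched branch. */
static double fillValue(int type, int64_t ts, SamplePoint prev, SamplePoint next, int *isNull) {
  *isNull = 0;
  switch (type) {
    case FILL_PREV:   return prev.val;                      /* value before the gap            */
    case FILL_NEXT:   return next.val;                      /* new branch: value after the gap */
    case FILL_LINEAR: {                                     /* interpolate between boundaries  */
      double slope = (next.val - prev.val) / (double)(next.key - prev.key);
      return prev.val + slope * (double)(ts - prev.key);
    }
    default:          *isNull = 1; return 0;                /* FILL_NULL and unknown modes     */
  }
}

int main(void) {
  SamplePoint prev = {100, 1.0}, next = {200, 3.0};
  int isNull;
  printf("%f\n", fillValue(FILL_NEXT, 150, prev, next, &isNull));   /* prints 3.0 */
  printf("%f\n", fillValue(FILL_LINEAR, 150, prev, next, &isNull)); /* prints 2.0 */
  return 0;
}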
if (isNull((char *)&pCtx->start.val, srcType) || isNull((char *)&pCtx->end.val, srcType)) { - setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); + setNull(pCtx->pOutput, srcType, pCtx->inputBytes); } else { taosGetLinearInterpolationVal(&point, pCtx->outputType, &point1, &point2, TSDB_DATA_TYPE_DOUBLE); } } else { - setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); + setNull(pCtx->pOutput, srcType, pCtx->inputBytes); } } } @@ -3827,9 +4095,9 @@ static void interp_function(SQLFunctionCtx *pCtx) { if (pCtx->size > 0) { // impose the timestamp check TSKEY key = GET_TS_DATA(pCtx, 0); - if (key == pCtx->nStartQueryTimestamp) { + if (key == pCtx->startTs) { char *pData = GET_INPUT_DATA(pCtx, 0); - assignVal(pCtx->aOutputBuf, pData, pCtx->inputBytes, pCtx->inputType); + assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); SET_VAL(pCtx, 1, 1); } else { interp_function_impl(pCtx); @@ -3897,7 +4165,7 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) { tsBufFlush(pTSbuf); - *(FILE **)pCtx->aOutputBuf = pTSbuf->f; + *(FILE **)pCtx->pOutput = pTSbuf->f; pTSbuf->remainOpen = true; tsBufDestroy(pTSbuf); @@ -3942,7 +4210,7 @@ static bool rate_function_setup(SQLFunctionCtx *pCtx) { return false; } - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); //->aOutputBuf + pCtx->outputBytes; + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); //->pOutput + pCtx->outputBytes; SRateInfo * pInfo = GET_ROWCELL_INTERBUF(pResInfo); pInfo->CorrectionValue = 0; @@ -4011,7 +4279,7 @@ static void rate_function(SQLFunctionCtx *pCtx) { // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4053,7 +4321,7 @@ static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4061,10 +4329,10 @@ static void rate_func_copy(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->aInputElemBuf, (size_t)pCtx->inputBytes); - pResInfo->hasResult = ((SRateInfo*)pCtx->aInputElemBuf)->hasResult; + memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->pInput, (size_t)pCtx->inputBytes); + pResInfo->hasResult = ((SRateInfo*)pCtx->pInput)->hasResult; - SRateInfo* pRateInfo = (SRateInfo*)pCtx->aInputElemBuf; + SRateInfo* pRateInfo = (SRateInfo*)pCtx->pInput; qDebug("%p rate_func_merge() firstKey:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " lastValue:%" PRId64 " CorrectionValue:%" PRId64 " hasResult:%d", pCtx, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, pRateInfo->hasResult); } @@ -4077,13 +4345,13 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) { pCtx, pRateInfo->isIRate, pRateInfo->firstKey, pRateInfo->lastKey, pRateInfo->firstValue, pRateInfo->lastValue, pRateInfo->CorrectionValue, pRateInfo->hasResult); if (pRateInfo->hasResult != DATA_SET_FLAG) { - setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + setNull(pCtx->pOutput, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); return; } - 
*(double*)pCtx->aOutputBuf = do_calc_rate(pRateInfo); + *(double*)pCtx->pOutput = do_calc_rate(pRateInfo); - qDebug("rate_finalizer() output result:%f", *(double *)pCtx->aOutputBuf); + qDebug("rate_finalizer() output result:%f", *(double *)pCtx->pOutput); // cannot set the numOfIteratedElems again since it is set during previous iteration pResInfo->numOfRes = 1; @@ -4144,7 +4412,7 @@ static void irate_function(SQLFunctionCtx *pCtx) { // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4178,7 +4446,7 @@ static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4210,7 +4478,7 @@ static void do_sumrate_merge(SQLFunctionCtx *pCtx) { if (DATA_SET_FLAG == pRateInfo->hasResult) { pResInfo->hasResult = DATA_SET_FLAG; SET_VAL(pCtx, pRateInfo->num, 1); - memcpy(pCtx->aOutputBuf, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); + memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); } } @@ -4226,17 +4494,17 @@ static void sumrate_finalizer(SQLFunctionCtx *pCtx) { qDebug("%p sumrate_finalizer() superTableQ:%d num:%" PRId64 " sum:%f hasResult:%d", pCtx, pCtx->stableQuery, pRateInfo->num, pRateInfo->sum, pRateInfo->hasResult); if (pRateInfo->hasResult != DATA_SET_FLAG) { - setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + setNull(pCtx->pOutput, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); return; } if (pRateInfo->num == 0) { // from meter - *(double*)pCtx->aOutputBuf = do_calc_rate(pRateInfo); + *(double*)pCtx->pOutput = do_calc_rate(pRateInfo); } else if (pCtx->functionId == TSDB_FUNC_SUM_RATE || pCtx->functionId == TSDB_FUNC_SUM_IRATE) { - *(double*)pCtx->aOutputBuf = pRateInfo->sum; + *(double*)pCtx->pOutput = pRateInfo->sum; } else { - *(double*)pCtx->aOutputBuf = pRateInfo->sum / pRateInfo->num; + *(double*)pCtx->pOutput = pRateInfo->sum / pRateInfo->num; } pResInfo->numOfRes = 1; @@ -4270,7 +4538,7 @@ int32_t functionCompatList[] = { 1, 1, 1, 1, }; -SQLAggFuncElem aAggs[] = {{ +SAggFunctionInfo aAggs[] = {{ // 0, count function does not invoke the finalize function "count", TSDB_FUNC_COUNT, @@ -4344,7 +4612,7 @@ SQLAggFuncElem aAggs[] = {{ // 5 "stddev", TSDB_FUNC_STDDEV, - TSDB_FUNC_INVALID_ID, + TSDB_FUNC_STDDEV_DST, TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, function_setup, stddev_function, @@ -4654,6 +4922,20 @@ SQLAggFuncElem aAggs[] = {{ }, { // 27 + "stddev", // return table id and the corresponding tags for join match and subscribe + TSDB_FUNC_STDDEV_DST, + TSDB_FUNC_AVG, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE, + function_setup, + stddev_dst_function, + stddev_dst_function_f, + no_next_step, + stddev_dst_finalizer, + stddev_dst_merge, + dataBlockRequired, + }, + { + // 28 "interp", TSDB_FUNC_INTERP, TSDB_FUNC_INTERP, @@ -4667,7 +4949,7 @@ SQLAggFuncElem aAggs[] = {{ dataBlockRequired, }, { - // 28 + // 29 "rate", TSDB_FUNC_RATE, TSDB_FUNC_RATE, @@ -4681,7 +4963,7 @@ SQLAggFuncElem aAggs[] = {{ dataBlockRequired, }, { - // 29 + // 30 "irate", TSDB_FUNC_IRATE, TSDB_FUNC_IRATE, @@ -4695,7 
+4977,7 @@ SQLAggFuncElem aAggs[] = {{ dataBlockRequired, }, { - // 30 + // 31 "sum_rate", TSDB_FUNC_SUM_RATE, TSDB_FUNC_SUM_RATE, @@ -4709,7 +4991,7 @@ SQLAggFuncElem aAggs[] = {{ dataBlockRequired, }, { - // 31 + // 32 "sum_irate", TSDB_FUNC_SUM_IRATE, TSDB_FUNC_SUM_IRATE, @@ -4723,7 +5005,7 @@ SQLAggFuncElem aAggs[] = {{ dataBlockRequired, }, { - // 32 + // 33 "avg_rate", TSDB_FUNC_AVG_RATE, TSDB_FUNC_AVG_RATE, @@ -4737,7 +5019,7 @@ SQLAggFuncElem aAggs[] = {{ dataBlockRequired, }, { - // 33 + // 34 "avg_irate", TSDB_FUNC_AVG_IRATE, TSDB_FUNC_AVG_IRATE, @@ -4751,7 +5033,7 @@ SQLAggFuncElem aAggs[] = {{ dataBlockRequired, }, { - // 34 + // 35 "tid_tag", // return table id and the corresponding tags for join match and subscribe TSDB_FUNC_TID_TAG, TSDB_FUNC_TID_TAG, @@ -4763,4 +5045,4 @@ SQLAggFuncElem aAggs[] = {{ noop1, noop1, dataBlockRequired, - }}; + } }; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 38ba6b5400..72ae0d566c 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -15,7 +15,6 @@ #include "os.h" #include "qFill.h" #include "taosmsg.h" -#include "tcache.h" #include "tglobal.h" #include "exception.h" @@ -24,11 +23,9 @@ #include "qExecutor.h" #include "qResultbuf.h" #include "qUtil.h" -#include "query.h" #include "queryLog.h" #include "tlosertree.h" #include "ttype.h" -#include "tcompare.h" #define MAX_ROWS_PER_RESBUF_PAGE ((1u<<12) - 1) @@ -36,8 +33,6 @@ * check if the primary column is load by default, otherwise, the program will * forced to load primary column explicitly. */ -#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u) -#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP) #define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN) #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN) @@ -56,27 +51,6 @@ (_dst).ekey = (_src).ekey;\ } while (0) -enum { - // when query starts to execute, this status will set - QUERY_NOT_COMPLETED = 0x1u, - - /* result output buffer is full, current query is paused. - * this status is only exist in group-by clause and diff/add/division/multiply/ query. - */ - QUERY_RESBUF_FULL = 0x2u, - - /* query is over - * 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc. - * 2. 
when all data within queried time window, it is also denoted as query_completed - */ - QUERY_COMPLETED = 0x4u, - - /* when the result is not completed return to client, this status will be - * usually used in case of interval query with interpolation option - */ - QUERY_OVER = 0x8u, -}; - enum { TS_JOIN_TS_EQUAL = 0, TS_JOIN_TS_NOT_EQUALS = 1, @@ -134,13 +108,11 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) -#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) -static void setQueryStatus(SQuery *pQuery, int8_t status); static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv); -static int32_t getMaximumIdleDurationSec() { +int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; } @@ -181,27 +153,19 @@ static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { tw->ekey -= 1; } -#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) -#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) - -// todo move to utility -static int32_t mergeIntoGroupResultImpl(SGroupResInfo* pGroupResInfo, SArray *pTableList, SQInfo* pQInfo); - static void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult); static void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult); static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, - SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId); + SDataStatis *pStatis, SExprInfo* pExprInfo); static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv); static void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo); static void resetDefaultResInfoOutputBuf(SQueryRuntimeEnv *pRuntimeEnv); static bool hasMainOutput(SQuery *pQuery); -static void buildTagQueryResult(SQInfo *pQInfo); -static int32_t setAdditionalInfo(SQInfo *pQInfo, void *pTable, STableQueryInfo *pTableQueryInfo); -static int32_t checkForQueryBuf(size_t numOfTables); +static int32_t setTimestampListJoinInfo(SQInfo *pQInfo, STableQueryInfo *pTableQueryInfo); static void releaseQueryBuf(size_t numOfTables); static int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order); static void doRowwiseTimeWindowInterpolation(SQueryRuntimeEnv* pRuntimeEnv, SArray* pDataBlock, TSKEY prevTs, int32_t prevRowIndex, TSKEY curTs, int32_t curRowIndex, TSKEY windowKey, int32_t type); @@ -296,12 +260,7 @@ void updateNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfRes) { } } -static UNUSED_FUNC int32_t getMergeResultGroupId(int32_t groupIndex) { - int32_t base = 50000000; - return base + (groupIndex * 10000); -} - -bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) { +bool isGroupbyColumn(SSqlGroupbyExpr *pGroupbyExpr) { if (pGroupbyExpr == NULL || pGroupbyExpr->numOfGroupCols == 0) { return false; } @@ -321,6 +280,17 @@ bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) { return false; } +bool isStabledev(SQuery* pQuery) { + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + int32_t functId = pQuery->pExpr1[i].base.functionId; + if (functId == 
TSDB_FUNC_STDDEV_DST) { + return true; + } + } + + return false; +} + int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) { assert(pGroupbyExpr != NULL); @@ -345,7 +315,7 @@ int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) { return type; } -bool isSelectivityWithTagsQuery(SQuery *pQuery) { +static bool isSelectivityWithTagsQuery(SQuery *pQuery) { bool hasTags = false; int32_t numOfSelectivity = 0; @@ -356,7 +326,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) { continue; } - if ((aAggs[functId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { + if ((aAggs[functId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { numOfSelectivity++; } } @@ -368,7 +338,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) { return false; } -bool isProjQuery(SQuery *pQuery) { +static bool isProjQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { int32_t functId = pQuery->pExpr1[i].base.functionId; if (functId != TSDB_FUNC_PRJ && functId != TSDB_FUNC_TAGPRJ) { @@ -379,17 +349,14 @@ bool isProjQuery(SQuery *pQuery) { return true; } -bool isTSCompQuery(SQuery *pQuery) { return pQuery->pExpr1[0].base.functionId == TSDB_FUNC_TS_COMP; } - -static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) { - SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); - SQuery* pQuery = pRuntimeEnv->pQuery; +static bool isTsCompQuery(SQuery *pQuery) { return pQuery->pExpr1[0].base.functionId == TSDB_FUNC_TS_COMP; } +static bool limitOperator(SQuery* pQuery, void* qinfo) { if ((pQuery->limit.limit > 0) && (pQuery->rec.total + pQuery->rec.rows > pQuery->limit.limit)) { pQuery->rec.rows = pQuery->limit.limit - pQuery->rec.total; qDebug("QInfo:%p discard remain data due to result limitation, limit:%"PRId64", current return:%" PRId64 ", total:%"PRId64, - pQInfo, pQuery->limit.limit, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); + qinfo, pQuery->limit.limit, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); assert(pQuery->rec.rows >= 0); setQueryStatus(pQuery, QUERY_COMPLETED); return true; @@ -643,7 +610,7 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf } static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, STimeWindow *win, - bool masterscan, SResultRow** pResult, int64_t groupId) { + bool masterscan, SResultRow **pResult, int64_t groupId) { assert(win->skey <= win->ekey); SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; @@ -826,17 +793,29 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo return num; } -// TODO decouple the data block and the SQLFunctionCtx -static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pWin, int32_t offset, int32_t forwardStep, TSKEY *tsCol, int32_t numOfTotal) { - SQuery *pQuery = pRuntimeEnv->pQuery; +static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t size, SArray *pDataBlock); + +static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pWin, int32_t offset, + int32_t forwardStep, TSKEY *tsCol, int32_t numOfTotal, SArray *pDataBlock) { + SQuery * pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; bool hasPrev = pCtx[0].preAggVals.isSet; for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { pCtx[k].size = forwardStep; - pCtx[k].nStartQueryTimestamp = pWin->skey; - pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? 
offset : offset - (forwardStep - 1); + pCtx[k].startTs = pWin->skey; + + char *dataBlock = getDataBlock(pRuntimeEnv, &pRuntimeEnv->sasArray[k], k, numOfTotal, pDataBlock); + + int32_t pos = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1); + if (dataBlock != NULL) { + pCtx[k].pInput = (char *)dataBlock + pos * pCtx[k].inputBytes; + } + + if (tsCol != NULL) { + pCtx[k].ptsList = &tsCol[pos]; + } int32_t functionId = pQuery->pExpr1[k].base.functionId; @@ -860,7 +839,7 @@ static void doRowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow * SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - pCtx[k].nStartQueryTimestamp = pWin->skey; + pCtx[k].startTs = pWin->skey; int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { @@ -976,6 +955,7 @@ static void* getDataBlockImpl(SArray* pDataBlock, int32_t colId) { return NULL; } +// todo refactor static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t size, SArray *pDataBlock) { if (pDataBlock == NULL) { return NULL; @@ -1190,10 +1170,9 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * tsCols = (TSKEY *)(pColInfo->pData); } - SQInfo *pQInfo = GET_QINFO_ADDR(pRuntimeEnv); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { char *dataBlock = getDataBlock(pRuntimeEnv, &pRuntimeEnv->sasArray[k], k, pDataBlockInfo->rows, pDataBlock); - setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pRuntimeEnv->sasArray[k], k, pQInfo->vgId); + setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pQuery->pExpr1[k]); } int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); @@ -1234,7 +1213,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP); - doBlockwiseApplyFunctions(pRuntimeEnv, &w, startPos, 0, tsCols, pDataBlockInfo->rows); + doBlockwiseApplyFunctions(pRuntimeEnv, &w, startPos, 0, tsCols, pDataBlockInfo->rows, pDataBlock); } // restore current time window @@ -1244,7 +1223,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * // window start key interpolation doWindowBorderInterpolation(pRuntimeEnv, pDataBlockInfo, pDataBlock, pResult, &win, pQuery->pos, forwardStep); - doBlockwiseApplyFunctions(pRuntimeEnv, &win, startPos, forwardStep, tsCols, pDataBlockInfo->rows); + doBlockwiseApplyFunctions(pRuntimeEnv, &win, startPos, forwardStep, tsCols, pDataBlockInfo->rows, pDataBlock); STimeWindow nextWin = win; while (1) { @@ -1265,7 +1244,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * // window start(end) key interpolation doWindowBorderInterpolation(pRuntimeEnv, pDataBlockInfo, pDataBlock, pResult, &nextWin, startPos, forwardStep); - doBlockwiseApplyFunctions(pRuntimeEnv, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows); + doBlockwiseApplyFunctions(pRuntimeEnv, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows, pDataBlock); } } else { @@ -1277,7 +1256,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - 
pCtx[k].nStartQueryTimestamp = pQuery->window.skey; + pCtx[k].startTs = pQuery->window.skey; aAggs[functionId].xFunction(&pCtx[k]); } } @@ -1305,7 +1284,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat return -1; } - SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, d, len, true, groupIndex); + SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->resultRowInfo, d, len, true, groupIndex); assert (pResultRow != NULL); int64_t v = -1; @@ -1386,7 +1365,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { return TS_JOIN_TAG_NOT_EQUALS; } - TSKEY key = *(TSKEY *)((char*)pCtx[0].aInputElemBuf + TSDB_KEYSIZE * offset); + TSKEY key = *(TSKEY *)((char*)pCtx[0].pInput + TSDB_KEYSIZE * offset); #if defined(_DEBUG_VIEW) printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 ", tag:%"PRIu64", query order:%d, ts order:%d, traverse:%d, index:%d\n", @@ -1556,7 +1535,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { char *dataBlock = getDataBlock(pRuntimeEnv, &pRuntimeEnv->sasArray[k], k, pDataBlockInfo->rows, pDataBlock); - setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pRuntimeEnv->sasArray[k], k, pQInfo->vgId); + setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pQuery->pExpr1[k]); pCtx[k].size = 1; } @@ -1669,8 +1648,9 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS pWindowResInfo->curIndex = index; } else { // other queries // decide which group this rows belongs to according to current state value + char* val = NULL; if (groupbyColumnValue) { - char *val = groupbyColumnData + bytes * offset; + val = groupbyColumnData + bytes * offset; if (isNull(val, type)) { // ignore the null value continue; } @@ -1681,6 +1661,34 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } } + if (pRuntimeEnv->stabledev) { + for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { + int32_t functionId = pQuery->pExpr1[k].base.functionId; + if (functionId != TSDB_FUNC_STDDEV_DST) { + continue; + } + + pRuntimeEnv->pCtx[k].param[0].arr = NULL; + pRuntimeEnv->pCtx[k].param[0].nType = TSDB_DATA_TYPE_INT; // avoid freeing the memory by setting the type to be int + + // todo opt perf + int32_t numOfGroup = (int32_t)taosArrayGetSize(pRuntimeEnv->prevResult); + for (int32_t i = 0; i < numOfGroup; ++i) { + SInterResult *p = taosArrayGet(pRuntimeEnv->prevResult, i); + if (memcmp(p->tags, val, bytes) == 0) { + int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult); + for (int32_t f = 0; f < numOfCols; ++f) { + SStddevInterResult *pres = taosArrayGet(p->pResult, f); + if (pres->colId == pQuery->pExpr1[k].base.colInfo.colId) { + pRuntimeEnv->pCtx[k].param[0].arr = pres->pResult; + break; + } + } + } + } + } + } + for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { @@ -1723,7 +1731,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl SQuery *pQuery = pRuntimeEnv->pQuery; STableQueryInfo* pTableQueryInfo = pQuery->current; - SResultRowInfo* pResultRowInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo* pResultRowInfo = &pRuntimeEnv->resultRowInfo; if (pQuery->numOfFilterCols > 0 || 
pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->groupbyColumn) { rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pResultRowInfo, pDataBlock); @@ -1767,14 +1775,13 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl } void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, - SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId) { + SDataStatis *pStatis, SExprInfo* pExprInfo) { - int32_t functionId = pQuery->pExpr1[colIndex].base.functionId; - int32_t colId = pQuery->pExpr1[colIndex].base.colInfo.colId; + int32_t functionId = pExprInfo->base.functionId; + int32_t colId = pExprInfo->base.colInfo.colId; SDataStatis *tpField = NULL; - pCtx->hasNull = hasNullValue(&pQuery->pExpr1[colIndex].base.colInfo, pStatis, &tpField); - pCtx->aInputElemBuf = inputData; + pCtx->hasNull = hasNullValue(&pExprInfo->base.colInfo, pStatis, &tpField); if (tpField != NULL) { pCtx->preAggVals.isSet = true; @@ -1789,73 +1796,24 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY // limit/offset query will affect this value pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->rows - pQuery->pos : pQuery->pos + 1; - // minimum value no matter ascending/descending order query - pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos: (pQuery->pos - pCtx->size + 1); - assert(pCtx->startOffset >= 0); - - uint32_t status = aAggs[functionId].nStatus; - if (((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) && (tsCol != NULL)) { - pCtx->ptsList = tsCol; + // set the start position in current block + int32_t offset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos: (pQuery->pos - pCtx->size + 1); + if (inputData != NULL) { + pCtx->pInput = (char*)inputData + offset * pCtx->inputBytes; } - if (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) { - // last_dist or first_dist function - // store the first&last timestamp into the intermediate buffer [1], the true - // value may be null but timestamp will never be null - } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TWA || - functionId == TSDB_FUNC_DIFF || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) { - /* - * least squares function needs two columns of input, currently, the x value of linear equation is set to - * timestamp column, and the y-value is the column specified in pQuery->pExpr1[i].colIdxInBuffer - * - * top/bottom function needs timestamp to indicate when the - * top/bottom values emerge, so does diff function - */ - if (functionId == TSDB_FUNC_TWA) { - pCtx->param[1].i64 = pQuery->window.skey; - pCtx->param[1].nType = TSDB_DATA_TYPE_BIGINT; - pCtx->param[2].i64 = pQuery->window.ekey; - pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; - } + uint32_t status = aAggs[functionId].status; + if (((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) && (tsCol != NULL)) { + pCtx->ptsList = tsCol + offset; + } - } else if (functionId == TSDB_FUNC_ARITHM) { - pCtx->param[1].pz = param; - } else if (functionId == TSDB_FUNC_SPREAD) { // set the statistics data for primary time stamp column + if (functionId == TSDB_FUNC_SPREAD) { // set the statistics data for primary time stamp column if (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { pCtx->preAggVals.isSet = true; pCtx->preAggVals.statis.min = pBlockInfo->window.skey; pCtx->preAggVals.statis.max = pBlockInfo->window.ekey; } - } else if (functionId == 
TSDB_FUNC_INTERP) { - pCtx->param[2].i64 = (int8_t) pQuery->fillType; - if (pQuery->fillVal != NULL) { - if (isNull((const char*) &pQuery->fillVal[colIndex], pCtx->inputType)) { - pCtx->param[1].nType = TSDB_DATA_TYPE_NULL; - } else { // todo refactor, tVariantCreateFromBinary should handle the NULL value - if (pCtx->inputType != TSDB_DATA_TYPE_BINARY && pCtx->inputType != TSDB_DATA_TYPE_NCHAR) { - tVariantCreateFromBinary(&pCtx->param[1], (char*) &pQuery->fillVal[colIndex], pCtx->inputBytes, pCtx->inputType); - } - } - } - } else if (functionId == TSDB_FUNC_TS_COMP) { - pCtx->param[0].i64 = vgId; - pCtx->param[0].nType = TSDB_DATA_TYPE_BIGINT; } - -#if defined(_DEBUG_VIEW) - // int64_t *tsList = (int64_t *)primaryColumnData; -// int64_t s = tsList[0]; -// int64_t e = tsList[size - 1]; - -// if (IS_DATA_BLOCK_LOADED(blockStatus)) { -// qDebug("QInfo:%p query ts:%lld-%lld, offset:%d, rows:%d, bstatus:%d, -// functId:%d", GET_QINFO_ADDR(pQuery), -// s, e, startOffset, size, blockStatus, functionId); -// } else { -// qDebug("QInfo:%p block not loaded, bstatus:%d", -// GET_QINFO_ADDR(pQuery), blockStatus); -// } -#endif } // set the output buffer for the selectivity + tag query @@ -1878,7 +1836,7 @@ static int32_t setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx if (pSqlFuncMsg->functionId == TSDB_FUNC_TAG_DUMMY || pSqlFuncMsg->functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pCtx[i].outputBytes; pTagCtx[num++] = &pCtx[i]; - } else if ((aAggs[pSqlFuncMsg->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { + } else if ((aAggs[pSqlFuncMsg->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { p = &pCtx[i]; } else if (pSqlFuncMsg->functionId == TSDB_FUNC_TS || pSqlFuncMsg->functionId == TSDB_FUNC_TAG) { // tag function may be the group by tag column @@ -1900,26 +1858,41 @@ static int32_t setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx return TSDB_CODE_SUCCESS; } -// todo refactor -static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order) { +static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfTables, int16_t order, int32_t vgId) { qDebug("QInfo:%p setup runtime env", GET_QINFO_ADDR(pRuntimeEnv)); SQuery *pQuery = pRuntimeEnv->pQuery; - pRuntimeEnv->pCtx = (SQLFunctionCtx *)calloc(pQuery->numOfOutput, sizeof(SQLFunctionCtx)); - pRuntimeEnv->offset = calloc(pQuery->numOfOutput, sizeof(int16_t)); + pRuntimeEnv->interBufSize = getOutputInterResultBufSize(pQuery); + pRuntimeEnv->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo)); + + pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pRuntimeEnv->keyBuf = malloc(pQuery->maxSrcColumnSize + sizeof(int64_t)); + pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); + pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQuery->numOfCols + pQuery->srcRowSize); + pRuntimeEnv->tagVal = malloc(pQuery->tagLen); + pRuntimeEnv->pCtx = (SQLFunctionCtx *)calloc(pQuery->numOfOutput, sizeof(SQLFunctionCtx)); + pRuntimeEnv->offset = calloc(pQuery->numOfOutput, sizeof(int16_t)); pRuntimeEnv->rowCellInfoOffset = calloc(pQuery->numOfOutput, sizeof(int32_t)); pRuntimeEnv->sasArray = calloc(pQuery->numOfOutput, sizeof(SArithmeticSupport)); - if (pRuntimeEnv->offset == NULL || pRuntimeEnv->pCtx == NULL || pRuntimeEnv->rowCellInfoOffset == NULL || pRuntimeEnv->sasArray == NULL) { + if (pRuntimeEnv->offset == NULL || pRuntimeEnv->pCtx == NULL || pRuntimeEnv->rowCellInfoOffset == 
NULL || + pRuntimeEnv->sasArray == NULL || pRuntimeEnv->pResultRowHashTable == NULL || pRuntimeEnv->keyBuf == NULL || + pRuntimeEnv->prevRow == NULL || pRuntimeEnv->tagVal == NULL) { goto _clean; } + char* start = POINTER_BYTES * pQuery->numOfCols + (char*) pRuntimeEnv->prevRow; + pRuntimeEnv->prevRow[0] = start; + for(int32_t i = 1; i < pQuery->numOfCols; ++i) { + pRuntimeEnv->prevRow[i] = pRuntimeEnv->prevRow[i - 1] + pQuery->colList[i-1].bytes; + } + pRuntimeEnv->offset[0] = 0; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SSqlFuncMsg *pSqlFuncMsg = &pQuery->pExpr1[i].base; SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; - SColIndex* pIndex = &pSqlFuncMsg->colInfo; + SColIndex * pIndex = &pSqlFuncMsg->colInfo; if (TSDB_COL_REQ_NULL(pIndex->flag)) { pCtx->requireNull = true; @@ -1931,10 +1904,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order int32_t index = pSqlFuncMsg->colInfo.colIndex; if (TSDB_COL_IS_TAG(pIndex->flag)) { if (pIndex->colId == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor - SSchema s = tGetTableNameColumnSchema(); + SSchema *s = tGetTbnameColumnSchema(); - pCtx->inputBytes = s.bytes; - pCtx->inputType = s.type; + pCtx->inputBytes = s->bytes; + pCtx->inputType = s->type; } else if (pIndex->colId == TSDB_BLOCK_DIST_COLUMN_INDEX) { SSchema s = tGetBlockDistColumnSchema(); pCtx->inputBytes = s.bytes; @@ -1968,6 +1941,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order for (int32_t j = 0; j < pCtx->numOfParams; ++j) { int16_t type = pSqlFuncMsg->arg[j].argType; int16_t bytes = pSqlFuncMsg->arg[j].argBytes; + if (pSqlFuncMsg->functionId == TSDB_FUNC_STDDEV_DST) { + continue; + } + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { tVariantCreateFromBinary(&pCtx->param[j], pSqlFuncMsg->arg[j].argValue.pz, bytes, type); } else { @@ -1988,18 +1965,38 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order pCtx->param[3].nType = TSDB_DATA_TYPE_BIGINT; pCtx->param[1].i64 = pQuery->order.orderColId; - } - - if (functionId == TSDB_FUNC_ARITHM) { + } else if (functionId == TSDB_FUNC_INTERP) { + pCtx->param[2].i64 = (int8_t)pQuery->fillType; + if (pQuery->fillVal != NULL) { + if (isNull((const char *)&pQuery->fillVal[i], pCtx->inputType)) { + pCtx->param[1].nType = TSDB_DATA_TYPE_NULL; + } else { // todo refactor, tVariantCreateFromBinary should handle the NULL value + if (pCtx->inputType != TSDB_DATA_TYPE_BINARY && pCtx->inputType != TSDB_DATA_TYPE_NCHAR) { + tVariantCreateFromBinary(&pCtx->param[1], (char *)&pQuery->fillVal[i], pCtx->inputBytes, pCtx->inputType); + } + } + } + } else if (functionId == TSDB_FUNC_TS_COMP) { + pCtx->param[0].i64 = vgId; + pCtx->param[0].nType = TSDB_DATA_TYPE_BIGINT; + } else if (functionId == TSDB_FUNC_TWA) { + pCtx->param[1].i64 = pQuery->window.skey; + pCtx->param[1].nType = TSDB_DATA_TYPE_BIGINT; + pCtx->param[2].i64 = pQuery->window.ekey; + pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; + } else if (functionId == TSDB_FUNC_ARITHM) { pRuntimeEnv->sasArray[i].data = calloc(pQuery->numOfCols, POINTER_BYTES); if (pRuntimeEnv->sasArray[i].data == NULL) { goto _clean; } + + pCtx->param[1].pz = (char*) &pRuntimeEnv->sasArray[i]; } if (i > 0) { pRuntimeEnv->offset[i] = pRuntimeEnv->offset[i - 1] + pRuntimeEnv->pCtx[i - 1].outputBytes; - pRuntimeEnv->rowCellInfoOffset[i] = pRuntimeEnv->rowCellInfoOffset[i - 1] + sizeof(SResultRowCellInfo) + pQuery->pExpr1[i - 1].interBytes; + pRuntimeEnv->rowCellInfoOffset[i] = + 
pRuntimeEnv->rowCellInfoOffset[i - 1] + sizeof(SResultRowCellInfo) + pQuery->pExpr1[i - 1].interBytes; } } @@ -2023,6 +2020,10 @@ _clean: tfree(pRuntimeEnv->offset); tfree(pRuntimeEnv->rowCellInfoOffset); tfree(pRuntimeEnv->sasArray); + tfree(pRuntimeEnv->pResultRowHashTable); + tfree(pRuntimeEnv->keyBuf); + tfree(pRuntimeEnv->prevRow); + tfree(pRuntimeEnv->tagVal); return TSDB_CODE_QRY_OUT_OF_MEMORY; } @@ -2050,9 +2051,9 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { SQInfo* pQInfo = (SQInfo*) GET_QINFO_ADDR(pRuntimeEnv); qDebug("QInfo:%p teardown runtime env", pQInfo); - cleanupResultRowInfo(&pRuntimeEnv->windowResInfo); + cleanupResultRowInfo(&pRuntimeEnv->resultRowInfo); - if (isTSCompQuery(pQuery)) { + if (isTsCompQuery(pQuery)) { FILE *f = *(FILE **)pQuery->sdata[0]->data; if (f) { @@ -2096,20 +2097,21 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tfree(pRuntimeEnv->keyBuf); tfree(pRuntimeEnv->rowCellInfoOffset); tfree(pRuntimeEnv->prevRow); + tfree(pRuntimeEnv->tagVal); taosHashCleanup(pRuntimeEnv->pResultRowHashTable); pRuntimeEnv->pResultRowHashTable = NULL; pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); + taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult); + pRuntimeEnv->prevResult = NULL; } static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) { return pQInfo->rspContext != NULL; } -#define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) - -static bool isQueryKilled(SQInfo *pQInfo) { +bool isQueryKilled(SQInfo *pQInfo) { if (IS_QUERY_KILLED(pQInfo)) { return true; } @@ -2128,7 +2130,7 @@ static bool isQueryKilled(SQInfo *pQInfo) { return false; } -static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;} +void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;} static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { SQuery* pQuery = pRuntimeEnv->pQuery; @@ -2154,7 +2156,7 @@ static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { continue; } - if (!IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].nStatus)) { + if (!IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].status)) { return true; } } @@ -2229,7 +2231,7 @@ static bool needReverseScan(SQuery *pQuery) { * The following 4 kinds of query are treated as the tags query * tagprj, tid_tag query, count(tbname), 'abc' (user defined constant value column) query */ -static bool onlyQueryTags(SQuery* pQuery) { +bool onlyQueryTags(SQuery* pQuery) { for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { SExprInfo* pExprInfo = &pQuery->pExpr1[i]; @@ -2269,7 +2271,7 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6 static void setScanLimitationByResultBuffer(SQuery *pQuery) { if (isTopBottomQuery(pQuery)) { pQuery->checkResultBuf = 0; - } else if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + } else if (isGroupbyColumn(pQuery->pGroupbyExpr)) { pQuery->checkResultBuf = 0; } else { bool hasMultioutput = false; @@ -2279,7 +2281,7 @@ static void setScanLimitationByResultBuffer(SQuery *pQuery) { continue; } - hasMultioutput = IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].nStatus); + hasMultioutput = IS_MULTIOUTPUT(aAggs[pExprMsg->functionId].status); if (!hasMultioutput) { break; } @@ -2327,7 +2329,6 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } -// todo refactor, add iterator static void 
doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { size_t t = taosArrayGetSize(pQInfo->tableGroupInfo.pGroupList); for(int32_t i = 0; i < t; ++i) { @@ -2365,7 +2366,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo return; } - if (isGroupbyNormalCol(pQuery->pGroupbyExpr) && pQuery->order.order == TSDB_ORDER_DESC) { + if (isGroupbyColumn(pQuery->pGroupbyExpr) && pQuery->order.order == TSDB_ORDER_DESC) { pQuery->order.order = TSDB_ORDER_ASC; if (pQuery->window.skey > pQuery->window.ekey) { SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); @@ -2442,7 +2443,7 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) { int32_t num = 0; - if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { + if (isGroupbyColumn(pQuery->pGroupbyExpr)) { num = 128; } else if (QUERY_IS_INTERVAL_QUERY(pQuery)) { // time window query, allocate one page for each table size_t s = pQInfo->tableqinfoGroupInfo.numOfTables; @@ -2459,7 +2460,7 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i SQuery* pQuery = pRuntimeEnv->pQuery; int32_t MIN_ROWS_PER_PAGE = 4; - *rowsize = (int32_t)(pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery)); + *rowsize = (int32_t)(pQuery->resultRowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery)); int32_t overhead = sizeof(tFilePage); // one page contains at least two rows @@ -2745,70 +2746,51 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) { return midPos; } -static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capacity) { +static void expandBuffer(SQueryRuntimeEnv* pRuntimeEnv, int32_t newSize, void* qinfo) { SQuery* pQuery = pRuntimeEnv->pQuery; + SResultRec *pRec = &pQuery->rec; - if (capacity < pQuery->rec.capacity) { - return; - } + assert(newSize > 0); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { int32_t bytes = pQuery->pExpr1[i].bytes; - assert(bytes > 0 && capacity > 0); - char *tmp = realloc(pQuery->sdata[i], bytes * capacity + sizeof(tFilePage)); + char *tmp = realloc(pQuery->sdata[i], bytes * newSize + sizeof(tFilePage)); if (tmp == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } else { + memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (size_t)((newSize - pRec->rows) * bytes)); pQuery->sdata[i] = (tFilePage *)tmp; } - - // set the pCtx output buffer position - pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data; } - qDebug("QInfo:%p realloc output buffer to inc output buffer from: %" PRId64 " rows to:%d rows", GET_QINFO_ADDR(pRuntimeEnv), - pQuery->rec.capacity, capacity); - - pQuery->rec.capacity = capacity; + pRec->capacity = newSize; + qDebug("QInfo:%p realloc output buffer, new size: %d rows, old:%" PRId64 ", remain:%" PRId64, qinfo, newSize, + pRec->capacity, newSize - pRec->rows); } -// TODO merge with enuserOutputBufferSimple -static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { +static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, int32_t numOfRows) { // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block SQuery* pQuery = pRuntimeEnv->pQuery; - if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyColumn && !isFixedOutputQuery(pRuntimeEnv) && !isTSCompQuery(pQuery)) { + if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyColumn && !isFixedOutputQuery(pRuntimeEnv) && !isTsCompQuery(pQuery)) { 
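The rewritten buffer handling above splits the old ensureOutputBufferSimple()/ensureOutputBuffer() pair into expandBuffer() plus a thin wrapper: when the incoming block holds more rows than the remaining capacity, each column's result buffer is realloc'ed, only the newly appended region is zeroed, and each function context's pOutput cursor is rebased into the grown buffer. The sketch below is a simplified illustration of that grow-and-rebase pattern for a single column; OutputCol and ensureCapacity are invented names, and the real code additionally reserves a tFilePage header in front of every buffer.

#include <stdlib.h>
#include <string.h>

/* Minimal stand-in for one output column: a growable byte buffer plus the
 * cursor the aggregate functions write through (akin to pCtx->pOutput). */
typedef struct {
  char *buf;       /* start of the column's result buffer */
  char *writePos;  /* where the next row will be written   */
  int   rowBytes;  /* bytes per row for this column        */
  int   rows;      /* rows already produced                */
  int   capacity;  /* rows the buffer can currently hold   */
} OutputCol;

/* Grow the buffer so it can hold `incoming` more rows, zero only the new tail,
 * and rebase the write cursor — the same pattern as expandBuffer()/ensureOutputBuffer(). */
static int ensureCapacity(OutputCol *col, int incoming) {
  int remain = col->capacity - col->rows;
  if (remain >= incoming) {
    return 0;                                   /* enough room, nothing to do */
  }

  int   newCap = col->capacity + (incoming - remain);
  char *tmp    = realloc(col->buf, (size_t)newCap * col->rowBytes);
  if (tmp == NULL) {
    return -1;                                  /* caller aborts the query */
  }

  /* zero only the newly appended region; existing rows stay untouched */
  memset(tmp + (size_t)col->rows * col->rowBytes, 0,
         (size_t)(newCap - col->rows) * col->rowBytes);

  col->buf      = tmp;
  col->capacity = newCap;
  col->writePos = col->buf + (size_t)col->rows * col->rowBytes;  /* rebase cursor */
  return 0;
}

int main(void) {
  OutputCol col = { .buf = NULL, .writePos = NULL, .rowBytes = 8, .rows = 0, .capacity = 0 };
  if (ensureCapacity(&col, 1024) == 0) {  /* make room for the first block      */
    col.rows = 1024;
    ensureCapacity(&col, 4096);           /* grows again before the next block  */
  }
  free(col.buf);
  return 0;
}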
SResultRec *pRec = &pQuery->rec; - if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) { - int32_t remain = (int32_t)(pRec->capacity - pRec->rows); - int32_t newSize = (int32_t)(pRec->capacity + (pBlockInfo->rows - remain)); + int32_t remain = (int32_t)(pRec->capacity - pRec->rows); + if (remain < numOfRows) { + int32_t newSize = (int32_t)(pRec->capacity + (numOfRows - remain)); + expandBuffer(pRuntimeEnv, newSize, GET_QINFO_ADDR(pRuntimeEnv)); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { int32_t bytes = pQuery->pExpr1[i].bytes; - assert(bytes > 0 && newSize > 0); - - char *tmp = realloc(pQuery->sdata[i], bytes * newSize + sizeof(tFilePage)); - if (tmp == NULL) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } else { - memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (size_t)((newSize - pRec->rows) * bytes)); - pQuery->sdata[i] = (tFilePage *)tmp; - } // set the pCtx output buffer position - pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data + pRec->rows * bytes; + pRuntimeEnv->pCtx[i].pOutput = pQuery->sdata[i]->data + pRec->rows * bytes; int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; + pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].pOutput; } } - - qDebug("QInfo:%p realloc output buffer, new size: %d rows, old:%" PRId64 ", remain:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv), - newSize, pRec->capacity, newSize - pRec->rows); - - pRec->capacity = newSize; } } } @@ -2816,9 +2798,9 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB static void doSetInitialTimewindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) { SQuery* pQuery = pRuntimeEnv->pQuery; - if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->resultRowInfo.prevSKey == TSKEY_INITIAL_VAL) { STimeWindow w = TSWINDOW_INITIALIZER; - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; if (QUERY_IS_ASC_QUERY(pQuery)) { getAlignQueryTimeWindow(pQuery, pBlockInfo->window.skey, pBlockInfo->window.skey, pQuery->window.ekey, &w); @@ -2858,13 +2840,13 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { doSetInitialTimewindow(pRuntimeEnv, &blockInfo); // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block - ensureOutputBuffer(pRuntimeEnv, &blockInfo); + ensureOutputBuffer(pRuntimeEnv, blockInfo.rows); SDataStatis *pStatis = NULL; SArray * pDataBlock = NULL; uint32_t status = 0; - int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); + int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pRuntimeEnv->resultRowInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); if (ret != TSDB_CODE_SUCCESS) { break; } @@ -2899,8 +2881,8 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } if (QUERY_IS_INTERVAL_QUERY(pQuery)) { - closeAllResultRows(&pRuntimeEnv->windowResInfo); - pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window + closeAllResultRows(&pRuntimeEnv->resultRowInfo); + pRuntimeEnv->resultRowInfo.curIndex = pRuntimeEnv->resultRowInfo.size - 1; // point to the last 
time window } return 0; @@ -2910,7 +2892,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { * set tag value in SQLFunctionCtx * e.g.,tag information into input buffer */ -static void doSetTagValueInParam(void *tsdb, void* pTable, int32_t tagColId, tVariant *tag, int16_t type, int16_t bytes) { +static void doSetTagValueInParam(void* pTable, int32_t tagColId, tVariant *tag, int16_t type, int16_t bytes) { tVariantDestroy(tag); if (tagColId == TSDB_TBNAME_COLUMN_INDEX) { @@ -2955,7 +2937,7 @@ static SColumnInfo* doGetTagColumnInfoById(SColumnInfo* pTagColList, int32_t num return NULL; } -void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { +void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable) { SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); @@ -2966,9 +2948,11 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); - doSetTagValueInParam(tsdb, pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes); + doSetTagValueInParam(pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes); } else { // set tag value, by which the results are aggregated. + int32_t offset = 0; + memset(pRuntimeEnv->tagVal, 0, pQuery->tagLen); for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) { SExprInfo* pLocalExprInfo = &pQuery->pExpr1[idx]; @@ -2978,8 +2962,16 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { } // todo use tag column index to optimize performance - doSetTagValueInParam(tsdb, pTable, pLocalExprInfo->base.colInfo.colId, &pRuntimeEnv->pCtx[idx].tag, + doSetTagValueInParam(pTable, pLocalExprInfo->base.colInfo.colId, &pRuntimeEnv->pCtx[idx].tag, pLocalExprInfo->type, pLocalExprInfo->bytes); + + if (IS_NUMERIC_TYPE(pLocalExprInfo->type) || pLocalExprInfo->type == TSDB_DATA_TYPE_BOOL) { + memcpy(pRuntimeEnv->tagVal + offset, &pRuntimeEnv->pCtx[idx].tag.i64, pLocalExprInfo->bytes); + } else { + memcpy(pRuntimeEnv->tagVal + offset, pRuntimeEnv->pCtx[idx].tag.pz, pRuntimeEnv->pCtx[idx].tag.nLen); + } + + offset += pLocalExprInfo->bytes; } // set the join tag for first column @@ -2991,7 +2983,7 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; SColumnInfo *pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); - doSetTagValueInParam(tsdb, pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes); + doSetTagValueInParam(pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes); int16_t tagType = pRuntimeEnv->pCtx[0].tag.nType; if (tagType == TSDB_DATA_TYPE_BINARY || tagType == TSDB_DATA_TYPE_NCHAR) { @@ -3108,253 +3100,35 @@ void UNUSED_FUNC displayInterResult(tFilePage **pdata, SQueryRuntimeEnv* pRuntim } } -typedef struct SCompSupporter { - STableQueryInfo **pTableQueryInfo; - int32_t *rowIndex; - int32_t order; -} SCompSupporter; - -int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) { - int32_t left = *(int32_t *)pLeft; - int32_t right = *(int32_t *)pRight; - - SCompSupporter * supporter = (SCompSupporter *)param; - - int32_t leftPos = supporter->rowIndex[left]; - int32_t rightPos = supporter->rowIndex[right]; - - /* left source is exhausted */ - if (leftPos == -1) { - return 1; - } - - /* 
right source is exhausted*/ - if (rightPos == -1) { - return -1; - } - - STableQueryInfo** pList = supporter->pTableQueryInfo; - - SResultRowInfo *pWindowResInfo1 = &(pList[left]->windowResInfo); - SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos); - TSKEY leftTimestamp = pWindowRes1->win.skey; - - SResultRowInfo *pWindowResInfo2 = &(pList[right]->windowResInfo); - SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); - TSKEY rightTimestamp = pWindowRes2->win.skey; - - if (leftTimestamp == rightTimestamp) { - return 0; - } - - if (supporter->order == TSDB_ORDER_ASC) { - return (leftTimestamp > rightTimestamp)? 1:-1; - } else { - return (leftTimestamp < rightTimestamp)? 1:-1; - } -} - -int32_t mergeGroupResult(SQInfo *pQInfo) { - int64_t st = taosGetTimestampUs(); - - SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo; - - int32_t numOfGroups = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo)); - while (pQInfo->groupIndex < numOfGroups) { - SArray *group = GET_TABLEGROUP(pQInfo, pQInfo->groupIndex); - - int32_t ret = mergeIntoGroupResultImpl(pGroupResInfo, group, pQInfo); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - // this group generates at least one result, return results - pQInfo->groupIndex += 1; - if (taosArrayGetSize(pGroupResInfo->pRows) > 0) { - break; - } - - qDebug("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1); - taosArrayClear(pGroupResInfo->pRows); - - pGroupResInfo->index = 0; - pGroupResInfo->rowId = 0; - } - - if (pQInfo->groupIndex == numOfGroups && taosArrayGetSize(pGroupResInfo->pRows) == 0) { - SET_STABLE_QUERY_OVER(pQInfo); - } - - int64_t elapsedTime = taosGetTimestampUs() - st; - qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pQInfo, - pQInfo->groupIndex - 1, numOfGroups, elapsedTime); - - pQInfo->runtimeEnv.summary.firstStageMergeTime += elapsedTime; - return TSDB_CODE_SUCCESS; -} - -static int32_t doCopyToSData(SQInfo *pQInfo, SResultRow **pRows, int32_t numOfRows, int32_t* index, int32_t orderType); +static int32_t doCopyToSData(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType); void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo; - // all results in current group have been returned to client, try next group - if (pGroupResInfo->index >= taosArrayGetSize(pGroupResInfo->pRows)) { - // current results of group has been sent to client, try next group - pGroupResInfo->index = 0; - pGroupResInfo->rowId = 0; - taosArrayClear(pGroupResInfo->pRows); - - if (mergeGroupResult(pQInfo) != TSDB_CODE_SUCCESS) { - return; // failed to save data in the disk - } - - // check if all results has been sent to client - int32_t numOfGroup = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo)); - if (taosArrayGetSize(pGroupResInfo->pRows) == 0 && pQInfo->groupIndex == numOfGroup) { - SET_STABLE_QUERY_OVER(pQInfo); - return; - } - } - - int32_t size = (int32_t) taosArrayGetSize(pGroupResInfo->pRows); - pQuery->rec.rows = doCopyToSData(pQInfo, pGroupResInfo->pRows->pData, (int32_t) size, &pGroupResInfo->index, TSDB_ORDER_ASC); -} - -int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow) { - SQuery* pQuery = pRuntimeEnv->pQuery; - - for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pExpr1[j].base.functionId; - - /* - * ts, tag, tagprj function can not decide the output number of current query - * the number of output result is 
decided by main output - */ - if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) { - continue; - } - - SResultRowCellInfo *pResultInfo = getResultCell(pRuntimeEnv, pResultRow, j); - assert(pResultInfo != NULL); - - if (pResultInfo->numOfRes > 0) { - return pResultInfo->numOfRes; - } - } - - return 0; -} - -int32_t mergeIntoGroupResultImpl(SGroupResInfo* pGroupResInfo, SArray *pTableList, SQInfo* pQInfo) { - SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQuery); - - int32_t code = TSDB_CODE_SUCCESS; - - int32_t *posList = NULL; - SLoserTreeInfo *pTree = NULL; - STableQueryInfo **pTableQueryInfoList = NULL; - - size_t size = taosArrayGetSize(pTableList); - if (pGroupResInfo->pRows == NULL) { - pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES); - } - - posList = calloc(size, sizeof(int32_t)); - pTableQueryInfoList = malloc(POINTER_BYTES * size); - - if (pTableQueryInfoList == NULL || posList == NULL || pGroupResInfo->pRows == NULL) { - qError("QInfo:%p failed alloc memory", pQInfo); - code = TSDB_CODE_QRY_OUT_OF_MEMORY; - goto _end; - } - - int32_t numOfTables = 0; - for (int32_t i = 0; i < size; ++i) { - STableQueryInfo *item = taosArrayGetP(pTableList, i); - if (item->windowResInfo.size > 0) { - pTableQueryInfoList[numOfTables++] = item; - } - } - - // there is no data in current group - // no need to merge results since only one table in each group - if (numOfTables == 0) { - goto _end; - } - - SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQuery->order.order}; - - int32_t ret = tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn); - if (ret != TSDB_CODE_SUCCESS) { - code = TSDB_CODE_QRY_OUT_OF_MEMORY; - goto _end; - } - - int64_t lastTimestamp = ascQuery? 
INT64_MIN:INT64_MAX; - int64_t startt = taosGetTimestampMs(); - - while (1) { - if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%p it is already killed, abort", pQInfo); - code = TSDB_CODE_TSC_QUERY_CANCELLED; - goto _end; - } - - int32_t tableIndex = pTree->pNode[0].index; - - SResultRowInfo *pWindowResInfo = &pTableQueryInfoList[tableIndex]->windowResInfo; - SResultRow *pWindowRes = getResultRow(pWindowResInfo, cs.rowIndex[tableIndex]); - - int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes); - if (num <= 0) { - cs.rowIndex[tableIndex] += 1; - - if (cs.rowIndex[tableIndex] >= pWindowResInfo->size) { - cs.rowIndex[tableIndex] = -1; - if (--numOfTables == 0) { // all input sources are exhausted - break; - } - } - } else { - assert((pWindowRes->win.skey >= lastTimestamp && ascQuery) || (pWindowRes->win.skey <= lastTimestamp && !ascQuery)); - - if (pWindowRes->win.skey != lastTimestamp) { - taosArrayPush(pGroupResInfo->pRows, &pWindowRes); - pWindowRes->numOfRows = (uint32_t) num; - } - - lastTimestamp = pWindowRes->win.skey; - - // move to the next row of current entry - if ((++cs.rowIndex[tableIndex]) >= pWindowResInfo->size) { - cs.rowIndex[tableIndex] = -1; - - // all input sources are exhausted - if ((--numOfTables) == 0) { - break; - } + while(pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) { + // all results in current group have been returned to client, try next group + if ((pGroupResInfo->pRows == NULL) || taosArrayGetSize(pGroupResInfo->pRows) == 0) { + assert(pGroupResInfo->index == 0); + if ((pQInfo->code = mergeIntoGroupResult(&pQInfo->groupResInfo, pQInfo)) != TSDB_CODE_SUCCESS) { + return; } } - tLoserTreeAdjust(pTree, tableIndex + pTree->numOfEntries); + pQuery->rec.rows = doCopyToSData(&pQInfo->runtimeEnv, pGroupResInfo, TSDB_ORDER_ASC); + + // current data are all dumped to result buffer, clear it + if (!hasRemainData(pGroupResInfo)) { + cleanupGroupResInfo(pGroupResInfo); + if (!incNextGroup(pGroupResInfo)) { + SET_STABLE_QUERY_OVER(pQInfo); + } + } + + // enough results in data buffer, return + if (pQuery->rec.rows >= pQuery->rec.threshold) { + break; + } } - - int64_t endt = taosGetTimestampMs(); - -#ifdef _DEBUG_VIEW - displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num); -#endif - - qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pQInfo, pQInfo->groupIndex, endt - startt); - - _end: - tfree(pTableQueryInfoList); - tfree(posList); - tfree(pTree); - - return code; } static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo *pTableQueryInfo) { @@ -3369,7 +3143,7 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo * pTableQueryInfo->cur.vgroupIndex = -1; // set the index at the end of time window - pTableQueryInfo->windowResInfo.curIndex = pTableQueryInfo->windowResInfo.size - 1; + pTableQueryInfo->resInfo.curIndex = pTableQueryInfo->resInfo.size - 1; } static void disableFuncInReverseScanImpl(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo *pWindowResInfo, int32_t order) { @@ -3404,7 +3178,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { int32_t order = pQuery->order.order; // group by normal columns and interval query on normal table - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; if (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) { disableFuncInReverseScanImpl(pRuntimeEnv, pWindowResInfo, order); } else { // for simple result of table query, @@ -3468,11 
+3242,11 @@ void resetDefaultResInfoOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { int32_t tid = 0; int64_t uid = 0; - SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&tid, sizeof(tid), true, uid); + SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->resultRowInfo, (char *)&tid, sizeof(tid), true, uid); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; - pCtx->aOutputBuf = pQuery->sdata[i]->data; + pCtx->pOutput = pQuery->sdata[i]->data; /* * set the output buffer information and intermediate buffer @@ -3485,7 +3259,7 @@ void resetDefaultResInfoOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { // set the timestamp output buffer for top/bottom/diff query int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; + pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].pOutput; } memset(pQuery->sdata[i]->data, 0, (size_t)(pQuery->pExpr1[i].bytes * pQuery->rec.capacity)); @@ -3503,8 +3277,8 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) { assert(functionId != TSDB_FUNC_DIFF); // set next output position - if (IS_OUTER_FORWARD(aAggs[functionId].nStatus)) { - pRuntimeEnv->pCtx[j].aOutputBuf += pRuntimeEnv->pCtx[j].outputBytes * output; + if (IS_OUTER_FORWARD(aAggs[functionId].status)) { + pRuntimeEnv->pCtx[j].pOutput += pRuntimeEnv->pCtx[j].outputBytes * output; } if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { @@ -3568,10 +3342,10 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) { int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; memmove(pQuery->sdata[i]->data, (char*)pQuery->sdata[i]->data + bytes * numOfSkip, (size_t)(pQuery->rec.rows * bytes)); - pRuntimeEnv->pCtx[i].aOutputBuf = ((char*) pQuery->sdata[i]->data) + pQuery->rec.rows * bytes; + pRuntimeEnv->pCtx[i].pOutput = ((char*) pQuery->sdata[i]->data) + pQuery->rec.rows * bytes; if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { - pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; + pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].pOutput; } } @@ -3595,7 +3369,7 @@ bool needRepeatScan(SQueryRuntimeEnv *pRuntimeEnv) { bool toContinue = false; if (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) { // for each group result, call the finalize function for each column - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; for (int32_t i = 0; i < pWindowResInfo->size; ++i) { SResultRow *pResult = getResultRow(pWindowResInfo, i); @@ -3639,7 +3413,7 @@ static SQueryStatusInfo getQueryStatusInfo(SQueryRuntimeEnv *pRuntimeEnv, TSKEY SQueryStatusInfo info = { .status = pQuery->status, - .windowIndex = pRuntimeEnv->windowResInfo.curIndex, + .windowIndex = pRuntimeEnv->resultRowInfo.curIndex, .lastKey = start, }; @@ -3762,7 +3536,7 @@ static void handleInterpolationQuery(SQInfo* pQInfo) { } pCtx->param[2].i64 = (int8_t)pQuery->fillType; - pCtx->nStartQueryTimestamp = pQuery->window.skey; + pCtx->startTs = pQuery->window.skey; if (pQuery->fillVal != NULL) { if (isNull((const char *)&pQuery->fillVal[i], pCtx->inputType)) { pCtx->param[1].nType = TSDB_DATA_TYPE_NULL; @@ -3789,7 +3563,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { SET_MASTER_SCAN_FLAG(pRuntimeEnv); if 
(!pRuntimeEnv->groupbyColumn && pRuntimeEnv->hasTagResults) { - setTagVal(pRuntimeEnv, pTableQueryInfo->pTable, pQInfo->tsdb); + setTagVal(pRuntimeEnv, pTableQueryInfo->pTable); } while (1) { @@ -3824,7 +3598,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { longjmp(pRuntimeEnv->env, terrno); } - pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex; + pRuntimeEnv->resultRowInfo.curIndex = qstatus.windowIndex; setQueryStatus(pQuery, QUERY_NOT_COMPLETED); pRuntimeEnv->scanFlag = REPEAT_SCAN; @@ -3855,7 +3629,7 @@ void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { if (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) { // for each group result, call the finalize function for each column - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; if (pRuntimeEnv->groupbyColumn) { closeAllResultRows(pWindowResInfo); } @@ -3898,9 +3672,7 @@ static bool hasMainOutput(SQuery *pQuery) { return false; } -static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void* pTable, STimeWindow win, void* buf) { - SQuery *pQuery = pRuntimeEnv->pQuery; - +static STableQueryInfo *createTableQueryInfo(SQuery* pQuery, void* pTable, bool groupbyColumn, STimeWindow win, void* buf) { STableQueryInfo *pTableQueryInfo = buf; pTableQueryInfo->win = win; @@ -3910,9 +3682,9 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void pTableQueryInfo->cur.vgroupIndex = -1; // set more initial size of interval/groupby query - if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyColumn) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) || groupbyColumn) { int32_t initialSize = 128; - int32_t code = initResultRowInfo(&pTableQueryInfo->windowResInfo, initialSize, TSDB_DATA_TYPE_INT); + int32_t code = initResultRowInfo(&pTableQueryInfo->resInfo, initialSize, TSDB_DATA_TYPE_INT); if (code != TSDB_CODE_SUCCESS) { return NULL; } @@ -3928,7 +3700,7 @@ void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) { } tVariantDestroy(&pTableQueryInfo->tag); - cleanupResultRowInfo(&pTableQueryInfo->windowResInfo); + cleanupResultRowInfo(&pTableQueryInfo->resInfo); } /** @@ -3939,7 +3711,7 @@ void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) { void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; STableQueryInfo *pTableQueryInfo = pRuntimeEnv->pQuery->current; - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; // lastKey needs to be updated pTableQueryInfo->lastKey = nextKey; @@ -3977,11 +3749,11 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; - pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult, page); + pCtx->pOutput = getPosInResultPage(pRuntimeEnv, i, pResult, page); int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; + pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].pOutput; } /* @@ -4006,12 +3778,12 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe continue; } - pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult, bufPage); + pCtx->pOutput = 
getPosInResultPage(pRuntimeEnv, i, pResult, bufPage); pCtx->currentStage = 0; int32_t functionId = pCtx->functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; + pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].pOutput; } if (!pCtx->resultInfo->initialized) { @@ -4020,46 +3792,82 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe } } -int32_t setAdditionalInfo(SQInfo *pQInfo, void* pTable, STableQueryInfo *pTableQueryInfo) { +int32_t setTimestampListJoinInfo(SQInfo *pQInfo, STableQueryInfo *pTableQueryInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - - setTagVal(pRuntimeEnv, pTable, pQInfo->tsdb); + assert(pRuntimeEnv->pTsBuf != NULL); // both the master and supplement scan needs to set the correct ts comp start position - if (pRuntimeEnv->pTsBuf != NULL) { - tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; + tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; - if (pTableQueryInfo->cur.vgroupIndex == -1) { - tVariantAssign(&pTableQueryInfo->tag, pTag); + if (pTableQueryInfo->cur.vgroupIndex == -1) { + tVariantAssign(&pTableQueryInfo->tag, pTag); - STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTsBuf, pQInfo->vgId, &pTableQueryInfo->tag); + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTsBuf, pQInfo->vgId, &pTableQueryInfo->tag); - // failed to find data with the specified tag value and vnodeId - if (!tsBufIsValidElem(&elem)) { - if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); - } else { - qError("QInfo:%p failed to find tag:%" PRId64 " in ts_comp", pQInfo, pTag->i64); - } - - return false; - } - - // keep the cursor info of current meter - pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); + // failed to find data with the specified tag value and vnodeId + if (!tsBufIsValidElem(&elem)) { if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); } else { - qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qError("QInfo:%p failed to find tag:%" PRId64 " in ts_comp", pQInfo, pTag->i64); } + return false; + } + + // keep the cursor info of current meter + pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { - tsBufSetCursor(pRuntimeEnv->pTsBuf, &pTableQueryInfo->cur); + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } - if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); - } else { - qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64, 
pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } else { + tsBufSetCursor(pRuntimeEnv->pTsBuf, &pTableQueryInfo->cur); + + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } + } + + return 0; +} + +int32_t setParamValue(SQueryRuntimeEnv* pRuntimeEnv) { + SQuery* pQuery = pRuntimeEnv->pQuery; + + if (pRuntimeEnv->prevResult == NULL || pRuntimeEnv->groupbyColumn) { + return TSDB_CODE_SUCCESS; + } + + int32_t numOfExprs = pQuery->numOfOutput; + for(int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExprInfo = &(pQuery->pExpr1[i]); + if(pExprInfo->base.functionId != TSDB_FUNC_STDDEV_DST) { + continue; + } + + SSqlFuncMsg* pFuncMsg = &pExprInfo->base; + + pRuntimeEnv->pCtx[i].param[0].arr = NULL; + pRuntimeEnv->pCtx[i].param[0].nType = TSDB_DATA_TYPE_INT; // avoid freeing the memory by setting the type to be int + + int32_t numOfGroup = (int32_t) taosArrayGetSize(pRuntimeEnv->prevResult); + for(int32_t j = 0; j < numOfGroup; ++j) { + SInterResult *p = taosArrayGet(pRuntimeEnv->prevResult, j); + if (pQuery->tagLen == 0 || memcmp(p->tags, pRuntimeEnv->tagVal, pQuery->tagLen) == 0) { + + int32_t numOfCols = (int32_t) taosArrayGetSize(p->pResult); + for(int32_t k = 0; k < numOfCols; ++k) { + SStddevInterResult* pres = taosArrayGet(p->pResult, k); + if (pres->colId == pFuncMsg->colInfo.colId) { + pRuntimeEnv->pCtx[i].param[0].arr = pres->pResult; + break; + } + } } } } @@ -4100,7 +3908,7 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) { * operations involve. 
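/*
 * Editor's sketch (not part of the patch): the lookup that setParamValue()
 * above performs for TSDB_FUNC_STDDEV_DST. The first query pass ships
 * per-group intermediate results (prevResult); the second pass matches the
 * current table's packed tag bytes against each group with memcmp() and then
 * binds the matching column's result array into pCtx[i].param[0]. The struct
 * names below are hypothetical stand-ins, not the real SInterResult /
 * SStddevInterResult layouts.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef struct { int32_t colId; const double *pResult; } SColPrevRes;   /* hypothetical */
typedef struct {
  const char        *tags;        /* packed group-by tag values             */
  const SColPrevRes *cols;
  int32_t            numOfCols;
} SGroupPrevRes;                  /* hypothetical */

/* return the first-pass result for (tag value, column id), or NULL;
   a zero tagLen means there is only one group and it always matches */
static const SColPrevRes *findPrevStddev(const SGroupPrevRes *groups, int32_t numOfGroups,
                                         const char *tagVal, int32_t tagLen, int32_t colId) {
  for (int32_t i = 0; i < numOfGroups; ++i) {
    const SGroupPrevRes *g = &groups[i];
    if (tagLen != 0 && memcmp(g->tags, tagVal, (size_t)tagLen) != 0) {
      continue;                   /* not the group currently being scanned */
    }
    for (int32_t j = 0; j < g->numOfCols; ++j) {
      if (g->cols[j].colId == colId) {
        return &g->cols[j];       /* first match wins in this sketch */
      }
    }
  }
  return NULL;
}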
*/ STimeWindow w = TSWINDOW_INITIALIZER; - SResultRowInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo; + SResultRowInfo *pWindowResInfo = &pTableQueryInfo->resInfo; TSKEY sk = MIN(win.skey, win.ekey); TSKEY ek = MAX(win.skey, win.ekey); @@ -4122,7 +3930,7 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) { bool requireTimestamp(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; i++) { int32_t functionId = pQuery->pExpr1[i].base.functionId; - if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_NEED_TS) != 0) { + if ((aAggs[functionId].status & TSDB_FUNCSTATE_NEED_TS) != 0) { return true; } } @@ -4143,73 +3951,63 @@ bool needPrimaryTimestampCol(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo) { return loadPrimaryTS; } -static int32_t doCopyToSData(SQInfo *pQInfo, SResultRow **pRows, int32_t numOfRows, int32_t *index, int32_t orderType) { - SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery * pQuery = pRuntimeEnv->pQuery; +static int32_t doCopyToSData(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType) { + void* qinfo = GET_QINFO_ADDR(pRuntimeEnv); + SQuery *pQuery = pRuntimeEnv->pQuery; + + int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); + int32_t numOfResult = (int32_t) pQuery->rec.rows; // there are already exists result rows - int32_t numOfResult = 0; int32_t start = 0; int32_t step = -1; - qDebug("QInfo:%p start to copy data from windowResInfo to query buf", pQInfo); + qDebug("QInfo:%p start to copy data from windowResInfo to output buf", qinfo); if (orderType == TSDB_ORDER_ASC) { - start = (*index); + start = pGroupResInfo->index; step = 1; } else { // desc order copy all data - start = numOfRows - (*index) - 1; + start = numOfRows - pGroupResInfo->index - 1; step = -1; } - SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo; - for (int32_t i = start; (i < numOfRows) && (i >= 0); i += step) { - if (pRows[i]->numOfRows == 0) { - (*index) += 1; - pGroupResInfo->rowId = 0; + SResultRow* pRow = taosArrayGetP(pGroupResInfo->pRows, i); + if (pRow->numOfRows == 0) { + pGroupResInfo->index += 1; continue; } - int32_t numOfRowsToCopy = pRows[i]->numOfRows - pGroupResInfo->rowId; - int32_t oldOffset = pGroupResInfo->rowId; + int32_t numOfRowsToCopy = pRow->numOfRows; - /* - * current output space is not enough to accommodate all data of this page, only partial results - * will be copied to SQuery object's result buffer - */ - if (numOfRowsToCopy > pQuery->rec.capacity - numOfResult) { - numOfRowsToCopy = (int32_t) pQuery->rec.capacity - numOfResult; - pGroupResInfo->rowId += numOfRowsToCopy; - } else { - pGroupResInfo->rowId = 0; - (*index) += 1; + //current output space is not enough to accommodate all data of this page, prepare more space + if (numOfRowsToCopy > (pQuery->rec.capacity - numOfResult)) { + int32_t newSize = (int32_t) (pQuery->rec.capacity + (numOfRowsToCopy - numOfResult)); + expandBuffer(pRuntimeEnv, newSize, GET_QINFO_ADDR(pRuntimeEnv)); } - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pRows[i]->pageId); + pGroupResInfo->index += 1; + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pRow->pageId); for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { int32_t size = pRuntimeEnv->pCtx[j].outputBytes; char *out = pQuery->sdata[j]->data + numOfResult * size; - char *in = getPosInResultPage(pRuntimeEnv, j, pRows[i], page); - memcpy(out, in + oldOffset * size, size * numOfRowsToCopy); + char *in = getPosInResultPage(pRuntimeEnv, j, pRow, page); + memcpy(out, in, size * numOfRowsToCopy); } numOfResult += 
numOfRowsToCopy; - if (numOfResult == pQuery->rec.capacity) { + if (numOfResult == pQuery->rec.capacity) { // output buffer is full break; } } - qDebug("QInfo:%p copy data to query buf completed", pQInfo); - -#ifdef _DEBUG_VIEW - displayInterResult(pQuery->sdata, pRuntimeEnv, numOfResult); -#endif + qDebug("QInfo:%p copy data to query buf completed", qinfo); return numOfResult; } /** - * copyFromWindowResToSData support copy data in ascending/descending order + * copyToOutputBuf support copy data in ascending/descending order * For interval query of both super table and table, copy the data in ascending order, since the output results are * ordered in SWindowResutl already. While handling the group by query for both table and super table, * all group result are completed already. @@ -4217,14 +4015,17 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SResultRow **pRows, int32_t numOfRo * @param pQInfo * @param result */ -void copyFromWindowResToSData(SQInfo *pQInfo, SResultRowInfo *pResultInfo) { - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; +void copyToOutputBuf(SQInfo *pQInfo, SResultRowInfo *pResultInfo) { + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + SGroupResInfo *pGroupResInfo = &pQInfo->groupResInfo; + + assert(pQuery->rec.rows == 0 && pGroupResInfo->currentGroup <= pGroupResInfo->totalGroup); + if (!hasRemainData(pGroupResInfo)) { + return; + } int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSDB_ORDER_ASC; - int32_t numOfResult = doCopyToSData(pQInfo, pResultInfo->pResult, pResultInfo->size, &pQInfo->groupIndex, orderType); - - pQuery->rec.rows += numOfResult; - assert(pQuery->rec.rows <= pQuery->rec.capacity); + pQuery->rec.rows = doCopyToSData(&pQInfo->runtimeEnv, pGroupResInfo, orderType); } static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { @@ -4235,8 +4036,8 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { return; } - for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { - SResultRow *pResult = pRuntimeEnv->windowResInfo.pResult[i]; + for (int32_t i = 0; i < pRuntimeEnv->resultRowInfo.size; ++i) { + SResultRow *pResult = pRuntimeEnv->resultRowInfo.pResult[i]; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { int32_t functionId = pRuntimeEnv->pCtx[j].functionId; @@ -4255,7 +4056,7 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc SQuery * pQuery = pRuntimeEnv->pQuery; STableQueryInfo* pTableQueryInfo = pQuery->current; - SResultRowInfo * pResultRowInfo = &pTableQueryInfo->windowResInfo; + SResultRowInfo * pResultRowInfo = &pTableQueryInfo->resInfo; pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 
0 : pDataBlockInfo->rows - 1; if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->groupbyColumn) { @@ -4269,10 +4070,14 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc } } -bool hasNotReturnedResults(SQueryRuntimeEnv* pRuntimeEnv) { +bool hasNotReturnedResults(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; SFillInfo *pFillInfo = pRuntimeEnv->pFillInfo; + if (!Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { + return false; + } + if (pQuery->limit.limit > 0 && pQuery->rec.total >= pQuery->limit.limit) { return false; } @@ -4292,16 +4097,11 @@ bool hasNotReturnedResults(SQueryRuntimeEnv* pRuntimeEnv) { * set is the FIRST result block, the gap between the start time of query time window and the timestamp of the * first result row in the actual result set will fill nothing. */ - if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - int32_t numOfTotal = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); - return numOfTotal > 0; - } - - } else { - // there are results waiting for returned to client. - if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) && - (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) && - (pRuntimeEnv->windowResInfo.size > 0)) { + int32_t numOfTotal = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); + return numOfTotal > 0; + } else { // there are results waiting for returned to client. + if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) && hasRemainData(pGroupResInfo) && + (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery))) { return true; } } @@ -4361,15 +4161,15 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data setQueryStatus(pQuery, QUERY_OVER); } } else { - if (!hasNotReturnedResults(&pQInfo->runtimeEnv)) { + if (!hasNotReturnedResults(&pQInfo->runtimeEnv, &pQInfo->groupResInfo)) { setQueryStatus(pQuery, QUERY_OVER); } } } } -int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int32_t *numOfFilled) { - SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); +int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst) { + SQInfo *pQInfo = GET_QINFO_ADDR(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; SFillInfo* pFillInfo = pRuntimeEnv->pFillInfo; @@ -4388,7 +4188,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int pQInfo, pFillInfo->numOfRows, ret, pQuery->limit.offset, ret - pQuery->limit.offset, 0); ret -= (int32_t)pQuery->limit.offset; - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { //???pExpr1 or pExpr2 memmove(pDst[i]->data, pDst[i]->data + pQuery->pExpr1[i].bytes * pQuery->limit.offset, ret * pQuery->pExpr1[i].bytes); } @@ -4401,17 +4201,18 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int pQuery->limit.offset - ret); pQuery->limit.offset -= ret; - pQuery->rec.rows = 0; ret = 0; } - if (!hasNotReturnedResults(pRuntimeEnv)) { - return ret; + // no data in current data after fill + int32_t numOfTotal = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, (int32_t)pQuery->rec.capacity); + if (numOfTotal == 0) { + return 0; } } } -static void queryCostStatis(SQInfo *pQInfo) { +void queryCostStatis(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryCostInfo *pSummary = 
&pRuntimeEnv->summary; @@ -4423,8 +4224,13 @@ static void queryCostStatis(SQInfo *pQInfo) { pSummary->elapsedTime += pSummary->firstStageMergeTime; SResultRowPool* p = pQInfo->runtimeEnv.pool; - pSummary->winInfoSize = getResultRowPoolMemSize(p); - pSummary->numOfTimeWindows = getNumOfAllocatedResultRows(p); + if (p != NULL) { + pSummary->winInfoSize = getResultRowPoolMemSize(p); + pSummary->numOfTimeWindows = getNumOfAllocatedResultRows(p); + } else { + pSummary->winInfoSize = 0; + pSummary->numOfTimeWindows = 0; + } qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, " "load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, @@ -4498,7 +4304,7 @@ static void generateBlockDistResult(STableBlockDist *pTableBlockDist) { if (pTableBlockDist == NULL) { return; } - int64_t min = INT64_MAX, max = INT64_MIN, avg = 0; + int64_t min = 0, max = 0, avg = 0; SArray* blockInfos= pTableBlockDist->dataBlockInfos; int64_t totalRows = 0, totalBlocks = taosArrayGetSize(blockInfos); for (size_t i = 0; i < taosArrayGetSize(blockInfos); i++) { @@ -4564,7 +4370,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { static TSKEY doSkipIntervalProcess(SQueryRuntimeEnv* pRuntimeEnv, STimeWindow* win, SDataBlockInfo* pBlockInfo, STableQueryInfo* pTableQueryInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; assert(pQuery->limit.offset == 0); STimeWindow tw = *win; @@ -4592,10 +4398,10 @@ static TSKEY doSkipIntervalProcess(SQueryRuntimeEnv* pRuntimeEnv, STimeWindow* w TSKEY key = pTableQueryInfo->win.skey; pWindowResInfo->prevSKey = tw.skey; - int32_t index = pRuntimeEnv->windowResInfo.curIndex; + int32_t index = pRuntimeEnv->resultRowInfo.curIndex; int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); - pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index + pRuntimeEnv->resultRowInfo.curIndex = index; // restore the window index qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, @@ -4631,12 +4437,12 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { * pQuery->limit.offset times. Since hole exists, pQuery->interval.interval*pQuery->limit.offset value is * not valid. 
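/*
 * Editor's sketch (not part of the patch): how doFillGapsInResults() above
 * consumes pQuery->limit.offset after a fill step. When a filled batch is not
 * larger than the remaining offset, the whole batch is dropped and the offset
 * shrinks; otherwise the surviving rows are shifted to the front of every
 * column with memmove() and the offset is cleared. Parameter names here are
 * illustrative, not the real tFilePage layout.
 */
#include <stdint.h>
#include <string.h>

static int32_t applyLimitOffset(char **colData, const int32_t *colBytes, int32_t numOfCols,
                                int32_t numOfRows, int64_t *offset) {
  if (*offset == 0 || numOfRows == 0) {
    return numOfRows;                       /* nothing to skip */
  }
  if (numOfRows <= *offset) {
    *offset -= numOfRows;                   /* the whole batch is swallowed by the offset */
    return 0;
  }
  int32_t remain = (int32_t)(numOfRows - *offset);
  for (int32_t i = 0; i < numOfCols; ++i) {
    memmove(colData[i], colData[i] + (int64_t)colBytes[i] * (*offset),
            (size_t)remain * (size_t)colBytes[i]);
  }
  *offset = 0;
  return remain;                            /* rows left for the client */
}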
otherwise, we only forward pQuery->limit.offset number of points */ - assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL); + assert(pRuntimeEnv->resultRowInfo.prevSKey == TSKEY_INITIAL_VAL); STimeWindow w = TSWINDOW_INITIALIZER; bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; STableQueryInfo *pTableQueryInfo = pQuery->current; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; @@ -4743,7 +4549,7 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) && (pQInfo->tableqinfoGroupInfo.numOfTables == 1) && (cond.order == TSDB_ORDER_ASC) && (!QUERY_IS_INTERVAL_QUERY(pQuery)) - && (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) + && (!isGroupbyColumn(pQuery->pGroupbyExpr)) && (!isFixedOutputQuery(pRuntimeEnv)) ) { SArray* pa = GET_TABLEGROUP(pQInfo, 0); @@ -4809,7 +4615,7 @@ static SFillColInfo* createFillColInfo(SQuery* pQuery) { return pFillCol; } -int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) { +int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -4817,6 +4623,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery); pRuntimeEnv->timeWindowInterpo = timeWindowInterpoRequired(pQuery); + pRuntimeEnv->prevResult = prevResult; setScanLimitationByResultBuffer(pQuery); @@ -4827,13 +4634,15 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo pQInfo->tsdb = tsdb; pQInfo->vgId = vgId; + pQInfo->groupResInfo.totalGroup = (int32_t) (isSTableQuery? GET_NUM_OF_TABLEGROUP(pQInfo):0); pRuntimeEnv->pQuery = pQuery; pRuntimeEnv->pTsBuf = pTsBuf; pRuntimeEnv->cur.vgroupIndex = -1; pRuntimeEnv->stableQuery = isSTableQuery; pRuntimeEnv->prevGroupId = INT32_MIN; - pRuntimeEnv->groupbyColumn = isGroupbyNormalCol(pQuery->pGroupbyExpr); + pRuntimeEnv->groupbyColumn = isGroupbyColumn(pQuery->pGroupbyExpr); + pRuntimeEnv->stabledev = isStabledev(pQuery); if (pTsBuf != NULL) { int16_t order = (pQuery->order.order == pRuntimeEnv->pTsBuf->tsOrder) ? 
TSDB_ORDER_ASC : TSDB_ORDER_DESC; @@ -4859,7 +4668,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo type = TSDB_DATA_TYPE_INT; // group id } - code = initResultRowInfo(&pRuntimeEnv->windowResInfo, 8, type); + code = initResultRowInfo(&pRuntimeEnv->resultRowInfo, 8, type); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4879,14 +4688,14 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo type = TSDB_DATA_TYPE_TIMESTAMP; } - code = initResultRowInfo(&pRuntimeEnv->windowResInfo, numOfResultRows, type); + code = initResultRowInfo(&pRuntimeEnv->resultRowInfo, numOfResultRows, type); if (code != TSDB_CODE_SUCCESS) { return code; } } // create runtime environment - code = setupQueryRuntimeEnv(pRuntimeEnv, pQuery->order.order); + code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQInfo->tableGroupInfo.numOfTables, pQuery->order.order, pQInfo->vgId); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4900,7 +4709,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w); int32_t numOfCols = getNumOfFinalResCol(pQuery); - pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, numOfCols, + pRuntimeEnv->pFillInfo = taosCreateFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, numOfCols, pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision, pQuery->fillType, pColInfo, pQInfo); } @@ -4926,7 +4735,11 @@ static FORCE_INLINE void setEnvForEachBlock(SQInfo* pQInfo, STableQueryInfo* pTa int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) { - setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo); + setTagVal(pRuntimeEnv, pTableQueryInfo->pTable); + } + + if (pRuntimeEnv->pTsBuf != NULL) { + setTimestampListJoinInfo(pQInfo, pTableQueryInfo); } if (QUERY_IS_INTERVAL_QUERY(pQuery)) { @@ -4982,11 +4795,20 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { setEnvForEachBlock(pQInfo, *pTableQueryInfo, &blockInfo); } + if (pRuntimeEnv->stabledev) { + for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { + if (pQuery->pExpr1[i].base.functionId == TSDB_FUNC_STDDEV_DST) { + setParamValue(pRuntimeEnv); + break; + } + } + } + uint32_t status = 0; SDataStatis *pStatis = NULL; SArray *pDataBlock = NULL; - int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->windowResInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); + int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->resInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); if (ret != TSDB_CODE_SUCCESS) { break; } @@ -5024,7 +4846,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { STableQueryInfo* pCheckInfo = taosArrayGetP(group, index); if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) { - setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb); + setTagVal(pRuntimeEnv, pCheckInfo->pTable); } STableId* id = TSDB_TABLEID(pCheckInfo->pTable); @@ -5212,7 +5034,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { SArray *s = tsdbGetQueriedTableList(pRuntimeEnv->pQueryHandle); assert(taosArrayGetSize(s) >= 1); - setTagVal(pRuntimeEnv, taosArrayGetP(s, 0), pQInfo->tsdb); + setTagVal(pRuntimeEnv, taosArrayGetP(s, 0)); taosArrayDestroy(s); // here we simply set the first table as current table @@ -5271,7 +5093,7 @@ static 
void sequentialTableProcess(SQInfo *pQInfo) { SArray *s = tsdbGetQueriedTableList(pRuntimeEnv->pQueryHandle); assert(taosArrayGetSize(s) >= 1); - setTagVal(pRuntimeEnv, taosArrayGetP(s, 0), pQInfo->tsdb); + setTagVal(pRuntimeEnv, taosArrayGetP(s, 0)); // here we simply set the first table as current table scanMultiTableDataBlocks(pQInfo); @@ -5280,7 +5102,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { taosArrayDestroy(s); // no results generated for current group, continue to try the next group - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; if (pWindowResInfo->size <= 0) { continue; } @@ -5297,20 +5119,21 @@ static void sequentialTableProcess(SQInfo *pQInfo) { qDebug("QInfo:%p generated groupby columns results %d rows for group %d completed", pQInfo, pWindowResInfo->size, pQInfo->groupIndex); - int32_t currentGroupIndex = pQInfo->groupIndex; pQuery->rec.rows = 0; - pQInfo->groupIndex = 0; + if (pWindowResInfo->size > pQuery->rec.capacity) { + expandBuffer(pRuntimeEnv, pWindowResInfo->size, pQInfo); + } - ensureOutputBufferSimple(pRuntimeEnv, pWindowResInfo->size); - copyFromWindowResToSData(pQInfo, pWindowResInfo); - - pQInfo->groupIndex = currentGroupIndex; // restore the group index + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, 0); + copyToOutputBuf(pQInfo, pWindowResInfo); assert(pQuery->rec.rows == pWindowResInfo->size); - resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->windowResInfo); + + resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->resultRowInfo); + cleanupGroupResInfo(&pQInfo->groupResInfo); break; } - } else if (pRuntimeEnv->queryWindowIdentical && pRuntimeEnv->pTsBuf == NULL && !isTSCompQuery(pQuery)) { + } else if (pRuntimeEnv->queryWindowIdentical && pRuntimeEnv->pTsBuf == NULL && !isTsCompQuery(pQuery)) { //super table projection query with identical query time range for all tables. SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; resetDefaultResInfoOutputBuf(pRuntimeEnv); @@ -5360,10 +5183,10 @@ static void sequentialTableProcess(SQInfo *pQInfo) { doTableQueryInfoTimeWindowCheck(pQuery, *pTableQueryInfo); if (pRuntimeEnv->hasTagResults) { - setTagVal(pRuntimeEnv, pQuery->current->pTable, pQInfo->tsdb); + setTagVal(pRuntimeEnv, pQuery->current->pTable); } - if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->current->windowResInfo.size > pQuery->prjInfo.vgroupLimit) { + if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->current->resInfo.size > pQuery->prjInfo.vgroupLimit) { pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step; continue; @@ -5386,7 +5209,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { SDataStatis *pStatis = NULL; SArray *pDataBlock = NULL; - int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->windowResInfo, pQueryHandle, &blockInfo, + int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->resInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); if (ret != TSDB_CODE_SUCCESS) { break; @@ -5398,7 +5221,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { continue; } - ensureOutputBuffer(pRuntimeEnv, &blockInfo); + ensureOutputBuffer(pRuntimeEnv, blockInfo.rows); int64_t prev = getNumOfResult(pRuntimeEnv); pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : blockInfo.rows - 1; @@ -5412,7 +5235,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pQuery->rec.rows = getNumOfResult(pRuntimeEnv); int64_t inc = pQuery->rec.rows - prev; - pQuery->current->windowResInfo.size += (int32_t) inc; + pQuery->current->resInfo.size += (int32_t) inc; // the flag may be set by tableApplyFunctionsOnBlock, clear it here CLEAR_QUERY_STATUS(pQuery, QUERY_COMPLETED); @@ -5430,8 +5253,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } else { // the limitation of output result is reached, set the query completed skipResults(pRuntimeEnv); - if (limitResults(pRuntimeEnv)) { - setQueryStatus(pQuery, QUERY_COMPLETED); + if (limitOperator(pQuery, pQInfo)) { SET_STABLE_QUERY_OVER(pQInfo); break; } @@ -5454,8 +5276,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { * If the subgroup index is larger than 0, results generated by group by tbname,k is existed. * we need to return it to client in the first place. */ - if (pQInfo->groupIndex > 0) { - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + if (hasRemainData(&pQInfo->groupResInfo)) { + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); pQuery->rec.total += pQuery->rec.rows; if (pQuery->rec.rows > 0) { @@ -5469,7 +5291,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } resetDefaultResInfoOutputBuf(pRuntimeEnv); - resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->windowResInfo); + resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->resultRowInfo); SArray *group = GET_TABLEGROUP(pQInfo, 0); assert(taosArrayGetSize(group) == pQInfo->tableqinfoGroupInfo.numOfTables && @@ -5498,7 +5320,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { skipResults(pRuntimeEnv); // the limitation of output result is reached, set the query completed - if (limitResults(pRuntimeEnv)) { + if (limitOperator(pQuery, pQInfo)) { SET_STABLE_QUERY_OVER(pQInfo); break; } @@ -5553,7 +5375,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { * * Only the ts-comp query requires the finalizer function to be executed here. 
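/*
 * Editor's sketch (not part of the patch): the cursor-style pagination implied
 * by hasRemainData()/copyToOutputBuf() above. Finished result rows stay in the
 * group-result buffer; every retrieve call hands out at most 'capacity' rows
 * and advances an index, so the next call resumes where the previous one
 * stopped. SResCursor is a hypothetical stand-in for SGroupResInfo.
 */
#include <stdint.h>
#include <stdbool.h>

typedef struct {
  int32_t index;       /* next row to hand out                */
  int32_t numOfRows;   /* rows produced for the current group */
} SResCursor;          /* hypothetical */

static bool hasRemain(const SResCursor *c) {
  return c->index < c->numOfRows;
}

/* drain up to 'capacity' rows per call; repeated calls empty the group */
static int32_t copyBatch(SResCursor *c, int32_t capacity) {
  int32_t n = c->numOfRows - c->index;
  if (n > capacity) {
    n = capacity;
  }
  c->index += n;
  return n;
}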
*/ - if (isTSCompQuery(pQuery)) { + if (isTsCompQuery(pQuery)) { finalizeQueryResult(pRuntimeEnv); } @@ -5623,11 +5445,11 @@ static void doCloseAllTimeWindow(SQInfo *pQInfo) { size_t num = taosArrayGetSize(group); for (int32_t j = 0; j < num; ++j) { STableQueryInfo* item = taosArrayGetP(group, j); - closeAllResultRows(&item->windowResInfo); + closeAllResultRows(&item->resInfo); } } } else { // close results for group result - closeAllResultRows(&pQInfo->runtimeEnv.windowResInfo); + closeAllResultRows(&pQInfo->runtimeEnv.resultRowInfo); } } @@ -5635,18 +5457,14 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pRuntimeEnv->pQuery; - if (pQInfo->groupIndex > 0) { - /* - * if the groupIndex > 0, the query process must be completed yet, we only need to - * copy the data into output buffer - */ + if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { if (QUERY_IS_INTERVAL_QUERY(pQuery)) { copyResToQueryResultBuf(pQInfo, pQuery); } else { - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); } - qDebug("QInfo:%p current:%"PRId64", total:%"PRId64"", pQInfo, pQuery->rec.rows, pQuery->rec.total); + qDebug("QInfo:%p current:%"PRId64", total:%"PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total); return; } @@ -5688,18 +5506,10 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { } if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) { - int32_t code = mergeGroupResult(pQInfo); - if (code == TSDB_CODE_SUCCESS) { - copyResToQueryResultBuf(pQInfo, pQuery); - -#ifdef _DEBUG_VIEW - displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num); -#endif - } else { // set the error code - pQInfo->code = code; - } + copyResToQueryResultBuf(pQInfo, pQuery); } else { // not a interval query - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, 0); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); } // handle the limitation of output buffer @@ -5801,8 +5611,9 @@ static void tableAggregationProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } + // TODO limit/offset refactor to be one operator skipResults(pRuntimeEnv); - limitResults(pRuntimeEnv); + limitOperator(pQuery, pQInfo); } static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { @@ -5810,7 +5621,7 @@ static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) // for ts_comp query, re-initialized is not allowed SQuery *pQuery = pRuntimeEnv->pQuery; - if (!isTSCompQuery(pQuery)) { + if (!isTsCompQuery(pQuery)) { resetDefaultResInfoOutputBuf(pRuntimeEnv); } @@ -5844,7 +5655,7 @@ static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) resetDefaultResInfoOutputBuf(pRuntimeEnv); } - limitResults(pRuntimeEnv); + limitOperator(pQuery, pQInfo); if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { qDebug("QInfo:%p query paused due to output limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo, pQuery->current->lastKey, pQuery->window.ekey); @@ -5853,11 +5664,45 @@ static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) taosHashPut(pQInfo->arrTableIdInfo, &tidInfo.tid, sizeof(tidInfo.tid), &tidInfo, sizeof(STableIdInfo)); } - if (!isTSCompQuery(pQuery)) { + if (!isTsCompQuery(pQuery)) { assert(pQuery->rec.rows <= pQuery->rec.capacity); } } +static void 
copyAndFillResult(SQInfo* pQInfo) { + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + SQuery* pQuery = pRuntimeEnv->pQuery; + + while(1) { + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); + doSecondaryArithmeticProcess(pQuery); + + TSKEY lastKey = 0; + if (!hasRemainData(&pQInfo->groupResInfo)) { + lastKey = pQuery->window.ekey; + } else { + lastKey = ((TSKEY*)pQuery->sdata[0]->data)[pQuery->rec.rows - 1]; + } + + assert(lastKey <= pQuery->window.ekey); + + taosFillSetStartInfo(pRuntimeEnv->pFillInfo, (int32_t)pQuery->rec.rows, lastKey); + taosFillSetDataBlockFromFilePage(pRuntimeEnv->pFillInfo, (const tFilePage **)pQuery->sdata); + + pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata); + + if (pQuery->rec.rows > 0) { + limitOperator(pQuery, pQInfo); + break; + } + + // here the pQuery->rec.rows == 0 + if (!hasRemainData(&pQInfo->groupResInfo) && !taosFillHasMoreResults(pRuntimeEnv->pFillInfo)) { + break; + } + } +} + // handle time interval query on table static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { SQueryRuntimeEnv *pRuntimeEnv = &(pQInfo->runtimeEnv); @@ -5881,69 +5726,56 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { pQuery->rec.rows = 0; // not fill or no result generated during this query - if (pQuery->fillType == TSDB_FILL_NONE || pRuntimeEnv->windowResInfo.size == 0 || isPointInterpoQuery(pQuery)) { + if (pQuery->fillType == TSDB_FILL_NONE || pRuntimeEnv->resultRowInfo.size == 0 || isPointInterpoQuery(pQuery)) { // all data scanned, the group by normal column can return - int32_t numOfClosed = numOfClosedResultRows(&pRuntimeEnv->windowResInfo); - if (pQuery->limit.offset > numOfClosed) { + int32_t numOfClosed = numOfClosedResultRows(&pRuntimeEnv->resultRowInfo); + if (pQuery->limit.offset > numOfClosed || numOfClosed == 0) { return; } - pQInfo->groupIndex = (int32_t) pQuery->limit.offset; - - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, (int32_t) pQuery->limit.offset); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); doSecondaryArithmeticProcess(pQuery); - limitResults(pRuntimeEnv); + limitOperator(pQuery, pQInfo); } else { - - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - doSecondaryArithmeticProcess(pQuery); - - taosFillSetStartInfo(pRuntimeEnv->pFillInfo, (int32_t)pQuery->rec.rows, pQuery->window.ekey); - taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (const tFilePage **)pQuery->sdata); - - int32_t numOfFilled = 0; - pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled); - - if (pQuery->rec.rows > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - limitResults(pRuntimeEnv); - } + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, 0); + copyAndFillResult(pQInfo); } } -static void tableQueryImpl(SQInfo *pQInfo) { +void tableQueryImpl(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; - if (hasNotReturnedResults(pRuntimeEnv)) { + if (hasNotReturnedResults(pRuntimeEnv, &pQInfo->groupResInfo)) { if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { /* * There are remain results that are not returned due to result interpolation * So, we do keep in this procedure instead of launching retrieve procedure for next results. 
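/*
 * Editor's sketch (not part of the patch): how copyAndFillResult() above picks
 * the end key for the gap filler. If every buffered result row has already
 * been copied out, the filler may run all the way to the query window's end;
 * otherwise it must stop at the timestamp of the last copied row, which never
 * passes the window end. The helper below is hypothetical and assumes an
 * ascending timestamp column, as the surrounding code does.
 */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

typedef int64_t TSKEY;

static TSKEY fillEndKey(bool hasRemainRows, const TSKEY *ts, int32_t numOfRows, TSKEY windowEkey) {
  /* numOfRows == 0 falls back to the window end as a defensive guard */
  TSKEY lastKey = (!hasRemainRows || numOfRows == 0) ? windowEkey : ts[numOfRows - 1];
  assert(lastKey <= windowEkey);   /* results never pass the query window end */
  return lastKey;
}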
*/ - int32_t numOfFilled = 0; - pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled); - + pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata); if (pQuery->rec.rows > 0) { - limitResults(pRuntimeEnv); + limitOperator(pQuery, pQInfo); + qDebug("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total); + } else { + copyAndFillResult(pQInfo); } - qDebug("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total); } else { pQuery->rec.rows = 0; - assert(pRuntimeEnv->windowResInfo.size > 0); + assert(pRuntimeEnv->resultRowInfo.size > 0); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); + doSecondaryArithmeticProcess(pQuery); - if (pQInfo->groupIndex < pRuntimeEnv->windowResInfo.size) { - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + if (pQuery->rec.rows > 0) { + limitOperator(pQuery, pQInfo); } if (pQuery->rec.rows > 0) { qDebug("QInfo:%p %" PRId64 " rows returned from group results, total:%" PRId64 "", pQInfo, pQuery->rec.rows, pQuery->rec.total); - } - - // there are not data remains - if (pQuery->rec.rows <= 0 || pRuntimeEnv->windowResInfo.size <= pQInfo->groupIndex) { + } else { qDebug("QInfo:%p query over, %" PRId64 " rows are returned", pQInfo, pQuery->rec.total); } } @@ -5975,7 +5807,8 @@ static void tableQueryImpl(SQInfo *pQInfo) { pRuntimeEnv->summary.elapsedTime += (taosGetTimestampUs() - st); assert(pQInfo->tableqinfoGroupInfo.numOfTables == 1); } -static void buildTableBlockDistResult(SQInfo *pQInfo) { + +void buildTableBlockDistResult(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pRuntimeEnv->pQuery; pQuery->pos = 0; @@ -6028,7 +5861,7 @@ static void buildTableBlockDistResult(SQInfo *pQInfo) { return; } -static void stableQueryImpl(SQInfo *pQInfo) { +void stableQueryImpl(SQInfo *pQInfo) { SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pRuntimeEnv->pQuery; pQuery->rec.rows = 0; @@ -6040,7 +5873,6 @@ static void stableQueryImpl(SQInfo *pQInfo) { multiTableQueryProcess(pQInfo); } else { assert(pQuery->checkResultBuf == 1 || isPointInterpoQuery(pQuery) || pRuntimeEnv->groupbyColumn); - sequentialTableProcess(pQInfo); } @@ -6168,8 +6000,7 @@ static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **p * @param pExpr * @return */ -static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg ***pExpr, SSqlFuncMsg ***pSecStageExpr, - char **tagCond, char** tbnameCond, SColIndex **groupbyCols, SColumnInfo** tagCols, char** sql) { +int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { int32_t code = TSDB_CODE_SUCCESS; if (taosCheckVersion(pQueryMsg->version, version, 3) != 0) { @@ -6204,6 +6035,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pQueryMsg->tbnameCondLen = htonl(pQueryMsg->tbnameCondLen); pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput); pQueryMsg->sqlstrLen = htonl(pQueryMsg->sqlstrLen); + pQueryMsg->prevResultLen = htonl(pQueryMsg->prevResultLen); // query msg safety check if (!validateQueryMsg(pQueryMsg)) { @@ -6264,8 +6096,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, } } - *pExpr = calloc(pQueryMsg->numOfOutput, POINTER_BYTES); - if (*pExpr == NULL) { + param->pExprMsg = calloc(pQueryMsg->numOfOutput, POINTER_BYTES); + if (param->pExprMsg == NULL) { code = 
TSDB_CODE_QRY_OUT_OF_MEMORY; goto _cleanup; } @@ -6273,7 +6105,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg *pExprMsg = (SSqlFuncMsg *)pMsg; for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { - (*pExpr)[i] = pExprMsg; + param->pExprMsg[i] = pExprMsg; pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); @@ -6309,10 +6141,10 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, if (pQueryMsg->secondStageOutput) { pExprMsg = (SSqlFuncMsg *)pMsg; - *pSecStageExpr = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES); + param->pSecExprMsg = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES); for (int32_t i = 0; i < pQueryMsg->secondStageOutput; ++i) { - (*pSecStageExpr)[i] = pExprMsg; + param->pSecExprMsg[i] = pExprMsg; pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); @@ -6346,27 +6178,27 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, } } - pMsg = createTableIdList(pQueryMsg, pMsg, pTableIdList); + pMsg = createTableIdList(pQueryMsg, pMsg, &(param->pTableIdList)); if (pQueryMsg->numOfGroupCols > 0) { // group by tag columns - *groupbyCols = malloc(pQueryMsg->numOfGroupCols * sizeof(SColIndex)); - if (*groupbyCols == NULL) { + param->pGroupColIndex = malloc(pQueryMsg->numOfGroupCols * sizeof(SColIndex)); + if (param->pGroupColIndex == NULL) { code = TSDB_CODE_QRY_OUT_OF_MEMORY; goto _cleanup; } for (int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) { - (*groupbyCols)[i].colId = htons(*(int16_t *)pMsg); - pMsg += sizeof((*groupbyCols)[i].colId); + param->pGroupColIndex[i].colId = htons(*(int16_t *)pMsg); + pMsg += sizeof(param->pGroupColIndex[i].colId); - (*groupbyCols)[i].colIndex = htons(*(int16_t *)pMsg); - pMsg += sizeof((*groupbyCols)[i].colIndex); + param->pGroupColIndex[i].colIndex = htons(*(int16_t *)pMsg); + pMsg += sizeof(param->pGroupColIndex[i].colIndex); - (*groupbyCols)[i].flag = htons(*(int16_t *)pMsg); - pMsg += sizeof((*groupbyCols)[i].flag); + param->pGroupColIndex[i].flag = htons(*(int16_t *)pMsg); + pMsg += sizeof(param->pGroupColIndex[i].flag); - memcpy((*groupbyCols)[i].name, pMsg, tListLen(groupbyCols[i]->name)); - pMsg += tListLen((*groupbyCols)[i].name); + memcpy(param->pGroupColIndex[i].name, pMsg, tListLen(param->pGroupColIndex[i].name)); + pMsg += tListLen(param->pGroupColIndex[i].name); } pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx); @@ -6386,8 +6218,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, } if (pQueryMsg->numOfTags > 0) { - (*tagCols) = calloc(1, sizeof(SColumnInfo) * pQueryMsg->numOfTags); - if (*tagCols == NULL) { + param->pTagColumnInfo = calloc(1, sizeof(SColumnInfo) * pQueryMsg->numOfTags); + if (param->pTagColumnInfo == NULL) { code = TSDB_CODE_QRY_OUT_OF_MEMORY; goto _cleanup; } @@ -6400,32 +6232,42 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pTagCol->type = htons(pTagCol->type); pTagCol->numOfFilters = 0; - (*tagCols)[i] = *pTagCol; + param->pTagColumnInfo[i] = *pTagCol; pMsg += sizeof(SColumnInfo); } } // the tag query condition expression string is located at the end of query msg if (pQueryMsg->tagCondLen > 0) { - *tagCond = calloc(1, pQueryMsg->tagCondLen); - - if (*tagCond == NULL) { + param->tagCond = calloc(1, pQueryMsg->tagCondLen); + if (param->tagCond == NULL) { code = 
TSDB_CODE_QRY_OUT_OF_MEMORY; goto _cleanup; - } - memcpy(*tagCond, pMsg, pQueryMsg->tagCondLen); + + memcpy(param->tagCond, pMsg, pQueryMsg->tagCondLen); pMsg += pQueryMsg->tagCondLen; } - if (pQueryMsg->tbnameCondLen > 0) { - *tbnameCond = calloc(1, pQueryMsg->tbnameCondLen + 1); - if (*tbnameCond == NULL) { + if (pQueryMsg->prevResultLen > 0) { + param->prevResult = calloc(1, pQueryMsg->prevResultLen); + if (param->prevResult == NULL) { code = TSDB_CODE_QRY_OUT_OF_MEMORY; goto _cleanup; } - strncpy(*tbnameCond, pMsg, pQueryMsg->tbnameCondLen); + memcpy(param->prevResult, pMsg, pQueryMsg->prevResultLen); + pMsg += pQueryMsg->prevResultLen; + } + + if (pQueryMsg->tbnameCondLen > 0) { + param->tbnameCond = calloc(1, pQueryMsg->tbnameCondLen + 1); + if (param->tbnameCond == NULL) { + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _cleanup; + } + + strncpy(param->tbnameCond, pMsg, pQueryMsg->tbnameCondLen); pMsg += pQueryMsg->tbnameCondLen; } @@ -6434,9 +6276,9 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pMsg = (char *)pQueryMsg + pQueryMsg->tsOffset + pQueryMsg->tsLen; } - *sql = strndup(pMsg, pQueryMsg->sqlstrLen); + param->sql = strndup(pMsg, pQueryMsg->sqlstrLen); - if (!validateQuerySourceCols(pQueryMsg, *pExpr, *tagCols)) { + if (!validateQuerySourceCols(pQueryMsg, param->pExprMsg, param->pTagColumnInfo)) { code = TSDB_CODE_QRY_INVALID_MSG; goto _cleanup; } @@ -6447,19 +6289,11 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->interval.interval, pQueryMsg->fillType, pQueryMsg->tsLen, pQueryMsg->tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset); - qDebug("qmsg:%p, sql:%s", pQueryMsg, *sql); + qDebug("qmsg:%p, sql:%s", pQueryMsg, param->sql); return TSDB_CODE_SUCCESS; _cleanup: - tfree(*pExpr); - taosArrayDestroy(*pTableIdList); - *pTableIdList = NULL; - tfree(*tbnameCond); - tfree(*groupbyCols); - tfree(*tagCols); - tfree(*tagCond); - tfree(*sql); - + freeParam(param); return code; } @@ -6484,7 +6318,7 @@ static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable return TSDB_CODE_SUCCESS; } -static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, +int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, SColumnInfo* pTagCols) { *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -6516,9 +6350,9 @@ static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t num type = TSDB_DATA_TYPE_DOUBLE; bytes = tDataTypes[type].bytes; } else if (pExprs[i].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX && pExprs[i].base.functionId == TSDB_FUNC_TAGPRJ) { // parse the normal column - SSchema s = tGetTableNameColumnSchema(); - type = s.type; - bytes = s.bytes; + SSchema* s = tGetTbnameColumnSchema(); + type = s->type; + bytes = s->bytes; } else if (pExprs[i].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX) { SSchema s = tGetBlockDistColumnSchema(); type = s.type; @@ -6550,10 +6384,10 @@ static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t num type = pCol->type; bytes = pCol->bytes; } else { - SSchema s = tGetTableNameColumnSchema(); + SSchema* s = tGetTbnameColumnSchema(); - type = s.type; - bytes = s.bytes; + type = s->type; + bytes = s->bytes; } } @@ -6594,7 +6428,7 @@ static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, 
int32_t num return TSDB_CODE_SUCCESS; } -static SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code) { +SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code) { if (pQueryMsg->numOfGroupCols == 0) { return NULL; } @@ -6711,15 +6545,13 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) { } } -static void freeQInfo(SQInfo *pQInfo); - static void calResultBufSize(SQuery* pQuery) { const int32_t RESULT_MSG_MIN_SIZE = 1024 * (1024 + 512); // bytes const int32_t RESULT_MSG_MIN_ROWS = 8192; const float RESULT_THRESHOLD_RATIO = 0.85f; if (isProjQuery(pQuery)) { - int32_t numOfRes = RESULT_MSG_MIN_SIZE / pQuery->rowSize; + int32_t numOfRes = RESULT_MSG_MIN_SIZE / pQuery->resultRowSize; if (numOfRes < RESULT_MSG_MIN_ROWS) { numOfRes = RESULT_MSG_MIN_ROWS; } @@ -6732,7 +6564,7 @@ static void calResultBufSize(SQuery* pQuery) { } } -static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, +SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* sql) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -6775,17 +6607,27 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou goto _cleanup; } - int32_t srcSize = 0; + pQuery->srcRowSize = 0; + pQuery->maxSrcColumnSize = 0; for (int16_t i = 0; i < numOfCols; ++i) { pQuery->colList[i] = pQueryMsg->colList[i]; pQuery->colList[i].filters = tFilterInfoDup(pQueryMsg->colList[i].filters, pQuery->colList[i].numOfFilters); - srcSize += pQuery->colList[i].bytes; + + pQuery->srcRowSize += pQuery->colList[i].bytes; + if (pQuery->maxSrcColumnSize < pQuery->colList[i].bytes) { + pQuery->maxSrcColumnSize = pQuery->colList[i].bytes; + } } // calculate the result row size for (int16_t col = 0; col < numOfOutput; ++col) { assert(pExprs[col].bytes > 0); - pQuery->rowSize += pExprs[col].bytes; + pQuery->resultRowSize += pExprs[col].bytes; + + // keep the tag length + if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) { + pQuery->tagLen += pExprs[col].bytes; + } } doUpdateExprColumnIndex(pQuery); @@ -6833,26 +6675,11 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou size_t numOfGroups = 0; if (pTableGroupInfo->pGroupList != NULL) { numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList); + STableGroupInfo* pTableqinfo = &pQInfo->tableqinfoGroupInfo; - pQInfo->tableqinfoGroupInfo.pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES); - pQInfo->tableqinfoGroupInfo.numOfTables = pTableGroupInfo->numOfTables; - pQInfo->tableqinfoGroupInfo.map = taosHashInit(pTableGroupInfo->numOfTables, - taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - } - - pQInfo->runtimeEnv.interBufSize = getOutputInterResultBufSize(pQuery); - pQInfo->runtimeEnv.summary.tableInfoSize += (pTableGroupInfo->numOfTables * sizeof(STableQueryInfo)); - - pQInfo->runtimeEnv.pResultRowHashTable = taosHashInit(pTableGroupInfo->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - pQInfo->runtimeEnv.keyBuf = malloc(TSDB_MAX_BYTES_PER_ROW); // todo opt size - pQInfo->runtimeEnv.pool = initResultRowPool(getResultRowSize(&pQInfo->runtimeEnv)); - pQInfo->runtimeEnv.prevRow = malloc(POINTER_BYTES * pQuery->numOfCols + srcSize); - - char* start = POINTER_BYTES * 
pQuery->numOfCols + (char*) pQInfo->runtimeEnv.prevRow; - pQInfo->runtimeEnv.prevRow[0] = start; - - for(int32_t i = 1; i < pQuery->numOfCols; ++i) { - pQInfo->runtimeEnv.prevRow[i] = pQInfo->runtimeEnv.prevRow[i - 1] + pQuery->colList[i-1].bytes; + pTableqinfo->pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES); + pTableqinfo->numOfTables = pTableGroupInfo->numOfTables; + pTableqinfo->map = taosHashInit(pTableGroupInfo->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); } pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo)); @@ -6873,6 +6700,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou changeExecuteScanOrder(pQInfo, pQueryMsg, stableQuery); pQInfo->runtimeEnv.queryWindowIdentical = true; + bool groupByCol = isGroupbyColumn(pQuery->pGroupbyExpr); + STimeWindow window = pQuery->window; int32_t index = 0; @@ -6896,7 +6725,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou } void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo); - STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, info->pTable, window, buf); + STableQueryInfo* item = createTableQueryInfo(pQuery, info->pTable, groupByCol, window, buf); if (item == NULL) { goto _cleanup; } @@ -6909,8 +6738,10 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou index += 1; } } + colIdCheck(pQuery); + // todo refactor pQInfo->runtimeEnv.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX); qDebug("qmsg:%p QInfo:%p created", pQueryMsg, pQInfo); @@ -6941,7 +6772,7 @@ _cleanup: return NULL; } -static bool isValidQInfo(void *param) { +bool isValidQInfo(void *param) { SQInfo *pQInfo = (SQInfo *)param; if (pQInfo == NULL) { return false; @@ -6955,21 +6786,26 @@ static bool isValidQInfo(void *param) { return (sig == (uint64_t)pQInfo); } -static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, bool isSTable) { +int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable) { int32_t code = TSDB_CODE_SUCCESS; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - STSBuf *pTSBuf = NULL; + STSBuf *pTsBuf = NULL; if (pQueryMsg->tsLen > 0) { // open new file to save the result char *tsBlock = (char *) pQueryMsg + pQueryMsg->tsOffset; - pTSBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder, vgId); + pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder, vgId); - tsBufResetPos(pTSBuf); - bool ret = tsBufNextPos(pTSBuf); + tsBufResetPos(pTsBuf); + bool ret = tsBufNextPos(pTsBuf); UNUSED(ret); } + SArray* prevResult = NULL; + if (pQueryMsg->prevResultLen > 0) { + prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen); + } + pQuery->precision = tsdbGetCfg(tsdb)->precision; if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->window.skey > pQuery->window.ekey)) || @@ -6978,6 +6814,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ pQuery->window.ekey, pQuery->order.order); setQueryStatus(pQuery, QUERY_COMPLETED); pQInfo->tableqinfoGroupInfo.numOfTables = 0; + // todo free memory return TSDB_CODE_SUCCESS; } @@ -6988,7 +6825,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ } // filter the qualified - if ((code = doInitQInfo(pQInfo, pTSBuf, tsdb, vgId, 
isSTable)) != TSDB_CODE_SUCCESS) { + if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { goto _error; } @@ -7000,7 +6837,7 @@ _error: return code; } -static void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { +void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { if (pFilter == NULL || numOfFilters == 0) { return; } @@ -7054,7 +6891,7 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) { return NULL; } -static void freeQInfo(SQInfo *pQInfo) { +void freeQInfo(SQInfo *pQInfo) { if (!isValidQInfo(pQInfo)) { return; } @@ -7062,7 +6899,6 @@ static void freeQInfo(SQInfo *pQInfo) { qDebug("QInfo:%p start to free QInfo", pQInfo); releaseQueryBuf(pQInfo->tableqinfoGroupInfo.numOfTables); - teardownQueryRuntimeEnv(&pQInfo->runtimeEnv); SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -7124,7 +6960,7 @@ static void freeQInfo(SQInfo *pQInfo) { tfree(pQInfo); } -static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { +size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; /* @@ -7132,7 +6968,7 @@ static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { * the returned row size is equalled to 1 * TODO handle the case that the file is too large to send back one time */ - if (isTSCompQuery(pQuery) && (*numOfRows) > 0) { + if (isTsCompQuery(pQuery) && (*numOfRows) > 0) { struct stat fStat; FILE *f = *(FILE **)pQuery->sdata[0]->data; if ((f != NULL) && (fstat(fileno(f), &fStat) == 0)) { @@ -7143,16 +6979,16 @@ static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { return 0; } } else { - return (size_t)(pQuery->rowSize * (*numOfRows)); + return (size_t)(pQuery->resultRowSize * (*numOfRows)); } } -static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { +int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { // the remained number of retrieved rows, not the interpolated result SQuery *pQuery = pQInfo->runtimeEnv.pQuery; // load data from file to msg buffer - if (isTSCompQuery(pQuery)) { + if (isTsCompQuery(pQuery)) { FILE *f = *(FILE **)pQuery->sdata[0]->data; // TODO refactor @@ -7160,14 +6996,23 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { if (f) { off_t s = lseek(fileno(f), 0, SEEK_END); - qDebug("QInfo:%p ts comp data return, file:%p, size:%"PRId64, pQInfo, f, s); + qDebug("QInfo:%p ts comp data return, file:%p, size:%"PRId64, pQInfo, f, (uint64_t)s); if (fseek(f, 0, SEEK_SET) >= 0) { size_t sz = fread(data, 1, s, f); if(sz < s) { // todo handle error + qError("fread(f:%p,%d) failed, rsize:%" PRId64 ", expect size:%" PRId64, f, fileno(f), (uint64_t)sz, (uint64_t)s); assert(0); } } else { UNUSED(s); + qError("fseek(f:%p,%d) failed, error:%s", f, fileno(f), strerror(errno)); + assert(0); + } + + if (s <= (sizeof(STSBufFileHeader) + sizeof(STSGroupBlockInfo) + 6 * sizeof(int32_t))) { + qDump(data, s); + + assert(0); } fclose(f); @@ -7193,176 +7038,7 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { return TSDB_CODE_SUCCESS; } -typedef struct SQueryMgmt { - SCacheObj *qinfoPool; // query handle pool - int32_t vgId; - bool closed; - pthread_mutex_t lock; -} SQueryMgmt; - -int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qinfo_t* pQInfo) { - assert(pQueryMsg != NULL && tsdb != NULL); - - int32_t code = TSDB_CODE_SUCCESS; - - char *sql = NULL; - char *tagCond = NULL; - char *tbnameCond = NULL; - SArray *pTableIdList = NULL; - SSqlFuncMsg 
**pExprMsg = NULL; - SSqlFuncMsg **pSecExprMsg = NULL; - SExprInfo *pExprs = NULL; - SExprInfo *pSecExprs = NULL; - - SColIndex *pGroupColIndex = NULL; - SColumnInfo *pTagColumnInfo = NULL; - SSqlGroupbyExpr *pGroupbyExpr = NULL; - - code = convertQueryMsg(pQueryMsg, &pTableIdList, &pExprMsg, &pSecExprMsg, &tagCond, &tbnameCond, &pGroupColIndex, &pTagColumnInfo, &sql); - if (code != TSDB_CODE_SUCCESS) { - goto _over; - } - - if (pQueryMsg->numOfTables <= 0) { - qError("Invalid number of tables to query, numOfTables:%d", pQueryMsg->numOfTables); - code = TSDB_CODE_QRY_INVALID_MSG; - goto _over; - } - - if (pTableIdList == NULL || taosArrayGetSize(pTableIdList) == 0) { - qError("qmsg:%p, SQueryTableMsg wrong format", pQueryMsg); - code = TSDB_CODE_QRY_INVALID_MSG; - goto _over; - } - - if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, &pExprs, pExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { - goto _over; - } - - if (pSecExprMsg != NULL) { - if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, &pSecExprs, pSecExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { - goto _over; - } - } - - pGroupbyExpr = createGroupbyExprFromMsg(pQueryMsg, pGroupColIndex, &code); - if ((pGroupbyExpr == NULL && pQueryMsg->numOfGroupCols != 0) || code != TSDB_CODE_SUCCESS) { - goto _over; - } - - bool isSTableQuery = false; - STableGroupInfo tableGroupInfo = {0}; - int64_t st = taosGetTimestampUs(); - - if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_TABLE_QUERY)) { - STableIdInfo *id = taosArrayGet(pTableIdList, 0); - - qDebug("qmsg:%p query normal table, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - if ((code = tsdbGetOneTableGroup(tsdb, id->uid, pQueryMsg->window.skey, &tableGroupInfo)) != TSDB_CODE_SUCCESS) { - goto _over; - } - } else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_STABLE_QUERY)) { - isSTableQuery = true; - - // also note there's possibility that only one table in the super table - if (!TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY)) { - STableIdInfo *id = taosArrayGet(pTableIdList, 0); - - // group by normal column, do not pass the group by condition to tsdb to group table into different group - int32_t numOfGroupByCols = pQueryMsg->numOfGroupCols; - if (pQueryMsg->numOfGroupCols == 1 && !TSDB_COL_IS_TAG(pGroupColIndex->flag)) { - numOfGroupByCols = 0; - } - - qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, tagCond, pQueryMsg->tagCondLen, - pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex, numOfGroupByCols); - - if (code != TSDB_CODE_SUCCESS) { - qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code)); - goto _over; - } - } else { - code = tsdbGetTableGroupFromIdList(tsdb, pTableIdList, &tableGroupInfo); - if (code != TSDB_CODE_SUCCESS) { - goto _over; - } - - qDebug("qmsg:%p query on %" PRIzu " tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); - } - - int64_t el = taosGetTimestampUs() - st; - qDebug("qmsg:%p tag filter completed, numOfTables:%" PRIzu ", elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); - } else { - assert(0); - } - - code = checkForQueryBuf(tableGroupInfo.numOfTables); - if (code != TSDB_CODE_SUCCESS) { // not enough query buffer, abort - goto _over; - } - - (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, pSecExprs, 
&tableGroupInfo, pTagColumnInfo, isSTableQuery, sql); - - sql = NULL; - pExprs = NULL; - pSecExprs = NULL; - pGroupbyExpr = NULL; - pTagColumnInfo = NULL; - - if ((*pQInfo) == NULL) { - code = TSDB_CODE_QRY_OUT_OF_MEMORY; - goto _over; - } - - code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery); - -_over: - free(tagCond); - free(tbnameCond); - free(pGroupColIndex); - - if (pGroupbyExpr != NULL) { - taosArrayDestroy(pGroupbyExpr->columnInfo); - free(pGroupbyExpr); - } - - free(pTagColumnInfo); - free(sql); - free(pExprs); - free(pSecExprs); - - free(pExprMsg); - free(pSecExprMsg); - - taosArrayDestroy(pTableIdList); - - for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { - SColumnInfo* column = pQueryMsg->colList + i; - freeColumnFilterInfo(column->filters, column->numOfFilters); - } - - //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; - if (code != TSDB_CODE_SUCCESS) { - *pQInfo = NULL; - } - - // if failed to add ref for all tables in this query, abort current query - return code; -} - -void qDestroyQueryInfo(qinfo_t qHandle) { - SQInfo* pQInfo = (SQInfo*) qHandle; - if (!isValidQInfo(pQInfo)) { - return; - } - - qDebug("QInfo:%p query completed", pQInfo); - queryCostStatis(pQInfo); // print the query cost summary - freeQInfo(pQInfo); -} - -static bool doBuildResCheck(SQInfo* pQInfo) { +bool doBuildResCheck(SQInfo* pQInfo) { bool buildRes = false; pthread_mutex_lock(&pQInfo->lock); @@ -7382,212 +7058,20 @@ static bool doBuildResCheck(SQInfo* pQInfo) { return buildRes; } -bool qTableQuery(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - assert(pQInfo && pQInfo->signature == pQInfo); - int64_t threadId = taosGetSelfPthreadId(); - - int64_t curOwner = 0; - if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { - qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); - pQInfo->code = TSDB_CODE_QRY_IN_EXEC; - return false; - } - - pQInfo->startExecTs = taosGetTimestampSec(); - - if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%p it is already killed, abort", pQInfo); - return doBuildResCheck(pQInfo); - } - - if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { - qDebug("QInfo:%p no table exists for query, abort", pQInfo); - setQueryStatus(pQInfo->runtimeEnv.pQuery, QUERY_COMPLETED); - return doBuildResCheck(pQInfo); - } - - // error occurs, record the error code and return to client - int32_t ret = setjmp(pQInfo->runtimeEnv.env); - if (ret != TSDB_CODE_SUCCESS) { - pQInfo->code = ret; - qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); - return doBuildResCheck(pQInfo); - } - - qDebug("QInfo:%p query task is launched", pQInfo); - - SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) { - assert(pQInfo->runtimeEnv.pQueryHandle == NULL); - buildTagQueryResult(pQInfo); - } else if (pQInfo->runtimeEnv.stableQuery) { - stableQueryImpl(pQInfo); - } else if (pQInfo->runtimeEnv.queryBlockDist){ - buildTableBlockDistResult(pQInfo); - } else { - tableQueryImpl(pQInfo); - } - - SQuery* pQuery = pRuntimeEnv->pQuery; - if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%p query is killed", pQInfo); - } else if (pQuery->rec.rows == 0) { - qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); - } else { - qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows", - pQInfo, pQuery->rec.rows, 
pQuery->rec.total + pQuery->rec.rows); - } - - return doBuildResCheck(pQInfo); -} - -int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContext) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - qError("QInfo:%p invalid qhandle", pQInfo); - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - *buildRes = false; - if (IS_QUERY_KILLED(pQInfo)) { - qDebug("QInfo:%p query is killed, code:0x%08x", pQInfo, pQInfo->code); - return pQInfo->code; - } - - int32_t code = TSDB_CODE_SUCCESS; - - if (tsRetrieveBlockingModel) { - pQInfo->rspContext = pRspContext; - tsem_wait(&pQInfo->ready); - *buildRes = true; - code = pQInfo->code; - } else { - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - - pthread_mutex_lock(&pQInfo->lock); - - assert(pQInfo->rspContext == NULL); - if (pQInfo->dataReady == QUERY_RESULT_READY) { - *buildRes = true; - qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%" PRId64 ", code:%s", pQInfo, pQuery->rowSize, - pQuery->rec.rows, tstrerror(pQInfo->code)); - } else { - *buildRes = false; - qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); - pQInfo->rspContext = pRspContext; - assert(pQInfo->rspContext != NULL); - } - - code = pQInfo->code; - pthread_mutex_unlock(&pQInfo->lock); - } - - return code; -} - -int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen, bool* continueExec) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - size_t size = getResultSize(pQInfo, &pQuery->rec.rows); - - size += sizeof(int32_t); - size += sizeof(STableIdInfo) * taosHashGetSize(pQInfo->arrTableIdInfo); - - *contLen = (int32_t)(size + sizeof(SRetrieveTableRsp)); - - // todo proper handle failed to allocate memory, - // current solution only avoid crash, but cannot return error code to client - *pRsp = (SRetrieveTableRsp *)rpcMallocCont(*contLen); - if (*pRsp == NULL) { - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - - (*pRsp)->numOfRows = htonl((int32_t)pQuery->rec.rows); - - if (pQInfo->code == TSDB_CODE_SUCCESS) { - (*pRsp)->offset = htobe64(pQuery->limit.offset); - (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); - } else { - (*pRsp)->offset = 0; - (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); - } - - (*pRsp)->precision = htons(pQuery->precision); - if (pQuery->rec.rows > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { - doDumpQueryResult(pQInfo, (*pRsp)->data); - } else { - setQueryStatus(pQuery, QUERY_OVER); - } - - pQInfo->rspContext = NULL; - pQInfo->dataReady = QUERY_RESULT_NOT_READY; - - if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { - // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. 
- *continueExec = false; - (*pRsp)->completed = 1; // notify no more result to client - } else { - *continueExec = true; - qDebug("QInfo:%p has more results to retrieve", pQInfo); - } - - return pQInfo->code; -} - -int32_t qQueryCompleted(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - SQuery* pQuery = pQInfo->runtimeEnv.pQuery; - return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); -} - -int32_t qKillQuery(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - setQueryKilled(pQInfo); - - // Wait for the query executing thread being stopped/ - // Once the query is stopped, the owner of qHandle will be cleared immediately. - while (pQInfo->owner != 0) { - taosMsleep(100); - } - - return TSDB_CODE_SUCCESS; -} - static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) { + if (val == NULL) { + setNull(output, type, bytes); + return; + } + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - if (val == NULL) { - setVardataNull(output, type); - } else { - memcpy(output, val, varDataTLen(val)); - } + memcpy(output, val, varDataTLen(val)); } else { - if (val == NULL) { - setNull(output, type, bytes); - } else { // todo here stop will cause client crash - memcpy(output, val, bytes); - } + memcpy(output, val, bytes); } } -static void buildTagQueryResult(SQInfo* pQInfo) { +void buildTagQueryResult(SQInfo* pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; @@ -7666,7 +7150,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) { qDebug("QInfo:%p create count(tbname) query, res:%d rows:1", pQInfo, count); } else { // return only the tags|table name etc. 
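        /* Editorial note (not part of this patch): tGetTbnameColumnSchema() replaces
         * tGetTableNameColumnSchema() just below and returns an SSchema* rather than an
         * SSchema by value, so the call sites switch from '.' to '->' access; the
         * bytes/type values read from the schema are unchanged. */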
count = 0; - SSchema tbnameSchema = tGetTableNameColumnSchema(); + SSchema* tbnameSchema = tGetTbnameColumnSchema(); int32_t maxNumOfTables = (int32_t)pQuery->rec.capacity; if (pQuery->limit.limit >= 0 && pQuery->limit.limit < pQuery->rec.capacity) { @@ -7694,11 +7178,11 @@ static void buildTagQueryResult(SQInfo* pQInfo) { } if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { - bytes = tbnameSchema.bytes; - type = tbnameSchema.type; + bytes = tbnameSchema->bytes; + type = tbnameSchema->type; data = tsdbGetTableName(item->pTable); - dst = pQuery->sdata[j]->data + count * tbnameSchema.bytes; + dst = pQuery->sdata[j]->data + count * tbnameSchema->bytes; } else { type = pExprInfo[j].type; bytes = pExprInfo[j].bytes; @@ -7761,157 +7245,3 @@ void releaseQueryBuf(size_t numOfTables) { // restore value is not enough buffer available atomic_add_fetch_64(&tsQueryBufferSizeBytes, t); } - -void* qGetResultRetrieveMsg(qinfo_t qinfo) { - SQInfo* pQInfo = (SQInfo*) qinfo; - assert(pQInfo != NULL); - - return pQInfo->rspContext; -} - -void freeqinfoFn(void *qhandle) { - void** handle = qhandle; - if (handle == NULL || *handle == NULL) { - return; - } - - qKillQuery(*handle); - qDestroyQueryInfo(*handle); -} - -void* qOpenQueryMgmt(int32_t vgId) { - const int32_t REFRESH_HANDLE_INTERVAL = 30; // every 30 seconds, refresh handle pool - - char cacheName[128] = {0}; - sprintf(cacheName, "qhandle_%d", vgId); - - SQueryMgmt* pQueryMgmt = calloc(1, sizeof(SQueryMgmt)); - if (pQueryMgmt == NULL) { - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return NULL; - } - - pQueryMgmt->qinfoPool = taosCacheInit(TSDB_CACHE_PTR_KEY, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName); - pQueryMgmt->closed = false; - pQueryMgmt->vgId = vgId; - - pthread_mutex_init(&pQueryMgmt->lock, NULL); - - qDebug("vgId:%d, open querymgmt success", vgId); - return pQueryMgmt; -} - -static void queryMgmtKillQueryFn(void* handle) { - void** fp = (void**)handle; - qKillQuery(*fp); -} - -void qQueryMgmtNotifyClosed(void* pQMgmt) { - if (pQMgmt == NULL) { - return; - } - - SQueryMgmt* pQueryMgmt = pQMgmt; - qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId); - - pthread_mutex_lock(&pQueryMgmt->lock); - pQueryMgmt->closed = true; - pthread_mutex_unlock(&pQueryMgmt->lock); - - taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); -} - -void qQueryMgmtReOpen(void *pQMgmt) { - if (pQMgmt == NULL) { - return; - } - - SQueryMgmt *pQueryMgmt = pQMgmt; - qDebug("vgId:%d, set querymgmt reopen", pQueryMgmt->vgId); - - pthread_mutex_lock(&pQueryMgmt->lock); - pQueryMgmt->closed = false; - pthread_mutex_unlock(&pQueryMgmt->lock); -} - -void qCleanupQueryMgmt(void* pQMgmt) { - if (pQMgmt == NULL) { - return; - } - - SQueryMgmt* pQueryMgmt = pQMgmt; - int32_t vgId = pQueryMgmt->vgId; - - assert(pQueryMgmt->closed); - - SCacheObj* pqinfoPool = pQueryMgmt->qinfoPool; - pQueryMgmt->qinfoPool = NULL; - - taosCacheCleanup(pqinfoPool); - pthread_mutex_destroy(&pQueryMgmt->lock); - tfree(pQueryMgmt); - - qDebug("vgId:%d, queryMgmt cleanup completed", vgId); -} - -void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { - if (pMgmt == NULL) { - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } - - SQueryMgmt *pQueryMgmt = pMgmt; - if (pQueryMgmt->qinfoPool == NULL) { - qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo); - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } - - pthread_mutex_lock(&pQueryMgmt->lock); - if (pQueryMgmt->closed) { - 
pthread_mutex_unlock(&pQueryMgmt->lock); - qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } else { - TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; - void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), - (getMaximumIdleDurationSec()*1000)); - pthread_mutex_unlock(&pQueryMgmt->lock); - - return handle; - } -} - -void** qAcquireQInfo(void* pMgmt, uint64_t _key) { - SQueryMgmt *pQueryMgmt = pMgmt; - - if (pQueryMgmt->closed) { - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } - - if (pQueryMgmt->qinfoPool == NULL) { - terrno = TSDB_CODE_QRY_INVALID_QHANDLE; - return NULL; - } - - TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; - void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); - if (handle == NULL || *handle == NULL) { - terrno = TSDB_CODE_QRY_INVALID_QHANDLE; - return NULL; - } else { - return handle; - } -} - -void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { - SQueryMgmt *pQueryMgmt = pMgmt; - if (pQueryMgmt->qinfoPool == NULL) { - return NULL; - } - - taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle); - return 0; -} diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index c82f8f632d..bc6376b807 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -321,7 +321,7 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { return pFillInfo->numOfRows - pFillInfo->index; } -SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, +SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pCol, void* handle) { if (fillType == TSDB_FILL_NONE) { @@ -414,7 +414,7 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) } // copy the data into source data buffer -void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) { +void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) { for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { memcpy(pFillInfo->pData[i], pInput[i]->data, pFillInfo->numOfRows * pFillInfo->pFillCol[i].col.bytes); } diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index d0839b3dc4..2efd4f76ea 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -279,7 +279,7 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { pExpr->nSQLOptr = optrType; pExpr->pLeft = pLeft; - if (pRight == NULL) { + if (pLeft != NULL && pRight == NULL) { pRight = calloc(1, sizeof(tSQLExpr)); } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index dc01de0f92..b7a7fd28e9 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -19,6 +19,15 @@ #include "qExecutor.h" #include "qUtil.h" +#include "tbuffer.h" +#include "tlosertree.h" +#include "queryLog.h" + +typedef struct SCompSupporter { + STableQueryInfo **pTableQueryInfo; + int32_t *rowIndex; + int32_t order; +} SCompSupporter; int32_t getOutputInterResultBufSize(SQuery* pQuery) { int32_t size = 0; @@ -228,4 +237,336 @@ void* destroyResultRowPool(SResultRowPool* p) { tfree(p); return NULL; -} \ No newline at end of file +} + +void interResToBinary(SBufferWriter* bw, SArray* 
pRes, int32_t tagLen) { + uint32_t numOfGroup = (uint32_t) taosArrayGetSize(pRes); + tbufWriteUint32(bw, numOfGroup); + tbufWriteUint16(bw, tagLen); + + for(int32_t i = 0; i < numOfGroup; ++i) { + SInterResult* pOne = taosArrayGet(pRes, i); + if (tagLen > 0) { + tbufWriteBinary(bw, pOne->tags, tagLen); + } + + uint32_t numOfCols = (uint32_t) taosArrayGetSize(pOne->pResult); + tbufWriteUint32(bw, numOfCols); + for(int32_t j = 0; j < numOfCols; ++j) { + SStddevInterResult* p = taosArrayGet(pOne->pResult, j); + uint32_t numOfRows = (uint32_t) taosArrayGetSize(p->pResult); + + tbufWriteUint16(bw, p->colId); + tbufWriteUint32(bw, numOfRows); + + for(int32_t k = 0; k < numOfRows; ++k) { + SResPair v = *(SResPair*) taosArrayGet(p->pResult, k); + tbufWriteDouble(bw, v.avg); + tbufWriteInt64(bw, v.key); + } + } + } +} + +SArray* interResFromBinary(const char* data, int32_t len) { + SBufferReader br = tbufInitReader(data, len, false); + uint32_t numOfGroup = tbufReadUint32(&br); + uint16_t tagLen = tbufReadUint16(&br); + + char* tag = NULL; + if (tagLen > 0) { + tag = calloc(1, tagLen); + } + + SArray* pResult = taosArrayInit(4, sizeof(SInterResult)); + + for(int32_t i = 0; i < numOfGroup; ++i) { + if (tagLen > 0) { + memset(tag, 0, tagLen); + tbufReadToBinary(&br, tag, tagLen); + } + + uint32_t numOfCols = tbufReadUint32(&br); + + SArray* p = taosArrayInit(numOfCols, sizeof(SStddevInterResult)); + for(int32_t j = 0; j < numOfCols; ++j) { + int16_t colId = tbufReadUint16(&br); + int32_t numOfRows = tbufReadUint32(&br); + + SStddevInterResult interRes = {.colId = colId, .pResult = taosArrayInit(4, sizeof(struct SResPair)),}; + for(int32_t k = 0; k < numOfRows; ++k) { + SResPair px = {0}; + px.avg = tbufReadDouble(&br); + px.key = tbufReadInt64(&br); + + taosArrayPush(interRes.pResult, &px); + } + + taosArrayPush(p, &interRes); + } + + char* p1 = NULL; + if (tagLen > 0) { + p1 = malloc(tagLen); + memcpy(p1, tag, tagLen); + } + + SInterResult d = {.pResult = p, .tags = p1,}; + taosArrayPush(pResult, &d); + } + + tfree(tag); + return pResult; +} + +void freeInterResult(void* param) { + SInterResult* pResult = (SInterResult*) param; + tfree(pResult->tags); + + int32_t numOfCols = (int32_t) taosArrayGetSize(pResult->pResult); + for(int32_t i = 0; i < numOfCols; ++i) { + SStddevInterResult *p = taosArrayGet(pResult->pResult, i); + taosArrayDestroy(p->pResult); + } + + taosArrayDestroy(pResult->pResult); +} + +void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) { + assert(pGroupResInfo != NULL); + + taosArrayDestroy(pGroupResInfo->pRows); + pGroupResInfo->pRows = NULL; + pGroupResInfo->index = 0; +} + +void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo, int32_t offset) { + if (pGroupResInfo->pRows != NULL) { + taosArrayDestroy(pGroupResInfo->pRows); + } + + pGroupResInfo->pRows = taosArrayFromList(pResultInfo->pResult, pResultInfo->size, POINTER_BYTES); + pGroupResInfo->index = offset; + + assert(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo)); +} + +bool hasRemainData(SGroupResInfo* pGroupResInfo) { + if (pGroupResInfo->pRows == NULL) { + return false; + } + + return pGroupResInfo->index < taosArrayGetSize(pGroupResInfo->pRows); +} + +bool incNextGroup(SGroupResInfo* pGroupResInfo) { + return (++pGroupResInfo->currentGroup) < pGroupResInfo->totalGroup; +} + +int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) { + assert(pGroupResInfo != NULL); + if (pGroupResInfo->pRows == 0) { + return 0; + } + + return (int32_t) taosArrayGetSize(pGroupResInfo->pRows); +} + 
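/*
 * Editorial sketch (not part of this patch, assumptions noted): the byte layout
 * produced by interResToBinary() and consumed by interResFromBinary() above,
 * reproduced with plain memcpy instead of the tbuffer SBufferWriter/SBufferReader
 * helpers the real code uses. These intermediate results (per-column averages keyed
 * by timestamp) appear to be what prevResult/prevResultLen in the query message carry
 * between query passes. The column id (3) and the row values are made up for
 * illustration only.
 *
 *   u32 numOfGroup | u16 tagLen | per group: tagLen bytes of tag data,
 *   u32 numOfCols  | per column: u16 colId, u32 numOfRows,
 *   per row: double avg, i64 key
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* append len bytes to dst at off, return the new offset */
static size_t put(unsigned char *dst, size_t off, const void *src, size_t len) {
  memcpy(dst + off, src, len);
  return off + len;
}

int main(void) {
  unsigned char buf[128];
  size_t off = 0;

  uint32_t numOfGroup = 1;
  uint16_t tagLen     = 0;                 /* no tag payload in this toy example */
  off = put(buf, off, &numOfGroup, sizeof(numOfGroup));
  off = put(buf, off, &tagLen,     sizeof(tagLen));

  uint32_t numOfCols = 1;
  uint16_t colId     = 3;                  /* hypothetical column id */
  uint32_t numOfRows = 2;
  double   avg[2] = {1.5, 2.5};
  int64_t  key[2] = {1600000000000LL, 1600000060000LL};

  off = put(buf, off, &numOfCols, sizeof(numOfCols));
  off = put(buf, off, &colId,     sizeof(colId));
  off = put(buf, off, &numOfRows, sizeof(numOfRows));
  for (int k = 0; k < 2; ++k) {
    off = put(buf, off, &avg[k], sizeof(double));
    off = put(buf, off, &key[k], sizeof(int64_t));
  }

  /* decode side reads the fields back in exactly the same order */
  size_t   r = 0;
  uint32_t groups;
  uint16_t tl;
  memcpy(&groups, buf + r, sizeof(groups)); r += sizeof(groups);
  memcpy(&tl,     buf + r, sizeof(tl));     r += sizeof(tl);
  printf("groups:%" PRIu32 " tagLen:%" PRIu16 " payload:%zu bytes\n", groups, tl, off);
  return 0;
}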
+static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow) { + SQuery* pQuery = pRuntimeEnv->pQuery; + + for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { + int32_t functionId = pQuery->pExpr1[j].base.functionId; + + /* + * ts, tag, tagprj function can not decide the output number of current query + * the number of output result is decided by main output + */ + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) { + continue; + } + + SResultRowCellInfo *pResultInfo = getResultCell(pRuntimeEnv, pResultRow, j); + assert(pResultInfo != NULL); + + if (pResultInfo->numOfRes > 0) { + return pResultInfo->numOfRes; + } + } + + return 0; +} + +static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) { + int32_t left = *(int32_t *)pLeft; + int32_t right = *(int32_t *)pRight; + + SCompSupporter * supporter = (SCompSupporter *)param; + + int32_t leftPos = supporter->rowIndex[left]; + int32_t rightPos = supporter->rowIndex[right]; + + /* left source is exhausted */ + if (leftPos == -1) { + return 1; + } + + /* right source is exhausted*/ + if (rightPos == -1) { + return -1; + } + + STableQueryInfo** pList = supporter->pTableQueryInfo; + + SResultRowInfo *pWindowResInfo1 = &(pList[left]->resInfo); + SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos); + TSKEY leftTimestamp = pWindowRes1->win.skey; + + SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo); + SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); + TSKEY rightTimestamp = pWindowRes2->win.skey; + + if (leftTimestamp == rightTimestamp) { + return 0; + } + + if (supporter->order == TSDB_ORDER_ASC) { + return (leftTimestamp > rightTimestamp)? 1:-1; + } else { + return (leftTimestamp < rightTimestamp)? 1:-1; + } +} + +static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, void* qinfo) { + bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQuery); + + int32_t code = TSDB_CODE_SUCCESS; + + int32_t *posList = NULL; + SLoserTreeInfo *pTree = NULL; + STableQueryInfo **pTableQueryInfoList = NULL; + + size_t size = taosArrayGetSize(pTableList); + if (pGroupResInfo->pRows == NULL) { + pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES); + } + + posList = calloc(size, sizeof(int32_t)); + pTableQueryInfoList = malloc(POINTER_BYTES * size); + + if (pTableQueryInfoList == NULL || posList == NULL || pGroupResInfo->pRows == NULL || pGroupResInfo->pRows == NULL) { + qError("QInfo:%p failed alloc memory", qinfo); + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _end; + } + + int32_t numOfTables = 0; + for (int32_t i = 0; i < size; ++i) { + STableQueryInfo *item = taosArrayGetP(pTableList, i); + if (item->resInfo.size > 0) { + pTableQueryInfoList[numOfTables++] = item; + } + } + + // there is no data in current group + // no need to merge results since only one table in each group + if (numOfTables == 0) { + goto _end; + } + + SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQuery->order.order}; + + int32_t ret = tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn); + if (ret != TSDB_CODE_SUCCESS) { + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _end; + } + + int64_t lastTimestamp = ascQuery? 
INT64_MIN:INT64_MAX; + int64_t startt = taosGetTimestampMs(); + + while (1) { + int32_t tableIndex = pTree->pNode[0].index; + + SResultRowInfo *pWindowResInfo = &pTableQueryInfoList[tableIndex]->resInfo; + SResultRow *pWindowRes = getResultRow(pWindowResInfo, cs.rowIndex[tableIndex]); + + int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes); + if (num <= 0) { + cs.rowIndex[tableIndex] += 1; + + if (cs.rowIndex[tableIndex] >= pWindowResInfo->size) { + cs.rowIndex[tableIndex] = -1; + if (--numOfTables == 0) { // all input sources are exhausted + break; + } + } + } else { + assert((pWindowRes->win.skey >= lastTimestamp && ascQuery) || (pWindowRes->win.skey <= lastTimestamp && !ascQuery)); + + if (pWindowRes->win.skey != lastTimestamp) { + taosArrayPush(pGroupResInfo->pRows, &pWindowRes); + pWindowRes->numOfRows = (uint32_t) num; + } + + lastTimestamp = pWindowRes->win.skey; + + // move to the next row of current entry + if ((++cs.rowIndex[tableIndex]) >= pWindowResInfo->size) { + cs.rowIndex[tableIndex] = -1; + + // all input sources are exhausted + if ((--numOfTables) == 0) { + break; + } + } + } + + tLoserTreeAdjust(pTree, tableIndex + pTree->numOfEntries); + } + + int64_t endt = taosGetTimestampMs(); + + qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", qinfo, + pGroupResInfo->currentGroup, endt - startt); + + _end: + tfree(pTableQueryInfoList); + tfree(posList); + tfree(pTree); + + return code; +} + +int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo) { + int64_t st = taosGetTimestampUs(); + + while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) { + SArray *group = GET_TABLEGROUP(pQInfo, pGroupResInfo->currentGroup); + + int32_t ret = mergeIntoGroupResultImpl(&pQInfo->runtimeEnv, pGroupResInfo, group, pQInfo); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + // this group generates at least one result, return results + if (taosArrayGetSize(pGroupResInfo->pRows) > 0) { + break; + } + + qDebug("QInfo:%p no result in group %d, continue", pQInfo, pGroupResInfo->currentGroup); + cleanupGroupResInfo(pGroupResInfo); + incNextGroup(pGroupResInfo); + } + + if (pGroupResInfo->currentGroup >= pGroupResInfo->totalGroup && !hasRemainData(pGroupResInfo)) { + SET_STABLE_QUERY_OVER(pQInfo); + } + + int64_t elapsedTime = taosGetTimestampUs() - st; + qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pQInfo, + pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime); + + pQInfo->runtimeEnv.summary.firstStageMergeTime += elapsedTime; + return TSDB_CODE_SUCCESS; +} diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c new file mode 100644 index 0000000000..e262a3ad38 --- /dev/null +++ b/src/query/src/queryMain.c @@ -0,0 +1,542 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "os.h" +#include "qFill.h" +#include "taosmsg.h" +#include "tcache.h" +#include "tglobal.h" + +#include "exception.h" +#include "hash.h" +#include "texpr.h" +#include "qExecutor.h" +#include "qResultbuf.h" +#include "qUtil.h" +#include "query.h" +#include "queryLog.h" +#include "tlosertree.h" +#include "ttype.h" +#include "tcompare.h" + +typedef struct SQueryMgmt { + pthread_mutex_t lock; + SCacheObj *qinfoPool; // query handle pool + int32_t vgId; + bool closed; +} SQueryMgmt; + +static void queryMgmtKillQueryFn(void* handle) { + void** fp = (void**)handle; + qKillQuery(*fp); +} + +static void freeqinfoFn(void *qhandle) { + void** handle = qhandle; + if (handle == NULL || *handle == NULL) { + return; + } + + qKillQuery(*handle); + qDestroyQueryInfo(*handle); +} + +void freeParam(SQueryParam *param) { + tfree(param->sql); + tfree(param->tagCond); + tfree(param->tbnameCond); + tfree(param->pTableIdList); + tfree(param->pExprMsg); + tfree(param->pSecExprMsg); + tfree(param->pExprs); + tfree(param->pSecExprs); + tfree(param->pGroupColIndex); + tfree(param->pTagColumnInfo); + tfree(param->pGroupbyExpr); + tfree(param->prevResult); +} + +int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qinfo_t* pQInfo) { + assert(pQueryMsg != NULL && tsdb != NULL); + + int32_t code = TSDB_CODE_SUCCESS; + + SQueryParam param = {0}; + code = convertQueryMsg(pQueryMsg, ¶m); + if (code != TSDB_CODE_SUCCESS) { + goto _over; + } + + if (pQueryMsg->numOfTables <= 0) { + qError("Invalid number of tables to query, numOfTables:%d", pQueryMsg->numOfTables); + code = TSDB_CODE_QRY_INVALID_MSG; + goto _over; + } + + if (param.pTableIdList == NULL || taosArrayGetSize(param.pTableIdList) == 0) { + qError("qmsg:%p, SQueryTableMsg wrong format", pQueryMsg); + code = TSDB_CODE_QRY_INVALID_MSG; + goto _over; + } + + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, ¶m.pExprs, param.pExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + + if (param.pSecExprMsg != NULL) { + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + } + + param.pGroupbyExpr = createGroupbyExprFromMsg(pQueryMsg, param.pGroupColIndex, &code); + if ((param.pGroupbyExpr == NULL && pQueryMsg->numOfGroupCols != 0) || code != TSDB_CODE_SUCCESS) { + goto _over; + } + + bool isSTableQuery = false; + STableGroupInfo tableGroupInfo = {0}; + int64_t st = taosGetTimestampUs(); + + if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_TABLE_QUERY)) { + STableIdInfo *id = taosArrayGet(param.pTableIdList, 0); + + qDebug("qmsg:%p query normal table, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); + if ((code = tsdbGetOneTableGroup(tsdb, id->uid, pQueryMsg->window.skey, &tableGroupInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + } else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_STABLE_QUERY)) { + isSTableQuery = true; + + // also note there's possibility that only one table in the super table + if (!TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY)) { + STableIdInfo *id = taosArrayGet(param.pTableIdList, 0); + + // group by normal column, do not pass the group by condition to tsdb to group table into different group + int32_t numOfGroupByCols = pQueryMsg->numOfGroupCols; + if (pQueryMsg->numOfGroupCols == 1 && !TSDB_COL_IS_TAG(param.pGroupColIndex->flag)) { + 
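      /* Editorial note (not part of this patch): when the single GROUP BY column is a
       * normal column rather than a tag, the grouping is resolved later in the query
       * layer, so numOfGroupByCols is cleared here and no grouping condition is handed
       * down to tsdbQuerySTableByTagCond() below. */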
numOfGroupByCols = 0; + } + + qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); + code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen, + pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols); + + if (code != TSDB_CODE_SUCCESS) { + qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code)); + goto _over; + } + } else { + code = tsdbGetTableGroupFromIdList(tsdb, param.pTableIdList, &tableGroupInfo); + if (code != TSDB_CODE_SUCCESS) { + goto _over; + } + + qDebug("qmsg:%p query on %" PRIzu " tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); + } + + int64_t el = taosGetTimestampUs() - st; + qDebug("qmsg:%p tag filter completed, numOfTables:%" PRIzu ", elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); + } else { + assert(0); + } + + code = checkForQueryBuf(tableGroupInfo.numOfTables); + if (code != TSDB_CODE_SUCCESS) { // not enough query buffer, abort + goto _over; + } + + (*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, param.pTagColumnInfo, isSTableQuery, param.sql); + + param.sql = NULL; + param.pExprs = NULL; + param.pSecExprs = NULL; + param.pGroupbyExpr = NULL; + param.pTagColumnInfo = NULL; + + if ((*pQInfo) == NULL) { + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _over; + } + + code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, ¶m, isSTableQuery); + + _over: + if (param.pGroupbyExpr != NULL) { + taosArrayDestroy(param.pGroupbyExpr->columnInfo); + } + + taosArrayDestroy(param.pTableIdList); + param.pTableIdList = NULL; + + freeParam(¶m); + + for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { + SColumnInfo* column = pQueryMsg->colList + i; + freeColumnFilterInfo(column->filters, column->numOfFilters); + } + + //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; + if (code != TSDB_CODE_SUCCESS) { + *pQInfo = NULL; + } + + // if failed to add ref for all tables in this query, abort current query + return code; +} + +bool qTableQuery(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo *)qinfo; + assert(pQInfo && pQInfo->signature == pQInfo); + int64_t threadId = taosGetSelfPthreadId(); + + int64_t curOwner = 0; + if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { + qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); + pQInfo->code = TSDB_CODE_QRY_IN_EXEC; + return false; + } + + pQInfo->startExecTs = taosGetTimestampSec(); + + if (isQueryKilled(pQInfo)) { + qDebug("QInfo:%p it is already killed, abort", pQInfo); + return doBuildResCheck(pQInfo); + } + + if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { + qDebug("QInfo:%p no table exists for query, abort", pQInfo); + setQueryStatus(pQInfo->runtimeEnv.pQuery, QUERY_COMPLETED); + return doBuildResCheck(pQInfo); + } + + // error occurs, record the error code and return to client + int32_t ret = setjmp(pQInfo->runtimeEnv.env); + if (ret != TSDB_CODE_SUCCESS) { + pQInfo->code = ret; + qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); + return doBuildResCheck(pQInfo); + } + + qDebug("QInfo:%p query task is launched", pQInfo); + + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) { + assert(pQInfo->runtimeEnv.pQueryHandle == NULL); + buildTagQueryResult(pQInfo); + } else if (pQInfo->runtimeEnv.stableQuery) { + 
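    /* Editorial note (not part of this patch): stableQueryImpl() serves the super-table
     * and multi-table paths; the remaining branches handle the block-distribution query
     * and the plain single-table query via tableQueryImpl(). */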
stableQueryImpl(pQInfo); + } else if (pQInfo->runtimeEnv.queryBlockDist){ + buildTableBlockDistResult(pQInfo); + } else { + tableQueryImpl(pQInfo); + } + + SQuery* pQuery = pRuntimeEnv->pQuery; + if (isQueryKilled(pQInfo)) { + qDebug("QInfo:%p query is killed", pQInfo); + } else if (pQuery->rec.rows == 0) { + qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); + } else { + qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows", + pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); + } + + return doBuildResCheck(pQInfo); +} + +int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContext) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + qError("QInfo:%p invalid qhandle", pQInfo); + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + *buildRes = false; + if (IS_QUERY_KILLED(pQInfo)) { + qDebug("QInfo:%p query is killed, code:0x%08x", pQInfo, pQInfo->code); + return pQInfo->code; + } + + int32_t code = TSDB_CODE_SUCCESS; + + if (tsRetrieveBlockingModel) { + pQInfo->rspContext = pRspContext; + tsem_wait(&pQInfo->ready); + *buildRes = true; + code = pQInfo->code; + } else { + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + + pthread_mutex_lock(&pQInfo->lock); + + assert(pQInfo->rspContext == NULL); + if (pQInfo->dataReady == QUERY_RESULT_READY) { + *buildRes = true; + qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%" PRId64 ", code:%s", pQInfo, pQuery->resultRowSize, + pQuery->rec.rows, tstrerror(pQInfo->code)); + } else { + *buildRes = false; + qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); + pQInfo->rspContext = pRspContext; + assert(pQInfo->rspContext != NULL); + } + + code = pQInfo->code; + pthread_mutex_unlock(&pQInfo->lock); + } + + return code; +} + +int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen, bool* continueExec) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + size_t size = getResultSize(pQInfo, &pQuery->rec.rows); + + size += sizeof(int32_t); + size += sizeof(STableIdInfo) * taosHashGetSize(pQInfo->arrTableIdInfo); + + *contLen = (int32_t)(size + sizeof(SRetrieveTableRsp)); + + // current solution only avoid crash, but cannot return error code to client + *pRsp = (SRetrieveTableRsp *)rpcMallocCont(*contLen); + if (*pRsp == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + (*pRsp)->numOfRows = htonl((int32_t)pQuery->rec.rows); + + if (pQInfo->code == TSDB_CODE_SUCCESS) { + (*pRsp)->offset = htobe64(pQuery->limit.offset); + (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); + } else { + (*pRsp)->offset = 0; + (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); + } + + (*pRsp)->precision = htons(pQuery->precision); + if (pQuery->rec.rows > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { + doDumpQueryResult(pQInfo, (*pRsp)->data); + } else { + setQueryStatus(pQuery, QUERY_OVER); + } + + pQInfo->rspContext = NULL; + pQInfo->dataReady = QUERY_RESULT_NOT_READY; + + if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { + // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. 
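  /* Editorial note (not part of this patch): a killed query, or one whose status is
   * QUERY_OVER, is marked completed so the client stops issuing retrieve requests;
   * otherwise continueExec asks the caller to schedule another execution round for the
   * next batch of rows. The error path added below also frees the rpc response buffer
   * so a failed retrieve does not leak *pRsp. */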
+ *continueExec = false; + (*pRsp)->completed = 1; // notify no more result to client + } else { + *continueExec = true; + qDebug("QInfo:%p has more results to retrieve", pQInfo); + } + + // the memory should be freed if the code of pQInfo is not TSDB_CODE_SUCCESS + if (pQInfo->code != TSDB_CODE_SUCCESS) { + rpcFreeCont(*pRsp); + *pRsp = NULL; + } + + return pQInfo->code; +} + +void* qGetResultRetrieveMsg(qinfo_t qinfo) { + SQInfo* pQInfo = (SQInfo*) qinfo; + assert(pQInfo != NULL); + + return pQInfo->rspContext; +} + +int32_t qKillQuery(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + setQueryKilled(pQInfo); + + // Wait for the query executing thread being stopped/ + // Once the query is stopped, the owner of qHandle will be cleared immediately. + while (pQInfo->owner != 0) { + taosMsleep(100); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qQueryCompleted(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + SQuery* pQuery = pQInfo->runtimeEnv.pQuery; + return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); +} + +void qDestroyQueryInfo(qinfo_t qHandle) { + SQInfo* pQInfo = (SQInfo*) qHandle; + if (!isValidQInfo(pQInfo)) { + return; + } + + qDebug("QInfo:%p query completed", pQInfo); + queryCostStatis(pQInfo); // print the query cost summary + freeQInfo(pQInfo); +} + +void* qOpenQueryMgmt(int32_t vgId) { + const int32_t refreshHandleInterval = 30; // every 30 seconds, refresh handle pool + + char cacheName[128] = {0}; + sprintf(cacheName, "qhandle_%d", vgId); + + SQueryMgmt* pQueryMgmt = calloc(1, sizeof(SQueryMgmt)); + if (pQueryMgmt == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return NULL; + } + + pQueryMgmt->qinfoPool = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshHandleInterval, true, freeqinfoFn, cacheName); + pQueryMgmt->closed = false; + pQueryMgmt->vgId = vgId; + + pthread_mutex_init(&pQueryMgmt->lock, NULL); + + qDebug("vgId:%d, open querymgmt success", vgId); + return pQueryMgmt; +} + +void qQueryMgmtNotifyClosed(void* pQMgmt) { + if (pQMgmt == NULL) { + return; + } + + SQueryMgmt* pQueryMgmt = pQMgmt; + qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId); + + pthread_mutex_lock(&pQueryMgmt->lock); + pQueryMgmt->closed = true; + pthread_mutex_unlock(&pQueryMgmt->lock); + + taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); +} + +void qQueryMgmtReOpen(void *pQMgmt) { + if (pQMgmt == NULL) { + return; + } + + SQueryMgmt *pQueryMgmt = pQMgmt; + qDebug("vgId:%d, set querymgmt reopen", pQueryMgmt->vgId); + + pthread_mutex_lock(&pQueryMgmt->lock); + pQueryMgmt->closed = false; + pthread_mutex_unlock(&pQueryMgmt->lock); +} + +void qCleanupQueryMgmt(void* pQMgmt) { + if (pQMgmt == NULL) { + return; + } + + SQueryMgmt* pQueryMgmt = pQMgmt; + int32_t vgId = pQueryMgmt->vgId; + + assert(pQueryMgmt->closed); + + SCacheObj* pqinfoPool = pQueryMgmt->qinfoPool; + pQueryMgmt->qinfoPool = NULL; + + taosCacheCleanup(pqinfoPool); + pthread_mutex_destroy(&pQueryMgmt->lock); + tfree(pQueryMgmt); + + qDebug("vgId:%d, queryMgmt cleanup completed", vgId); +} + +void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { + if (pMgmt == NULL) { + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } + + SQueryMgmt *pQueryMgmt = pMgmt; + if (pQueryMgmt->qinfoPool == NULL) { + qError("QInfo:%p failed to add qhandle into qMgmt, since 
qMgmt is closed", (void *)qInfo); + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } + + pthread_mutex_lock(&pQueryMgmt->lock); + if (pQueryMgmt->closed) { + pthread_mutex_unlock(&pQueryMgmt->lock); + qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } else { + TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; + void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), + (getMaximumIdleDurationSec()*1000)); + pthread_mutex_unlock(&pQueryMgmt->lock); + + return handle; + } +} + +void** qAcquireQInfo(void* pMgmt, uint64_t _key) { + SQueryMgmt *pQueryMgmt = pMgmt; + + if (pQueryMgmt->closed) { + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } + + if (pQueryMgmt->qinfoPool == NULL) { + terrno = TSDB_CODE_QRY_INVALID_QHANDLE; + return NULL; + } + + TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; + void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); + if (handle == NULL || *handle == NULL) { + terrno = TSDB_CODE_QRY_INVALID_QHANDLE; + return NULL; + } else { + return handle; + } +} + +void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { + SQueryMgmt *pQueryMgmt = pMgmt; + if (pQueryMgmt->qinfoPool == NULL) { + return NULL; + } + + taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle); + return 0; +} diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt index c3798b869e..1856223391 100644 --- a/src/query/tests/CMakeLists.txt +++ b/src/query/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt index 02a1e7c2d8..f94b4aeb6d 100644 --- a/src/rpc/CMakeLists.txt +++ b/src/rpc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 111b722f76..3162ab2e4c 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -17,7 +17,7 @@ #include "tsocket.h" #include "tutil.h" #include "taosdef.h" -#include "taoserror.h" +#include "taoserror.h" #include "rpcLog.h" #include "rpcHead.h" #include "rpcTcp.h" @@ -81,7 +81,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread pServerObj = (SServerObj *)calloc(sizeof(SServerObj), 1); if (pServerObj == NULL) { tError("TCP:%s no enough memory", label); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); return NULL; } @@ -95,7 +95,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread pServerObj->pThreadObj = (SThreadObj **)calloc(sizeof(SThreadObj *), numOfThreads); if (pServerObj->pThreadObj == NULL) { tError("TCP:%s no enough memory", label); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); free(pServerObj); return NULL; } @@ -105,18 +105,18 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - // initialize parameters in case it may encounter error later + // initialize parameters in case it may encounter error later for (int i = 0; i < numOfThreads; ++i) { 
pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), 1); if (pThreadObj == NULL) { tError("TCP:%s no enough memory", label); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); for (int j=0; jpThreadObj[j]); free(pServerObj->pThreadObj); free(pServerObj); return NULL; } - + pServerObj->pThreadObj[i] = pThreadObj; pThreadObj->pollFd = -1; taosResetPthread(&pThreadObj->thread); @@ -152,9 +152,9 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread } pServerObj->fd = taosOpenTcpServerSocket(pServerObj->ip, pServerObj->port); - if (pServerObj->fd < 0) code = -1; + if (pServerObj->fd < 0) code = -1; - if (code == 0) { + if (code == 0) { code = pthread_create(&pServerObj->thread, &thattr, taosAcceptTcpConnection, (void *)pServerObj); if (code != 0) { tError("%s failed to create TCP accept thread(%s)", label, strerror(code)); @@ -162,7 +162,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread } if (code != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); taosCleanUpTcpServer(pServerObj); pServerObj = NULL; } else { @@ -175,8 +175,8 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread static void taosStopTcpThread(SThreadObj* pThreadObj) { if (pThreadObj == NULL) { return;} - // save thread into local variable and signal thread to stop - pthread_t thread = pThreadObj->thread; + // save thread into local variable and signal thread to stop + pthread_t thread = pThreadObj->thread; if (!taosCheckPthreadValid(thread)) { return; } @@ -197,6 +197,11 @@ void taosStopTcpServer(void *handle) { if (pServerObj->fd >= 0) { #ifdef WINDOWS closesocket(pServerObj->fd); +#elif defined(__APPLE__) + if (pServerObj->fd!=-1) { + close(pServerObj->fd); + pServerObj->fd = -1; + } #else shutdown(pServerObj->fd, SHUT_RD); #endif @@ -265,7 +270,7 @@ static void *taosAcceptTcpConnection(void *arg) { taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); continue; } - + // pick up the thread to handle this connection pThreadObj = pServerObj->pThreadObj[threadId]; @@ -274,13 +279,13 @@ static void *taosAcceptTcpConnection(void *arg) { if (pFdObj) { pFdObj->ip = caddr.sin_addr.s_addr; pFdObj->port = htons(caddr.sin_port); - tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label, + tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label, taosInetNtoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); } else { taosCloseSocket(connFd); tError("%s failed to malloc FdObj(%s) for connection from:%s:%hu", pServerObj->label, strerror(errno), taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); - } + } // pick up next thread for next connection threadId++; @@ -295,17 +300,17 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int numOfThread SClientObj *pClientObj = (SClientObj *)calloc(1, sizeof(SClientObj)); if (pClientObj == NULL) { tError("TCP:%s no enough memory", label); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); return NULL; - } + } tstrncpy(pClientObj->label, label, sizeof(pClientObj->label)); pClientObj->numOfThreads = numOfThreads; - pClientObj->pThreadObj = (SThreadObj **)calloc(numOfThreads, sizeof(SThreadObj*)); + pClientObj->pThreadObj = (SThreadObj **)calloc(numOfThreads, sizeof(SThreadObj*)); if (pClientObj->pThreadObj == NULL) { tError("TCP:%s no enough memory", label); tfree(pClientObj); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = 
TAOS_SYSTEM_ERROR(errno); } int code = 0; @@ -317,7 +322,7 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int numOfThread SThreadObj *pThreadObj = (SThreadObj *)calloc(1, sizeof(SThreadObj)); if (pThreadObj == NULL) { tError("TCP:%s no enough memory", label); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); for (int j=0; jpThreadObj[j]); free(pClientObj); pthread_attr_destroy(&thattr); @@ -356,37 +361,38 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int numOfThread pThreadObj->threadId = i; } if (code != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); taosCleanUpTcpClient(pClientObj); pClientObj = NULL; - } - return pClientObj; + } + return pClientObj; } void taosStopTcpClient(void *chandle) { - SThreadObj *pThreadObj = chandle; - if (pThreadObj == NULL) return; + SClientObj *pClientObj = chandle; - tDebug ("%s TCP client is stopped", pThreadObj->label); + if (pClientObj == NULL) return; + + tDebug ("%s TCP client is stopped", pClientObj->label); } void taosCleanUpTcpClient(void *chandle) { SClientObj *pClientObj = chandle; if (pClientObj == NULL) return; - for (int i = 0; i < pClientObj->numOfThreads; ++i) { + for (int i = 0; i < pClientObj->numOfThreads; ++i) { SThreadObj *pThreadObj= pClientObj->pThreadObj[i]; - taosStopTcpThread(pThreadObj); + taosStopTcpThread(pThreadObj); } - + tDebug("%s TCP client is cleaned up", pClientObj->label); tfree(pClientObj->pThreadObj); - tfree(pClientObj); + tfree(pClientObj); } void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uint16_t port) { SClientObj * pClientObj = shandle; int32_t index = atomic_load_32(&pClientObj->index) % pClientObj->numOfThreads; - atomic_store_32(&pClientObj->index, index + 1); + atomic_store_32(&pClientObj->index, index + 1); SThreadObj *pThreadObj = pClientObj->pThreadObj[index]; SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip); @@ -401,12 +407,12 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin } SFdObj *pFdObj = taosMallocFdObj(pThreadObj, fd); - + if (pFdObj) { pFdObj->thandle = thandle; pFdObj->port = port; pFdObj->ip = ip; - tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d", + tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d", pThreadObj->label, thandle, ip, port, localPort, pFdObj, pThreadObj->numOfFds); } else { tError("%s failed to malloc client FdObj(%s)", pThreadObj->label, strerror(errno)); @@ -421,7 +427,7 @@ void taosCloseTcpConnection(void *chandle) { if (pFdObj == NULL || pFdObj->signature != pFdObj) return; SThreadObj *pThreadObj = pFdObj->pThreadObj; - tDebug("%s %p TCP connection will be closed, FD:%p", pThreadObj->label, pFdObj->thandle, pFdObj); + tDebug("%s %p TCP connection will be closed, FD:%p", pThreadObj->label, pFdObj->thandle, pFdObj); // pFdObj->thandle = NULL; pFdObj->closedByApp = 1; @@ -434,7 +440,7 @@ int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chand SThreadObj *pThreadObj = pFdObj->pThreadObj; int ret = taosWriteMsg(pFdObj->fd, data, len); - tTrace("%s %p TCP data is sent, FD:%p fd:%d bytes:%d", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, ret); + tTrace("%s %p TCP data is sent, FD:%p fd:%d bytes:%d", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, ret); return ret; } @@ -457,7 +463,7 @@ static void taosReportBrokenLink(SFdObj *pFdObj) { recvInfo.chandle = NULL; recvInfo.connType = RPC_CONN_TCP; 
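/*
 * Illustrative sketch: the round-robin distribution that
 * taosOpenTcpClientConnection() above uses to spread new connections across
 * the client worker threads. This analogue uses C11 atomics and hypothetical
 * names (demo_picker_t, demo_pick_thread); the real code keeps the counter in
 * SClientObj.index and uses atomic_load_32/atomic_store_32.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  atomic_uint index;        /* ever-increasing pick counter        */
  unsigned    numOfThreads; /* number of worker threads available  */
} demo_picker_t;

static unsigned demo_pick_thread(demo_picker_t *p) {
  /* fetch_add hands every caller a unique ticket; modulo maps it to a worker */
  unsigned ticket = atomic_fetch_add(&p->index, 1u);
  return ticket % p->numOfThreads;
}

int main(void) {
  demo_picker_t picker = { .numOfThreads = 4 };
  atomic_init(&picker.index, 0u);

  for (int i = 0; i < 8; i++) {
    printf("connection %d -> worker thread %u\n", i, demo_pick_thread(&picker));
  }
  return 0;
}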
(*(pThreadObj->processData))(&recvInfo); - } + } taosFreeFdObj(pFdObj); } @@ -472,7 +478,7 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) { headLen = taosReadMsg(pFdObj->fd, &rpcHead, sizeof(SRpcHead)); if (headLen != sizeof(SRpcHead)) { tDebug("%s %p read error, FD:%p headLen:%d", pThreadObj->label, pFdObj->thandle, pFdObj, headLen); - return -1; + return -1; } msgLen = (int32_t)htonl((uint32_t)rpcHead.msgLen); @@ -490,14 +496,14 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) { retLen = taosReadMsg(pFdObj->fd, msg + headLen, leftLen); if (leftLen != retLen) { - tError("%s %p read error, leftLen:%d retLen:%d FD:%p", + tError("%s %p read error, leftLen:%d retLen:%d FD:%p", pThreadObj->label, pFdObj->thandle, leftLen, retLen, pFdObj); free(buffer); return -1; } memcpy(msg, &rpcHead, sizeof(SRpcHead)); - + pInfo->msg = msg; pInfo->msgLen = msgLen; pInfo->ip = pFdObj->ip; @@ -508,7 +514,7 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) { pInfo->connType = RPC_CONN_TCP; if (pFdObj->closedByApp) { - free(buffer); + free(buffer); return -1; } @@ -556,7 +562,7 @@ static void *taosProcessTcpData(void *param) { } if (taosReadTcpData(pFdObj, &recvInfo) < 0) { - shutdown(pFdObj->fd, SHUT_WR); + shutdown(pFdObj->fd, SHUT_WR); continue; } @@ -564,7 +570,7 @@ static void *taosProcessTcpData(void *param) { if (pFdObj->thandle == NULL) taosFreeFdObj(pFdObj); } - if (pThreadObj->stop) break; + if (pThreadObj->stop) break; } if (pThreadObj->pollFd >=0) { @@ -602,7 +608,7 @@ static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd) { event.data.ptr = pFdObj; if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { tfree(pFdObj); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TAOS_SYSTEM_ERROR(errno); return NULL; } @@ -636,7 +642,7 @@ static void taosFreeFdObj(SFdObj *pFdObj) { pThreadObj->numOfFds--; if (pThreadObj->numOfFds < 0) - tError("%s %p TCP thread:%d, number of FDs is negative!!!", + tError("%s %p TCP thread:%d, number of FDs is negative!!!", pThreadObj->label, pFdObj->thandle, pThreadObj->threadId); if (pFdObj->prev) { @@ -651,7 +657,7 @@ static void taosFreeFdObj(SFdObj *pFdObj) { pthread_mutex_unlock(&pThreadObj->mutex); - tDebug("%s %p TCP connection is closed, FD:%p fd:%d numOfFds:%d", + tDebug("%s %p TCP connection is closed, FD:%p fd:%d numOfFds:%d", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, pThreadObj->numOfFds); tfree(pFdObj); diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index 2599bca075..7a46dbe5c3 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -15,7 +15,6 @@ #include "os.h" #include "tsocket.h" -#include "tsystem.h" #include "ttimer.h" #include "tutil.h" #include "taosdef.h" diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt index e923105860..c10cea6c9d 100644 --- a/src/rpc/test/CMakeLists.txt +++ b/src/rpc/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) diff --git a/src/sync/CMakeLists.txt b/src/sync/CMakeLists.txt index cc86bf704c..82d0bbf520 100644 --- a/src/sync/CMakeLists.txt +++ b/src/sync/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index e43140d4e6..91613ae351 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -108,14 +108,17 @@ typedef 
struct SSyncNode { SSyncFwds * pSyncFwds; // saved forward info if quorum >1 void * pFwdTimer; void * pRoleTimer; - FGetFileInfo getFileInfo; - FGetWalInfo getWalInfo; - FWriteToCache writeToCache; + void * pTsdb; + FGetWalInfo getWalInfoFp; + FWriteToCache writeToCacheFp; FConfirmForward confirmForward; - FNotifyRole notifyRole; - FNotifyFlowCtrl notifyFlowCtrl; - FNotifyFileSynced notifyFileSynced; - FGetVersion getVersion; + FNotifyRole notifyRoleFp; + FNotifyFlowCtrl notifyFlowCtrlFp; + FStartSyncFile startSyncFileFp; + FStopSyncFile stopSyncFileFp; + FGetVersion getVersionFp; + FSendFile sendFileFp; + FRecvFile recvFileFp; pthread_mutex_t mutex; } SSyncNode; diff --git a/src/sync/inc/syncMsg.h b/src/sync/inc/syncMsg.h index f589379aa2..85ac9c78af 100644 --- a/src/sync/inc/syncMsg.h +++ b/src/sync/inc/syncMsg.h @@ -99,16 +99,12 @@ typedef struct { typedef struct { SSyncHead head; - char name[TSDB_FILENAME_LEN]; - uint32_t magic; - uint32_t index; uint64_t fversion; - int64_t size; -} SFileInfo; +} SFileVersion; typedef struct { SSyncHead head; - int8_t sync; + int8_t ack; } SFileAck; typedef struct { @@ -136,7 +132,7 @@ void syncBuildPeersStatus(SPeersStatus *pMsg, int32_t vgId); void syncBuildSyncTestMsg(SSyncMsg *pMsg, int32_t vgId); void syncBuildFileAck(SFileAck *pMsg, int32_t vgId); -void syncBuildFileInfo(SFileInfo *pMsg, int32_t vgId); +void syncBuildFileVersion(SFileVersion *pMsg, int32_t vgId); #ifdef __cplusplus } diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 8dac89544b..956ccdc073 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -174,19 +174,22 @@ int64_t syncStart(const SSyncInfo *pInfo) { tstrncpy(pNode->path, pInfo->path, sizeof(pNode->path)); pthread_mutex_init(&pNode->mutex, NULL); - pNode->getFileInfo = pInfo->getFileInfo; - pNode->getWalInfo = pInfo->getWalInfo; - pNode->writeToCache = pInfo->writeToCache; - pNode->notifyRole = pInfo->notifyRole; + pNode->getWalInfoFp = pInfo->getWalInfoFp; + pNode->writeToCacheFp = pInfo->writeToCacheFp; + pNode->notifyRoleFp = pInfo->notifyRoleFp; pNode->confirmForward = pInfo->confirmForward; - pNode->notifyFlowCtrl = pInfo->notifyFlowCtrl; - pNode->notifyFileSynced = pInfo->notifyFileSynced; - pNode->getVersion = pInfo->getVersion; + pNode->notifyFlowCtrlFp = pInfo->notifyFlowCtrlFp; + pNode->startSyncFileFp = pInfo->startSyncFileFp; + pNode->stopSyncFileFp = pInfo->stopSyncFileFp; + pNode->getVersionFp = pInfo->getVersionFp; + pNode->sendFileFp = pInfo->sendFileFp; + pNode->recvFileFp = pInfo->recvFileFp; pNode->selfIndex = -1; pNode->vgId = pInfo->vgId; pNode->replica = pCfg->replica; pNode->quorum = pCfg->quorum; + pNode->pTsdb = pInfo->pTsdb; if (pNode->quorum > pNode->replica) pNode->quorum = pNode->replica; pNode->refCount = 1; @@ -248,8 +251,8 @@ int64_t syncStart(const SSyncInfo *pInfo) { syncAddArbitrator(pNode); taosHashPut(tsVgIdHash, &pNode->vgId, sizeof(int32_t), &pNode, sizeof(SSyncNode *)); - if (pNode->notifyRole) { - (*pNode->notifyRole)(pNode->vgId, nodeRole); + if (pNode->notifyRoleFp) { + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb @@ -357,7 +360,7 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) { if (pNewCfg->replica <= 1) { sInfo("vgId:%d, no peers are configured, work as master!", pNode->vgId); nodeRole = TAOS_SYNC_ROLE_MASTER; - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } 
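/*
 * Wiring sketch: how the owner of a vnode is expected to fill the renamed and
 * newly added callback fields before calling syncStart(). The field names are
 * the ones assigned in syncStart() above; the stub signatures and the fd/role
 * types are inferred from how the callbacks are invoked in this patch, so
 * treat them as assumptions rather than the definitive prototypes.
 */
#include <stdint.h>
/* #include "tsync.h"   assumed: the public header that declares SSyncInfo/syncStart() */

static void    demoNotifyRole(int32_t vgId, int8_t role)                    { /* adjust local role */ }
static int32_t demoGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { *fver = 0; *wver = 0; return 0; }
static void    demoStartSyncFile(int32_t vgId)                              { /* freeze files for transfer */ }
static void    demoStopSyncFile(int32_t vgId, uint64_t fversion)            { /* adopt the synced fversion */ }
static int32_t demoSendFile(void *pTsdb, int32_t syncFd)                    { return 0; /* stream files out */ }
static int32_t demoRecvFile(void *pTsdb, int32_t syncFd)                    { return 0; /* stream files in  */ }

int64_t demoStartSync(void *pTsdb) {
  SSyncInfo syncInfo = {0};
  syncInfo.vgId            = 1;
  syncInfo.pTsdb           = pTsdb;              /* new: handed back to sendFileFp/recvFileFp */
  syncInfo.notifyRoleFp    = demoNotifyRole;
  syncInfo.getVersionFp    = demoGetVersion;
  syncInfo.startSyncFileFp = demoStartSyncFile;  /* replaces the old getFileInfo/notifyFileSynced pair */
  syncInfo.stopSyncFileFp  = demoStopSyncFile;
  syncInfo.sendFileFp      = demoSendFile;
  syncInfo.recvFileFp      = demoRecvFile;
  /* getWalInfoFp, writeToCacheFp, confirmForward, notifyFlowCtrlFp and the
     replica/quorum configuration are wired exactly as in syncServer.c */
  return syncStart(&syncInfo);
}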
syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb @@ -417,7 +420,7 @@ void syncRecover(int64_t rid) { // if take this node to unsync state, the whole system may not work nodeRole = TAOS_SYNC_ROLE_UNSYNCED; - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); nodeVersion = 0; pthread_mutex_lock(&pNode->mutex); @@ -625,8 +628,8 @@ static void syncResetFlowCtrl(SSyncNode *pNode) { pNode->peerInfo[index]->numOfRetrieves = 0; } - if (pNode->notifyFlowCtrl) { - (*pNode->notifyFlowCtrl)(pNode->vgId, 0); + if (pNode->notifyFlowCtrlFp) { + (*pNode->notifyFlowCtrlFp)(pNode->vgId, 0); } } @@ -694,7 +697,7 @@ static void syncChooseMaster(SSyncNode *pNode) { taosMsleep(SYNC_WAIT_AFTER_CHOOSE_MASTER); syncResetFlowCtrl(pNode); - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } else { pPeer = pNode->peerInfo[index]; sInfo("%s, it shall work as master", pPeer->id); @@ -730,7 +733,7 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { nodeRole = TAOS_SYNC_ROLE_UNSYNCED; sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica); } - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } } else { for (int32_t index = 0; index < pNode->replica; ++index) { @@ -738,11 +741,14 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { if (pTemp->role != TAOS_SYNC_ROLE_MASTER) continue; if (masterIndex < 0) { masterIndex = index; + sDebug("vgId:%d, peer:%s is master, index:%d", pNode->vgId, pTemp->id, index); } else { // multiple masters, it shall not happen if (masterIndex == pNode->selfIndex) { sError("%s, peer is master, work as slave instead", pTemp->id); nodeRole = TAOS_SYNC_ROLE_SLAVE; - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); + } else { + sError("vgId:%d, peer:%s is master too, masterIndex:%d index:%d", pNode->vgId, pTemp->id, masterIndex, index); } } } @@ -759,7 +765,7 @@ static int32_t syncValidateMaster(SSyncPeer *pPeer) { if (nodeRole == TAOS_SYNC_ROLE_MASTER && nodeVersion < pPeer->version) { sDebug("%s, peer has higher sver:%" PRIu64 ", restart all peer connections", pPeer->id, pPeer->version); nodeRole = TAOS_SYNC_ROLE_UNSYNCED; - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); code = -1; for (int32_t index = 0; index < pNode->replica; ++index) { @@ -796,7 +802,7 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t new } else { sInfo("%s, is master, work as slave, self sver:%" PRIu64, pMaster->id, nodeVersion); nodeRole = TAOS_SYNC_ROLE_SLAVE; - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); } } else if (nodeRole == TAOS_SYNC_ROLE_SLAVE && pMaster == pPeer) { sDebug("%s, is master, continue work as slave, self sver:%" PRIu64, pMaster->id, nodeVersion); @@ -830,7 +836,7 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t new } if (oldPeerRole != newPeerRole || nodeRole != oldSelfRole) { - sDebug("vgId:%d, roles changed, broadcast status", pNode->vgId); + sDebug("vgId:%d, roles changed, broadcast status, replica:%d", pNode->vgId, pNode->replica); syncBroadcastStatus(pNode); } @@ -857,8 +863,12 @@ static void syncRestartPeer(SSyncPeer *pPeer) { void syncRestartConnection(SSyncPeer *pPeer) { if (pPeer->ip == 0) return; + if (syncAcquirePeer(pPeer->rid) == NULL) return; + syncRestartPeer(pPeer); 
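/*
 * Illustrative analogue of the acquire/release guard added around
 * syncRestartPeer()/syncCheckRole() in syncRestartConnection(): take a
 * reference on the peer before touching it and drop it afterwards, so a
 * concurrent teardown cannot free the object mid-use. All names here
 * (demo_peer_t, demo_acquire, demo_release) are hypothetical; TDengine does
 * this via syncAcquirePeer()/syncReleasePeer() keyed by pPeer->rid.
 */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
  atomic_int refCount;   /* stays > 0 while the peer may still be used */
  /* ... peer state ... */
} demo_peer_t;

static demo_peer_t *demo_acquire(demo_peer_t *p) {
  int cur = atomic_load(&p->refCount);
  while (cur > 0) {                                  /* only acquire live peers */
    if (atomic_compare_exchange_weak(&p->refCount, &cur, cur + 1)) return p;
  }
  return NULL;                                       /* peer is being torn down */
}

static void demo_release(demo_peer_t *p) {
  if (atomic_fetch_sub(&p->refCount, 1) == 1) {
    free(p);                                         /* last reference frees it */
  }
}

static void demo_restart_connection(demo_peer_t *p) {
  if (demo_acquire(p) == NULL) return;               /* mirrors: if (syncAcquirePeer(...) == NULL) return; */
  /* ... restart the peer connection and re-check its role ... */
  demo_release(p);
}

int main(void) {
  demo_peer_t *peer = calloc(1, sizeof(*peer));
  if (peer == NULL) return 1;
  atomic_init(&peer->refCount, 1);                   /* creator holds the first reference */
  demo_restart_connection(peer);
  demo_release(peer);                                /* drop the creator reference; frees the peer */
  return 0;
}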
syncCheckRole(pPeer, NULL, TAOS_SYNC_ROLE_OFFLINE); + + syncReleasePeer(pPeer); } static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) { @@ -989,7 +999,7 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { // nodeVersion = pHead->version; - (*pNode->writeToCache)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); + (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); } else { if (nodeSStatus != TAOS_SYNC_STATUS_INIT) { syncSaveIntoBuffer(pPeer, pHead); diff --git a/src/sync/src/syncMsg.c b/src/sync/src/syncMsg.c index 9718a3414e..3348f1ec33 100644 --- a/src/sync/src/syncMsg.c +++ b/src/sync/src/syncMsg.c @@ -102,9 +102,9 @@ void syncBuildFileAck(SFileAck *pMsg, int32_t vgId) { syncBuildHead(&pMsg->head); } -void syncBuildFileInfo(SFileInfo *pMsg, int32_t vgId) { +void syncBuildFileVersion(SFileVersion *pMsg, int32_t vgId) { pMsg->head.type = TAOS_SMSG_SYNC_FILE; pMsg->head.vgId = vgId; - pMsg->head.len = sizeof(SFileInfo) - sizeof(SSyncHead); + pMsg->head.len = sizeof(SFileVersion) - sizeof(SSyncHead); syncBuildHead(&pMsg->head); } \ No newline at end of file diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index 99f4ce1c17..2545f4c5ea 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -25,139 +25,44 @@ #include "tsync.h" #include "syncInt.h" -static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex) { - char name[TSDB_FILENAME_LEN * 2] = {0}; - char fname[TSDB_FILENAME_LEN * 3] = {0}; - uint32_t magic; - uint64_t fversion; - int64_t size; - uint32_t index = sindex; +static int32_t syncRecvFileVersion(SSyncPeer *pPeer, uint64_t *fversion) { SSyncNode *pNode = pPeer->pSyncNode; - if (sindex < 0 || eindex < sindex) return; - - sDebug("%s, extra files will be removed between sindex:%d and eindex:%d", pPeer->id, sindex, eindex); - - while (1) { - name[0] = 0; - magic = (*pNode->getFileInfo)(pNode->vgId, name, &index, eindex, &size, &fversion); - if (magic == 0) break; - - snprintf(fname, sizeof(fname), "%s/%s", pNode->path, name); - (void)remove(fname); - sInfo("%s, %s is removed for its extra", pPeer->id, fname); - - index++; - if (index > eindex) break; + SFileVersion fileVersion; + memset(&fileVersion, 0, sizeof(SFileVersion)); + int32_t ret = taosReadMsg(pPeer->syncFd, &fileVersion, sizeof(SFileVersion)); + if (ret != sizeof(SFileVersion)) { + sError("%s, failed to read fver since %s", pPeer->id, strerror(errno)); + return -1; } + + SFileAck fileVersionAck; + memset(&fileVersionAck, 0, sizeof(SFileAck)); + syncBuildFileAck(&fileVersionAck, pNode->vgId); + ret = taosWriteMsg(pPeer->syncFd, &fileVersionAck, sizeof(SFileAck)); + if (ret != sizeof(SFileAck)) { + sError("%s, failed to write fver ack since %s", pPeer->id, strerror(errno)); + return -1; + } + + *fversion = htobe64(fileVersion.fversion); + return 0; } static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { SSyncNode *pNode = pPeer->pSyncNode; - SFileInfo minfo; memset(&minfo, 0, sizeof(SFileInfo)); /* = {0}; */ - SFileInfo sinfo; memset(&sinfo, 0, sizeof(SFileInfo)); /* = {0}; */ - SFileAck fileAck; memset(&fileAck, 0, sizeof(SFileAck)); - int32_t code = -1; - char name[TSDB_FILENAME_LEN * 2] = {0}; - uint32_t pindex = 0; // index in last restore - bool fileChanged = false; - *fversion = 0; - sinfo.index = -1; - while (1) { - // read file info - minfo.index = -1; - int32_t ret = taosReadMsg(pPeer->syncFd, &minfo, sizeof(SFileInfo)); - if (ret != sizeof(SFileInfo) || 
minfo.index == -1) { - sError("%s, failed to read fileinfo while restore file since %s", pPeer->id, strerror(errno)); - break; - } - - assert(ret == sizeof(SFileInfo)); - ret = syncCheckHead((SSyncHead *)(&minfo)); - if (ret != 0) { - sError("%s, failed to check fileinfo while restore file since %s", pPeer->id, strerror(ret)); - break; - } - - // if no more file from master, break; - if (minfo.name[0] == 0 || minfo.magic == 0) { - sDebug("%s, no more files to restore", pPeer->id); - - // remove extra files after the current index - if (sinfo.index != -1) syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX); - code = 0; - break; - } - - sDebug("%s, file:%s info is received from master, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%u", pPeer->id, - minfo.name, minfo.index, minfo.size, minfo.fversion, minfo.magic); - - // remove extra files on slave between the current and last index - syncRemoveExtraFile(pPeer, pindex + 1, minfo.index - 1); - pindex = minfo.index; - - // check the file info - sinfo = minfo; - sinfo.magic = (*pNode->getFileInfo)(pNode->vgId, sinfo.name, &sinfo.index, TAOS_SYNC_MAX_INDEX, &sinfo.size, &sinfo.fversion); - sDebug("%s, local file:%s info, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%u", pPeer->id, sinfo.name, - sinfo.index, sinfo.size, sinfo.fversion, sinfo.magic); - - // if file not there or magic is not the same, file shall be synced - memset(&fileAck, 0, sizeof(SFileAck)); - syncBuildFileAck(&fileAck, pNode->vgId); - fileAck.sync = (sinfo.magic != minfo.magic || sinfo.size != minfo.size || sinfo.name[0] == 0) ? 1 : 0; - - // send file ack - ret = taosWriteMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck)); - if (ret != sizeof(SFileAck)) { - sError("%s, failed to write file:%s ack while restore file since %s", pPeer->id, minfo.name, strerror(errno)); - break; - } - - // if sync is not required, continue - if (fileAck.sync == 0) { - sDebug("%s, %s is the same", pPeer->id, minfo.name); - continue; - } else { - sDebug("%s, %s will be received, size:%" PRId64, pPeer->id, minfo.name, minfo.size); - } - - // if sync is required, open file, receive from master, and write to file - // get the full path to file - minfo.name[sizeof(minfo.name) - 1] = 0; - snprintf(name, sizeof(name), "%s/%s", pNode->path, minfo.name); - - int32_t dfd = open(name, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRWXU | S_IRWXG | S_IRWXO); - if (dfd < 0) { - sError("%s, failed to open file:%s while restore file since %s", pPeer->id, minfo.name, strerror(errno)); - break; - } - - ret = taosCopyFds(pPeer->syncFd, dfd, minfo.size); - fsync(dfd); - close(dfd); - if (ret < 0) { - sError("%s, failed to copy file:%s while restore file since %s", pPeer->id, minfo.name, strerror(errno)); - break; - } - - fileChanged = true; - sDebug("%s, %s is received, size:%" PRId64, pPeer->id, minfo.name, minfo.size); + if (pNode->recvFileFp && (*pNode->recvFileFp)(pNode->pTsdb, pPeer->syncFd) != 0) { + sError("%s, failed to restore file", pPeer->id); + return -1; } - if (code == 0 && fileChanged) { - // data file is changed, code shall be set to 1 - *fversion = minfo.fversion; - code = 1; - sDebug("%s, file changed after restore file, fver:%" PRIu64, pPeer->id, *fversion); + if (syncRecvFileVersion(pPeer, fversion) < 0) { + return -1; } - if (code < 0) { - sError("%s, failed to restore %s since %s", pPeer->id, name, strerror(errno)); - } - - return code; + sInfo("%s, all files are restored, fver:%" PRIu64, pPeer->id, *fversion); + return 0; } static int32_t syncRestoreWal(SSyncPeer *pPeer, uint64_t 
*wver) { @@ -195,7 +100,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer, uint64_t *wver) { } lastVer = pHead->version; - ret = (*pNode->writeToCache)(pNode->vgId, pHead, TAOS_QTYPE_WAL, NULL); + ret = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_WAL, NULL); if (ret != 0) { sError("%s, failed to restore record since %s, hver:%" PRIu64, pPeer->id, tstrerror(ret), pHead->version); break; @@ -215,7 +120,7 @@ static char *syncProcessOneBufferedFwd(SSyncPeer *pPeer, char *offset) { SSyncNode *pNode = pPeer->pSyncNode; SWalHead * pHead = (SWalHead *)offset; - (*pNode->writeToCache)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); + (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); offset += pHead->len + sizeof(SWalHead); return offset; @@ -226,6 +131,11 @@ static int32_t syncProcessBufferedFwd(SSyncPeer *pPeer) { SRecvBuffer *pRecv = pNode->pRecv; int32_t forwards = 0; + if (pRecv == NULL) { + sError("%s, recv buffer is null, restart connect", pPeer->id); + return -1; + } + sDebug("%s, number of buffered forwards:%d", pPeer->id, pRecv->forwards); char *offset = pRecv->buffer; @@ -274,6 +184,7 @@ int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead) { static void syncCloseRecvBuffer(SSyncNode *pNode) { if (pNode->pRecv) { + sDebug("vgId:%d, recv buffer:%p is freed", pNode->vgId, pNode->pRecv); tfree(pNode->pRecv->buffer); } @@ -298,6 +209,7 @@ static int32_t syncOpenRecvBuffer(SSyncNode *pNode) { pNode->pRecv = pRecv; + sDebug("vgId:%d, recv buffer:%p is created", pNode->vgId, pNode->pRecv); return 0; } @@ -315,20 +227,16 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { sDebug("%s, send sync rsp to peer, tranId:%u", pPeer->id, rsp.tranId); sInfo("%s, start to restore file, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); + (*pNode->startSyncFileFp)(pNode->vgId); + int32_t code = syncRestoreFile(pPeer, &fversion); if (code < 0) { - sError("%s, failed to restore file", pPeer->id); + (*pNode->stopSyncFileFp)(pNode->vgId, fversion); + sError("%s, failed to restore files", pPeer->id); return -1; } - // if code > 0, data file is changed, notify app, and pass the version - if (code > 0 && pNode->notifyFileSynced) { - if ((*pNode->notifyFileSynced)(pNode->vgId, fversion) < 0) { - sError("%s, app not in ready state", pPeer->id); - return -1; - } - } - + (*pNode->stopSyncFileFp)(pNode->vgId, fversion); nodeVersion = fversion; sInfo("%s, start to restore wal, fver:%" PRIu64, pPeer->id, nodeVersion); @@ -368,7 +276,7 @@ void *syncRestoreData(void *param) { atomic_add_fetch_32(&tsSyncNum, 1); sInfo("%s, start to restore data, sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); - (*pNode->notifyRole)(pNode->vgId, TAOS_SYNC_ROLE_SYNCING); + (*pNode->notifyRoleFp)(pNode->vgId, TAOS_SYNC_ROLE_SYNCING); if (syncOpenRecvBuffer(pNode) < 0) { sError("%s, failed to allocate recv buffer, restart connection", pPeer->id); @@ -385,7 +293,7 @@ void *syncRestoreData(void *param) { } } - (*pNode->notifyRole)(pNode->vgId, nodeRole); + (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); nodeSStatus = TAOS_SYNC_STATUS_INIT; sInfo("%s, restore data over, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index e748898e6e..ec4bbb33a5 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -27,7 +27,7 @@ static int32_t syncGetWalVersion(SSyncNode *pNode, SSyncPeer *pPeer) { uint64_t fver, wver; - int32_t code = (*pNode->getVersion)(pNode->vgId, &fver, &wver); + int32_t code = 
(*pNode->getVersionFp)(pNode->vgId, &fver, &wver); if (code != 0) { sDebug("%s, vnode is commiting while retrieve, last wver:%" PRIu64, pPeer->id, pPeer->lastWalVer); return -1; @@ -39,7 +39,7 @@ static int32_t syncGetWalVersion(SSyncNode *pNode, SSyncPeer *pPeer) { static bool syncIsWalModified(SSyncNode *pNode, SSyncPeer *pPeer) { uint64_t fver, wver; - int32_t code = (*pNode->getVersion)(pNode->vgId, &fver, &wver); + int32_t code = (*pNode->getVersionFp)(pNode->vgId, &fver, &wver); if (code != 0) { sDebug("%s, vnode is commiting while retrieve, last wver:%" PRIu64, pPeer->id, pPeer->lastWalVer); return true; @@ -55,7 +55,7 @@ static bool syncIsWalModified(SSyncNode *pNode, SSyncPeer *pPeer) { static int32_t syncGetFileVersion(SSyncNode *pNode, SSyncPeer *pPeer) { uint64_t fver, wver; - int32_t code = (*pNode->getVersion)(pNode->vgId, &fver, &wver); + int32_t code = (*pNode->getVersionFp)(pNode->vgId, &fver, &wver); if (code != 0) { sDebug("%s, vnode is commiting while get fver for retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer); return -1; @@ -67,7 +67,7 @@ static int32_t syncGetFileVersion(SSyncNode *pNode, SSyncPeer *pPeer) { static bool syncAreFilesModified(SSyncNode *pNode, SSyncPeer *pPeer) { uint64_t fver, wver; - int32_t code = (*pNode->getVersion)(pNode->vgId, &fver, &wver); + int32_t code = (*pNode->getVersionFp)(pNode->vgId, &fver, &wver); if (code != 0) { sDebug("%s, vnode is commiting while retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer); pPeer->fileChanged = 1; @@ -84,104 +84,54 @@ static bool syncAreFilesModified(SSyncNode *pNode, SSyncPeer *pPeer) { return false; } +static int32_t syncSendFileVersion(SSyncPeer *pPeer) { + SSyncNode *pNode = pPeer->pSyncNode; + + SFileVersion fileVersion; + memset(&fileVersion, 0, sizeof(SFileVersion)); + syncBuildFileVersion(&fileVersion, pNode->vgId); + + uint64_t fver = pPeer->lastFileVer; + fileVersion.fversion = htobe64(fver); + int32_t ret = taosWriteMsg(pPeer->syncFd, &fileVersion, sizeof(SFileVersion)); + if (ret != sizeof(SFileVersion)) { + sError("%s, failed to write fver:%" PRIu64 " since %s", pPeer->id, fver, strerror(errno)); + return -1; + } + + SFileAck fileAck; + memset(&fileAck, 0, sizeof(SFileAck)); + ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck)); + if (ret != sizeof(SFileAck)) { + sError("%s, failed to read fver ack since %s", pPeer->id, strerror(errno)); + return -1; + } + + // set the peer sync version + pPeer->sversion = fver; + + return 0; +} + static int32_t syncRetrieveFile(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - SFileInfo fileInfo; memset(&fileInfo, 0, sizeof(SFileInfo)); - SFileAck fileAck; memset(&fileAck, 0, sizeof(SFileAck)); - int32_t code = -1; - char name[TSDB_FILENAME_LEN * 2] = {0}; if (syncGetFileVersion(pNode, pPeer) < 0) { pPeer->fileChanged = 1; return -1; } - while (1) { - // retrieve file info - fileInfo.name[0] = 0; - fileInfo.size = 0; - fileInfo.magic = (*pNode->getFileInfo)(pNode->vgId, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX, - &fileInfo.size, &fileInfo.fversion); - syncBuildFileInfo(&fileInfo, pNode->vgId); - sDebug("%s, file:%s info is sent, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%u", pPeer->id, fileInfo.name, - fileInfo.index, fileInfo.size, fileInfo.fversion, fileInfo.magic); - - // send the file info - int32_t ret = taosWriteMsg(pPeer->syncFd, &(fileInfo), sizeof(SFileInfo)); - if (ret != sizeof(SFileInfo)) { - code = -1; - sError("%s, failed to write file:%s info while retrieve file since %s", pPeer->id, 
fileInfo.name, strerror(errno)); - break; - } - - // if no file anymore, break - if (fileInfo.magic == 0 || fileInfo.name[0] == 0) { - code = 0; - sDebug("%s, no more files to sync", pPeer->id); - break; - } - - // wait for the ack from peer - ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck)); - if (ret != sizeof(SFileAck)) { - code = -1; - sError("%s, failed to read file:%s ack while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno)); - break; - } - - ret = syncCheckHead((SSyncHead*)(&fileAck)); - if (ret != 0) { - code = -1; - sError("%s, failed to check file:%s ack while retrieve file since %s", pPeer->id, fileInfo.name, strerror(ret)); - break; - } - - // set the peer sync version - pPeer->sversion = fileInfo.fversion; - - // if sync is not required, continue - if (fileAck.sync == 0) { - fileInfo.index++; - sDebug("%s, %s is the same, fver:%" PRIu64, pPeer->id, fileInfo.name, fileInfo.fversion); - continue; - } else { - sDebug("%s, %s will be sent, fver:%" PRIu64, pPeer->id, fileInfo.name, fileInfo.fversion); - } - - // get the full path to file - snprintf(name, sizeof(name), "%s/%s", pNode->path, fileInfo.name); - - // send the file to peer - int32_t sfd = open(name, O_RDONLY | O_BINARY); - if (sfd < 0) { - code = -1; - sError("%s, failed to open file:%s while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno)); - break; - } - - ret = (int32_t)taosSendFile(pPeer->syncFd, sfd, NULL, fileInfo.size); - close(sfd); - if (ret < 0) { - code = -1; - sError("%s, failed to send file:%s while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno)); - break; - } - - sDebug("%s, file:%s is sent, size:%" PRId64, pPeer->id, fileInfo.name, fileInfo.size); - fileInfo.index++; - - // check if processed files are modified - if (syncAreFilesModified(pNode, pPeer)) { - code = -1; - break; - } + if (pNode->sendFileFp && (*pNode->sendFileFp)(pNode->pTsdb, pPeer->syncFd) != 0) { + sError("%s, failed to retrieve file", pPeer->id); + return -1; } - if (code != TSDB_CODE_SUCCESS) { - sError("%s, failed to retrieve file, code:0x%x", pPeer->id, code); + if (syncSendFileVersion(pPeer) < 0) { + return -1; } - return code; + sInfo("%s, all files are retrieved", pPeer->id); + return 0; } // if only a partial record is read out, upper layer will reload the file to get a complete record @@ -345,7 +295,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { while (1) { // retrieve wal info wname[0] = 0; - code = (*pNode->getWalInfo)(pNode->vgId, wname, &index); + code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &index); if (code < 0) { sError("%s, failed to get wal info since:%s, code:0x%x", pPeer->id, strerror(errno), code); break; @@ -477,7 +427,7 @@ void *syncRetrieveData(void *param) { sInfo("%s, start to retrieve data, sstatus:%s, numOfRetrieves:%d", pPeer->id, syncStatus[pPeer->sstatus], pPeer->numOfRetrieves); - if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, pPeer->numOfRetrieves); + if (pNode->notifyFlowCtrlFp) (*pNode->notifyFlowCtrlFp)(pNode->vgId, pPeer->numOfRetrieves); pPeer->syncFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); if (pPeer->syncFd < 0) { @@ -497,10 +447,10 @@ void *syncRetrieveData(void *param) { pPeer->numOfRetrieves++; } else { pPeer->numOfRetrieves = 0; - // if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, 0); + // if (pNode->notifyFlowCtrlFp) (*pNode->notifyFlowCtrlFp)(pNode->vgId, 0); } - if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, 0); + if (pNode->notifyFlowCtrlFp) 
(*pNode->notifyFlowCtrlFp)(pNode->vgId, 0); pPeer->fileChanged = 0; taosCloseSocket(pPeer->syncFd); diff --git a/src/sync/src/syncTcp.c b/src/sync/src/syncTcp.c index 22bdc7e74e..3ad9e9bba0 100644 --- a/src/sync/src/syncTcp.c +++ b/src/sync/src/syncTcp.c @@ -103,6 +103,11 @@ void syncCloseTcpThreadPool(void *param) { #ifdef WINDOWS closesocket(pPool->acceptFd); +#elif defined(__APPLE__) + if (pPool->acceptFd!=-1) { + close(pPool->acceptFd); + pPool->acceptFd = -1; + } #else shutdown(pPool->acceptFd, SHUT_RD); #endif diff --git a/src/sync/test/CMakeLists.txt b/src/sync/test/CMakeLists.txt index ab2e6c307b..f2b05ab226 100644 --- a/src/sync/test/CMakeLists.txt +++ b/src/sync/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index 161105d86c..eeaa6a08c2 100644 --- a/src/sync/test/syncServer.c +++ b/src/sync/test/syncServer.c @@ -296,11 +296,10 @@ void initSync() { pCfg->replica = 1; pCfg->quorum = 1; syncInfo.vgId = 1; - syncInfo.getFileInfo = getFileInfo; - syncInfo.getWalInfo = getWalInfo; - syncInfo.writeToCache = writeToCache; + syncInfo.getWalInfoFp = getWalInfo; + syncInfo.writeToCacheFp = writeToCache; syncInfo.confirmForward = confirmForward; - syncInfo.notifyRole = notifyRole; + syncInfo.notifyRoleFp = notifyRole; pCfg->nodeInfo[0].nodeId = 1; pCfg->nodeInfo[0].nodePort = 7010; diff --git a/src/tfs/CMakeLists.txt b/src/tfs/CMakeLists.txt new file mode 100644 index 0000000000..b435c84366 --- /dev/null +++ b/src/tfs/CMakeLists.txt @@ -0,0 +1,12 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +PROJECT(TDengine) + +INCLUDE_DIRECTORIES(inc) +AUX_SOURCE_DIRECTORY(src SRC) +ADD_LIBRARY(tfs ${SRC}) +TARGET_LINK_LIBRARIES(tfs tutil) + +IF (TD_LINUX) + # Someone has no gtest directory, so comment it + # ADD_SUBDIRECTORY(tests) +ENDIF () diff --git a/src/tfs/inc/tfsint.h b/src/tfs/inc/tfsint.h new file mode 100644 index 0000000000..fa4cd59723 --- /dev/null +++ b/src/tfs/inc/tfsint.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TD_TFSINT_H +#define TD_TFSINT_H + +#include "tlog.h" +#include "tglobal.h" +#include "tfs.h" +#include "tcoding.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern int fsDebugFlag; + +// For debug purpose +#define fFatal(...) { if (fsDebugFlag & DEBUG_FATAL) { taosPrintLog("TFS FATAL ", 255, __VA_ARGS__); }} +#define fError(...) { if (fsDebugFlag & DEBUG_ERROR) { taosPrintLog("TFS ERROR ", 255, __VA_ARGS__); }} +#define fWarn(...) { if (fsDebugFlag & DEBUG_WARN) { taosPrintLog("TFS WARN ", 255, __VA_ARGS__); }} +#define fInfo(...) { if (fsDebugFlag & DEBUG_INFO) { taosPrintLog("TFS ", 255, __VA_ARGS__); }} +#define fDebug(...) { if (fsDebugFlag & DEBUG_DEBUG) { taosPrintLog("TFS ", cqDebugFlag, __VA_ARGS__); }} +#define fTrace(...) 
{ if (fsDebugFlag & DEBUG_TRACE) { taosPrintLog("TFS ", cqDebugFlag, __VA_ARGS__); }} + +// Global Definitions +#define TFS_MIN_DISK_FREE_SIZE 50 * 1024 * 1024 + +// tdisk.c ====================================================== +typedef struct { + int64_t size; + int64_t free; +} SDiskMeta; + +typedef struct SDisk { + int level; + int id; + char dir[TSDB_FILENAME_LEN]; + SDiskMeta dmeta; +} SDisk; + +#define DISK_LEVEL(pd) ((pd)->level) +#define DISK_ID(pd) ((pd)->id) +#define DISK_DIR(pd) ((pd)->dir) +#define DISK_META(pd) ((pd)->dmeta) +#define DISK_SIZE(pd) ((pd)->dmeta.size) +#define DISK_FREE_SIZE(pd) ((pd)->dmeta.free) + +SDisk *tfsNewDisk(int level, int id, const char *dir); +SDisk *tfsFreeDisk(SDisk *pDisk); +int tfsUpdateDiskInfo(SDisk *pDisk); + +// ttier.c ====================================================== +typedef struct { + int64_t size; + int64_t free; + int16_t nAvailDisks; // # of Available disks +} STierMeta; +typedef struct STier { + pthread_spinlock_t lock; + int level; + int16_t ndisk; // # of disks mounted to this tier + int16_t nextid; // next disk id to allocate + STierMeta tmeta; + SDisk * disks[TSDB_MAX_DISKS_PER_TIER]; +} STier; + +#define TIER_LEVEL(pt) ((pt)->level) +#define TIER_NDISKS(pt) ((pt)->ndisk) +#define TIER_SIZE(pt) ((pt)->tmeta.size) +#define TIER_FREE_SIZE(pt) ((pt)->tmeta.free) +#define TIER_AVAIL_DISKS(pt) ((pt)->tmeta.nAvailDisks) +#define DISK_AT_TIER(pt, id) ((pt)->disks[id]) + +int tfsInitTier(STier *pTier, int level); +void tfsDestroyTier(STier *pTier); +SDisk *tfsMountDiskToTier(STier *pTier, SDiskCfg *pCfg); +void tfsUpdateTierInfo(STier *pTier, STierMeta *pTierMeta); +int tfsAllocDiskOnTier(STier *pTier); +void tfsGetTierMeta(STier *pTier, STierMeta *pTierMeta); +void tfsPosNextId(STier *pTier); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/tfs/src/tdisk.c b/src/tfs/src/tdisk.c new file mode 100644 index 0000000000..7cdaf7fd09 --- /dev/null +++ b/src/tfs/src/tdisk.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#include "os.h" + +#include "taoserror.h" +#include "tfsint.h" + +// PROTECTED ==================================== +SDisk *tfsNewDisk(int level, int id, const char *dir) { + SDisk *pDisk = (SDisk *)calloc(1, sizeof(*pDisk)); + if (pDisk == NULL) { + terrno = TSDB_CODE_FS_OUT_OF_MEMORY; + return NULL; + } + + pDisk->level = level; + pDisk->id = id; + strncpy(pDisk->dir, dir, TSDB_FILENAME_LEN); + + return pDisk; +} + +SDisk *tfsFreeDisk(SDisk *pDisk) { + if (pDisk) { + free(pDisk); + } + return NULL; +} + +int tfsUpdateDiskInfo(SDisk *pDisk) { + ASSERT(pDisk != NULL); + + SysDiskSize diskSize = {0}; + + int code = taosGetDiskSize(pDisk->dir, &diskSize); + if (code != 0) { + fError("failed to update disk information at level %d id %d dir %s since %s", pDisk->level, pDisk->id, pDisk->dir, + strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + } + + pDisk->dmeta.size = diskSize.tsize; + pDisk->dmeta.free = diskSize.tsize - diskSize.avail; + + return code; +} \ No newline at end of file diff --git a/src/tfs/src/tfs.c b/src/tfs/src/tfs.c new file mode 100644 index 0000000000..7b7c9b6127 --- /dev/null +++ b/src/tfs/src/tfs.c @@ -0,0 +1,600 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "os.h" + +#include "hash.h" +#include "taosdef.h" +#include "taoserror.h" +#include "tfs.h" +#include "tfsint.h" + +#define TMPNAME_LEN (TSDB_FILENAME_LEN * 2 + 32) + +typedef struct { + pthread_spinlock_t lock; + SFSMeta meta; + int nlevel; + STier tiers[TSDB_MAX_TIERS]; + SHashObj * map; // name to did map +} SFS; + +typedef struct { + SDisk *pDisk; +} SDiskIter; + +#define TFS_META() (pfs->meta) +#define TFS_NLEVEL() (pfs->nlevel) +#define TFS_TIERS() (pfs->tiers) +#define TFS_TIER_AT(level) (TFS_TIERS() + (level)) +#define TFS_DISK_AT(level, id) DISK_AT_TIER(TFS_TIER_AT(level), id) +#define TFS_PRIMARY_DISK() TFS_DISK_AT(TFS_PRIMARY_LEVEL, TFS_PRIMARY_ID) +#define TFS_IS_VALID_LEVEL(level) (((level) >= 0) && ((level) < TFS_NLEVEL())) +#define TFS_IS_VALID_ID(level, id) (((id) >= 0) && ((id) < TIER_NDISKS(TFS_TIER_AT(level)))) +#define TFS_IS_VALID_DISK(level, id) (TFS_IS_VALID_LEVEL(level) && TFS_IS_VALID_ID(level, id)) + +#define tfsLock() pthread_spin_lock(&(pfs->lock)) +#define tfsUnLock() pthread_spin_unlock(&(pfs->lock)) + +static SFS tfs = {0}; +static SFS *pfs = &tfs; + +// STATIC DECLARATION +static int tfsMount(SDiskCfg *pCfg); +static int tfsCheck(); +static int tfsCheckAndFormatCfg(SDiskCfg *pCfg); +static int tfsFormatDir(char *idir, char *odir); +static SDisk *tfsGetDiskByID(SDiskID did); +static SDisk *tfsGetDiskByName(const char *dir); +static int tfsOpendirImpl(TDIR *tdir); +static void tfsInitDiskIter(SDiskIter *pIter); +static SDisk *tfsNextDisk(SDiskIter *pIter); + +// FS APIs ==================================== +int tfsInit(SDiskCfg *pDiskCfg, int ndisk) { + ASSERT(ndisk > 0); + + for (int level = 0; level < TSDB_MAX_TIERS; level++) { + if (tfsInitTier(TFS_TIER_AT(level), level) < 0) { + while (true) { + level--; + if (level < 0) 
break; + + tfsDestroyTier(TFS_TIER_AT(level)); + } + + return -1; + } + } + + pthread_spin_init(&(pfs->lock), 0); + + pfs->map = taosHashInit(TSDB_MAX_TIERS * TSDB_MAX_DISKS_PER_TIER * 2, + taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + if (pfs->map == NULL) { + terrno = TSDB_CODE_FS_OUT_OF_MEMORY; + tfsDestroy(); + return -1; + } + + for (int idisk = 0; idisk < ndisk; idisk++) { + if (tfsMount(pDiskCfg + idisk) < 0) { + tfsDestroy(); + return -1; + } + } + + if (tfsCheck() < 0) { + tfsDestroy(); + return -1; + } + + tfsUpdateInfo(NULL); + for (int level = 0; level < TFS_NLEVEL(); level++) { + tfsPosNextId(TFS_TIER_AT(level)); + } + + return 0; +} + +void tfsDestroy() { + taosHashCleanup(pfs->map); + pfs->map = NULL; + + pthread_spin_destroy(&(pfs->lock)); + for (int level = 0; level < TFS_NLEVEL(); level++) { + tfsDestroyTier(TFS_TIER_AT(level)); + } +} + +void tfsUpdateInfo(SFSMeta *pFSMeta) { + SFSMeta fsMeta; + STierMeta tierMeta; + + if (pFSMeta == NULL) { + pFSMeta = &fsMeta; + } + + memset(pFSMeta, 0, sizeof(*pFSMeta)); + + for (int level = 0; level < TFS_NLEVEL(); level++) { + STier *pTier = TFS_TIER_AT(level); + tfsUpdateTierInfo(pTier, &tierMeta); + pFSMeta->tsize += tierMeta.size; + pFSMeta->avail += tierMeta.free; + } + + tfsLock(); + pfs->meta = *pFSMeta; + tfsUnLock(); +} + +void tfsGetMeta(SFSMeta *pMeta) { + ASSERT(pMeta); + + tfsLock(); + *pMeta = pfs->meta; + tfsUnLock(); +} + +/* Allocate an existing available tier level + */ +void tfsAllocDisk(int expLevel, int *level, int *id) { + ASSERT(expLevel >= 0); + + *level = expLevel; + *id = TFS_UNDECIDED_ID; + + if (*level >= TFS_NLEVEL()) { + *level = TFS_NLEVEL() - 1; + } + + while (*level >= 0) { + *id = tfsAllocDiskOnTier(TFS_TIER_AT(*level)); + if (*id == TFS_UNDECIDED_ID) { + (*level)--; + continue; + } + + return; + } + + *level = TFS_UNDECIDED_LEVEL; + *id = TFS_UNDECIDED_ID; +} + +const char *TFS_PRIMARY_PATH() { return DISK_DIR(TFS_PRIMARY_DISK()); } +const char *TFS_DISK_PATH(int level, int id) { return DISK_DIR(TFS_DISK_AT(level, id)); } + +// TFILE APIs ==================================== +void tfsInitFile(TFILE *pf, int level, int id, const char *bname) { + ASSERT(TFS_IS_VALID_DISK(level, id)); + + SDisk *pDisk = TFS_DISK_AT(level, id); + + pf->level = level; + pf->id = id; + strncpy(pf->rname, bname, TSDB_FILENAME_LEN); + + char tmpName[TMPNAME_LEN] = {0}; + snprintf(tmpName, TMPNAME_LEN, "%s/%s", DISK_DIR(pDisk), bname); + tstrncpy(pf->aname, tmpName, TSDB_FILENAME_LEN); +} + +bool tfsIsSameFile(const TFILE *pf1, const TFILE *pf2) { + ASSERT(pf1 != NULL || pf2 != NULL); + if (pf1 == NULL || pf2 == NULL) return false; + if (pf1->level != pf2->level) return false; + if (pf1->id != pf2->id) return false; + if (strncmp(pf1->rname, pf2->rname, TSDB_FILENAME_LEN) != 0) return false; + return true; +} + +int tfsEncodeFile(void **buf, TFILE *pf) { + int tlen = 0; + + tlen += taosEncodeVariantI32(buf, pf->level); + tlen += taosEncodeVariantI32(buf, pf->id); + tlen += taosEncodeString(buf, pf->rname); + + return tlen; +} + +void *tfsDecodeFile(void *buf, TFILE *pf) { + int32_t level, id; + char * rname; + + buf = taosDecodeVariantI32(buf, &(level)); + buf = taosDecodeVariantI32(buf, &(id)); + buf = taosDecodeString(buf, &rname); + + tfsInitFile(pf, level, id, rname); + + tfree(rname); + return buf; +} + +void tfsbasename(const TFILE *pf, char *dest) { + char tname[TSDB_FILENAME_LEN] = "\0"; + + strncpy(tname, pf->aname, TSDB_FILENAME_LEN); + strncpy(dest, basename(tname), TSDB_FILENAME_LEN); 
+} + +void tfsdirname(const TFILE *pf, char *dest) { + char tname[TSDB_FILENAME_LEN] = "\0"; + + strncpy(tname, pf->aname, TSDB_FILENAME_LEN); + strncpy(dest, dirname(tname), TSDB_FILENAME_LEN); +} + +// DIR APIs ==================================== +int tfsMkdirAt(const char *rname, int level, int id) { + SDisk *pDisk = TFS_DISK_AT(level, id); + char aname[TMPNAME_LEN]; + + snprintf(aname, TMPNAME_LEN, "%s/%s", DISK_DIR(pDisk), rname); + if (taosMkDir(aname, 0755) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return 0; +} + +int tfsMkdirRecurAt(const char *rname, int level, int id) { + if (tfsMkdirAt(rname, level, id) < 0) { + if (errno == ENOENT) { + // Try to create upper + char *s = strdup(rname); + + if (tfsMkdirRecurAt(dirname(s), level, id) < 0) { + tfree(s); + return -1; + } + tfree(s); + + if (tfsMkdirAt(rname, level, id) < 0) { + return -1; + } + } else { + return -1; + } + } + + return 0; +} + +int tfsMkdir(const char *rname) { + for (int level = 0; level < TFS_NLEVEL(); level++) { + STier *pTier = TFS_TIER_AT(level); + for (int id = 0; id < TIER_NDISKS(pTier); id++) { + if (tfsMkdirAt(rname, level, id) < 0) { + return -1; + } + } + } + + return 0; +} + +int tfsRmdir(const char *rname) { + char aname[TMPNAME_LEN] = "\0"; + + for (int level = 0; level < TFS_NLEVEL(); level++) { + STier *pTier = TFS_TIER_AT(level); + for (int id = 0; id < TIER_NDISKS(pTier); id++) { + SDisk *pDisk = DISK_AT_TIER(pTier, id); + + snprintf(aname, TMPNAME_LEN, "%s/%s", DISK_DIR(pDisk), rname); + + taosRemoveDir(aname); + } + } + + return 0; +} + +int tfsRename(char *orname, char *nrname) { + char oaname[TMPNAME_LEN] = "\0"; + char naname[TMPNAME_LEN] = "\0"; + + for (int level = 0; level < pfs->nlevel; level++) { + STier *pTier = TFS_TIER_AT(level); + for (int id = 0; id < TIER_NDISKS(pTier); id++) { + SDisk *pDisk = DISK_AT_TIER(pTier, id); + + snprintf(oaname, TMPNAME_LEN, "%s/%s", DISK_DIR(pDisk), orname); + snprintf(naname, TMPNAME_LEN, "%s/%s", DISK_DIR(pDisk), nrname); + + taosRename(oaname, naname); + } + } + + return 0; +} + +struct TDIR { + SDiskIter iter; + int level; + int id; + char dirname[TSDB_FILENAME_LEN]; + TFILE tfile; + DIR * dir; +}; + +TDIR *tfsOpendir(const char *rname) { + TDIR *tdir = (TDIR *)calloc(1, sizeof(*tdir)); + if (tdir == NULL) { + terrno = TSDB_CODE_FS_OUT_OF_MEMORY; + return NULL; + } + + tfsInitDiskIter(&(tdir->iter)); + strncpy(tdir->dirname, rname, TSDB_FILENAME_LEN); + + if (tfsOpendirImpl(tdir) < 0) { + free(tdir); + return NULL; + } + + return tdir; +} + +const TFILE *tfsReaddir(TDIR *tdir) { + if (tdir == NULL || tdir->dir == NULL) return NULL; + char bname[TMPNAME_LEN * 2] = "\0"; + + while (true) { + struct dirent *dp = NULL; + dp = readdir(tdir->dir); + if (dp != NULL) { + // Skip . and .. 
+ if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue; + + snprintf(bname, TMPNAME_LEN * 2, "%s/%s", tdir->dirname, dp->d_name); + tfsInitFile(&(tdir->tfile), tdir->level, tdir->id, bname); + return &(tdir->tfile); + } + + if (tfsOpendirImpl(tdir) < 0) { + return NULL; + } + + if (tdir->dir == NULL) { + terrno = TSDB_CODE_SUCCESS; + return NULL; + } + } +} + +void tfsClosedir(TDIR *tdir) { + if (tdir) { + if (tdir->dir != NULL) { + closedir(tdir->dir); + tdir->dir = NULL; + } + free(tdir); + } +} + +// private +static int tfsMount(SDiskCfg *pCfg) { + SDiskID did; + SDisk * pDisk = NULL; + + if (tfsCheckAndFormatCfg(pCfg) < 0) return -1; + + did.level = pCfg->level; + pDisk = tfsMountDiskToTier(TFS_TIER_AT(did.level), pCfg); + if (pDisk == NULL) { + fError("failed to mount disk %s to level %d since %s", pCfg->dir, pCfg->level, tstrerror(terrno)); + return -1; + } + did.id = DISK_ID(pDisk); + + taosHashPut(pfs->map, (void *)(pCfg->dir), strnlen(pCfg->dir, TSDB_FILENAME_LEN), (void *)(&did), sizeof(did)); + if (pfs->nlevel < pCfg->level + 1) pfs->nlevel = pCfg->level + 1; + + return 0; +} + +static int tfsCheckAndFormatCfg(SDiskCfg *pCfg) { + char dirName[TSDB_FILENAME_LEN] = "\0"; + struct stat pstat; + + if (pCfg->level < 0 || pCfg->level >= TSDB_MAX_TIERS) { + fError("failed to mount %s to FS since invalid level %d", pCfg->dir, pCfg->level); + terrno = TSDB_CODE_FS_INVLD_CFG; + return -1; + } + + if (pCfg->primary) { + if (pCfg->level != 0) { + fError("failed to mount %s to FS since disk is primary but level %d not 0", pCfg->dir, pCfg->level); + terrno = TSDB_CODE_FS_INVLD_CFG; + return -1; + } + + if (TFS_PRIMARY_DISK() != NULL) { + fError("failed to mount %s to FS since duplicate primary mount", pCfg->dir); + terrno = TSDB_CODE_FS_DUP_PRIMARY; + return -1; + } + } + + if (tfsFormatDir(pCfg->dir, dirName) < 0) { + fError("failed to mount %s to FS since invalid dir format", pCfg->dir); + terrno = TSDB_CODE_FS_INVLD_CFG; + return -1; + } + + if (tfsGetDiskByName(dirName) != NULL) { + fError("failed to mount %s to FS since duplicate mount", pCfg->dir); + terrno = TSDB_CODE_FS_INVLD_CFG; + return -1; + } + + if (access(dirName, W_OK | R_OK | F_OK) != 0) { + fError("failed to mount %s to FS since no R/W access rights", pCfg->dir); + terrno = TSDB_CODE_FS_INVLD_CFG; + return -1; + } + + if (stat(dirName, &pstat) < 0) { + fError("failed to mount %s to FS since %s", pCfg->dir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + if (!S_ISDIR(pstat.st_mode)) { + fError("failed to mount %s to FS since not a directory", pCfg->dir); + terrno = TSDB_CODE_FS_INVLD_CFG; + return -1; + } + + strncpy(pCfg->dir, dirName, TSDB_FILENAME_LEN); + + return 0; +} + +static int tfsFormatDir(char *idir, char *odir) { + wordexp_t wep = {0}; + + int code = wordexp(idir, &wep, 0); + if (code != 0) { + terrno = TAOS_SYSTEM_ERROR(code); + return -1; + } + + if (realpath(wep.we_wordv[0], odir) == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + wordfree(&wep); + return -1; + } + + wordfree(&wep); + return 0; + +} + +static int tfsCheck() { + if (TFS_PRIMARY_DISK() == NULL) { + fError("no primary disk is set"); + terrno = TSDB_CODE_FS_NO_PRIMARY_DISK; + return -1; + } + + for (int level = 0; level < TFS_NLEVEL(); level++) { + if (TIER_NDISKS(TFS_TIER_AT(level)) == 0) { + fError("no disk at level %d", level); + terrno = TSDB_CODE_FS_NO_MOUNT_AT_TIER; + return -1; + } + } + + return 0; +} + +static SDisk *tfsGetDiskByID(SDiskID did) { return TFS_DISK_AT(did.level, did.id); } 
+static SDisk *tfsGetDiskByName(const char *dir) { + SDiskID did; + SDisk * pDisk = NULL; + void * pr = NULL; + + pr = taosHashGet(pfs->map, (void *)dir, strnlen(dir, TSDB_FILENAME_LEN)); + if (pr == NULL) return NULL; + + did = *(SDiskID *)pr; + pDisk = tfsGetDiskByID(did); + ASSERT(pDisk != NULL); + + return pDisk; +} + +static int tfsOpendirImpl(TDIR *tdir) { + SDisk *pDisk = NULL; + char adir[TMPNAME_LEN * 2] = "\0"; + + if (tdir->dir != NULL) { + closedir(tdir->dir); + tdir->dir = NULL; + } + + while (true) { + pDisk = tfsNextDisk(&(tdir->iter)); + if (pDisk == NULL) return 0; + + tdir->level = DISK_LEVEL(pDisk); + tdir->id = DISK_ID(pDisk); + + snprintf(adir, TMPNAME_LEN * 2, "%s/%s", DISK_DIR(pDisk), tdir->dirname); + tdir->dir = opendir(adir); + if (tdir->dir != NULL) break; + } + + return 0; +} + +static void tfsInitDiskIter(SDiskIter *pIter) { pIter->pDisk = TFS_DISK_AT(0, 0); } + +static SDisk *tfsNextDisk(SDiskIter *pIter) { + SDisk *pDisk = pIter->pDisk; + + if (pDisk == NULL) return NULL; + + int level = DISK_LEVEL(pDisk); + int id = DISK_ID(pDisk); + + id++; + if (id < TIER_NDISKS(TFS_TIER_AT(level))) { + pIter->pDisk = TFS_DISK_AT(level, id); + ASSERT(pIter->pDisk != NULL); + } else { + level++; + id = 0; + if (level < TFS_NLEVEL()) { + pIter->pDisk = TFS_DISK_AT(level, id); + ASSERT(pIter->pDisk != NULL); + } else { + pIter->pDisk = NULL; + } + } + + return pDisk; +} + +// OTHER FUNCTIONS =================================== +void taosGetDisk() { + const double unit = 1024 * 1024 * 1024; + SysDiskSize diskSize; + SFSMeta fsMeta; + + if (tscEmbedded) { + tfsUpdateInfo(&fsMeta); + tsTotalDataDirGB = (float)(fsMeta.tsize / unit); + tsAvailDataDirGB = (float)(fsMeta.avail / unit); + } + + if (taosGetDiskSize(tsLogDir, &diskSize) == 0) { + tsTotalLogDirGB = (float)(diskSize.tsize / unit); + tsAvailLogDirGB = (float)(diskSize.avail / unit); + } + + if (taosGetDiskSize(tsTempDir, &diskSize) == 0) { + tsTotalTmpDirGB = (float)(diskSize.tsize / unit); + tsAvailTmpDirectorySpace = (float)(diskSize.avail / unit); + } +} diff --git a/src/tfs/src/ttier.c b/src/tfs/src/ttier.c new file mode 100644 index 0000000000..2dce0c3194 --- /dev/null +++ b/src/tfs/src/ttier.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#include "os.h" + +#include "taosdef.h" +#include "taoserror.h" +#include "tfsint.h" + +#define tfsLockTier(pTier) pthread_spin_lock(&((pTier)->lock)) +#define tfsUnLockTier(pTier) pthread_spin_unlock(&((pTier)->lock)) + +// PROTECTED ========================================== +int tfsInitTier(STier *pTier, int level) { + memset((void *)pTier, 0, sizeof(*pTier)); + + int code = pthread_spin_init(&(pTier->lock), 0); + if (code) { + terrno = TAOS_SYSTEM_ERROR(code); + return -1; + } + + pTier->level = level; + + return 0; +} + +void tfsDestroyTier(STier *pTier) { + for (int id = 0; id < TSDB_MAX_DISKS_PER_TIER; id++) { + DISK_AT_TIER(pTier, id) = tfsFreeDisk(DISK_AT_TIER(pTier, id)); + } + + pTier->ndisk = 0; + + pthread_spin_destroy(&(pTier->lock)); +} + +SDisk *tfsMountDiskToTier(STier *pTier, SDiskCfg *pCfg) { + ASSERT(pTier->level == pCfg->level); + + int id = 0; + SDisk *pDisk; + + if (TIER_NDISKS(pTier) >= TSDB_MAX_DISKS_PER_TIER) { + terrno = TSDB_CODE_FS_TOO_MANY_MOUNT; + return NULL; + } + + if (pTier->level == 0) { + if (DISK_AT_TIER(pTier, 0) != NULL) { + id = pTier->ndisk; + } else { + if (pCfg->primary) { + id = 0; + } else { + id = pTier->ndisk + 1; + } + if (id >= TSDB_MAX_DISKS_PER_TIER) { + terrno = TSDB_CODE_FS_TOO_MANY_MOUNT; + return NULL; + } + } + } else { + id = pTier->ndisk; + } + + pDisk = tfsNewDisk(pCfg->level, id, pCfg->dir); + if (pDisk == NULL) return NULL; + DISK_AT_TIER(pTier, id) = pDisk; + pTier->ndisk++; + + fInfo("disk %s is mounted to tier level %d id %d", pCfg->dir, pCfg->level, id); + + return DISK_AT_TIER(pTier, id); +} + +void tfsUpdateTierInfo(STier *pTier, STierMeta *pTierMeta) { + STierMeta tmeta; + + if (pTierMeta == NULL) { + pTierMeta = &tmeta; + } + memset(pTierMeta, 0, sizeof(*pTierMeta)); + + tfsLockTier(pTier); + + for (int id = 0; id < pTier->ndisk; id++) { + if (tfsUpdateDiskInfo(DISK_AT_TIER(pTier, id)) < 0) { + continue; + } + pTierMeta->size += DISK_SIZE(DISK_AT_TIER(pTier, id)); + pTierMeta->free += DISK_FREE_SIZE(DISK_AT_TIER(pTier, id)); + pTierMeta->nAvailDisks++; + } + + pTier->tmeta = *pTierMeta; + + tfsUnLockTier(pTier); +} + +// Round-Robin to allocate disk on a tier +int tfsAllocDiskOnTier(STier *pTier) { + ASSERT(pTier->ndisk > 0); + int id = TFS_UNDECIDED_ID; + SDisk *pDisk; + + tfsLockTier(pTier); + + if (TIER_AVAIL_DISKS(pTier) <= 0) { + tfsUnLockTier(pTier); + return id; + } + + id = pTier->nextid; + while (true) { + pDisk = DISK_AT_TIER(pTier, id); + ASSERT(pDisk != NULL); + + if (DISK_FREE_SIZE(pDisk) < TFS_MIN_DISK_FREE_SIZE) { + id = (id + 1) % pTier->ndisk; + if (id == pTier->nextid) { + tfsUnLockTier(pTier); + return TFS_UNDECIDED_ID; + } else { + continue; + } + } else { + pTier->nextid = (id + 1) % pTier->ndisk; + break; + } + } + + tfsUnLockTier(pTier); + return id; +} + +void tfsGetTierMeta(STier *pTier, STierMeta *pTierMeta) { + ASSERT(pTierMeta != NULL); + + tfsLockTier(pTier); + *pTierMeta = pTier->tmeta; + tfsUnLockTier(pTier); +} + +void tfsPosNextId(STier *pTier) { + ASSERT(pTier->ndisk > 0); + int nextid = 0; + + for (int id = 1; id < pTier->ndisk; id++) { + SDisk *pLDisk = DISK_AT_TIER(pTier, nextid); + SDisk *pDisk = DISK_AT_TIER(pTier, id); + if (DISK_FREE_SIZE(pDisk) > TFS_MIN_DISK_FREE_SIZE && DISK_FREE_SIZE(pDisk) > DISK_FREE_SIZE(pLDisk)) { + nextid = id; + } + } + + pTier->nextid = nextid; +} \ No newline at end of file diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index d8bc20ca99..31d52aae7d 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -1,10 +1,10 
@@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tsdb ${SRC}) -TARGET_LINK_LIBRARIES(tsdb common tutil) +TARGET_LINK_LIBRARIES(tsdb tfs common tutil) IF (TD_LINUX) # Someone has no gtest directory, so comment it diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h new file mode 100644 index 0000000000..414ace0009 --- /dev/null +++ b/src/tsdb/inc/tsdbBuffer.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_TSDB_BUFFER_H_ +#define _TD_TSDB_BUFFER_H_ + +typedef struct { + int64_t blockId; + int offset; + int remain; + char data[]; +} STsdbBufBlock; + +typedef struct { + pthread_cond_t poolNotEmpty; + int bufBlockSize; + int tBufBlocks; + int nBufBlocks; + int64_t index; + SList* bufBlockList; +} STsdbBufPool; + +#define TSDB_BUFFER_RESERVE 1024 // Reseve 1K as commit threshold + +STsdbBufPool* tsdbNewBufPool(); +void tsdbFreeBufPool(STsdbBufPool* pBufPool); +int tsdbOpenBufPool(STsdbRepo* pRepo); +void tsdbCloseBufPool(STsdbRepo* pRepo); +SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); + +#endif /* _TD_TSDB_BUFFER_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbCommit.h b/src/tsdb/inc/tsdbCommit.h new file mode 100644 index 0000000000..5e740081d1 --- /dev/null +++ b/src/tsdb/inc/tsdbCommit.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_TSDB_COMMIT_H_ +#define _TD_TSDB_COMMIT_H_ + +typedef struct { + int minFid; + int midFid; + int maxFid; + TSKEY minKey; +} SRtn; + +typedef struct { + uint64_t uid; + int64_t offset; + int64_t size; +} SKVRecord; + +void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn); +int tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord); +void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord); +void *tsdbCommitData(STsdbRepo *pRepo); + +static FORCE_INLINE int tsdbGetFidLevel(int fid, SRtn *pRtn) { + if (fid >= pRtn->maxFid) { + return 0; + } else if (fid >= pRtn->midFid) { + return 1; + } else if (fid >= pRtn->minFid) { + return 2; + } else { + return -1; + } +} + +#endif /* _TD_TSDB_COMMIT_H_ */ \ No newline at end of file diff --git a/src/common/inc/tsystem.h b/src/tsdb/inc/tsdbCommitQueue.h similarity index 56% rename from src/common/inc/tsystem.h rename to src/tsdb/inc/tsdbCommitQueue.h index 93d305e49c..c2353391f9 100644 --- a/src/common/inc/tsystem.h +++ b/src/tsdb/inc/tsdbCommitQueue.h @@ -13,26 +13,9 @@ * along with this program. If not, see . */ -#ifndef TDENGINE_TSYSTEM_H -#define TDENGINE_TSYSTEM_H +#ifndef _TD_TSDB_COMMIT_QUEUE_H_ +#define _TD_TSDB_COMMIT_QUEUE_H_ -#ifdef __cplusplus -extern "C" { -#endif +int tsdbScheduleCommit(STsdbRepo *pRepo); -bool taosGetSysMemory(float *memoryUsedMB); -bool taosGetProcMemory(float *memoryUsedMB); -bool taosGetDisk(); -bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage); -bool taosGetBandSpeed(float *bandSpeedKb); -bool taosGetProcIO(float *readKB, float *writeKB); -void taosGetSystemInfo(); -void taosPrintOsInfo(); -void taosKillSystem(); -void taosSetCoreDump(); - -#ifdef __cplusplus -} -#endif - -#endif +#endif /* _TD_TSDB_COMMIT_QUEUE_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h new file mode 100644 index 0000000000..d63aeb14ac --- /dev/null +++ b/src/tsdb/inc/tsdbFS.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_TSDB_FS_H_ +#define _TD_TSDB_FS_H_ + +#define TSDB_FS_VERSION 0 + +// ================== CURRENT file header info +typedef struct { + uint32_t version; // Current file system version (relating to code) + uint32_t len; // Encode content length (including checksum) +} SFSHeader; + +// ================== TSDB File System Meta +typedef struct { + uint32_t version; // Commit version from 0 to increase + int64_t totalPoints; // total points + int64_t totalStorage; // Uncompressed total storage +} STsdbFSMeta; + +// ================== +typedef struct { + STsdbFSMeta meta; // FS meta + SMFile* pmf; // meta file pointer + SMFile mf; // meta file + SArray* df; // data file array +} SFSStatus; + +typedef struct { + pthread_rwlock_t lock; + + SFSStatus* cstatus; // current status + SHashObj* metaCache; // meta cache + bool intxn; + SFSStatus* nstatus; // new status +} STsdbFS; + +#define FS_CURRENT_STATUS(pfs) ((pfs)->cstatus) +#define FS_NEW_STATUS(pfs) ((pfs)->nstatus) +#define FS_IN_TXN(pfs) (pfs)->intxn +#define FS_VERSION(pfs) ((pfs)->cstatus->meta.version) +#define FS_TXN_VERSION(pfs) ((pfs)->nstatus->meta.version) + +typedef struct { + int direction; + uint64_t version; // current FS version + STsdbFS* pfs; + int index; // used to position next fset when version the same + int fid; // used to seek when version is changed + SDFileSet* pSet; +} SFSIter; + +#define TSDB_FS_ITER_FORWARD TSDB_ORDER_ASC +#define TSDB_FS_ITER_BACKWARD TSDB_ORDER_DESC + +STsdbFS *tsdbNewFS(STsdbCfg *pCfg); +void * tsdbFreeFS(STsdbFS *pfs); +int tsdbOpenFS(STsdbRepo *pRepo); +void tsdbCloseFS(STsdbRepo *pRepo); +void tsdbStartFSTxn(STsdbRepo *pRepo, int64_t pointsAdd, int64_t storageAdd); +int tsdbEndFSTxn(STsdbRepo *pRepo); +int tsdbEndFSTxnWithError(STsdbFS *pfs); +void tsdbUpdateFSTxnMeta(STsdbFS *pfs, STsdbFSMeta *pMeta); +void tsdbUpdateMFile(STsdbFS *pfs, const SMFile *pMFile); +int tsdbUpdateDFileSet(STsdbFS *pfs, const SDFileSet *pSet); + +void tsdbFSIterInit(SFSIter *pIter, STsdbFS *pfs, int direction); +void tsdbFSIterSeek(SFSIter *pIter, int fid); +SDFileSet *tsdbFSIterNext(SFSIter *pIter); +int tsdbLoadMetaCache(STsdbRepo *pRepo, bool recoverMeta); + +static FORCE_INLINE int tsdbRLockFS(STsdbFS* pFs) { + int code = pthread_rwlock_rdlock(&(pFs->lock)); + if (code != 0) { + terrno = TAOS_SYSTEM_ERROR(code); + return -1; + } + return 0; +} + +static FORCE_INLINE int tsdbWLockFS(STsdbFS* pFs) { + int code = pthread_rwlock_wrlock(&(pFs->lock)); + if (code != 0) { + terrno = TAOS_SYSTEM_ERROR(code); + return -1; + } + return 0; +} + +static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) { + int code = pthread_rwlock_unlock(&(pFs->lock)); + if (code != 0) { + terrno = TAOS_SYSTEM_ERROR(code); + return -1; + } + return 0; +} + +#endif /* _TD_TSDB_FS_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h new file mode 100644 index 0000000000..f1e2422e45 --- /dev/null +++ b/src/tsdb/inc/tsdbFile.h @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TS_TSDB_FILE_H_ +#define _TS_TSDB_FILE_H_ + +#define TSDB_FILE_HEAD_SIZE 512 +#define TSDB_FILE_DELIMITER 0xF00AFA0F +#define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF +#define TSDB_IVLD_FID INT_MIN +#define TSDB_FILE_STATE_OK 0 +#define TSDB_FILE_STATE_BAD 1 + +#define TSDB_FILE_INFO(tf) (&((tf)->info)) +#define TSDB_FILE_F(tf) (&((tf)->f)) +#define TSDB_FILE_FD(tf) ((tf)->fd) +#define TSDB_FILE_FULL_NAME(tf) TFILE_NAME(TSDB_FILE_F(tf)) +#define TSDB_FILE_OPENED(tf) (TSDB_FILE_FD(tf) >= 0) +#define TSDB_FILE_CLOSED(tf) (!TSDB_FILE_OPENED(tf)) +#define TSDB_FILE_SET_CLOSED(f) (TSDB_FILE_FD(f) = -1) +#define TSDB_FILE_LEVEL(tf) TFILE_LEVEL(TSDB_FILE_F(tf)) +#define TSDB_FILE_ID(tf) TFILE_ID(TSDB_FILE_F(tf)) +#define TSDB_FILE_FSYNC(tf) fsync(TSDB_FILE_FD(tf)) +#define TSDB_FILE_STATE(tf) ((tf)->state) +#define TSDB_FILE_SET_STATE(tf, s) ((tf)->state = (s)) +#define TSDB_FILE_IS_OK(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_OK) +#define TSDB_FILE_IS_BAD(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_BAD) + +typedef enum { TSDB_FILE_HEAD = 0, TSDB_FILE_DATA, TSDB_FILE_LAST, TSDB_FILE_MAX, TSDB_FILE_META } TSDB_FILE_T; + +// =============== SMFile +typedef struct { + int64_t size; + int64_t tombSize; + int64_t nRecords; + int64_t nDels; + uint32_t magic; +} SMFInfo; + +typedef struct { + SMFInfo info; + TFILE f; + int fd; + uint8_t state; +} SMFile; + +void tsdbInitMFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver); +void tsdbInitMFileEx(SMFile* pMFile, const SMFile* pOMFile); +int tsdbEncodeSMFile(void** buf, SMFile* pMFile); +void* tsdbDecodeSMFile(void* buf, SMFile* pMFile); +int tsdbEncodeSMFileEx(void** buf, SMFile* pMFile); +void* tsdbDecodeSMFileEx(void* buf, SMFile* pMFile); +int tsdbApplyMFileChange(SMFile* from, SMFile* to); +int tsdbCreateMFile(SMFile* pMFile, bool updateHeader); +int tsdbUpdateMFileHeader(SMFile* pMFile); +int tsdbLoadMFileHeader(SMFile* pMFile, SMFInfo* pInfo); +int tsdbScanAndTryFixMFile(STsdbRepo* pRepo); +int tsdbEncodeMFInfo(void** buf, SMFInfo* pInfo); +void* tsdbDecodeMFInfo(void* buf, SMFInfo* pInfo); + +static FORCE_INLINE void tsdbSetMFileInfo(SMFile* pMFile, SMFInfo* pInfo) { pMFile->info = *pInfo; } + +static FORCE_INLINE int tsdbOpenMFile(SMFile* pMFile, int flags) { + ASSERT(TSDB_FILE_CLOSED(pMFile)); + + pMFile->fd = open(TSDB_FILE_FULL_NAME(pMFile), flags); + if (pMFile->fd < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return 0; +} + +static FORCE_INLINE void tsdbCloseMFile(SMFile* pMFile) { + if (TSDB_FILE_OPENED(pMFile)) { + close(pMFile->fd); + TSDB_FILE_SET_CLOSED(pMFile); + } +} + +static FORCE_INLINE int64_t tsdbSeekMFile(SMFile* pMFile, int64_t offset, int whence) { + ASSERT(TSDB_FILE_OPENED(pMFile)); + + int64_t loffset = taosLSeek(TSDB_FILE_FD(pMFile), offset, whence); + if (loffset < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return loffset; +} + +static FORCE_INLINE int64_t tsdbWriteMFile(SMFile* pMFile, void* buf, int64_t nbyte) { + ASSERT(TSDB_FILE_OPENED(pMFile)); + + int64_t nwrite = taosWrite(pMFile->fd, buf, nbyte); + if (nwrite < nbyte) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return nwrite; +} + +static FORCE_INLINE void tsdbUpdateMFileMagic(SMFile* pMFile, void* pCksum) { + pMFile->info.magic = taosCalcChecksum(pMFile->info.magic, (uint8_t*)(pCksum), sizeof(TSCKSUM)); +} + +static FORCE_INLINE int tsdbAppendMFile(SMFile* pMFile, void* buf, 
int64_t nbyte, int64_t* offset) { + ASSERT(TSDB_FILE_OPENED(pMFile)); + + int64_t toffset; + + if ((toffset = tsdbSeekMFile(pMFile, 0, SEEK_END)) < 0) { + return -1; + } + + ASSERT(pMFile->info.size == toffset); + + if (offset) { + *offset = toffset; + } + + if (tsdbWriteMFile(pMFile, buf, nbyte) < 0) { + return -1; + } + + pMFile->info.size += nbyte; + + return (int)nbyte; +} + +static FORCE_INLINE int tsdbRemoveMFile(SMFile* pMFile) { return tfsremove(TSDB_FILE_F(pMFile)); } + +static FORCE_INLINE int64_t tsdbReadMFile(SMFile* pMFile, void* buf, int64_t nbyte) { + ASSERT(TSDB_FILE_OPENED(pMFile)); + + int64_t nread = taosRead(pMFile->fd, buf, nbyte); + if (nread < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return nread; +} + +// =============== SDFile +typedef struct { + uint32_t magic; + uint32_t len; + uint32_t totalBlocks; + uint32_t totalSubBlocks; + uint32_t offset; + uint64_t size; + uint64_t tombSize; +} SDFInfo; + +typedef struct { + SDFInfo info; + TFILE f; + int fd; + uint8_t state; +} SDFile; + +void tsdbInitDFile(SDFile* pDFile, SDiskID did, int vid, int fid, uint32_t ver, TSDB_FILE_T ftype); +void tsdbInitDFileEx(SDFile* pDFile, SDFile* pODFile); +int tsdbEncodeSDFile(void** buf, SDFile* pDFile); +void* tsdbDecodeSDFile(void* buf, SDFile* pDFile); +int tsdbCreateDFile(SDFile* pDFile, bool updateHeader); +int tsdbUpdateDFileHeader(SDFile* pDFile); +int tsdbLoadDFileHeader(SDFile* pDFile, SDFInfo* pInfo); +int tsdbParseDFilename(const char* fname, int* vid, int* fid, TSDB_FILE_T* ftype, uint32_t* version); + +static FORCE_INLINE void tsdbSetDFileInfo(SDFile* pDFile, SDFInfo* pInfo) { pDFile->info = *pInfo; } + +static FORCE_INLINE int tsdbOpenDFile(SDFile* pDFile, int flags) { + ASSERT(!TSDB_FILE_OPENED(pDFile)); + + pDFile->fd = open(TSDB_FILE_FULL_NAME(pDFile), flags); + if (pDFile->fd < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return 0; +} + +static FORCE_INLINE void tsdbCloseDFile(SDFile* pDFile) { + if (TSDB_FILE_OPENED(pDFile)) { + close(pDFile->fd); + TSDB_FILE_SET_CLOSED(pDFile); + } +} + +static FORCE_INLINE int64_t tsdbSeekDFile(SDFile* pDFile, int64_t offset, int whence) { + ASSERT(TSDB_FILE_OPENED(pDFile)); + + int64_t loffset = taosLSeek(TSDB_FILE_FD(pDFile), offset, whence); + if (loffset < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return loffset; +} + +static FORCE_INLINE int64_t tsdbWriteDFile(SDFile* pDFile, void* buf, int64_t nbyte) { + ASSERT(TSDB_FILE_OPENED(pDFile)); + + int64_t nwrite = taosWrite(pDFile->fd, buf, nbyte); + if (nwrite < nbyte) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return nwrite; +} + +static FORCE_INLINE void tsdbUpdateDFileMagic(SDFile* pDFile, void* pCksm) { + pDFile->info.magic = taosCalcChecksum(pDFile->info.magic, (uint8_t*)(pCksm), sizeof(TSCKSUM)); +} + +static FORCE_INLINE int tsdbAppendDFile(SDFile* pDFile, void* buf, int64_t nbyte, int64_t* offset) { + ASSERT(TSDB_FILE_OPENED(pDFile)); + + int64_t toffset; + + if ((toffset = tsdbSeekDFile(pDFile, 0, SEEK_END)) < 0) { + return -1; + } + + ASSERT(pDFile->info.size == toffset); + + if (offset) { + *offset = toffset; + } + + if (tsdbWriteDFile(pDFile, buf, nbyte) < 0) { + return -1; + } + + pDFile->info.size += nbyte; + + return (int)nbyte; +} + +static FORCE_INLINE int tsdbRemoveDFile(SDFile* pDFile) { return tfsremove(TSDB_FILE_F(pDFile)); } + +static FORCE_INLINE int64_t tsdbReadDFile(SDFile* pDFile, void* buf, int64_t nbyte) { + ASSERT(TSDB_FILE_OPENED(pDFile)); + + int64_t nread = 
taosRead(pDFile->fd, buf, nbyte); + if (nread < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return nread; +} + +static FORCE_INLINE int tsdbCopyDFile(SDFile* pSrc, SDFile* pDest) { + if (tfscopy(TSDB_FILE_F(pSrc), TSDB_FILE_F(pDest)) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + tsdbSetDFileInfo(pDest, TSDB_FILE_INFO(pSrc)); + return 0; +} + +// =============== SDFileSet +typedef struct { + int fid; + int state; + SDFile files[TSDB_FILE_MAX]; +} SDFileSet; + +#define TSDB_FSET_FID(s) ((s)->fid) +#define TSDB_DFILE_IN_SET(s, t) ((s)->files + (t)) +#define TSDB_FSET_LEVEL(s) TSDB_FILE_LEVEL(TSDB_DFILE_IN_SET(s, 0)) +#define TSDB_FSET_ID(s) TSDB_FILE_ID(TSDB_DFILE_IN_SET(s, 0)) +#define TSDB_FSET_SET_CLOSED(s) \ + do { \ + for (TSDB_FILE_T ftype = TSDB_FILE_HEAD; ftype < TSDB_FILE_MAX; ftype++) { \ + TSDB_FILE_SET_CLOSED(TSDB_DFILE_IN_SET(s, ftype)); \ + } \ + } while (0); +#define TSDB_FSET_FSYNC(s) \ + do { \ + for (TSDB_FILE_T ftype = TSDB_FILE_HEAD; ftype < TSDB_FILE_MAX; ftype++) { \ + TSDB_FILE_FSYNC(TSDB_DFILE_IN_SET(s, ftype)); \ + } \ + } while (0); + +void tsdbInitDFileSet(SDFileSet* pSet, SDiskID did, int vid, int fid, uint32_t ver); +void tsdbInitDFileSetEx(SDFileSet* pSet, SDFileSet* pOSet); +int tsdbEncodeDFileSet(void** buf, SDFileSet* pSet); +void* tsdbDecodeDFileSet(void* buf, SDFileSet* pSet); +int tsdbEncodeDFileSetEx(void** buf, SDFileSet* pSet); +void* tsdbDecodeDFileSetEx(void* buf, SDFileSet* pSet); +int tsdbApplyDFileSetChange(SDFileSet* from, SDFileSet* to); +int tsdbCreateDFileSet(SDFileSet* pSet, bool updateHeader); +int tsdbUpdateDFileSetHeader(SDFileSet* pSet); +int tsdbScanAndTryFixDFileSet(STsdbRepo *pRepo, SDFileSet* pSet); + +static FORCE_INLINE void tsdbCloseDFileSet(SDFileSet* pSet) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + tsdbCloseDFile(TSDB_DFILE_IN_SET(pSet, ftype)); + } +} + +static FORCE_INLINE int tsdbOpenDFileSet(SDFileSet* pSet, int flags) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + if (tsdbOpenDFile(TSDB_DFILE_IN_SET(pSet, ftype), flags) < 0) { + tsdbCloseDFileSet(pSet); + return -1; + } + } + return 0; +} + +static FORCE_INLINE void tsdbRemoveDFileSet(SDFileSet* pSet) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + tsdbRemoveDFile(TSDB_DFILE_IN_SET(pSet, ftype)); + } +} + +static FORCE_INLINE int tsdbCopyDFileSet(SDFileSet* pSrc, SDFileSet* pDest) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + if (tsdbCopyDFile(TSDB_DFILE_IN_SET(pSrc, ftype), TSDB_DFILE_IN_SET(pDest, ftype)) < 0) { + tsdbRemoveDFileSet(pDest); + return -1; + } + } + + return 0; +} + +static FORCE_INLINE void tsdbGetFidKeyRange(int days, int8_t precision, int fid, TSKEY* minKey, TSKEY* maxKey) { + *minKey = fid * days * tsMsPerDay[precision]; + *maxKey = *minKey + days * tsMsPerDay[precision] - 1; +} + +static FORCE_INLINE bool tsdbFSetIsOk(SDFileSet* pSet) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + if (TSDB_FILE_IS_BAD(TSDB_DFILE_IN_SET(pSet, ftype))) { + return false; + } + } + + return true; +} + +#endif /* _TS_TSDB_FILE_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbLog.h b/src/tsdb/inc/tsdbLog.h new file mode 100644 index 0000000000..fdd04e968a --- /dev/null +++ b/src/tsdb/inc/tsdbLog.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_TSDB_LOG_H_ +#define _TD_TSDB_LOG_H_ + +extern int32_t tsdbDebugFlag; + +#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TDB FATAL ", 255, __VA_ARGS__); }} while(0) +#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TDB ERROR ", 255, __VA_ARGS__); }} while(0) +#define tsdbWarn(...) do { if (tsdbDebugFlag & DEBUG_WARN) { taosPrintLog("TDB WARN ", 255, __VA_ARGS__); }} while(0) +#define tsdbInfo(...) do { if (tsdbDebugFlag & DEBUG_INFO) { taosPrintLog("TDB ", 255, __VA_ARGS__); }} while(0) +#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }} while(0) +#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }} while(0) + +#endif /* _TD_TSDB_LOG_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h deleted file mode 100644 index 05335b45d5..0000000000 --- a/src/tsdb/inc/tsdbMain.h +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ -#ifndef _TD_TSDB_MAIN_H_ -#define _TD_TSDB_MAIN_H_ - -#include "os.h" -#include "hash.h" -#include "tcoding.h" -#include "tglobal.h" -#include "tkvstore.h" -#include "tlist.h" -#include "tlog.h" -#include "tlockfree.h" -#include "tsdb.h" -#include "tskiplist.h" -#include "tutil.h" - -#ifdef __cplusplus -extern "C" { -#endif - -extern int32_t tsdbDebugFlag; - -#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TDB FATAL ", 255, __VA_ARGS__); }} while(0) -#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TDB ERROR ", 255, __VA_ARGS__); }} while(0) -#define tsdbWarn(...) do { if (tsdbDebugFlag & DEBUG_WARN) { taosPrintLog("TDB WARN ", 255, __VA_ARGS__); }} while(0) -#define tsdbInfo(...) do { if (tsdbDebugFlag & DEBUG_INFO) { taosPrintLog("TDB ", 255, __VA_ARGS__); }} while(0) -#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }} while(0) -#define tsdbTrace(...) 
do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TDB ", tsdbDebugFlag, __VA_ARGS__); }} while(0) - -#define TSDB_MAX_TABLE_SCHEMAS 16 -#define TSDB_FILE_HEAD_SIZE 512 -#define TSDB_FILE_DELIMITER 0xF00AFA0F -#define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF - -#define TAOS_IN_RANGE(key, keyMin, keyLast) (((key) >= (keyMin)) && ((key) <= (keyMax))) - -// NOTE: Any file format change must increase this version number by 1 -// Also, implement the convert function -#define TSDB_FILE_VERSION ((uint32_t)0) - -// Definitions -// ------------------ tsdbMeta.c -typedef struct STable { - STableId tableId; - ETableType type; - tstr* name; // NOTE: there a flexible string here - uint64_t suid; - struct STable* pSuper; // super table pointer - uint8_t numOfSchemas; - STSchema* schema[TSDB_MAX_TABLE_SCHEMAS]; - STSchema* tagSchema; - SKVRow tagVal; - SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index - void* eventHandler; // TODO - void* streamHandler; // TODO - TSKEY lastKey; - SDataRow lastRow; - char* sql; - void* cqhandle; - SRWLatch latch; // TODO: implementa latch functions - T_REF_DECLARE() -} STable; - -typedef struct { - pthread_rwlock_t rwLock; - - int32_t nTables; - int32_t maxTables; - STable** tables; - SList* superList; - SHashObj* uidMap; - SKVStore* pStore; - int maxRowBytes; - int maxCols; -} STsdbMeta; - -// ------------------ tsdbBuffer.c -typedef struct { - int64_t blockId; - int offset; - int remain; - char data[]; -} STsdbBufBlock; - -typedef struct { - pthread_cond_t poolNotEmpty; - int bufBlockSize; - int tBufBlocks; - int nBufBlocks; - int64_t index; - SList* bufBlockList; -} STsdbBufPool; - -// ------------------ tsdbMemTable.c -typedef struct { - STable * pTable; - SSkipListIterator *pIter; -} SCommitIter; - -typedef struct { - uint64_t uid; - TSKEY keyFirst; - TSKEY keyLast; - int64_t numOfRows; - SSkipList* pData; -} STableData; - -typedef struct { - T_REF_DECLARE() - SRWLatch latch; - TSKEY keyFirst; - TSKEY keyLast; - int64_t numOfRows; - int32_t maxTables; - STableData** tData; - SList* actList; - SList* extraBuffList; - SList* bufBlockList; -} SMemTable; - -enum { TSDB_UPDATE_META, TSDB_DROP_META }; - -#ifdef WINDOWS -#pragma pack(push ,1) -typedef struct { -#else -typedef struct __attribute__((packed)){ -#endif - char act; - uint64_t uid; -} SActObj; -#ifdef WINDOWS -#pragma pack(pop) -#endif - -typedef struct { - int len; - char cont[]; -} SActCont; - -// ------------------ tsdbFile.c -extern const char* tsdbFileSuffix[]; -typedef enum { - TSDB_FILE_TYPE_HEAD = 0, - TSDB_FILE_TYPE_DATA, - TSDB_FILE_TYPE_LAST, - TSDB_FILE_TYPE_STAT, - TSDB_FILE_TYPE_NHEAD, - TSDB_FILE_TYPE_NDATA, - TSDB_FILE_TYPE_NLAST, - TSDB_FILE_TYPE_NSTAT -} TSDB_FILE_TYPE; - -#ifndef TDINTERNAL -#define TSDB_FILE_TYPE_MAX (TSDB_FILE_TYPE_LAST+1) -#else -#define TSDB_FILE_TYPE_MAX (TSDB_FILE_TYPE_STAT+1) -#endif - -typedef struct { - uint32_t magic; - uint32_t len; - uint32_t totalBlocks; - uint32_t totalSubBlocks; - uint32_t offset; - uint64_t size; // total size of the file - uint64_t tombSize; // unused file size -} STsdbFileInfo; - -typedef struct { - char fname[TSDB_FILENAME_LEN]; - int fd; - - STsdbFileInfo info; -} SFile; - -typedef struct { - int fileId; - int state; // 0 for health, 1 for problem - SFile files[TSDB_FILE_TYPE_MAX]; -} SFileGroup; - -typedef struct { - pthread_rwlock_t fhlock; - - int maxFGroups; - int nFGroups; - SFileGroup* pFGroup; -} STsdbFileH; - -typedef struct { - int direction; - STsdbFileH* pFileH; - int fileId; - int index; -} SFileGroupIter; - -// 
------------------ tsdbMain.c -typedef struct { - int32_t totalLen; - int32_t len; - SDataRow row; -} SSubmitBlkIter; - -typedef struct { - int32_t totalLen; - int32_t len; - void * pMsg; -} SSubmitMsgIter; - -typedef struct { - int8_t state; - - char* rootDir; - STsdbCfg config; - STsdbAppH appH; - STsdbStat stat; - STsdbMeta* tsdbMeta; - STsdbBufPool* pPool; - SMemTable* mem; - SMemTable* imem; - STsdbFileH* tsdbFileH; - tsem_t readyToCommit; - pthread_mutex_t mutex; - bool repoLocked; - int32_t code; // Commit code -} STsdbRepo; - -// ------------------ tsdbRWHelper.c -typedef struct { - int32_t tid; - uint32_t len; - uint32_t offset; - uint32_t hasLast : 2; - uint32_t numOfBlocks : 30; - uint64_t uid; - TSKEY maxKey; -} SCompIdx; - -typedef struct { - int64_t last : 1; - int64_t offset : 63; - int32_t algorithm : 8; - int32_t numOfRows : 24; - int32_t len; - int32_t keyLen; // key column length, keyOffset = offset+sizeof(SCompData)+sizeof(SCompCol)*numOfCols - int16_t numOfSubBlocks; - int16_t numOfCols; // not including timestamp column - TSKEY keyFirst; - TSKEY keyLast; -} SCompBlock; - -typedef struct { - int32_t delimiter; // For recovery usage - int32_t tid; - uint64_t uid; - SCompBlock blocks[]; -} SCompInfo; - -typedef struct { - int16_t colId; - int32_t len; - int32_t type : 8; - int32_t offset : 24; - int64_t sum; - int64_t max; - int64_t min; - int16_t maxIndex; - int16_t minIndex; - int16_t numOfNull; - char padding[2]; -} SCompCol; - -typedef struct { - int32_t delimiter; // For recovery usage - int32_t numOfCols; // For recovery usage - uint64_t uid; // For recovery usage - SCompCol cols[]; -} SCompData; - -typedef enum { TSDB_WRITE_HELPER, TSDB_READ_HELPER } tsdb_rw_helper_t; - -typedef struct { - TSKEY minKey; - TSKEY maxKey; - SFileGroup fGroup; - SFile nHeadF; - SFile nLastF; -} SHelperFile; - -typedef struct { - uint64_t uid; - int32_t tid; -} SHelperTable; - -typedef struct { - SCompIdx* pIdxArray; - int numOfIdx; - int curIdx; -} SIdxH; - -typedef struct { - tsdb_rw_helper_t type; - - STsdbRepo* pRepo; - int8_t state; - // For file set usage - SHelperFile files; - SIdxH idxH; - SCompIdx curCompIdx; - void* pWIdx; - // For table set usage - SHelperTable tableInfo; - SCompInfo* pCompInfo; - bool hasOldLastBlock; - // For block set usage - SCompData* pCompData; - SDataCols* pDataCols[2]; - void* pBuffer; // Buffer to hold the whole data block - void* compBuffer; // Buffer for temperary compress/decompress purpose -} SRWHelper; - -typedef struct { - int rowsInserted; - int rowsUpdated; - int rowsDeleteSucceed; - int rowsDeleteFailed; - int nOperations; - TSKEY keyFirst; - TSKEY keyLast; -} SMergeInfo; -// ------------------ tsdbScan.c -typedef struct { - SFileGroup fGroup; - int numOfIdx; - SCompIdx* pCompIdx; - SCompInfo* pCompInfo; - void* pBuf; - FILE* tLogStream; -} STsdbScanHandle; - -// Operations -// ------------------ tsdbMeta.c -#define TSDB_INIT_NTABLES 1024 -#define TABLE_TYPE(t) (t)->type -#define TABLE_NAME(t) (t)->name -#define TABLE_CHAR_NAME(t) TABLE_NAME(t)->data -#define TABLE_UID(t) (t)->tableId.uid -#define TABLE_TID(t) (t)->tableId.tid -#define TABLE_SUID(t) (t)->suid -#define TSDB_META_FILE_MAGIC(m) KVSTORE_MAGIC((m)->pStore) -#define TSDB_RLOCK_TABLE(t) taosRLockLatch(&((t)->latch)) -#define TSDB_RUNLOCK_TABLE(t) taosRUnLockLatch(&((t)->latch)) -#define TSDB_WLOCK_TABLE(t) taosWLockLatch(&((t)->latch)) -#define TSDB_WUNLOCK_TABLE(t) taosWUnLockLatch(&((t)->latch)) - -STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg); -void tsdbFreeMeta(STsdbMeta* pMeta); 
-int tsdbOpenMeta(STsdbRepo* pRepo); -int tsdbCloseMeta(STsdbRepo* pRepo); -STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid); -STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version); -int tsdbWLockRepoMeta(STsdbRepo* pRepo); -int tsdbRLockRepoMeta(STsdbRepo* pRepo); -int tsdbUnlockRepoMeta(STsdbRepo* pRepo); -void tsdbRefTable(STable* pTable); -void tsdbUnRefTable(STable* pTable); -void tsdbUpdateTableSchema(STsdbRepo* pRepo, STable* pTable, STSchema* pSchema, bool insertAct); - -static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *key2) { - if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) { - return -1; - } else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) { - return 1; - } else { - return 0; - } -} - -static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) { - STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; - STSchema* pSchema = NULL; - STSchema* pTSchema = NULL; - - if (lock) TSDB_RLOCK_TABLE(pDTable); - if (version < 0) { // get the latest version of schema - pTSchema = pDTable->schema[pDTable->numOfSchemas - 1]; - } else { // get the schema with version - void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*), - tsdbCompareSchemaVersion, TD_EQ); - if (ptr == NULL) { - terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; - goto _exit; - } - pTSchema = *(STSchema**)ptr; - } - - ASSERT(pTSchema != NULL); - - if (copy) { - if ((pSchema = tdDupSchema(pTSchema)) == NULL) terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - } else { - pSchema = pTSchema; - } - -_exit: - if (lock) TSDB_RUNLOCK_TABLE(pDTable); - return pSchema; -} - -static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) { - return tsdbGetTableSchemaImpl(pTable, false, false, -1); -} - -static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) { - if (pTable->type == TSDB_CHILD_TABLE) { // check child table first - STable *pSuper = pTable->pSuper; - if (pSuper == NULL) return NULL; - return pSuper->tagSchema; - } else if (pTable->type == TSDB_SUPER_TABLE) { - return pTable->tagSchema; - } else { - return NULL; - } -} - -static FORCE_INLINE TSKEY tsdbGetTableLastKeyImpl(STable* pTable) { - ASSERT(pTable->lastRow == NULL || pTable->lastKey == dataRowKey(pTable->lastRow)); - return pTable->lastKey; -} - -// ------------------ tsdbBuffer.c -#define TSDB_BUFFER_RESERVE 1024 // Reseve 1K as commit threshold - -STsdbBufPool* tsdbNewBufPool(); -void tsdbFreeBufPool(STsdbBufPool* pBufPool); -int tsdbOpenBufPool(STsdbRepo* pRepo); -void tsdbCloseBufPool(STsdbRepo* pRepo); -SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); - -// ------------------ tsdbMemTable.c -int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); -int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); -int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem); -void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem); -void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes); -int tsdbAsyncCommit(STsdbRepo* pRepo); -int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols, - TKEY* filterKeys, int nFilterKeys, bool keepDup, SMergeInfo* pMergeInfo); -void* tsdbCommitData(STsdbRepo* pRepo); - -static FORCE_INLINE SDataRow tsdbNextIterRow(SSkipListIterator* pIter) { - if (pIter == NULL) return NULL; - - SSkipListNode* node = tSkipListIterGet(pIter); - if 
(node == NULL) return NULL; - - return (SDataRow)SL_GET_NODE_DATA(node); -} - -static FORCE_INLINE TSKEY tsdbNextIterKey(SSkipListIterator* pIter) { - SDataRow row = tsdbNextIterRow(pIter); - if (row == NULL) return TSDB_DATA_TIMESTAMP_NULL; - - return dataRowKey(row); -} - -static FORCE_INLINE TKEY tsdbNextIterTKey(SSkipListIterator* pIter) { - SDataRow row = tsdbNextIterRow(pIter); - if (row == NULL) return TKEY_NULL; - - return dataRowTKey(row); -} - -static FORCE_INLINE STsdbBufBlock* tsdbGetCurrBufBlock(STsdbRepo* pRepo) { - ASSERT(pRepo != NULL); - if (pRepo->mem == NULL) return NULL; - - SListNode* pNode = listTail(pRepo->mem->bufBlockList); - if (pNode == NULL) return NULL; - - STsdbBufBlock* pBufBlock = NULL; - tdListNodeGetData(pRepo->mem->bufBlockList, pNode, (void*)(&pBufBlock)); - - return pBufBlock; -} - -// ------------------ tsdbFile.c -#define TSDB_KEY_FILEID(key, daysPerFile, precision) ((key) / tsMsPerDay[(precision)] / (daysPerFile)) -#define TSDB_MAX_FILE(keep, daysPerFile) ((keep) / (daysPerFile) + 3) -#define TSDB_MIN_FILE_ID(fh) (fh)->pFGroup[0].fileId -#define TSDB_MAX_FILE_ID(fh) (fh)->pFGroup[(fh)->nFGroups - 1].fileId -#define TSDB_IS_FILE_OPENED(f) ((f)->fd > 0) -#define TSDB_FGROUP_ITER_FORWARD TSDB_ORDER_ASC -#define TSDB_FGROUP_ITER_BACKWARD TSDB_ORDER_DESC - -STsdbFileH* tsdbNewFileH(STsdbCfg* pCfg); -void tsdbFreeFileH(STsdbFileH* pFileH); -int tsdbOpenFileH(STsdbRepo* pRepo); -void tsdbCloseFileH(STsdbRepo* pRepo); -SFileGroup* tsdbCreateFGroupIfNeed(STsdbRepo* pRepo, char* dataDir, int fid); -void tsdbInitFileGroupIter(STsdbFileH* pFileH, SFileGroupIter* pIter, int direction); -void tsdbSeekFileGroupIter(SFileGroupIter* pIter, int fid); -SFileGroup* tsdbGetFileGroupNext(SFileGroupIter* pIter); -int tsdbOpenFile(SFile* pFile, int oflag); -void tsdbCloseFile(SFile* pFile); -int tsdbCreateFile(SFile* pFile, STsdbRepo* pRepo, int fid, int type); -SFileGroup* tsdbSearchFGroup(STsdbFileH* pFileH, int fid, int flags); -void tsdbFitRetention(STsdbRepo* pRepo); -int tsdbUpdateFileHeader(SFile* pFile); -int tsdbEncodeSFileInfo(void** buf, const STsdbFileInfo* pInfo); -void* tsdbDecodeSFileInfo(void* buf, STsdbFileInfo* pInfo); -void tsdbRemoveFileGroup(STsdbRepo* pRepo, SFileGroup* pFGroup); -int tsdbLoadFileHeader(SFile* pFile, uint32_t* version); -void tsdbGetFileInfoImpl(char* fname, uint32_t* magic, int64_t* size); -void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey); - -// ------------------ tsdbRWHelper.c -#define TSDB_HELPER_CLEAR_STATE 0x0 // Clear state -#define TSDB_HELPER_FILE_SET_AND_OPEN 0x1 // File is set -#define TSDB_HELPER_IDX_LOAD 0x2 // SCompIdx part is loaded -#define TSDB_HELPER_TABLE_SET 0x4 // Table is set -#define TSDB_HELPER_INFO_LOAD 0x8 // SCompInfo part is loaded -#define TSDB_HELPER_FILE_DATA_LOAD 0x10 // SCompData part is loaded -#define helperSetState(h, s) (((h)->state) |= (s)) -#define helperClearState(h, s) ((h)->state &= (~(s))) -#define helperHasState(h, s) ((((h)->state) & (s)) == (s)) -#define blockAtIdx(h, idx) ((h)->pCompInfo->blocks + idx) -#define TSDB_MAX_SUBBLOCKS 8 -#define IS_SUB_BLOCK(pBlock) ((pBlock)->numOfSubBlocks == 0) -#define helperType(h) (h)->type -#define helperRepo(h) (h)->pRepo -#define helperState(h) (h)->state -#define TSDB_NLAST_FILE_OPENED(h) ((h)->files.nLastF.fd > 0) -#define helperFileId(h) ((h)->files.fGroup.fileId) -#define helperHeadF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_HEAD])) -#define helperDataF(h) 
(&((h)->files.fGroup.files[TSDB_FILE_TYPE_DATA])) -#define helperLastF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_LAST])) -#define helperNewHeadF(h) (&((h)->files.nHeadF)) -#define helperNewLastF(h) (&((h)->files.nLastF)) - -int tsdbInitReadHelper(SRWHelper* pHelper, STsdbRepo* pRepo); -int tsdbInitWriteHelper(SRWHelper* pHelper, STsdbRepo* pRepo); -void tsdbDestroyHelper(SRWHelper* pHelper); -void tsdbResetHelper(SRWHelper* pHelper); -int tsdbSetAndOpenHelperFile(SRWHelper* pHelper, SFileGroup* pGroup); -int tsdbCloseHelperFile(SRWHelper* pHelper, bool hasError, SFileGroup* pGroup); -int tsdbSetHelperTable(SRWHelper* pHelper, STable* pTable, STsdbRepo* pRepo); -int tsdbCommitTableData(SRWHelper* pHelper, SCommitIter* pCommitIter, SDataCols* pDataCols, TSKEY maxKey); -int tsdbMoveLastBlockIfNeccessary(SRWHelper* pHelper); -int tsdbWriteCompInfo(SRWHelper* pHelper); -int tsdbWriteCompIdx(SRWHelper* pHelper); -int tsdbLoadCompIdxImpl(SFile* pFile, uint32_t offset, uint32_t len, void* buffer); -int tsdbDecodeSCompIdxImpl(void* buffer, uint32_t len, SCompIdx** ppCompIdx, int* numOfIdx); -int tsdbLoadCompIdx(SRWHelper* pHelper, void* target); -int tsdbLoadCompInfoImpl(SFile* pFile, SCompIdx* pIdx, SCompInfo** ppCompInfo); -int tsdbLoadCompInfo(SRWHelper* pHelper, void* target); -int tsdbLoadCompData(SRWHelper* phelper, SCompBlock* pcompblock, void* target); -void tsdbGetDataStatis(SRWHelper* pHelper, SDataStatis* pStatis, int numOfCols); -int tsdbLoadBlockDataCols(SRWHelper* pHelper, SCompBlock* pCompBlock, SCompInfo* pCompInfo, int16_t* colIds, - int numOfColIds); -int tsdbLoadBlockData(SRWHelper* pHelper, SCompBlock* pCompBlock, SCompInfo* pCompInfo); - -static FORCE_INLINE int compTSKEY(const void* key1, const void* key2) { - if (*(TSKEY*)key1 > *(TSKEY*)key2) { - return 1; - } else if (*(TSKEY*)key1 == *(TSKEY*)key2) { - return 0; - } else { - return -1; - } -} - -// ------------------ tsdbMain.c -#define REPO_ID(r) (r)->config.tsdbId -#define IS_REPO_LOCKED(r) (r)->repoLocked -#define TSDB_SUBMIT_MSG_HEAD_SIZE sizeof(SSubmitMsg) - -char* tsdbGetMetaFileName(char* rootDir); -void tsdbGetDataFileName(char* rootDir, int vid, int fid, int type, char* fname); -int tsdbLockRepo(STsdbRepo* pRepo); -int tsdbUnlockRepo(STsdbRepo* pRepo); -char* tsdbGetDataDirName(char* rootDir); -int tsdbGetNextMaxTables(int tid); -STsdbMeta* tsdbGetMeta(TSDB_REPO_T* pRepo); -STsdbFileH* tsdbGetFile(TSDB_REPO_T* pRepo); -int tsdbCheckCommit(STsdbRepo* pRepo); - -// ------------------ tsdbScan.c -int tsdbScanFGroup(STsdbScanHandle* pScanHandle, char* rootDir, int fid); -STsdbScanHandle* tsdbNewScanHandle(); -void tsdbSetScanLogStream(STsdbScanHandle* pScanHandle, FILE* fLogStream); -int tsdbSetAndOpenScanFile(STsdbScanHandle* pScanHandle, char* rootDir, int fid); -int tsdbScanSCompIdx(STsdbScanHandle* pScanHandle); -int tsdbScanSCompBlock(STsdbScanHandle* pScanHandle, int idx); -int tsdbCloseScanFile(STsdbScanHandle* pScanHandle); -void tsdbFreeScanHandle(STsdbScanHandle* pScanHandle); - -// ------------------ tsdbCommitQueue.c -int tsdbScheduleCommit(STsdbRepo *pRepo); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/tsdb/inc/tsdbMemTable.h b/src/tsdb/inc/tsdbMemTable.h new file mode 100644 index 0000000000..3b3f1dd1f6 --- /dev/null +++ b/src/tsdb/inc/tsdbMemTable.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_TSDB_MEMTABLE_H_ +#define _TD_TSDB_MEMTABLE_H_ + +typedef struct { + int rowsInserted; + int rowsUpdated; + int rowsDeleteSucceed; + int rowsDeleteFailed; + int nOperations; + TSKEY keyFirst; + TSKEY keyLast; +} SMergeInfo; + +typedef struct { + STable * pTable; + SSkipListIterator *pIter; +} SCommitIter; + +typedef struct { + uint64_t uid; + TSKEY keyFirst; + TSKEY keyLast; + int64_t numOfRows; + SSkipList* pData; +} STableData; + +typedef struct { + T_REF_DECLARE() + SRWLatch latch; + TSKEY keyFirst; + TSKEY keyLast; + int64_t numOfRows; + int32_t maxTables; + STableData** tData; + SList* actList; + SList* extraBuffList; + SList* bufBlockList; + int64_t pointsAdd; // TODO + int64_t storageAdd; // TODO +} SMemTable; + +enum { TSDB_UPDATE_META, TSDB_DROP_META }; + +#ifdef WINDOWS +#pragma pack(push ,1) +typedef struct { +#else +typedef struct __attribute__((packed)){ +#endif + char act; + uint64_t uid; +} SActObj; +#ifdef WINDOWS +#pragma pack(pop) +#endif + +typedef struct { + int len; + char cont[]; +} SActCont; + +int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); +int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); +int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem); +void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem); +void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes); +int tsdbAsyncCommit(STsdbRepo* pRepo); +int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols, + TKEY* filterKeys, int nFilterKeys, bool keepDup, SMergeInfo* pMergeInfo); +void* tsdbCommitData(STsdbRepo* pRepo); + +static FORCE_INLINE SDataRow tsdbNextIterRow(SSkipListIterator* pIter) { + if (pIter == NULL) return NULL; + + SSkipListNode* node = tSkipListIterGet(pIter); + if (node == NULL) return NULL; + + return (SDataRow)SL_GET_NODE_DATA(node); +} + +static FORCE_INLINE TSKEY tsdbNextIterKey(SSkipListIterator* pIter) { + SDataRow row = tsdbNextIterRow(pIter); + if (row == NULL) return TSDB_DATA_TIMESTAMP_NULL; + + return dataRowKey(row); +} + +static FORCE_INLINE TKEY tsdbNextIterTKey(SSkipListIterator* pIter) { + SDataRow row = tsdbNextIterRow(pIter); + if (row == NULL) return TKEY_NULL; + + return dataRowTKey(row); +} + +#endif /* _TD_TSDB_MEMTABLE_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h new file mode 100644 index 0000000000..cc916fa689 --- /dev/null +++ b/src/tsdb/inc/tsdbMeta.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_TSDB_META_H_ +#define _TD_TSDB_META_H_ + +#define TSDB_MAX_TABLE_SCHEMAS 16 + +typedef struct STable { + STableId tableId; + ETableType type; + tstr* name; // NOTE: there a flexible string here + uint64_t suid; + struct STable* pSuper; // super table pointer + uint8_t numOfSchemas; + STSchema* schema[TSDB_MAX_TABLE_SCHEMAS]; + STSchema* tagSchema; + SKVRow tagVal; + SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index + void* eventHandler; // TODO + void* streamHandler; // TODO + TSKEY lastKey; + SDataRow lastRow; + char* sql; + void* cqhandle; + SRWLatch latch; // TODO: implementa latch functions + T_REF_DECLARE() +} STable; + +typedef struct { + pthread_rwlock_t rwLock; + + int32_t nTables; + int32_t maxTables; + STable** tables; + SList* superList; + SHashObj* uidMap; + int maxRowBytes; + int maxCols; +} STsdbMeta; + +#define TSDB_INIT_NTABLES 1024 +#define TABLE_TYPE(t) (t)->type +#define TABLE_NAME(t) (t)->name +#define TABLE_CHAR_NAME(t) TABLE_NAME(t)->data +#define TABLE_UID(t) (t)->tableId.uid +#define TABLE_TID(t) (t)->tableId.tid +#define TABLE_SUID(t) (t)->suid +// #define TSDB_META_FILE_MAGIC(m) KVSTORE_MAGIC((m)->pStore) +#define TSDB_RLOCK_TABLE(t) taosRLockLatch(&((t)->latch)) +#define TSDB_RUNLOCK_TABLE(t) taosRUnLockLatch(&((t)->latch)) +#define TSDB_WLOCK_TABLE(t) taosWLockLatch(&((t)->latch)) +#define TSDB_WUNLOCK_TABLE(t) taosWUnLockLatch(&((t)->latch)) + +STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg); +void tsdbFreeMeta(STsdbMeta* pMeta); +int tsdbOpenMeta(STsdbRepo* pRepo); +int tsdbCloseMeta(STsdbRepo* pRepo); +STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid); +STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version); +int tsdbWLockRepoMeta(STsdbRepo* pRepo); +int tsdbRLockRepoMeta(STsdbRepo* pRepo); +int tsdbUnlockRepoMeta(STsdbRepo* pRepo); +void tsdbRefTable(STable* pTable); +void tsdbUnRefTable(STable* pTable); +void tsdbUpdateTableSchema(STsdbRepo* pRepo, STable* pTable, STSchema* pSchema, bool insertAct); +int tsdbRestoreTable(STsdbRepo* pRepo, void* cont, int contLen); +void tsdbOrgMeta(STsdbRepo* pRepo); + +static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *key2) { + if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) { + return -1; + } else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) { + return 1; + } else { + return 0; + } +} + +static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) { + STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? 
pTable->pSuper : pTable; + STSchema* pSchema = NULL; + STSchema* pTSchema = NULL; + + if (lock) TSDB_RLOCK_TABLE(pDTable); + if (version < 0) { // get the latest version of schema + pTSchema = pDTable->schema[pDTable->numOfSchemas - 1]; + } else { // get the schema with version + void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*), + tsdbCompareSchemaVersion, TD_EQ); + if (ptr == NULL) { + terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; + goto _exit; + } + pTSchema = *(STSchema**)ptr; + } + + ASSERT(pTSchema != NULL); + + if (copy) { + if ((pSchema = tdDupSchema(pTSchema)) == NULL) terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + } else { + pSchema = pTSchema; + } + +_exit: + if (lock) TSDB_RUNLOCK_TABLE(pDTable); + return pSchema; +} + +static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) { + return tsdbGetTableSchemaImpl(pTable, false, false, -1); +} + +static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) { + if (pTable->type == TSDB_CHILD_TABLE) { // check child table first + STable *pSuper = pTable->pSuper; + if (pSuper == NULL) return NULL; + return pSuper->tagSchema; + } else if (pTable->type == TSDB_SUPER_TABLE) { + return pTable->tagSchema; + } else { + return NULL; + } +} + +static FORCE_INLINE TSKEY tsdbGetTableLastKeyImpl(STable* pTable) { + ASSERT(pTable->lastRow == NULL || pTable->lastKey == dataRowKey(pTable->lastRow)); + return pTable->lastKey; +} + +#endif /* _TD_TSDB_META_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbReadImpl.h b/src/tsdb/inc/tsdbReadImpl.h new file mode 100644 index 0000000000..0efbcc55bb --- /dev/null +++ b/src/tsdb/inc/tsdbReadImpl.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_TSDB_READ_IMPL_H_ +#define _TD_TSDB_READ_IMPL_H_ + +typedef struct SReadH SReadH; + +typedef struct { + int32_t tid; + uint32_t len; + uint32_t offset; + uint32_t hasLast : 2; + uint32_t numOfBlocks : 30; + uint64_t uid; + TSKEY maxKey; +} SBlockIdx; + +typedef struct { + int64_t last : 1; + int64_t offset : 63; + int32_t algorithm : 8; + int32_t numOfRows : 24; + int32_t len; + int32_t keyLen; // key column length, keyOffset = offset+sizeof(SBlockData)+sizeof(SBlockCol)*numOfCols + int16_t numOfSubBlocks; + int16_t numOfCols; // not including timestamp column + TSKEY keyFirst; + TSKEY keyLast; +} SBlock; + +typedef struct { + int32_t delimiter; // For recovery usage + int32_t tid; + uint64_t uid; + SBlock blocks[]; +} SBlockInfo; + +typedef struct { + int16_t colId; + int32_t len; + int32_t type : 8; + int32_t offset : 24; + int64_t sum; + int64_t max; + int64_t min; + int16_t maxIndex; + int16_t minIndex; + int16_t numOfNull; + char padding[2]; +} SBlockCol; + +typedef struct { + int32_t delimiter; // For recovery usage + int32_t numOfCols; // For recovery usage + uint64_t uid; // For recovery usage + SBlockCol cols[]; +} SBlockData; + +struct SReadH { + STsdbRepo * pRepo; + SDFileSet rSet; // FSET to read + SArray * aBlkIdx; // SBlockIdx array + STable * pTable; // table to read + SBlockIdx * pBlkIdx; // current reading table SBlockIdx + int cidx; + SBlockInfo *pBlkInfo; + SBlockData *pBlkData; // Block info + SDataCols * pDCols[2]; + void * pBuf; // buffer + void * pCBuf; // compression buffer +}; + +#define TSDB_READ_REPO(rh) ((rh)->pRepo) +#define TSDB_READ_REPO_ID(rh) REPO_ID(TSDB_READ_REPO(rh)) +#define TSDB_READ_FSET(rh) (&((rh)->rSet)) +#define TSDB_READ_TABLE(rh) ((rh)->pTable) +#define TSDB_READ_HEAD_FILE(rh) TSDB_DFILE_IN_SET(TSDB_READ_FSET(rh), TSDB_FILE_HEAD) +#define TSDB_READ_DATA_FILE(rh) TSDB_DFILE_IN_SET(TSDB_READ_FSET(rh), TSDB_FILE_DATA) +#define TSDB_READ_LAST_FILE(rh) TSDB_DFILE_IN_SET(TSDB_READ_FSET(rh), TSDB_FILE_LAST) +#define TSDB_READ_BUF(rh) ((rh)->pBuf) +#define TSDB_READ_COMP_BUF(rh) ((rh)->pCBuf) + +#define TSDB_BLOCK_STATIS_SIZE(ncols) (sizeof(SBlockData) + sizeof(SBlockCol) * (ncols) + sizeof(TSCKSUM)) + +int tsdbInitReadH(SReadH *pReadh, STsdbRepo *pRepo); +void tsdbDestroyReadH(SReadH *pReadh); +int tsdbSetAndOpenReadFSet(SReadH *pReadh, SDFileSet *pSet); +void tsdbCloseAndUnsetFSet(SReadH *pReadh); +int tsdbLoadBlockIdx(SReadH *pReadh); +int tsdbSetReadTable(SReadH *pReadh, STable *pTable); +int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget); +int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlockInfo); +int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, int16_t *colIds, int numOfColsIds); +int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock); +int tsdbEncodeSBlockIdx(void **buf, SBlockIdx *pIdx); +void *tsdbDecodeSBlockIdx(void *buf, SBlockIdx *pIdx); +void tsdbGetBlockStatis(SReadH *pReadh, SDataStatis *pStatis, int numOfCols); + +static FORCE_INLINE int tsdbMakeRoom(void **ppBuf, size_t size) { + void * pBuf = *ppBuf; + size_t tsize = taosTSizeof(pBuf); + + if (tsize < size) { + if (tsize == 0) tsize = 1024; + + while (tsize < size) { + tsize *= 2; + } + + *ppBuf = taosTRealloc(pBuf, tsize); + if (*ppBuf == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + } + + return 0; +} + +#endif /*_TD_TSDB_READ_IMPL_H_*/ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h new file mode 100644 index 0000000000..074ff20f22 --- /dev/null 
+++ b/src/tsdb/inc/tsdbint.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_TSDB_INT_H_ +#define _TD_TSDB_INT_H_ + +// // TODO: remove the include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +#include "os.h" +#include "tlog.h" +#include "taosdef.h" +#include "taoserror.h" +#include "tchecksum.h" +#include "tskiplist.h" +#include "tdataformat.h" +#include "tcoding.h" +#include "tscompression.h" +#include "tlockfree.h" +#include "tlist.h" +#include "hash.h" +#include "tarray.h" +#include "tfs.h" +#include "tsocket.h" + +#include "tsdb.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// Log +#include "tsdbLog.h" +// Meta +#include "tsdbMeta.h" +// Buffer +#include "tsdbBuffer.h" +// MemTable +#include "tsdbMemTable.h" +// File +#include "tsdbFile.h" +// FS +#include "tsdbFS.h" +// ReadImpl +#include "tsdbReadImpl.h" +// Commit +#include "tsdbCommit.h" +// Commit Queue +#include "tsdbCommitQueue.h" +// Main definitions +struct STsdbRepo { + uint8_t state; + + STsdbCfg config; + STsdbAppH appH; + STsdbStat stat; + STsdbMeta* tsdbMeta; + STsdbBufPool* pPool; + SMemTable* mem; + SMemTable* imem; + STsdbFS* fs; + tsem_t readyToCommit; + pthread_mutex_t mutex; + bool repoLocked; + int32_t code; // Commit code +}; + +#define REPO_ID(r) (r)->config.tsdbId +#define REPO_CFG(r) (&((r)->config)) +#define REPO_FS(r) ((r)->fs) +#define IS_REPO_LOCKED(r) (r)->repoLocked +#define TSDB_SUBMIT_MSG_HEAD_SIZE sizeof(SSubmitMsg) + +int tsdbLockRepo(STsdbRepo* pRepo); +int tsdbUnlockRepo(STsdbRepo* pRepo); +STsdbMeta* tsdbGetMeta(STsdbRepo* pRepo); +int tsdbCheckCommit(STsdbRepo* pRepo); +int tsdbRestoreInfo(STsdbRepo* pRepo); +void tsdbGetRootDir(int repoid, char dirName[]); +void tsdbGetDataDir(int repoid, char dirName[]); + +static FORCE_INLINE STsdbBufBlock* tsdbGetCurrBufBlock(STsdbRepo* pRepo) { + ASSERT(pRepo != NULL); + if (pRepo->mem == NULL) return NULL; + + SListNode* pNode = listTail(pRepo->mem->bufBlockList); + if (pNode == NULL) return NULL; + + STsdbBufBlock* pBufBlock = NULL; + tdListNodeGetData(pRepo->mem->bufBlockList, pNode, (void*)(&pBufBlock)); + + return pBufBlock; +} + +static FORCE_INLINE int tsdbGetNextMaxTables(int tid) { + ASSERT(tid >= 1 && tid <= TSDB_MAX_TABLES); + int maxTables = TSDB_INIT_NTABLES; + while (true) { + maxTables = MIN(maxTables, TSDB_MAX_TABLES); + if (tid <= maxTables) break; + maxTables *= 2; + } + + return maxTables + 1; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _TD_TSDB_INT_H_ */ \ No newline at end of file diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index 7cea27658c..1798a21b99 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -13,8 +13,7 @@ * along with this program. If not, see . 
*/ -#include "tsdb.h" -#include "tsdbMain.h" +#include "tsdbint.h" #define POOL_IS_EMPTY(b) (listNEles((b)->bufBlockList) == 0) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index cd8358e5e3..a777b11186 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -12,23 +12,79 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ -#include "tsdbMain.h" +#include "tsdbint.h" + +#define TSDB_MAX_SUBBLOCKS 8 +#define TSDB_KEY_FID(key, days, precision) ((key) / tsMsPerDay[(precision)] / (days)) + +typedef struct { + SRtn rtn; // retention snapshot + SFSIter fsIter; // tsdb file iterator + int niters; // memory iterators + SCommitIter *iters; + bool isRFileSet; // read and commit FSET + SReadH readh; + SDFileSet wSet; + bool isDFileSame; + bool isLFileSame; + TSKEY minKey; + TSKEY maxKey; + SArray * aBlkIdx; // SBlockIdx array + STable * pTable; + SArray * aSupBlk; // Table super-block array + SArray * aSubBlk; // table sub-block array + SDataCols * pDataCols; +} SCommitH; + +#define TSDB_COMMIT_REPO(ch) TSDB_READ_REPO(&(ch->readh)) +#define TSDB_COMMIT_REPO_ID(ch) REPO_ID(TSDB_READ_REPO(&(ch->readh))) +#define TSDB_COMMIT_WRITE_FSET(ch) (&((ch)->wSet)) +#define TSDB_COMMIT_TABLE(ch) ((ch)->pTable) +#define TSDB_COMMIT_HEAD_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_HEAD) +#define TSDB_COMMIT_DATA_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_DATA) +#define TSDB_COMMIT_LAST_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_LAST) +#define TSDB_COMMIT_BUF(ch) TSDB_READ_BUF(&((ch)->readh)) +#define TSDB_COMMIT_COMP_BUF(ch) TSDB_READ_COMP_BUF(&((ch)->readh)) +#define TSDB_COMMIT_DEFAULT_ROWS(ch) (TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock * 4 / 5) +#define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch))) -static int tsdbCommitTSData(STsdbRepo *pRepo); static int tsdbCommitMeta(STsdbRepo *pRepo); +static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen); +static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid); +static int tsdbCommitTSData(STsdbRepo *pRepo); +static void tsdbStartCommit(STsdbRepo *pRepo); static void tsdbEndCommit(STsdbRepo *pRepo, int eno); -static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey); -static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols); -static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo); -static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables); +static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid); +static int tsdbCreateCommitIters(SCommitH *pCommith); +static void tsdbDestroyCommitIters(SCommitH *pCommith); +static void tsdbSeekCommitIter(SCommitH *pCommith, TSKEY key); +static int tsdbInitCommitH(SCommitH *pCommith, STsdbRepo *pRepo); +static void tsdbDestroyCommitH(SCommitH *pCommith); +static int tsdbGetFidLevel(int fid, SRtn *pRtn); +static int tsdbNextCommitFid(SCommitH *pCommith); +static int tsdbCommitToTable(SCommitH *pCommith, int tid); +static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable); +static int tsdbComparKeyBlock(const void *arg1, const void *arg2); +static int tsdbWriteBlockInfo(SCommitH *pCommih); +static int tsdbWriteBlockIdx(SCommitH *pCommih); +static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData); 
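// A minimal sketch of the arithmetic behind the TSDB_KEY_FID macro defined above,
// assuming millisecond precision so that tsMsPerDay[precision] == 86400000; the
// name exampleKeyToFid is hypothetical and used only for illustration, not part
// of this changeset. With daysPerFile = 10, a key falling on day 25 after the
// epoch maps to fid 2, the file set covering days [20, 30).
static int exampleKeyToFid(TSKEY key, int daysPerFile) {
  // Hypothetical illustration of TSDB_KEY_FID(key, daysPerFile, precision) for
  // millisecond precision; integer division truncates toward zero.
  return (int)(key / 86400000LL / daysPerFile);
}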
+static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx); +static int tsdbMoveBlock(SCommitH *pCommith, int bidx); +static int tsdbCommitAddBlock(SCommitH *pCommith, const SBlock *pSupBlock, const SBlock *pSubBlocks, int nSubBlocks); +static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols *pDataCols, TSKEY keyLimit, + bool isLastOneBlock); +static void tsdbResetCommitFile(SCommitH *pCommith); +static void tsdbResetCommitTable(SCommitH *pCommith); +static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid); +static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError); +static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo); +static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, + TSKEY maxKey, int maxRows, int8_t update); +static int tsdbApplyRtn(STsdbRepo *pRepo); +static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn); void *tsdbCommitData(STsdbRepo *pRepo) { - SMemTable * pMem = pRepo->imem; - - tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64 " meta rows: %d", - REPO_ID(pRepo), pMem->keyFirst, pMem->keyLast, pMem->numOfRows, listNEles(pMem->actList)); - - pRepo->code = TSDB_CODE_SUCCESS; + tsdbStartCommit(pRepo); // Commit to update meta file if (tsdbCommitMeta(pRepo) < 0) { @@ -42,125 +98,300 @@ void *tsdbCommitData(STsdbRepo *pRepo) { goto _err; } - tsdbFitRetention(pRepo); - - tsdbInfo("vgId:%d commit over, succeed", REPO_ID(pRepo)); tsdbEndCommit(pRepo, TSDB_CODE_SUCCESS); - return NULL; _err: ASSERT(terrno != TSDB_CODE_SUCCESS); pRepo->code = terrno; - tsdbInfo("vgId:%d commit over, failed", REPO_ID(pRepo)); - tsdbEndCommit(pRepo, terrno); + tsdbEndCommit(pRepo, terrno); return NULL; } -static int tsdbCommitTSData(STsdbRepo *pRepo) { - SMemTable * pMem = pRepo->imem; - SDataCols * pDataCols = NULL; - STsdbMeta * pMeta = pRepo->tsdbMeta; - SCommitIter *iters = NULL; - SRWHelper whelper = {0}; - STsdbCfg * pCfg = &(pRepo->config); +// =================== Commit Meta Data +static int tsdbCommitMeta(STsdbRepo *pRepo) { + STsdbFS * pfs = REPO_FS(pRepo); + SMemTable *pMem = pRepo->imem; + SMFile * pOMFile = pfs->cstatus->pmf; + SMFile mf; + SActObj * pAct = NULL; + SActCont * pCont = NULL; + SListNode *pNode = NULL; + SDiskID did; - if (pMem->numOfRows <= 0) return 0; + ASSERT(pOMFile != NULL || listNEles(pMem->actList) > 0); - iters = tsdbCreateCommitIters(pRepo); - if (iters == NULL) { - tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } + if (listNEles(pMem->actList) <= 0) { + // no meta data to commit, just keep the old meta file + tsdbUpdateMFile(pfs, pOMFile); + return 0; + } else { + // Create/Open a meta file or open the existing file + if (pOMFile == NULL) { + // Create a new meta file + did.level = TFS_PRIMARY_LEVEL; + did.id = TFS_PRIMARY_ID; + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); - if (tsdbInitWriteHelper(&whelper, pRepo) < 0) { - tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } + if (tsdbCreateMFile(&mf, true) < 0) { + tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } - if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - 
tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s", - REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno)); - goto _err; - } - - int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision)); - int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision)); - - // Loop to commit to each file - for (int fid = sfid; fid <= efid; fid++) { - if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) { - tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _err; + tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf)); + } else { + tsdbInitMFileEx(&mf, pOMFile); + if (tsdbOpenMFile(&mf, O_WRONLY) < 0) { + tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } } } - tdFreeDataCols(pDataCols); - tsdbDestroyCommitIters(iters, pMem->maxTables); - tsdbDestroyHelper(&whelper); - - return 0; - -_err: - tdFreeDataCols(pDataCols); - tsdbDestroyCommitIters(iters, pMem->maxTables); - tsdbDestroyHelper(&whelper); - - return -1; -} - -static int tsdbCommitMeta(STsdbRepo *pRepo) { - SMemTable *pMem = pRepo->imem; - STsdbMeta *pMeta = pRepo->tsdbMeta; - SActObj * pAct = NULL; - SActCont * pCont = NULL; - - if (listNEles(pMem->actList) <= 0) return 0; - - if (tdKVStoreStartCommit(pMeta->pStore) < 0) { - tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - SListNode *pNode = NULL; - + // Loop to write while ((pNode = tdListPopHead(pMem->actList)) != NULL) { pAct = (SActObj *)pNode->data; if (pAct->act == TSDB_UPDATE_META) { pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); - if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { - tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, + if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { + tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, tstrerror(terrno)); - tdKVStoreEndCommit(pMeta->pStore); - goto _err; + tsdbCloseMFile(&mf); + tsdbApplyMFileChange(&mf, pOMFile); + // TODO: need to reload metaCache + return -1; } } else if (pAct->act == TSDB_DROP_META) { - if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) { - tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, + if (tsdbDropMetaRecord(pfs, &mf, pAct->uid) < 0) { + tsdbError("vgId:%d failed to drop META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, tstrerror(terrno)); - tdKVStoreEndCommit(pMeta->pStore); - goto _err; + tsdbCloseMFile(&mf); + tsdbApplyMFileChange(&mf, pOMFile); + // TODO: need to reload metaCache + return -1; } } else { ASSERT(false); } } - if (tdKVStoreEndCommit(pMeta->pStore) < 0) { - tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + if (tsdbUpdateMFileHeader(&mf) < 0) { + tsdbError("vgId:%d failed to update META file header since %s, revert it", REPO_ID(pRepo), tstrerror(terrno)); + tsdbApplyMFileChange(&mf, pOMFile); + // TODO: need to reload metaCache + return -1; } - return 0; + TSDB_FILE_FSYNC(&mf); + tsdbCloseMFile(&mf); + tsdbUpdateMFile(pfs, &mf); -_err: - return -1; + return 0; +} + +int 
tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord) {
+  int tlen = 0;
+  tlen += taosEncodeFixedU64(buf, pRecord->uid);
+  tlen += taosEncodeFixedI64(buf, pRecord->offset);
+  tlen += taosEncodeFixedI64(buf, pRecord->size);
+
+  return tlen;
+}
+
+void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord) {
+  buf = taosDecodeFixedU64(buf, &(pRecord->uid));
+  buf = taosDecodeFixedI64(buf, &(pRecord->offset));
+  buf = taosDecodeFixedI64(buf, &(pRecord->size));
+
+  return buf;
+}
+
+void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) {
+  STsdbCfg *pCfg = REPO_CFG(pRepo);
+  TSKEY minKey, midKey, maxKey, now;
+
+  now = taosGetTimestamp(pCfg->precision);
+  minKey = now - pCfg->keep * tsMsPerDay[pCfg->precision];
+  midKey = now - pCfg->keep2 * tsMsPerDay[pCfg->precision];
+  maxKey = now - pCfg->keep1 * tsMsPerDay[pCfg->precision];
+
+  pRtn->minKey = minKey;
+  pRtn->minFid = (int)(TSDB_KEY_FID(minKey, pCfg->daysPerFile, pCfg->precision));
+  pRtn->midFid = (int)(TSDB_KEY_FID(midKey, pCfg->daysPerFile, pCfg->precision));
+  pRtn->maxFid = (int)(TSDB_KEY_FID(maxKey, pCfg->daysPerFile, pCfg->precision));
+  tsdbDebug("vgId:%d now:%" PRId64 " minKey:%" PRId64 " minFid:%d, midFid:%d, maxFid:%d", REPO_ID(pRepo), now, minKey,
+            pRtn->minFid, pRtn->midFid, pRtn->maxFid);
+}
+
+static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen) {
+  char buf[64] = "\0";
+  void * pBuf = buf;
+  SKVRecord rInfo;
+  int64_t offset;
+
+  // Seek to end of meta file
+  offset = tsdbSeekMFile(pMFile, 0, SEEK_END);
+  if (offset < 0) {
+    return -1;
+  }
+
+  rInfo.offset = offset;
+  rInfo.uid = uid;
+  rInfo.size = contLen;
+
+  int tlen = tsdbEncodeKVRecord((void **)(&pBuf), &rInfo);
+  if (tsdbAppendMFile(pMFile, buf, tlen, NULL) < tlen) {
+    return -1;
+  }
+
+  if (tsdbAppendMFile(pMFile, cont, contLen, NULL) < contLen) {
+    return -1;
+  }
+
+  tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)));
+  SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)&uid, sizeof(uid));
+  if (pRecord != NULL) {
+    pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord));
+  } else {
+    pMFile->info.nRecords++;
+  }
+  taosHashPut(pfs->metaCache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
+
+  return 0;
+}
+
+static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) {
+  SKVRecord rInfo = {0};
+  char buf[128] = "\0";
+
+  SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)(&uid), sizeof(uid));
+  if (pRecord == NULL) {
+    tsdbError("failed to drop META record with key %" PRIu64 " since it is not found", uid);
+    return -1;
+  }
+
+  rInfo.offset = -pRecord->offset;
+  rInfo.uid = pRecord->uid;
+  rInfo.size = pRecord->size;
+
+  void *pBuf = buf;
+  tsdbEncodeKVRecord(&pBuf, &rInfo);
+
+  if (tsdbAppendMFile(pMFile, buf, sizeof(SKVRecord), NULL) < 0) {
+    return -1;
+  }
+
+  pMFile->info.magic = taosCalcChecksum(pMFile->info.magic, (uint8_t *)buf, sizeof(SKVRecord));
+  pMFile->info.nDels++;
+  pMFile->info.nRecords--;
+  pMFile->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2);
+
+  taosHashRemove(pfs->metaCache, (void *)(&uid), sizeof(uid));
+  return 0;
+}
+
+// =================== Commit Time-Series Data
+static int tsdbCommitTSData(STsdbRepo *pRepo) {
+  SMemTable *pMem = pRepo->imem;
+  SCommitH commith;
+  SDFileSet *pSet = NULL;
+  int fid;
+
+  memset(&commith, 0, sizeof(commith));
+
+  if (pMem->numOfRows <= 0) {
+    // No memory data, just apply retention on each file on disk
+    if (tsdbApplyRtn(pRepo) < 0) {
+      return -1;
+    }
+    return 0;
+  }
+
+  // Resource 
initialization + if (tsdbInitCommitH(&commith, pRepo) < 0) { + return -1; + } + + // Skip expired memory data and expired FSET + tsdbSeekCommitIter(&commith, commith.rtn.minKey); + while ((pSet = tsdbFSIterNext(&(commith.fsIter)))) { + if (pSet->fid < commith.rtn.minFid) { + tsdbInfo("vgId:%d FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet)); + } else { + break; + } + } + + // Loop to commit to each file + fid = tsdbNextCommitFid(&(commith)); + while (true) { + // Loop over both on disk and memory + if (pSet == NULL && fid == TSDB_IVLD_FID) break; + + if (pSet && (fid == TSDB_IVLD_FID || pSet->fid < fid)) { + // Only has existing FSET but no memory data to commit in this + // existing FSET, only check if file in correct retention + if (tsdbApplyRtnOnFSet(pRepo, pSet, &(commith.rtn)) < 0) { + tsdbDestroyCommitH(&commith); + return -1; + } + + pSet = tsdbFSIterNext(&(commith.fsIter)); + } else { + // Has memory data to commit + SDFileSet *pCSet; + int cfid; + + if (pSet == NULL || pSet->fid > fid) { + // Commit to a new FSET with fid: fid + pCSet = NULL; + cfid = fid; + } else { + // Commit to an existing FSET + pCSet = pSet; + cfid = pSet->fid; + pSet = tsdbFSIterNext(&(commith.fsIter)); + } + + if (tsdbCommitToFile(&commith, pCSet, cfid) < 0) { + tsdbDestroyCommitH(&commith); + return -1; + } + + fid = tsdbNextCommitFid(&commith); + } + } + + tsdbDestroyCommitH(&commith); + return 0; +} + +static void tsdbStartCommit(STsdbRepo *pRepo) { + SMemTable *pMem = pRepo->imem; + + ASSERT(pMem->numOfRows > 0 || listNEles(pMem->actList) > 0); + + tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64 " meta rows: %d", + REPO_ID(pRepo), pMem->keyFirst, pMem->keyLast, pMem->numOfRows, listNEles(pMem->actList)); + + tsdbStartFSTxn(pRepo, pMem->pointsAdd, pMem->storageAdd); + + pRepo->code = TSDB_CODE_SUCCESS; } static void tsdbEndCommit(STsdbRepo *pRepo, int eno) { + if (eno != TSDB_CODE_SUCCESS) { + tsdbEndFSTxnWithError(REPO_FS(pRepo)); + } else { + tsdbEndFSTxn(pRepo); + } + + tsdbInfo("vgId:%d commit over, %s", REPO_ID(pRepo), (eno == TSDB_CODE_SUCCESS) ? 
"succeed" : "failed"); + if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER, eno); + SMemTable *pIMem = pRepo->imem; tsdbLockRepo(pRepo); pRepo->imem = NULL; @@ -169,177 +400,1083 @@ static void tsdbEndCommit(STsdbRepo *pRepo, int eno) { tsem_post(&(pRepo->readyToCommit)); } -static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) { +#if 0 +static bool tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) { for (int i = 0; i < nIters; i++) { TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter); - if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1; + if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return true; } + return false; +} +#endif + +static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbCfg * pCfg = REPO_CFG(pRepo); + + ASSERT(pSet == NULL || pSet->fid == fid); + + tsdbResetCommitFile(pCommith); + tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &(pCommith->minKey), &(pCommith->maxKey)); + + // Set and open files + if (tsdbSetAndOpenCommitFile(pCommith, pSet, fid) < 0) { + return -1; + } + + // Loop to commit each table data + for (int tid = 1; tid < pCommith->niters; tid++) { + SCommitIter *pIter = pCommith->iters + tid; + + if (pIter->pTable == NULL) continue; + + if (tsdbCommitToTable(pCommith, tid) < 0) { + tsdbCloseCommitFile(pCommith, true); + // revert the file change + tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet); + return -1; + } + } + + if (tsdbWriteBlockIdx(pCommith) < 0) { + tsdbError("vgId:%d failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + tsdbCloseCommitFile(pCommith, true); + // revert the file change + tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet); + return -1; + } + + // Close commit file + tsdbCloseCommitFile(pCommith, false); + + if (tsdbUpdateDFileSet(REPO_FS(pRepo), &(pCommith->wSet)) < 0) { + return -1; + } + return 0; } -static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) { - char * dataDir = NULL; - STsdbCfg * pCfg = &pRepo->config; - STsdbFileH *pFileH = pRepo->tsdbFileH; - SFileGroup *pGroup = NULL; - SMemTable * pMem = pRepo->imem; - bool newLast = false; +static int tsdbCreateCommitIters(SCommitH *pCommith) { + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + SMemTable *pMem = pRepo->imem; + STsdbMeta *pMeta = pRepo->tsdbMeta; - TSKEY minKey = 0, maxKey = 0; - tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey); - - // Check if there are data to commit to this file - int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey); - if (!hasDataToCommit) { - tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid); - return 0; - } - - // Create and open files for commit - dataDir = tsdbGetDataDirName(pRepo->rootDir); - if (dataDir == NULL) { + pCommith->niters = pMem->maxTables; + pCommith->iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter)); + if (pCommith->iters == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) { - tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _err; - } - - // Open files for write/read - if 
(tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) { - tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - newLast = TSDB_NLAST_FILE_OPENED(pHelper); - - if (tsdbLoadCompIdx(pHelper, NULL) < 0) { - tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - // Loop to commit data in each table - for (int tid = 1; tid < pMem->maxTables; tid++) { - SCommitIter *pIter = iters + tid; - if (pIter->pTable == NULL) continue; - - TSDB_RLOCK_TABLE(pIter->pTable); - - if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err; - - if (pIter->pIter != NULL) { - if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) { - TSDB_RUNLOCK_TABLE(pIter->pTable); - tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo), - TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable), - tstrerror(terrno)); - goto _err; - } - } - - TSDB_RUNLOCK_TABLE(pIter->pTable); - - // Move the last block to the new .l file if neccessary - if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { - tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - // Write the SCompBlock part - if (tsdbWriteCompInfo(pHelper) < 0) { - tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - } - - if (tsdbWriteCompIdx(pHelper) < 0) { - tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _err; - } - - tfree(dataDir); - tsdbCloseHelperFile(pHelper, 0, pGroup); - - pthread_rwlock_wrlock(&(pFileH->fhlock)); - - (void)taosRename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info; - - if (newLast) { - (void)taosRename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info; - } else { - pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info; - } - - pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info; - - pthread_rwlock_unlock(&(pFileH->fhlock)); - - return 0; - -_err: - tfree(dataDir); - tsdbCloseHelperFile(pHelper, 1, pGroup); - return -1; -} - -static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) { - SMemTable *pMem = pRepo->imem; - STsdbMeta *pMeta = pRepo->tsdbMeta; - - SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter)); - if (iters == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return NULL; - } - - if (tsdbRLockRepoMeta(pRepo) < 0) goto _err; + if (tsdbRLockRepoMeta(pRepo) < 0) return -1; // reference all tables for (int i = 0; i < pMem->maxTables; i++) { if (pMeta->tables[i] != NULL) { tsdbRefTable(pMeta->tables[i]); - iters[i].pTable = pMeta->tables[i]; + pCommith->iters[i].pTable = pMeta->tables[i]; } } - if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err; + if (tsdbUnlockRepoMeta(pRepo) < 0) return -1; for (int i = 0; i < pMem->maxTables; i++) { - if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) { - if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) { + if ((pCommith->iters[i].pTable != NULL) && 
(pMem->tData[i] != NULL) && + (TABLE_UID(pCommith->iters[i].pTable) == pMem->tData[i]->uid)) { + if ((pCommith->iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + return -1; } - tSkipListIterNext(iters[i].pIter); + tSkipListIterNext(pCommith->iters[i].pIter); } } - return iters; - -_err: - tsdbDestroyCommitIters(iters, pMem->maxTables); - return NULL; + return 0; } -static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) { - if (iters == NULL) return; +static void tsdbDestroyCommitIters(SCommitH *pCommith) { + if (pCommith->iters == NULL) return; - for (int i = 1; i < maxTables; i++) { - if (iters[i].pTable != NULL) { - tsdbUnRefTable(iters[i].pTable); - tSkipListDestroyIter(iters[i].pIter); + for (int i = 1; i < pCommith->niters; i++) { + if (pCommith->iters[i].pTable != NULL) { + tsdbUnRefTable(pCommith->iters[i].pTable); + tSkipListDestroyIter(pCommith->iters[i].pIter); } } - free(iters); + free(pCommith->iters); + pCommith->iters = NULL; + pCommith->niters = 0; } + +// Skip all keys until key (not included) +static void tsdbSeekCommitIter(SCommitH *pCommith, TSKEY key) { + for (int i = 0; i < pCommith->niters; i++) { + SCommitIter *pIter = pCommith->iters + i; + if (pIter->pTable == NULL || pIter->pIter == NULL) continue; + + tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, key - 1, INT32_MAX, NULL, NULL, 0, true, NULL); + } +} + +static int tsdbInitCommitH(SCommitH *pCommith, STsdbRepo *pRepo) { + STsdbCfg *pCfg = REPO_CFG(pRepo); + + memset(pCommith, 0, sizeof(*pCommith)); + tsdbGetRtnSnap(pRepo, &(pCommith->rtn)); + + TSDB_FSET_SET_CLOSED(TSDB_COMMIT_WRITE_FSET(pCommith)); + + // Init read handle + if (tsdbInitReadH(&(pCommith->readh), pRepo) < 0) { + return -1; + } + + // Init file iterator + tsdbFSIterInit(&(pCommith->fsIter), REPO_FS(pRepo), TSDB_FS_ITER_FORWARD); + + if (tsdbCreateCommitIters(pCommith) < 0) { + tsdbDestroyCommitH(pCommith); + return -1; + } + + pCommith->aBlkIdx = taosArrayInit(1024, sizeof(SBlockIdx)); + if (pCommith->aBlkIdx == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbDestroyCommitH(pCommith); + return -1; + } + + pCommith->aSupBlk = taosArrayInit(1024, sizeof(SBlock)); + if (pCommith->aSupBlk == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbDestroyCommitH(pCommith); + return -1; + } + + pCommith->aSubBlk = taosArrayInit(1024, sizeof(SBlock)); + if (pCommith->aSubBlk == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbDestroyCommitH(pCommith); + return -1; + } + + pCommith->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + if (pCommith->pDataCols == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbDestroyCommitH(pCommith); + return -1; + } + + return 0; +} + +static void tsdbDestroyCommitH(SCommitH *pCommith) { + pCommith->pDataCols = tdFreeDataCols(pCommith->pDataCols); + pCommith->aSubBlk = taosArrayDestroy(pCommith->aSubBlk); + pCommith->aSupBlk = taosArrayDestroy(pCommith->aSupBlk); + pCommith->aBlkIdx = taosArrayDestroy(pCommith->aBlkIdx); + tsdbDestroyCommitIters(pCommith); + tsdbDestroyReadH(&(pCommith->readh)); + tsdbCloseDFileSet(TSDB_COMMIT_WRITE_FSET(pCommith)); +} + +static int tsdbNextCommitFid(SCommitH *pCommith) { + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbCfg * pCfg = REPO_CFG(pRepo); + int fid = TSDB_IVLD_FID; + + for (int i = 0; i < pCommith->niters; i++) { + SCommitIter *pIter = pCommith->iters + i; + if (pIter->pTable == NULL || pIter->pIter == NULL) continue; + + TSKEY nextKey = 
tsdbNextIterKey(pIter->pIter); + if (nextKey == TSDB_DATA_TIMESTAMP_NULL) { + continue; + } else { + int tfid = (int)(TSDB_KEY_FID(nextKey, pCfg->daysPerFile, pCfg->precision)); + if (fid == TSDB_IVLD_FID || fid > tfid) { + fid = tfid; + } + } + } + + return fid; +} + +static int tsdbCommitToTable(SCommitH *pCommith, int tid) { + SCommitIter *pIter = pCommith->iters + tid; + TSKEY nextKey = tsdbNextIterKey(pIter->pIter); + + tsdbResetCommitTable(pCommith); + + TSDB_RLOCK_TABLE(pIter->pTable); + + // Set commit table + if (tsdbSetCommitTable(pCommith, pIter->pTable) < 0) { + TSDB_RUNLOCK_TABLE(pIter->pTable); + return -1; + } + + // No disk data and no memory data, just return + if (pCommith->readh.pBlkIdx == NULL && (nextKey == TSDB_DATA_TIMESTAMP_NULL || nextKey > pCommith->maxKey)) { + TSDB_RUNLOCK_TABLE(pIter->pTable); + return 0; + } + + // Must has disk data or has memory data + int nBlocks; + int bidx = 0; + SBlock *pBlock; + + if (pCommith->readh.pBlkIdx) { + if (tsdbLoadBlockInfo(&(pCommith->readh), NULL) < 0) { + TSDB_RUNLOCK_TABLE(pIter->pTable); + return -1; + } + + nBlocks = pCommith->readh.pBlkIdx->numOfBlocks; + } else { + nBlocks = 0; + } + + if (bidx < nBlocks) { + pBlock = pCommith->readh.pBlkInfo->blocks + bidx; + } else { + pBlock = NULL; + } + + while (true) { + if (pBlock == NULL && (nextKey == TSDB_DATA_TIMESTAMP_NULL || nextKey > pCommith->maxKey)) break; + + if ((nextKey == TSDB_DATA_TIMESTAMP_NULL || nextKey > pCommith->maxKey) || + (pBlock && (!pBlock->last) && tsdbComparKeyBlock((void *)(&nextKey), pBlock) > 0)) { + if (tsdbMoveBlock(pCommith, bidx) < 0) { + TSDB_RUNLOCK_TABLE(pIter->pTable); + return -1; + } + + bidx++; + if (bidx < nBlocks) { + pBlock = pCommith->readh.pBlkInfo->blocks + bidx; + } else { + pBlock = NULL; + } + } else if (pBlock && (pBlock->last || tsdbComparKeyBlock((void *)(&nextKey), pBlock) == 0)) { + // merge pBlock data and memory data + if (tsdbMergeMemData(pCommith, pIter, bidx) < 0) { + TSDB_RUNLOCK_TABLE(pIter->pTable); + return -1; + } + + bidx++; + if (bidx < nBlocks) { + pBlock = pCommith->readh.pBlkInfo->blocks + bidx; + } else { + pBlock = NULL; + } + nextKey = tsdbNextIterKey(pIter->pIter); + } else { + // Only commit memory data + if (pBlock == NULL) { + if (tsdbCommitMemData(pCommith, pIter, pCommith->maxKey, false) < 0) { + TSDB_RUNLOCK_TABLE(pIter->pTable); + return -1; + } + } else { + if (tsdbCommitMemData(pCommith, pIter, pBlock->keyFirst - 1, true) < 0) { + TSDB_RUNLOCK_TABLE(pIter->pTable); + return -1; + } + } + nextKey = tsdbNextIterKey(pIter->pIter); + } + } + + TSDB_RUNLOCK_TABLE(pIter->pTable); + + if (tsdbWriteBlockInfo(pCommith) < 0) { + tsdbError("vgId:%d failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith), + TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno)); + return -1; + } + + return 0; +} + +static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) { + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + + pCommith->pTable = pTable; + + if (tdInitDataCols(pCommith->pDataCols, pSchema) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + if (pCommith->isRFileSet) { + if (tsdbSetReadTable(&(pCommith->readh), pTable) < 0) { + return -1; + } + } else { + pCommith->readh.pBlkIdx = NULL; + } + return 0; +} + +static int tsdbComparKeyBlock(const void *arg1, const void *arg2) { + TSKEY key = *(TSKEY *)arg1; + SBlock *pBlock = (SBlock *)arg2; + + if (key < pBlock->keyFirst) { + return -1; + } else if (key > 
pBlock->keyLast) { + return 1; + } else { + return 0; + } +} + +static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, bool isLast, + bool isSuper) { + STsdbRepo * pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbCfg * pCfg = REPO_CFG(pRepo); + SBlockData *pBlockData; + int64_t offset = 0; + STable * pTable = TSDB_COMMIT_TABLE(pCommith); + int rowsToWrite = pDataCols->numOfRows; + + ASSERT(rowsToWrite > 0 && rowsToWrite <= pCfg->maxRowsPerFileBlock); + ASSERT((!isLast) || rowsToWrite < pCfg->minRowsPerFileBlock); + + // Make buffer space + if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), TSDB_BLOCK_STATIS_SIZE(pDataCols->numOfCols)) < 0) { + return -1; + } + pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith); + + // Get # of cols not all NULL(not including key column) + int nColsNotAllNull = 0; + for (int ncol = 1; ncol < pDataCols->numOfCols; ncol++) { // ncol from 1, we skip the timestamp column + SDataCol * pDataCol = pDataCols->cols + ncol; + SBlockCol *pBlockCol = pBlockData->cols + nColsNotAllNull; + + if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it + continue; + } + + memset(pBlockCol, 0, sizeof(*pBlockCol)); + + pBlockCol->colId = pDataCol->colId; + pBlockCol->type = pDataCol->type; + if (tDataTypes[pDataCol->type].statisFunc) { + (*tDataTypes[pDataCol->type].statisFunc)(pDataCol->pData, rowsToWrite, &(pBlockCol->min), &(pBlockCol->max), + &(pBlockCol->sum), &(pBlockCol->minIndex), &(pBlockCol->maxIndex), + &(pBlockCol->numOfNull)); + } + nColsNotAllNull++; + } + + ASSERT(nColsNotAllNull >= 0 && nColsNotAllNull <= pDataCols->numOfCols); + + // Compress the data if neccessary + int tcol = 0; // counter of not all NULL and written columns + int32_t toffset = 0; + int32_t tsize = TSDB_BLOCK_STATIS_SIZE(nColsNotAllNull); + int32_t lsize = tsize; + int32_t keyLen = 0; + for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) { + // All not NULL columns finish + if (ncol != 0 && tcol >= nColsNotAllNull) break; + + SDataCol * pDataCol = pDataCols->cols + ncol; + SBlockCol *pBlockCol = pBlockData->cols + tcol; + + if (ncol != 0 && (pDataCol->colId != pBlockCol->colId)) continue; + + int32_t flen; // final length + int32_t tlen = dataColGetNEleLen(pDataCol, rowsToWrite); + void * tptr; + + // Make room + if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), lsize + tlen + COMP_OVERFLOW_BYTES + sizeof(TSCKSUM)) < 0) { + return -1; + } + pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith); + pBlockCol = pBlockData->cols + tcol; + tptr = POINTER_SHIFT(pBlockData, lsize); + + if (pCfg->compression == TWO_STAGE_COMP && + tsdbMakeRoom((void **)(&TSDB_COMMIT_COMP_BUF(pCommith)), tlen + COMP_OVERFLOW_BYTES) < 0) { + return -1; + } + + // Compress or just copy + if (pCfg->compression) { + flen = (*(tDataTypes[pDataCol->type].compFunc))((char *)pDataCol->pData, tlen, rowsToWrite, tptr, + tlen + COMP_OVERFLOW_BYTES, pCfg->compression, + TSDB_COMMIT_COMP_BUF(pCommith), tlen + COMP_OVERFLOW_BYTES); + } else { + flen = tlen; + memcpy(tptr, pDataCol->pData, flen); + } + + // Add checksum + ASSERT(flen > 0); + flen += sizeof(TSCKSUM); + taosCalcChecksumAppend(0, (uint8_t *)tptr, flen); + tsdbUpdateDFileMagic(pDFile, POINTER_SHIFT(tptr, flen - sizeof(TSCKSUM))); + + if (ncol != 0) { + pBlockCol->offset = toffset; + pBlockCol->len = flen; + tcol++; + } else { + keyLen = flen; + } + + toffset += flen; + lsize += flen; + } + + pBlockData->delimiter = TSDB_FILE_DELIMITER; + pBlockData->uid = TABLE_UID(pTable); + 
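  /*
   * Rough on-disk picture of what this routine assembles (a sketch inferred
   * from the surrounding code, not an authoritative format spec): first a
   * statis header of TSDB_BLOCK_STATIS_SIZE(nColsNotAllNull) bytes, i.e.
   * SBlockData followed by one SBlockCol per non-all-NULL column plus a
   * TSCKSUM; then the key column payload (keyLen bytes), then each remaining
   * column payload. Every payload is compressed when pCfg->compression is set
   * and ends with its own TSCKSUM; SBlockCol.offset is measured from the start
   * of the payload area, so the key column sits at offset 0.
   */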
pBlockData->numOfCols = nColsNotAllNull; + + taosCalcChecksumAppend(0, (uint8_t *)pBlockData, tsize); + tsdbUpdateDFileMagic(pDFile, POINTER_SHIFT(pBlockData, tsize - sizeof(TSCKSUM))); + + // Write the whole block to file + if (tsdbAppendDFile(pDFile, (void *)pBlockData, lsize, &offset) < lsize) { + return -1; + } + + // Update pBlock membership vairables + pBlock->last = isLast; + pBlock->offset = offset; + pBlock->algorithm = pCfg->compression; + pBlock->numOfRows = rowsToWrite; + pBlock->len = lsize; + pBlock->keyLen = keyLen; + pBlock->numOfSubBlocks = isSuper ? 1 : 0; + pBlock->numOfCols = nColsNotAllNull; + pBlock->keyFirst = dataColsKeyFirst(pDataCols); + pBlock->keyLast = dataColsKeyLast(pDataCols); + + tsdbDebug("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64 + " numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64, + REPO_ID(pRepo), TABLE_TID(pTable), TSDB_FILE_FULL_NAME(pDFile), offset, rowsToWrite, pBlock->len, + pBlock->numOfCols, pBlock->keyFirst, pBlock->keyLast); + + return 0; +} + +static int tsdbWriteBlockInfo(SCommitH *pCommih) { + SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); + SBlockIdx blkIdx; + STable * pTable = TSDB_COMMIT_TABLE(pCommih); + SBlock * pBlock; + size_t nSupBlocks; + size_t nSubBlocks; + uint32_t tlen; + SBlockInfo *pBlkInfo; + int64_t offset; + + nSupBlocks = taosArrayGetSize(pCommih->aSupBlk); + nSubBlocks = taosArrayGetSize(pCommih->aSubBlk); + + if (nSupBlocks <= 0) { + // No data (data all deleted) + return 0; + } + + tlen = (uint32_t)(sizeof(SBlockInfo) + sizeof(SBlock) * (nSupBlocks + nSubBlocks) + sizeof(TSCKSUM)); + + // Write SBlockInfo part + if (tsdbMakeRoom((void **)(&(TSDB_COMMIT_BUF(pCommih))), tlen) < 0) return -1; + pBlkInfo = TSDB_COMMIT_BUF(pCommih); + + pBlkInfo->delimiter = TSDB_FILE_DELIMITER; + pBlkInfo->tid = TABLE_TID(pTable); + pBlkInfo->uid = TABLE_UID(pTable); + + memcpy((void *)(pBlkInfo->blocks), taosArrayGet(pCommih->aSupBlk, 0), nSupBlocks * sizeof(SBlock)); + if (nSubBlocks > 0) { + memcpy((void *)(pBlkInfo->blocks + nSupBlocks), taosArrayGet(pCommih->aSubBlk, 0), nSubBlocks * sizeof(SBlock)); + + for (int i = 0; i < nSupBlocks; i++) { + pBlock = pBlkInfo->blocks + i; + + if (pBlock->numOfSubBlocks > 1) { + pBlock->offset += (sizeof(SBlockInfo) + sizeof(SBlock) * nSupBlocks); + } + } + } + + taosCalcChecksumAppend(0, (uint8_t *)pBlkInfo, tlen); + + if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < 0) { + return -1; + } + + tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(pBlkInfo, tlen - sizeof(TSCKSUM))); + + // Set blkIdx + pBlock = taosArrayGet(pCommih->aSupBlk, nSupBlocks - 1); + + blkIdx.tid = TABLE_TID(pTable); + blkIdx.uid = TABLE_UID(pTable); + blkIdx.hasLast = pBlock->last ? 
1 : 0; + blkIdx.maxKey = pBlock->keyLast; + blkIdx.numOfBlocks = (uint32_t)nSupBlocks; + blkIdx.len = tlen; + blkIdx.offset = (uint32_t)offset; + + ASSERT(blkIdx.numOfBlocks > 0); + + if (taosArrayPush(pCommih->aBlkIdx, (void *)(&blkIdx)) == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + return 0; +} + +static int tsdbWriteBlockIdx(SCommitH *pCommih) { + SBlockIdx *pBlkIdx; + SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); + size_t nidx = taosArrayGetSize(pCommih->aBlkIdx); + int tlen = 0, size; + int64_t offset; + + if (nidx <= 0) { + // All data are deleted + pHeadf->info.offset = 0; + pHeadf->info.len = 0; + return 0; + } + + for (size_t i = 0; i < nidx; i++) { + pBlkIdx = (SBlockIdx *)taosArrayGet(pCommih->aBlkIdx, i); + + size = tsdbEncodeSBlockIdx(NULL, pBlkIdx); + if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen + size) < 0) return -1; + + void *ptr = POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen); + tsdbEncodeSBlockIdx(&ptr, pBlkIdx); + + tlen += size; + } + + tlen += sizeof(TSCKSUM); + if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen) < 0) return -1; + taosCalcChecksumAppend(0, (uint8_t *)TSDB_COMMIT_BUF(pCommih), tlen); + + if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < tlen) { + tsdbError("vgId:%d failed to write block index part to file %s since %s", TSDB_COMMIT_REPO_ID(pCommih), + TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno)); + return -1; + } + + tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen - sizeof(TSCKSUM))); + pHeadf->info.offset = (uint32_t)offset; + pHeadf->info.len = tlen; + + return 0; +} + +static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData) { + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbCfg * pCfg = REPO_CFG(pRepo); + SMergeInfo mInfo; + int32_t defaultRows = TSDB_COMMIT_DEFAULT_ROWS(pCommith); + SDFile * pDFile; + bool isLast; + SBlock block; + + while (true) { + tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, keyLimit, defaultRows, pCommith->pDataCols, NULL, 0, + pCfg->update, &mInfo); + + if (pCommith->pDataCols->numOfRows <= 0) break; + + if (toData || pCommith->pDataCols->numOfRows >= pCfg->minRowsPerFileBlock) { + pDFile = TSDB_COMMIT_DATA_FILE(pCommith); + isLast = false; + } else { + pDFile = TSDB_COMMIT_LAST_FILE(pCommith); + isLast = true; + } + + if (tsdbWriteBlock(pCommith, pDFile, pCommith->pDataCols, &block, isLast, true) < 0) return -1; + + if (tsdbCommitAddBlock(pCommith, &block, NULL, 0) < 0) { + return -1; + } + } + + return 0; +} + +static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx) { + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbCfg * pCfg = REPO_CFG(pRepo); + int nBlocks = pCommith->readh.pBlkIdx->numOfBlocks; + SBlock * pBlock = pCommith->readh.pBlkInfo->blocks + bidx; + TSKEY keyLimit; + int16_t colId = 0; + SMergeInfo mInfo; + SBlock subBlocks[TSDB_MAX_SUBBLOCKS]; + SBlock block, supBlock; + SDFile * pDFile; + + if (bidx == nBlocks - 1) { + keyLimit = pCommith->maxKey; + } else { + keyLimit = pBlock[1].keyFirst - 1; + } + + SSkipListIterator titer = *(pIter->pIter); + if (tsdbLoadBlockDataCols(&(pCommith->readh), pBlock, NULL, &colId, 1) < 0) return -1; + + tsdbLoadDataFromCache(pIter->pTable, &titer, keyLimit, INT32_MAX, NULL, pCommith->readh.pDCols[0]->cols[0].pData, + pCommith->readh.pDCols[0]->numOfRows, pCfg->update, &mInfo); + + if (mInfo.nOperations == 0) { + // no new data to insert (all updates denied) + if (tsdbMoveBlock(pCommith, bidx) < 
0) { + return -1; + } + *(pIter->pIter) = titer; + } else if (pBlock->numOfRows + mInfo.rowsInserted - mInfo.rowsDeleteSucceed == 0) { + // Ignore the block + ASSERT(0); + *(pIter->pIter) = titer; + } else if (tsdbCanAddSubBlock(pCommith, pBlock, &mInfo)) { + // Add a sub-block + tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, keyLimit, INT32_MAX, pCommith->pDataCols, + pCommith->readh.pDCols[0]->cols[0].pData, pCommith->readh.pDCols[0]->numOfRows, pCfg->update, + &mInfo); + if (pBlock->last) { + pDFile = TSDB_COMMIT_LAST_FILE(pCommith); + } else { + pDFile = TSDB_COMMIT_DATA_FILE(pCommith); + } + + if (tsdbWriteBlock(pCommith, pDFile, pCommith->pDataCols, &block, pBlock->last, false) < 0) return -1; + + if (pBlock->numOfSubBlocks == 1) { + subBlocks[0] = *pBlock; + subBlocks[0].numOfSubBlocks = 0; + } else { + memcpy(subBlocks, POINTER_SHIFT(pCommith->readh.pBlkInfo, pBlock->offset), + sizeof(SBlock) * pBlock->numOfSubBlocks); + } + subBlocks[pBlock->numOfSubBlocks] = block; + supBlock = *pBlock; + supBlock.keyFirst = mInfo.keyFirst; + supBlock.keyLast = mInfo.keyLast; + supBlock.numOfSubBlocks++; + supBlock.numOfRows = pBlock->numOfRows + mInfo.rowsInserted - mInfo.rowsDeleteSucceed; + supBlock.offset = taosArrayGetSize(pCommith->aSubBlk) * sizeof(SBlock); + + if (tsdbCommitAddBlock(pCommith, &supBlock, subBlocks, supBlock.numOfSubBlocks) < 0) return -1; + } else { + if (tsdbLoadBlockData(&(pCommith->readh), pBlock, NULL) < 0) return -1; + if (tsdbMergeBlockData(pCommith, pIter, pCommith->readh.pDCols[0], keyLimit, bidx == (nBlocks - 1)) < 0) return -1; + } + + return 0; +} + +static int tsdbMoveBlock(SCommitH *pCommith, int bidx) { + SBlock *pBlock = pCommith->readh.pBlkInfo->blocks + bidx; + SDFile *pDFile; + SBlock block; + bool isSameFile; + + ASSERT(pBlock->numOfSubBlocks > 0); + + if (pBlock->last) { + pDFile = TSDB_COMMIT_LAST_FILE(pCommith); + isSameFile = pCommith->isLFileSame; + } else { + pDFile = TSDB_COMMIT_DATA_FILE(pCommith); + isSameFile = pCommith->isDFileSame; + } + + if (isSameFile) { + if (pBlock->numOfSubBlocks == 1) { + if (tsdbCommitAddBlock(pCommith, pBlock, NULL, 0) < 0) { + return -1; + } + } else { + block = *pBlock; + block.offset = sizeof(SBlock) * taosArrayGetSize(pCommith->aSubBlk); + + if (tsdbCommitAddBlock(pCommith, &block, POINTER_SHIFT(pCommith->readh.pBlkInfo, pBlock->offset), + pBlock->numOfSubBlocks) < 0) { + return -1; + } + } + } else { + if (tsdbLoadBlockData(&(pCommith->readh), pBlock, NULL) < 0) return -1; + if (tsdbWriteBlock(pCommith, pDFile, pCommith->readh.pDCols[0], &block, pBlock->last, true) < 0) return -1; + if (tsdbCommitAddBlock(pCommith, &block, NULL, 0) < 0) return -1; + } + + return 0; +} + +static int tsdbCommitAddBlock(SCommitH *pCommith, const SBlock *pSupBlock, const SBlock *pSubBlocks, int nSubBlocks) { + if (taosArrayPush(pCommith->aSupBlk, pSupBlock) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + if (pSubBlocks && taosArrayPushBatch(pCommith->aSubBlk, pSubBlocks, nSubBlocks) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + return 0; +} + +static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols *pDataCols, TSKEY keyLimit, bool isLastOneBlock) { + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbCfg * pCfg = REPO_CFG(pRepo); + SBlock block; + SDFile * pDFile; + bool isLast; + int32_t defaultRows = TSDB_COMMIT_DEFAULT_ROWS(pCommith); + + int biter = 0; + while (true) { + tsdbLoadAndMergeFromCache(pCommith->readh.pDCols[0], &biter, pIter, 
pCommith->pDataCols, keyLimit, defaultRows, + pCfg->update); + + if (pCommith->pDataCols->numOfRows == 0) break; + + if (isLastOneBlock) { + if (pCommith->pDataCols->numOfRows < pCfg->minRowsPerFileBlock) { + pDFile = TSDB_COMMIT_LAST_FILE(pCommith); + isLast = true; + } else { + pDFile = TSDB_COMMIT_DATA_FILE(pCommith); + isLast = false; + } + } else { + pDFile = TSDB_COMMIT_DATA_FILE(pCommith); + isLast = false; + } + + if (tsdbWriteBlock(pCommith, pDFile, pCommith->pDataCols, &block, isLast, true) < 0) return -1; + if (tsdbCommitAddBlock(pCommith, &block, NULL, 0) < 0) return -1; + } + + return 0; +} + +static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, + TSKEY maxKey, int maxRows, int8_t update) { + TSKEY key1 = INT64_MAX; + TSKEY key2 = INT64_MAX; + STSchema *pSchema = NULL; + + ASSERT(maxRows > 0 && dataColsKeyLast(pDataCols) <= maxKey); + tdResetDataCols(pTarget); + + while (true) { + key1 = (*iter >= pDataCols->numOfRows) ? INT64_MAX : dataColsKeyAt(pDataCols, *iter); + bool isRowDel = false; + SDataRow row = tsdbNextIterRow(pCommitIter->pIter); + if (row == NULL || dataRowKey(row) > maxKey) { + key2 = INT64_MAX; + } else { + key2 = dataRowKey(row); + isRowDel = dataRowDeleted(row); + } + + if (key1 == INT64_MAX && key2 == INT64_MAX) break; + + if (key1 < key2) { + for (int i = 0; i < pDataCols->numOfCols; i++) { + dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, + pTarget->maxPoints); + } + + pTarget->numOfRows++; + (*iter)++; + } else if (key1 > key2) { + if (!isRowDel) { + if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + ASSERT(pSchema != NULL); + } + + tdAppendDataRowToDataCol(row, pSchema, pTarget); + } + + tSkipListIterNext(pCommitIter->pIter); + } else { + if (update) { + if (!isRowDel) { + if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + ASSERT(pSchema != NULL); + } + + tdAppendDataRowToDataCol(row, pSchema, pTarget); + } + } else { + ASSERT(!isRowDel); + + for (int i = 0; i < pDataCols->numOfCols; i++) { + dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, + pTarget->maxPoints); + } + + pTarget->numOfRows++; + } + (*iter)++; + tSkipListIterNext(pCommitIter->pIter); + } + + if (pTarget->numOfRows >= maxRows) break; + } +} + +static void tsdbResetCommitFile(SCommitH *pCommith) { + pCommith->isRFileSet = false; + pCommith->isDFileSame = false; + pCommith->isLFileSame = false; + taosArrayClear(pCommith->aBlkIdx); +} + +static void tsdbResetCommitTable(SCommitH *pCommith) { + taosArrayClear(pCommith->aSubBlk); + taosArrayClear(pCommith->aSupBlk); + pCommith->pTable = NULL; +} + +static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { + SDiskID did; + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + SDFileSet *pWSet = TSDB_COMMIT_WRITE_FSET(pCommith); + + tfsAllocDisk(tsdbGetFidLevel(fid, &(pCommith->rtn)), &(did.level), &(did.id)); + if (did.level == TFS_UNDECIDED_LEVEL) { + terrno = TSDB_CODE_TDB_NO_AVAIL_DISK; + return -1; + } + + // Open read FSET + if (pSet) { + if (tsdbSetAndOpenReadFSet(&(pCommith->readh), pSet) < 0) { + return -1; + } + + pCommith->isRFileSet = true; + + if (tsdbLoadBlockIdx(&(pCommith->readh)) < 0) { + 
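      // Could not load the SBlockIdx list of the read file set; close it and
      // abort the commit of this fid so the caller can fail the transaction.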
tsdbCloseAndUnsetFSet(&(pCommith->readh)); + return -1; + } + + tsdbDebug("vgId:%d FSET %d at level %d disk id %d is opened to read to commit", REPO_ID(pRepo), TSDB_FSET_FID(pSet), + TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet)); + } else { + pCommith->isRFileSet = false; + } + + // Set and open commit FSET + if (pSet == NULL || did.level > TSDB_FSET_LEVEL(pSet)) { + // Create a new FSET to write data + tsdbInitDFileSet(pWSet, did, REPO_ID(pRepo), fid, FS_TXN_VERSION(REPO_FS(pRepo))); + + if (tsdbCreateDFileSet(pWSet, true) < 0) { + tsdbError("vgId:%d failed to create FSET %d at level %d disk id %d since %s", REPO_ID(pRepo), + TSDB_FSET_FID(pWSet), TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet), tstrerror(terrno)); + if (pCommith->isRFileSet) { + tsdbCloseAndUnsetFSet(&(pCommith->readh)); + } + return -1; + } + + pCommith->isDFileSame = false; + pCommith->isLFileSame = false; + + tsdbDebug("vgId:%d FSET %d at level %d disk id %d is created to commit", REPO_ID(pRepo), TSDB_FSET_FID(pWSet), + TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet)); + } else { + did.level = TSDB_FSET_LEVEL(pSet); + did.id = TSDB_FSET_ID(pSet); + + pCommith->wSet.fid = fid; + pCommith->wSet.state = 0; + + // TSDB_FILE_HEAD + SDFile *pWHeadf = TSDB_COMMIT_HEAD_FILE(pCommith); + tsdbInitDFile(pWHeadf, did, REPO_ID(pRepo), fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_HEAD); + if (tsdbCreateDFile(pWHeadf, true) < 0) { + tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWHeadf), + tstrerror(terrno)); + + if (pCommith->isRFileSet) { + tsdbCloseAndUnsetFSet(&(pCommith->readh)); + return -1; + } + } + + // TSDB_FILE_DATA + SDFile *pRDataf = TSDB_READ_DATA_FILE(&(pCommith->readh)); + SDFile *pWDataf = TSDB_COMMIT_DATA_FILE(pCommith); + tsdbInitDFileEx(pWDataf, pRDataf); + if (tsdbOpenDFile(pWDataf, O_WRONLY) < 0) { + tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWDataf), + tstrerror(terrno)); + + tsdbCloseDFileSet(pWSet); + tsdbRemoveDFile(pWHeadf); + if (pCommith->isRFileSet) { + tsdbCloseAndUnsetFSet(&(pCommith->readh)); + return -1; + } + } + pCommith->isDFileSame = true; + + // TSDB_FILE_LAST + SDFile *pRLastf = TSDB_READ_LAST_FILE(&(pCommith->readh)); + SDFile *pWLastf = TSDB_COMMIT_LAST_FILE(pCommith); + if (pRLastf->info.size < 32 * 1024) { + tsdbInitDFileEx(pWLastf, pRLastf); + pCommith->isLFileSame = true; + + if (tsdbOpenDFile(pWLastf, O_WRONLY) < 0) { + tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf), + tstrerror(terrno)); + + tsdbCloseDFileSet(pWSet); + tsdbRemoveDFile(pWHeadf); + if (pCommith->isRFileSet) { + tsdbCloseAndUnsetFSet(&(pCommith->readh)); + return -1; + } + } + } else { + tsdbInitDFile(pWLastf, did, REPO_ID(pRepo), fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_LAST); + pCommith->isLFileSame = false; + + if (tsdbCreateDFile(pWLastf, true) < 0) { + tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf), + tstrerror(terrno)); + + tsdbCloseDFileSet(pWSet); + tsdbRemoveDFile(pWHeadf); + if (pCommith->isRFileSet) { + tsdbCloseAndUnsetFSet(&(pCommith->readh)); + return -1; + } + } + } + } + + return 0; +} + +static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError) { + if (pCommith->isRFileSet) { + tsdbCloseAndUnsetFSet(&(pCommith->readh)); + } + + if (!hasError) { + TSDB_FSET_FSYNC(TSDB_COMMIT_WRITE_FSET(pCommith)); + } + tsdbCloseDFileSet(TSDB_COMMIT_WRITE_FSET(pCommith)); +} + +static 
bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo) { + STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbCfg * pCfg = REPO_CFG(pRepo); + int mergeRows = pBlock->numOfRows + pInfo->rowsInserted - pInfo->rowsDeleteSucceed; + + ASSERT(mergeRows > 0); + + if (pBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && pInfo->nOperations <= pCfg->maxRowsPerFileBlock) { + if (pBlock->last) { + if (pCommith->isLFileSame && mergeRows < pCfg->minRowsPerFileBlock) return true; + } else { + if (pCommith->isDFileSame && mergeRows <= pCfg->maxRowsPerFileBlock) return true; + } + } + + return false; +} + +static int tsdbApplyRtn(STsdbRepo *pRepo) { + SRtn rtn; + SFSIter fsiter; + STsdbFS * pfs = REPO_FS(pRepo); + SDFileSet *pSet; + + // Get retention snapshot + tsdbGetRtnSnap(pRepo, &rtn); + + tsdbFSIterInit(&fsiter, pfs, TSDB_FS_ITER_FORWARD); + while ((pSet = tsdbFSIterNext(&fsiter))) { + if (pSet->fid < rtn.minFid) { + tsdbInfo("vgId:%d FSET %d at level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet)); + continue; + } + + if (tsdbApplyRtnOnFSet(pRepo, pSet, &rtn) < 0) { + return -1; + } + } + + return 0; +} + +static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) { + SDiskID did; + SDFileSet nSet; + STsdbFS * pfs = REPO_FS(pRepo); + int level; + + ASSERT(pSet->fid >= pRtn->minFid); + + level = tsdbGetFidLevel(pSet->fid, pRtn); + + tfsAllocDisk(level, &(did.level), &(did.id)); + if (did.level == TFS_UNDECIDED_LEVEL) { + terrno = TSDB_CODE_TDB_NO_AVAIL_DISK; + return -1; + } + + if (did.level > TSDB_FSET_LEVEL(pSet)) { + // Need to move the FSET to higher level + tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs)); + + if (tsdbCopyDFileSet(pSet, &nSet) < 0) { + tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno)); + return -1; + } + + if (tsdbUpdateDFileSet(pfs, &nSet) < 0) { + return -1; + } + + tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id); + } else { + // On a correct level + if (tsdbUpdateDFileSet(pfs, pSet) < 0) { + return -1; + } + } + + return 0; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index 75a2cbcb8d..9e8e4acd7e 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -13,11 +13,7 @@ * along with this program. If not, see . */ -#include "os.h" -#include "tglobal.h" -#include "tlist.h" -#include "tref.h" -#include "tsdbMain.h" +#include "tsdbint.h" typedef struct { bool stop; @@ -35,7 +31,7 @@ typedef struct { static void *tsdbLoopCommit(void *arg); -SCommitQueue tsCommitQueue = {0}; +static SCommitQueue tsCommitQueue = {0}; int tsdbInitCommitQueue() { int nthreads = tsNumOfCommitThreads; diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c new file mode 100644 index 0000000000..cbff4fbeaa --- /dev/null +++ b/src/tsdb/src/tsdbFS.c @@ -0,0 +1,1204 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "os.h" +#include "tsdbint.h" +#include + +typedef enum { TSDB_TXN_TEMP_FILE = 0, TSDB_TXN_CURR_FILE } TSDB_TXN_FILE_T; +static const char *tsdbTxnFname[] = {"current.t", "current"}; +#define TSDB_MAX_FSETS(keep, days) ((keep) / (days) + 3) + +static int tsdbComparFidFSet(const void *arg1, const void *arg2); +static void tsdbResetFSStatus(SFSStatus *pStatus); +static int tsdbSaveFSStatus(SFSStatus *pStatus, int vid); +static void tsdbApplyFSTxnOnDisk(SFSStatus *pFrom, SFSStatus *pTo); +static void tsdbGetTxnFname(int repoid, TSDB_TXN_FILE_T ftype, char fname[]); +static int tsdbOpenFSFromCurrent(STsdbRepo *pRepo); +static int tsdbScanAndTryFixFS(STsdbRepo *pRepo); +static int tsdbScanRootDir(STsdbRepo *pRepo); +static int tsdbScanDataDir(STsdbRepo *pRepo); +static bool tsdbIsTFileInFS(STsdbFS *pfs, const TFILE *pf); +static int tsdbRestoreCurrent(STsdbRepo *pRepo); +static int tsdbComparTFILE(const void *arg1, const void *arg2); + +// ================== CURRENT file header info +static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { + int tlen = 0; + + tlen += taosEncodeFixedU32(buf, pHeader->version); + tlen += taosEncodeFixedU32(buf, pHeader->len); + + return tlen; +} + +static void *tsdbDecodeFSHeader(void *buf, SFSHeader *pHeader) { + buf = taosDecodeFixedU32(buf, &(pHeader->version)); + buf = taosDecodeFixedU32(buf, &(pHeader->len)); + + return buf; +} + +// ================== STsdbFSMeta +static int tsdbEncodeFSMeta(void **buf, STsdbFSMeta *pMeta) { + int tlen = 0; + + tlen += taosEncodeFixedU32(buf, pMeta->version); + tlen += taosEncodeFixedI64(buf, pMeta->totalPoints); + tlen += taosEncodeFixedI64(buf, pMeta->totalStorage); + + return tlen; +} + +static void *tsdbDecodeFSMeta(void *buf, STsdbFSMeta *pMeta) { + buf = taosDecodeFixedU32(buf, &(pMeta->version)); + buf = taosDecodeFixedI64(buf, &(pMeta->totalPoints)); + buf = taosDecodeFixedI64(buf, &(pMeta->totalStorage)); + + return buf; +} + +// ================== SFSStatus +static int tsdbEncodeDFileSetArray(void **buf, SArray *pArray) { + int tlen = 0; + uint64_t nset = taosArrayGetSize(pArray); + + tlen += taosEncodeFixedU64(buf, nset); + for (size_t i = 0; i < nset; i++) { + SDFileSet *pSet = taosArrayGet(pArray, i); + + tlen += tsdbEncodeDFileSet(buf, pSet); + } + + return tlen; +} + +static void *tsdbDecodeDFileSetArray(void *buf, SArray *pArray) { + uint64_t nset; + SDFileSet dset; + + taosArrayClear(pArray); + + buf = taosDecodeFixedU64(buf, &nset); + for (size_t i = 0; i < nset; i++) { + buf = tsdbDecodeDFileSet(buf, &dset); + taosArrayPush(pArray, (void *)(&dset)); + } + return buf; +} + +static int tsdbEncodeFSStatus(void **buf, SFSStatus *pStatus) { + ASSERT(pStatus->pmf); + + int tlen = 0; + + tlen += tsdbEncodeSMFile(buf, pStatus->pmf); + tlen += tsdbEncodeDFileSetArray(buf, pStatus->df); + + return tlen; +} + +static void *tsdbDecodeFSStatus(void *buf, SFSStatus *pStatus) { + tsdbResetFSStatus(pStatus); + + pStatus->pmf = &(pStatus->mf); + + buf = tsdbDecodeSMFile(buf, pStatus->pmf); + buf = tsdbDecodeDFileSetArray(buf, pStatus->df); + + return buf; +} + +static SFSStatus *tsdbNewFSStatus(int maxFSet) { + SFSStatus *pStatus = (SFSStatus *)calloc(1, sizeof(*pStatus)); + if 
(pStatus == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return NULL; + } + + TSDB_FILE_SET_CLOSED(&(pStatus->mf)); + + pStatus->df = taosArrayInit(maxFSet, sizeof(SDFileSet)); + if (pStatus->df == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + free(pStatus); + return NULL; + } + + return pStatus; +} + +static SFSStatus *tsdbFreeFSStatus(SFSStatus *pStatus) { + if (pStatus) { + pStatus->df = taosArrayDestroy(pStatus->df); + free(pStatus); + } + + return NULL; +} + +static void tsdbResetFSStatus(SFSStatus *pStatus) { + if (pStatus == NULL) { + return; + } + + TSDB_FILE_SET_CLOSED(&(pStatus->mf)); + + pStatus->pmf = NULL; + taosArrayClear(pStatus->df); +} + +static void tsdbSetStatusMFile(SFSStatus *pStatus, const SMFile *pMFile) { + ASSERT(pStatus->pmf == NULL); + + pStatus->pmf = &(pStatus->mf); + tsdbInitMFileEx(pStatus->pmf, (SMFile *)pMFile); +} + +static int tsdbAddDFileSetToStatus(SFSStatus *pStatus, const SDFileSet *pSet) { + if (taosArrayPush(pStatus->df, (void *)pSet) == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + TSDB_FSET_SET_CLOSED(((SDFileSet *)taosArrayGetLast(pStatus->df))); + + return 0; +} + +// ================== STsdbFS +STsdbFS *tsdbNewFS(STsdbCfg *pCfg) { + int keep = pCfg->keep; + int days = pCfg->daysPerFile; + int maxFSet = TSDB_MAX_FSETS(keep, days); + STsdbFS *pfs; + + pfs = (STsdbFS *)calloc(1, sizeof(*pfs)); + if (pfs == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return NULL; + } + + int code = pthread_rwlock_init(&(pfs->lock), NULL); + if (code) { + terrno = TAOS_SYSTEM_ERROR(code); + free(pfs); + return NULL; + } + + pfs->cstatus = tsdbNewFSStatus(maxFSet); + if (pfs->cstatus == NULL) { + tsdbFreeFS(pfs); + return NULL; + } + + pfs->metaCache = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + if (pfs->metaCache == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbFreeFS(pfs); + return NULL; + } + + pfs->nstatus = tsdbNewFSStatus(maxFSet); + if (pfs->nstatus == NULL) { + tsdbFreeFS(pfs); + return NULL; + } + + return pfs; +} + +void *tsdbFreeFS(STsdbFS *pfs) { + if (pfs) { + pfs->nstatus = tsdbFreeFSStatus(pfs->nstatus); + taosHashCleanup(pfs->metaCache); + pfs->metaCache = NULL; + pfs->cstatus = tsdbFreeFSStatus(pfs->cstatus); + pthread_rwlock_destroy(&(pfs->lock)); + free(pfs); + } + + return NULL; +} + +int tsdbOpenFS(STsdbRepo *pRepo) { + STsdbFS *pfs = REPO_FS(pRepo); + char current[TSDB_FILENAME_LEN] = "\0"; + + ASSERT(pfs != NULL); + + tsdbGetTxnFname(REPO_ID(pRepo), TSDB_TXN_CURR_FILE, current); + + if (access(current, F_OK) == 0) { + if (tsdbOpenFSFromCurrent(pRepo) < 0) { + tsdbError("vgId:%d failed to open FS since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + } else { + if (tsdbRestoreCurrent(pRepo) < 0) { + tsdbError("vgId:%d failed to restore current file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + } + + if (tsdbScanAndTryFixFS(pRepo) < 0) { + tsdbError("vgId:%d failed to scan and fix FS since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + // Load meta cache if has meta file + if ((!(pRepo->state & TSDB_STATE_BAD_META)) && tsdbLoadMetaCache(pRepo, true) < 0) { + tsdbError("vgId:%d failed to open FS while loading meta cache since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + return 0; +} + +void tsdbCloseFS(STsdbRepo *pRepo) { + // Do nothing +} + +// Start a new transaction to modify the file system +void tsdbStartFSTxn(STsdbRepo *pRepo, int64_t pointsAdd, int64_t storageAdd) { + STsdbFS *pfs = 
REPO_FS(pRepo); + ASSERT(pfs->intxn == false); + + pfs->intxn = true; + tsdbResetFSStatus(pfs->nstatus); + pfs->nstatus->meta = pfs->cstatus->meta; + if (pfs->cstatus->pmf == NULL) { + pfs->nstatus->meta.version = 0; + } else { + pfs->nstatus->meta.version = pfs->cstatus->meta.version + 1; + } + pfs->nstatus->meta.totalPoints = pfs->cstatus->meta.totalPoints + pointsAdd; + pfs->nstatus->meta.totalStorage = pfs->cstatus->meta.totalStorage += storageAdd; +} + +void tsdbUpdateFSTxnMeta(STsdbFS *pfs, STsdbFSMeta *pMeta) { pfs->nstatus->meta = *pMeta; } + +int tsdbEndFSTxn(STsdbRepo *pRepo) { + STsdbFS *pfs = REPO_FS(pRepo); + ASSERT(FS_IN_TXN(pfs)); + SFSStatus *pStatus; + + // Write current file system snapshot + if (tsdbSaveFSStatus(pfs->nstatus, REPO_ID(pRepo)) < 0) { + tsdbEndFSTxnWithError(pfs); + return -1; + } + + // Make new + tsdbWLockFS(pfs); + pStatus = pfs->cstatus; + pfs->cstatus = pfs->nstatus; + pfs->nstatus = pStatus; + tsdbUnLockFS(pfs); + + // Apply actual change to each file and SDFileSet + tsdbApplyFSTxnOnDisk(pfs->nstatus, pfs->cstatus); + + pfs->intxn = false; + return 0; +} + +int tsdbEndFSTxnWithError(STsdbFS *pfs) { + tsdbApplyFSTxnOnDisk(pfs->nstatus, pfs->cstatus); + // TODO: if mf change, reload pfs->metaCache + pfs->intxn = false; + return 0; +} + +void tsdbUpdateMFile(STsdbFS *pfs, const SMFile *pMFile) { tsdbSetStatusMFile(pfs->nstatus, pMFile); } + +int tsdbUpdateDFileSet(STsdbFS *pfs, const SDFileSet *pSet) { return tsdbAddDFileSetToStatus(pfs->nstatus, pSet); } + +static int tsdbSaveFSStatus(SFSStatus *pStatus, int vid) { + SFSHeader fsheader; + void * pBuf = NULL; + void * ptr; + char hbuf[TSDB_FILE_HEAD_SIZE] = "\0"; + char tfname[TSDB_FILENAME_LEN] = "\0"; + char cfname[TSDB_FILENAME_LEN] = "\0"; + + tsdbGetTxnFname(vid, TSDB_TXN_TEMP_FILE, tfname); + tsdbGetTxnFname(vid, TSDB_TXN_CURR_FILE, cfname); + + int fd = open(tfname, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0755); + if (fd < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + fsheader.version = TSDB_FS_VERSION; + if (pStatus->pmf == NULL) { + ASSERT(taosArrayGetSize(pStatus->df) == 0); + fsheader.len = 0; + } else { + fsheader.len = tsdbEncodeFSStatus(NULL, pStatus) + sizeof(TSCKSUM); + } + + // Encode header part and write + ptr = hbuf; + tsdbEncodeFSHeader(&ptr, &fsheader); + tsdbEncodeFSMeta(&ptr, &(pStatus->meta)); + + taosCalcChecksumAppend(0, (uint8_t *)hbuf, TSDB_FILE_HEAD_SIZE); + + if (taosWrite(fd, hbuf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { + terrno = TAOS_SYSTEM_ERROR(errno); + close(fd); + remove(tfname); + return -1; + } + + // Encode file status and write to file + if (fsheader.len > 0) { + if (tsdbMakeRoom(&(pBuf), fsheader.len) < 0) { + close(fd); + remove(tfname); + return -1; + } + + ptr = pBuf; + tsdbEncodeFSStatus(&ptr, pStatus); + taosCalcChecksumAppend(0, (uint8_t *)pBuf, fsheader.len); + + if (taosWrite(fd, pBuf, fsheader.len) < fsheader.len) { + terrno = TAOS_SYSTEM_ERROR(errno); + close(fd); + remove(tfname); + taosTZfree(pBuf); + return -1; + } + } + + // fsync, close and rename + if (fsync(fd) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + close(fd); + remove(tfname); + taosTZfree(pBuf); + return -1; + } + + (void)close(fd); + (void)taosRename(tfname, cfname); + taosTZfree(pBuf); + + return 0; +} + +static void tsdbApplyFSTxnOnDisk(SFSStatus *pFrom, SFSStatus *pTo) { + int ifrom = 0; + int ito = 0; + size_t sizeFrom, sizeTo; + SDFileSet *pSetFrom; + SDFileSet *pSetTo; + + sizeFrom = taosArrayGetSize(pFrom->df); + sizeTo = taosArrayGetSize(pTo->df); + + 
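// The loop below merges the two fid-sorted SDFileSet arrays (pFrom->df: the status being
+  // discarded, pTo->df: the status that stays current) with two cursors, like a merge of two
+  // sorted lists:
+  //   - a fid present only in pFrom is no longer referenced, so its files are removed from disk;
+  //   - a fid present only in pTo is newly added, so there is nothing to undo on disk;
+  //   - a fid present in both is reconciled file by file by tsdbApplyDFileSetChange (replaced
+  //     files are removed, files that grew beyond the size recorded in pTo are truncated back).
+ 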
// Apply meta file change + tsdbApplyMFileChange(pFrom->pmf, pTo->pmf); + + // Apply SDFileSet change + if (ifrom >= sizeFrom) { + pSetFrom = NULL; + } else { + pSetFrom = taosArrayGet(pFrom->df, ifrom); + } + + if (ito >= sizeTo) { + pSetTo = NULL; + } else { + pSetTo = taosArrayGet(pTo->df, ito); + } + + while (true) { + if ((pSetTo == NULL) && (pSetFrom == NULL)) break; + + if (pSetTo == NULL || (pSetFrom && pSetFrom->fid < pSetTo->fid)) { + tsdbApplyDFileSetChange(pSetFrom, NULL); + + ifrom++; + if (ifrom >= sizeFrom) { + pSetFrom = NULL; + } else { + pSetFrom = taosArrayGet(pFrom->df, ifrom); + } + } else if (pSetFrom == NULL || pSetFrom->fid > pSetTo->fid) { + // Do nothing + ito++; + if (ito >= sizeTo) { + pSetTo = NULL; + } else { + pSetTo = taosArrayGet(pTo->df, ito); + } + } else { + tsdbApplyDFileSetChange(pSetFrom, pSetTo); + + ifrom++; + if (ifrom >= sizeFrom) { + pSetFrom = NULL; + } else { + pSetFrom = taosArrayGet(pFrom->df, ifrom); + } + + ito++; + if (ito >= sizeTo) { + pSetTo = NULL; + } else { + pSetTo = taosArrayGet(pTo->df, ito); + } + } + } +} + +// ================== SFSIter +// ASSUMPTIONS: the FS Should be read locked when calling these functions +void tsdbFSIterInit(SFSIter *pIter, STsdbFS *pfs, int direction) { + pIter->pfs = pfs; + pIter->direction = direction; + + size_t size = taosArrayGetSize(pfs->cstatus->df); + + pIter->version = pfs->cstatus->meta.version; + + if (size == 0) { + pIter->index = -1; + pIter->fid = TSDB_IVLD_FID; + } else { + if (direction == TSDB_FS_ITER_FORWARD) { + pIter->index = 0; + } else { + pIter->index = (int)(size - 1); + } + + pIter->fid = ((SDFileSet *)taosArrayGet(pfs->cstatus->df, pIter->index))->fid; + } +} + +void tsdbFSIterSeek(SFSIter *pIter, int fid) { + STsdbFS *pfs = pIter->pfs; + size_t size = taosArrayGetSize(pfs->cstatus->df); + + int flags; + if (pIter->direction == TSDB_FS_ITER_FORWARD) { + flags = TD_GE; + } else { + flags = TD_LE; + } + + void *ptr = taosbsearch(&fid, pfs->cstatus->df->pData, size, sizeof(SDFileSet), tsdbComparFidFSet, flags); + if (ptr == NULL) { + pIter->index = -1; + pIter->fid = TSDB_IVLD_FID; + } else { + pIter->index = (int)(TARRAY_ELEM_IDX(pfs->cstatus->df, ptr)); + pIter->fid = ((SDFileSet *)ptr)->fid; + } +} + +SDFileSet *tsdbFSIterNext(SFSIter *pIter) { + STsdbFS * pfs = pIter->pfs; + SDFileSet *pSet; + + if (pIter->index < 0) { + ASSERT(pIter->fid == TSDB_IVLD_FID); + return NULL; + } + + ASSERT(pIter->fid != TSDB_IVLD_FID); + + if (pIter->version != pfs->cstatus->meta.version) { + pIter->version = pfs->cstatus->meta.version; + tsdbFSIterSeek(pIter, pIter->fid); + } + + if (pIter->index < 0) { + return NULL; + } + + pSet = (SDFileSet *)taosArrayGet(pfs->cstatus->df, pIter->index); + ASSERT(pSet->fid == pIter->fid); + + if (pIter->direction == TSDB_FS_ITER_FORWARD) { + pIter->index++; + if (pIter->index >= taosArrayGetSize(pfs->cstatus->df)) { + pIter->index = -1; + } + } else { + pIter->index--; + } + + if (pIter->index >= 0) { + pIter->fid = ((SDFileSet *)taosArrayGet(pfs->cstatus->df, pIter->index))->fid; + } else { + pIter->fid = TSDB_IVLD_FID; + } + + return pSet; +} + +static int tsdbComparFidFSet(const void *arg1, const void *arg2) { + int fid = *(int *)arg1; + SDFileSet *pSet = (SDFileSet *)arg2; + + if (fid < pSet->fid) { + return -1; + } else if (fid == pSet->fid) { + return 0; + } else { + return 1; + } +} + +static void tsdbGetTxnFname(int repoid, TSDB_TXN_FILE_T ftype, char fname[]) { + snprintf(fname, TSDB_FILENAME_LEN, "%s/vnode/vnode%d/tsdb/%s", TFS_PRIMARY_PATH(), 
repoid, tsdbTxnFname[ftype]); +} + +static int tsdbOpenFSFromCurrent(STsdbRepo *pRepo) { + STsdbFS * pfs = REPO_FS(pRepo); + int fd = -1; + void * buffer = NULL; + SFSHeader fsheader; + char current[TSDB_FILENAME_LEN] = "\0"; + void * ptr; + + tsdbGetTxnFname(REPO_ID(pRepo), TSDB_TXN_CURR_FILE, current); + + // current file exists, try to recover + fd = open(current, O_RDONLY | O_BINARY); + if (fd < 0) { + tsdbError("vgId:%d failed to open file %s since %s", REPO_ID(pRepo), current, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + + if (tsdbMakeRoom(&buffer, TSDB_FILE_HEAD_SIZE) < 0) { + goto _err; + } + + int nread = (int)taosRead(fd, buffer, TSDB_FILE_HEAD_SIZE); + if (nread < 0) { + tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pRepo), TSDB_FILENAME_LEN, current, + strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + + if (nread < TSDB_FILE_HEAD_SIZE) { + tsdbError("vgId:%d failed to read header of file %s, read bytes:%d", REPO_ID(pRepo), current, nread); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + goto _err; + } + + if (!taosCheckChecksumWhole((uint8_t *)buffer, TSDB_FILE_HEAD_SIZE)) { + tsdbError("vgId:%d header of file %s failed checksum check", REPO_ID(pRepo), current); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + goto _err; + } + + SFSStatus *pStatus = pfs->cstatus; + ptr = buffer; + ptr = tsdbDecodeFSHeader(ptr, &fsheader); + ptr = tsdbDecodeFSMeta(ptr, &(pStatus->meta)); + + if (fsheader.version != TSDB_FS_VERSION) { + // TODO: handle file version change + } + + if (fsheader.len > 0) { + if (tsdbMakeRoom(&buffer, fsheader.len) < 0) { + goto _err; + } + + nread = (int)taosRead(fd, buffer, fsheader.len); + if (nread < 0) { + tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), current, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + + if (nread < fsheader.len) { + tsdbError("vgId:%d failed to read %d bytes from file %s", REPO_ID(pRepo), fsheader.len, current); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + goto _err; + } + + if (!taosCheckChecksumWhole((uint8_t *)buffer, fsheader.len)) { + tsdbError("vgId:%d file %s is corrupted since wrong checksum", REPO_ID(pRepo), current); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + goto _err; + } + + ptr = buffer; + ptr = tsdbDecodeFSStatus(ptr, pStatus); + } else { + tsdbResetFSStatus(pStatus); + } + + taosTZfree(buffer); + close(fd); + + return 0; + +_err: + if (fd >= 0) { + close(fd); + } + taosTZfree(buffer); + return -1; +} + +// Scan and try to fix incorrect files +static int tsdbScanAndTryFixFS(STsdbRepo *pRepo) { + STsdbFS * pfs = REPO_FS(pRepo); + SFSStatus *pStatus = pfs->cstatus; + + if (tsdbScanAndTryFixMFile(pRepo) < 0) { + tsdbError("vgId:%d failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + size_t size = taosArrayGetSize(pStatus->df); + + for (size_t i = 0; i < size; i++) { + SDFileSet *pSet = (SDFileSet *)taosArrayGet(pStatus->df, i); + + if (tsdbScanAndTryFixDFileSet(pRepo, pSet) < 0) { + tsdbError("vgId:%d failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + } + + // remove those unused files + tsdbScanRootDir(pRepo); + tsdbScanDataDir(pRepo); + return 0; +} + +int tsdbLoadMetaCache(STsdbRepo *pRepo, bool recoverMeta) { + char tbuf[128]; + STsdbFS * pfs = REPO_FS(pRepo); + SMFile mf; + SMFile * pMFile = &mf; + void * pBuf = NULL; + SKVRecord rInfo; + int64_t maxBufSize = 0; + SMFInfo minfo; + + taosHashEmpty(pfs->metaCache); + + // No meta file, just 
return + if (pfs->cstatus->pmf == NULL) return 0; + + mf = pfs->cstatus->mf; + // Load cache first + if (tsdbOpenMFile(pMFile, O_RDONLY) < 0) { + return -1; + } + + if (tsdbLoadMFileHeader(pMFile, &minfo) < 0) { + tsdbCloseMFile(pMFile); + return -1; + } + + while (true) { + int64_t tsize = tsdbReadMFile(pMFile, tbuf, sizeof(SKVRecord)); + if (tsize == 0) break; + + if (tsize < 0) { + tsdbError("vgId:%d failed to read META file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + if (tsize < sizeof(SKVRecord)) { + tsdbError("vgId:%d failed to read %" PRIzu " bytes from file %s", REPO_ID(pRepo), sizeof(SKVRecord), + TSDB_FILE_FULL_NAME(pMFile)); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbCloseMFile(pMFile); + return -1; + } + + void *ptr = tsdbDecodeKVRecord(tbuf, &rInfo); + ASSERT(POINTER_DISTANCE(ptr, tbuf) == sizeof(SKVRecord)); + // ASSERT((rInfo.offset > 0) ? (pStore->info.size == rInfo.offset) : true); + + if (rInfo.offset < 0) { + taosHashRemove(pfs->metaCache, (void *)(&rInfo.uid), sizeof(rInfo.uid)); +#if 0 + pStore->info.size += sizeof(SKVRecord); + pStore->info.nRecords--; + pStore->info.nDels++; + pStore->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2); +#endif + } else { + ASSERT(rInfo.offset > 0 && rInfo.size > 0); + if (taosHashPut(pfs->metaCache, (void *)(&rInfo.uid), sizeof(rInfo.uid), &rInfo, sizeof(rInfo)) < 0) { + tsdbError("vgId:%d failed to load meta cache from file %s since OOM", REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pMFile)); + terrno = TSDB_CODE_COM_OUT_OF_MEMORY; + tsdbCloseMFile(pMFile); + return -1; + } + + maxBufSize = MAX(maxBufSize, rInfo.size); + + if (tsdbSeekMFile(pMFile, rInfo.size, SEEK_CUR) < 0) { + tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), + tstrerror(terrno)); + tsdbCloseMFile(pMFile); + return -1; + } + +#if 0 + pStore->info.size += (sizeof(SKVRecord) + rInfo.size); + pStore->info.nRecords++; +#endif + } + } + + if (recoverMeta) { + pBuf = malloc((size_t)maxBufSize); + if (pBuf == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbCloseMFile(pMFile); + return -1; + } + + SKVRecord *pRecord = taosHashIterate(pfs->metaCache, NULL); + while (pRecord) { + if (tsdbSeekMFile(pMFile, pRecord->offset + sizeof(SKVRecord), SEEK_SET) < 0) { + tsdbError("vgId:%d failed to seek file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), + tstrerror(terrno)); + tfree(pBuf); + tsdbCloseMFile(pMFile); + return -1; + } + + int nread = (int)tsdbReadMFile(pMFile, pBuf, pRecord->size); + if (nread < 0) { + tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), + tstrerror(terrno)); + tfree(pBuf); + tsdbCloseMFile(pMFile); + return -1; + } + + if (nread < pRecord->size) { + tsdbError("vgId:%d failed to read file %s since file corrupted, expected read:%" PRId64 " actual read:%d", + REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), pRecord->size, nread); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tfree(pBuf); + tsdbCloseMFile(pMFile); + return -1; + } + + if (tsdbRestoreTable(pRepo, pBuf, (int)pRecord->size) < 0) { + tsdbError("vgId:%d failed to restore table, uid %" PRId64 ", since %s" PRIu64, REPO_ID(pRepo), pRecord->uid, + tstrerror(terrno)); + tfree(pBuf); + tsdbCloseMFile(pMFile); + return -1; + } + + pRecord = taosHashIterate(pfs->metaCache, pRecord); + } + + tsdbOrgMeta(pRepo); + } + + tsdbCloseMFile(pMFile); + tfree(pBuf); + return 0; +} + +static int tsdbScanRootDir(STsdbRepo *pRepo) { + char rootDir[TSDB_FILENAME_LEN]; + char 
bname[TSDB_FILENAME_LEN]; + STsdbFS * pfs = REPO_FS(pRepo); + const TFILE *pf; + + tsdbGetRootDir(REPO_ID(pRepo), rootDir); + TDIR *tdir = tfsOpendir(rootDir); + if (tdir == NULL) { + tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), rootDir, tstrerror(terrno)); + return -1; + } + + while ((pf = tfsReaddir(tdir))) { + tfsbasename(pf, bname); + + if (strcmp(bname, tsdbTxnFname[TSDB_TXN_CURR_FILE]) == 0 || strcmp(bname, "data") == 0) { + // Skip current file and data directory + continue; + } + + if (pfs->cstatus->pmf && tfsIsSameFile(pf, &(pfs->cstatus->pmf->f))) { + continue; + } + + tfsremove(pf); + tsdbDebug("vgId:%d invalid file %s is removed", REPO_ID(pRepo), TFILE_NAME(pf)); + } + + tfsClosedir(tdir); + + return 0; +} + +static int tsdbScanDataDir(STsdbRepo *pRepo) { + char dataDir[TSDB_FILENAME_LEN]; + char bname[TSDB_FILENAME_LEN]; + STsdbFS * pfs = REPO_FS(pRepo); + const TFILE *pf; + + tsdbGetDataDir(REPO_ID(pRepo), dataDir); + TDIR *tdir = tfsOpendir(dataDir); + if (tdir == NULL) { + tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno)); + return -1; + } + + while ((pf = tfsReaddir(tdir))) { + tfsbasename(pf, bname); + + if (!tsdbIsTFileInFS(pfs, pf)) { + tfsremove(pf); + tsdbDebug("vgId:%d invalid file %s is removed", REPO_ID(pRepo), TFILE_NAME(pf)); + } + } + + tfsClosedir(tdir); + + return 0; +} + +static bool tsdbIsTFileInFS(STsdbFS *pfs, const TFILE *pf) { + SFSIter fsiter; + tsdbFSIterInit(&fsiter, pfs, TSDB_FS_ITER_FORWARD); + SDFileSet *pSet; + + while ((pSet = tsdbFSIterNext(&fsiter))) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile *pDFile = TSDB_DFILE_IN_SET(pSet, ftype); + if (tfsIsSameFile(pf, TSDB_FILE_F(pDFile))) { + return true; + } + } + } + + return false; +} + +static int tsdbRestoreMeta(STsdbRepo *pRepo) { + char rootDir[TSDB_FILENAME_LEN]; + char bname[TSDB_FILENAME_LEN]; + TDIR * tdir = NULL; + const TFILE *pf = NULL; + const char * pattern = "^meta(-ver[0-9]+)?$"; + regex_t regex; + STsdbFS * pfs = REPO_FS(pRepo); + + regcomp(®ex, pattern, REG_EXTENDED); + + tsdbInfo("vgId:%d try to restore meta", REPO_ID(pRepo)); + + tsdbGetRootDir(REPO_ID(pRepo), rootDir); + + tdir = tfsOpendir(rootDir); + if (tdir == NULL) { + tsdbError("vgId:%d failed to open dir %s since %s", REPO_ID(pRepo), rootDir, tstrerror(terrno)); + regfree(®ex); + return -1; + } + + while ((pf = tfsReaddir(tdir))) { + tfsbasename(pf, bname); + + if (strcmp(bname, "data") == 0) { + // Skip the data/ directory + continue; + } + + if (strcmp(bname, tsdbTxnFname[TSDB_TXN_TEMP_FILE]) == 0) { + // Skip current.t file + tsdbInfo("vgId:%d file %s exists, remove it", REPO_ID(pRepo), TFILE_NAME(pf)); + tfsremove(pf); + continue; + } + + int code = regexec(®ex, bname, 0, NULL, 0); + if (code == 0) { + // Match + if (pfs->cstatus->pmf != NULL) { + tsdbError("vgId:%d failed to restore meta since two file exists, file1 %s and file2 %s", REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), TFILE_NAME(pf)); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tfsClosedir(tdir); + regfree(®ex); + return -1; + } else { + uint32_t version = 0; + if (strcmp(bname, "meta") != 0) { + sscanf(bname, "meta-ver%" PRIu32, &version); + pfs->cstatus->meta.version = version; + } + + pfs->cstatus->pmf = &(pfs->cstatus->mf); + pfs->cstatus->pmf->f = *pf; + TSDB_FILE_SET_CLOSED(pfs->cstatus->pmf); + + if (tsdbOpenMFile(pfs->cstatus->pmf, O_RDONLY) < 0) { + tsdbError("vgId:%d failed to restore meta since %s", REPO_ID(pRepo), 
tstrerror(terrno)); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + + if (tsdbLoadMFileHeader(pfs->cstatus->pmf, &(pfs->cstatus->pmf->info)) < 0) { + tsdbError("vgId:%d failed to restore meta since %s", REPO_ID(pRepo), tstrerror(terrno)); + tsdbCloseMFile(pfs->cstatus->pmf); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + + tsdbCloseMFile(pfs->cstatus->pmf); + } + } else if (code == REG_NOMATCH) { + // Not match + tsdbInfo("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), TFILE_NAME(pf)); + tfsremove(pf); + continue; + } else { + // Has other error + tsdbError("vgId:%d failed to restore meta file while run regexec since %s", REPO_ID(pRepo), strerror(code)); + terrno = TAOS_SYSTEM_ERROR(code); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + } + + if (pfs->cstatus->pmf) { + tsdbInfo("vgId:%d meta file %s is restored", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pfs->cstatus->pmf)); + } else { + tsdbInfo("vgId:%d no meta file is restored", REPO_ID(pRepo)); + } + + tfsClosedir(tdir); + regfree(®ex); + return 0; +} + +static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { + char dataDir[TSDB_FILENAME_LEN]; + char bname[TSDB_FILENAME_LEN]; + TDIR * tdir = NULL; + const TFILE *pf = NULL; + const char * pattern = "^v[0-9]+f[0-9]+\\.(head|data|last)(-ver[0-9]+)?$"; + SArray * fArray = NULL; + regex_t regex; + STsdbFS * pfs = REPO_FS(pRepo); + + tsdbGetDataDir(REPO_ID(pRepo), dataDir); + + // Resource allocation and init + regcomp(®ex, pattern, REG_EXTENDED); + + fArray = taosArrayInit(1024, sizeof(TFILE)); + if (fArray == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir, + tstrerror(terrno)); + regfree(®ex); + return -1; + } + + tdir = tfsOpendir(dataDir); + if (tdir == NULL) { + tsdbError("vgId:%d failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir, + tstrerror(terrno)); + taosArrayDestroy(fArray); + regfree(®ex); + return -1; + } + + while ((pf = tfsReaddir(tdir))) { + tfsbasename(pf, bname); + + int code = regexec(®ex, bname, 0, NULL, 0); + if (code == 0) { + if (taosArrayPush(fArray, (void *)pf) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tfsClosedir(tdir); + taosArrayDestroy(fArray); + regfree(®ex); + return -1; + } + } else if (code == REG_NOMATCH) { + // Not match + tsdbInfo("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), TFILE_NAME(pf)); + tfsremove(pf); + continue; + } else { + // Has other error + tsdbError("vgId:%d failed to restore DFileSet Array while run regexec since %s", REPO_ID(pRepo), strerror(code)); + terrno = TAOS_SYSTEM_ERROR(code); + tfsClosedir(tdir); + taosArrayDestroy(fArray); + regfree(®ex); + return -1; + } + } + + tfsClosedir(tdir); + regfree(®ex); + + // Sort the array according to file name + taosArraySort(fArray, tsdbComparTFILE); + + size_t index = 0; + // Loop to recover each file set + for (;;) { + if (index >= taosArrayGetSize(fArray)) { + break; + } + + SDFileSet fset = {0}; + + TSDB_FSET_SET_CLOSED(&fset); + + // Loop to recover ONE fset + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile *pDFile = TSDB_DFILE_IN_SET(&fset, ftype); + + if (index >= taosArrayGetSize(fArray)) { + tsdbError("vgId:%d incomplete DFileSet, fid:%d", REPO_ID(pRepo), fset.fid); + taosArrayDestroy(fArray); + return -1; + } + + pf = taosArrayGet(fArray, index); + + int tvid, tfid; + TSDB_FILE_T ttype; + uint32_t tversion; + char bname[TSDB_FILENAME_LEN]; + + tfsbasename(pf, bname); + 
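// fArray is sorted by (fid, file type), so a complete file set occupies TSDB_FILE_MAX
+      // consecutive entries. The name parsed below gives (vid, fid, type, version); the first
+      // entry fixes fset.fid, and the remaining entries must carry the same fid with types in
+      // head -> data -> last order, otherwise the set is treated as incomplete and the restore fails.
+      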
tsdbParseDFilename(bname, &tvid, &tfid, &ttype, &tversion); + + ASSERT(tvid == REPO_ID(pRepo)); + + if (ftype == 0) { + fset.fid = tfid; + } else { + if (tfid != fset.fid) { + tsdbError("vgId:%d incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid); + taosArrayDestroy(fArray); + return -1; + } + } + + if (ttype != ftype) { + tsdbError("vgId:%d incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid); + taosArrayDestroy(fArray); + return -1; + } + + pDFile->f = *pf; + + if (tsdbOpenDFile(pDFile, O_RDONLY) < 0) { + tsdbError("vgId:%d failed to open DFile %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno)); + taosArrayDestroy(fArray); + return -1; + } + + if (tsdbLoadDFileHeader(pDFile, &(pDFile->info)) < 0) { + tsdbError("vgId:%d failed to load DFile %s header since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), + tstrerror(terrno)); + taosArrayDestroy(fArray); + return -1; + } + + tsdbCloseDFile(pDFile); + index++; + } + + tsdbInfo("vgId:%d FSET %d is restored", REPO_ID(pRepo), fset.fid); + taosArrayPush(pfs->cstatus->df, &fset); + } + + // Resource release + taosArrayDestroy(fArray); + + return 0; +} + +static int tsdbRestoreCurrent(STsdbRepo *pRepo) { + // Loop to recover mfile + if (tsdbRestoreMeta(pRepo) < 0) { + tsdbError("vgId:%d failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + // Loop to recover dfile set + if (tsdbRestoreDFileSet(pRepo) < 0) { + tsdbError("vgId:%d failed to restore DFileSet since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + if (tsdbSaveFSStatus(pRepo->fs->cstatus, REPO_ID(pRepo)) < 0) { + tsdbError("vgId:%d failed to restore corrent since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + return 0; +} + +static int tsdbComparTFILE(const void *arg1, const void *arg2) { + TFILE *pf1 = (TFILE *)arg1; + TFILE *pf2 = (TFILE *)arg2; + + int vid1, fid1, vid2, fid2; + TSDB_FILE_T ftype1, ftype2; + uint32_t version1, version2; + char bname1[TSDB_FILENAME_LEN]; + char bname2[TSDB_FILENAME_LEN]; + + tfsbasename(pf1, bname1); + tfsbasename(pf2, bname2); + tsdbParseDFilename(bname1, &vid1, &fid1, &ftype1, &version1); + tsdbParseDFilename(bname2, &vid2, &fid2, &ftype2, &version2); + + if (fid1 < fid2) { + return -1; + } else if (fid1 > fid2) { + return 1; + } else { + if (ftype1 < ftype2) { + return -1; + } else if (ftype1 > ftype2) { + return 1; + } else { + return 0; + } + } +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 7a8622b110..8124a0e3b5 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -12,433 +12,482 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ -#define _DEFAULT_SOURCE -#define TAOS_RANDOM_FILE_FAIL_TEST -#include -#include "os.h" -#include "talgo.h" -#include "tchecksum.h" -#include "tsdbMain.h" -#include "tutil.h" +#include "tsdbint.h" -const char *tsdbFileSuffix[] = {".head", ".data", ".last", ".stat", ".h", ".d", ".l", ".s"}; +static const char *TSDB_FNAME_SUFFIX[] = { + "head", // TSDB_FILE_HEAD + "data", // TSDB_FILE_DATA + "last", // TSDB_FILE_LAST + "", // TSDB_FILE_MAX + "meta" // TSDB_FILE_META +}; -static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type); -static void tsdbDestroyFile(SFile *pFile); -static int compFGroup(const void *arg1, const void *arg2); -static int keyFGroupCompFunc(const void *key, const void *fgroup); -static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo); -static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep); -static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days); +static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname); +static int tsdbRollBackMFile(SMFile *pMFile); +static int tsdbEncodeDFInfo(void **buf, SDFInfo *pInfo); +static void *tsdbDecodeDFInfo(void *buf, SDFInfo *pInfo); +static int tsdbRollBackDFile(SDFile *pDFile); -// ---------------- INTERNAL FUNCTIONS ---------------- -STsdbFileH *tsdbNewFileH(STsdbCfg *pCfg) { - STsdbFileH *pFileH = (STsdbFileH *)calloc(1, sizeof(*pFileH)); - if (pFileH == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } +// ============== SMFile +void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver) { + char fname[TSDB_FILENAME_LEN]; - int code = pthread_rwlock_init(&(pFileH->fhlock), NULL); - if (code != 0) { - tsdbError("vgId:%d failed to init file handle lock since %s", pCfg->tsdbId, strerror(code)); - terrno = TAOS_SYSTEM_ERROR(code); - goto _err; - } + TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_OK); - pFileH->maxFGroups = TSDB_MAX_FILE(pCfg->keep, pCfg->daysPerFile); + memset(&(pMFile->info), 0, sizeof(pMFile->info)); + pMFile->info.magic = TSDB_FILE_INIT_MAGIC; - pFileH->pFGroup = (SFileGroup *)calloc(pFileH->maxFGroups, sizeof(SFileGroup)); - if (pFileH->pFGroup == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - return pFileH; - -_err: - tsdbFreeFileH(pFileH); - return NULL; + tsdbGetFilename(vid, 0, ver, TSDB_FILE_META, fname); + tfsInitFile(TSDB_FILE_F(pMFile), did.level, did.id, fname); } -void tsdbFreeFileH(STsdbFileH *pFileH) { - if (pFileH) { - pthread_rwlock_destroy(&pFileH->fhlock); - tfree(pFileH->pFGroup); - free(pFileH); - } +void tsdbInitMFileEx(SMFile *pMFile, const SMFile *pOMFile) { + *pMFile = *pOMFile; + TSDB_FILE_SET_CLOSED(pMFile); } -int tsdbOpenFileH(STsdbRepo *pRepo) { - ASSERT(pRepo != NULL && pRepo->tsdbFileH != NULL); +int tsdbEncodeSMFile(void **buf, SMFile *pMFile) { + int tlen = 0; - char * tDataDir = NULL; - DIR * dir = NULL; - int fid = 0; - int vid = 0; - regex_t regex1 = {0}, regex2 = {0}; - int code = 0; - char fname[TSDB_FILENAME_LEN] = "\0"; + tlen += tsdbEncodeMFInfo(buf, &(pMFile->info)); + tlen += tfsEncodeFile(buf, &(pMFile->f)); - SFileGroup fileGroup = {0}; - STsdbFileH *pFileH = pRepo->tsdbFileH; - STsdbCfg * pCfg = &(pRepo->config); + return tlen; +} - tDataDir = tsdbGetDataDirName(pRepo->rootDir); - if (tDataDir == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } +void *tsdbDecodeSMFile(void *buf, SMFile *pMFile) { + buf = tsdbDecodeMFInfo(buf, &(pMFile->info)); + buf = tfsDecodeFile(buf, &(pMFile->f)); + TSDB_FILE_SET_CLOSED(pMFile); - dir 
= opendir(tDataDir); - if (dir == NULL) { - if (errno == ENOENT) { - tsdbError("vgId:%d directory %s not exist", REPO_ID(pRepo), tDataDir); - terrno = TAOS_SYSTEM_ERROR(errno); + return buf; +} - if (taosMkDir(tDataDir, 0755) < 0) { - tsdbError("vgId:%d failed to create directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } +int tsdbEncodeSMFileEx(void **buf, SMFile *pMFile) { + int tlen = 0; - dir = opendir(tDataDir); - if (dir == NULL) { - tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } + tlen += tsdbEncodeMFInfo(buf, &(pMFile->info)); + tlen += taosEncodeString(buf, TSDB_FILE_FULL_NAME(pMFile)); + + return tlen; +} + +void *tsdbDecodeSMFileEx(void *buf, SMFile *pMFile) { + char *aname; + buf = tsdbDecodeMFInfo(buf, &(pMFile->info)); + buf = taosDecodeString(buf, &aname); + strncpy(TSDB_FILE_FULL_NAME(pMFile), aname, TSDB_FILENAME_LEN); + TSDB_FILE_SET_CLOSED(pMFile); + + tfree(aname); + + return buf; +} + +int tsdbApplyMFileChange(SMFile *from, SMFile *to) { + if (from == NULL && to == NULL) return 0; + + if (from != NULL) { + if (to == NULL) { + return tsdbRemoveMFile(from); } else { - tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - } - - code = regcomp(®ex1, "^v[0-9]+f[0-9]+\\.(head|data|last|stat)$", REG_EXTENDED); - if (code != 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - code = regcomp(®ex2, "^v[0-9]+f[0-9]+\\.(h|d|l|s)$", REG_EXTENDED); - if (code != 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); - - struct dirent *dp = NULL; - while ((dp = readdir(dir)) != NULL) { - if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) continue; - - code = regexec(®ex1, dp->d_name, 0, NULL, 0); - if (code == 0) { - sscanf(dp->d_name, "v%df%d", &vid, &fid); - if (vid != REPO_ID(pRepo)) { - tsdbError("vgId:%d invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); - continue; - } - - if (fid < mfid) { - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { - tsdbGetDataFileName(pRepo->rootDir, pCfg->tsdbId, fid, type, fname); - (void)remove(fname); + if (tfsIsSameFile(TSDB_FILE_F(from), TSDB_FILE_F(to))) { + if (from->info.size > to->info.size) { + tsdbRollBackMFile(to); } - continue; - } - - if (tsdbSearchFGroup(pFileH, fid, TD_EQ) != NULL) continue; - memset((void *)(&fileGroup), 0, sizeof(SFileGroup)); - fileGroup.fileId = fid; - - tsdbInitFileGroup(&fileGroup, pRepo); - } else if (code == REG_NOMATCH) { - code = regexec(®ex2, dp->d_name, 0, NULL, 0); - if (code == 0) { - size_t tsize = strlen(tDataDir) + strlen(dp->d_name) + 2; - char * fname1 = malloc(tsize); - if (fname1 == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - sprintf(fname1, "%s/%s", tDataDir, dp->d_name); - - tsize = tsize + 64; - char *fname2 = malloc(tsize); - if (fname2 == NULL) { - free(fname1); - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - sprintf(fname2, "%s/%s_back_%" PRId64, tDataDir, dp->d_name, taosGetTimestamp(TSDB_TIME_PRECISION_MILLI)); - - (void)taosRename(fname1, fname2); - - tsdbDebug("vgId:%d file %s exists, backup it as %s", REPO_ID(pRepo), fname1, fname2); - - free(fname1); - free(fname2); - continue; - } else if (code == REG_NOMATCH) { - tsdbError("vgId:%d 
invalid file %s exists, ignore it", REPO_ID(pRepo), dp->d_name); - continue; } else { - goto _err; + return tsdbRemoveMFile(from); } - } else { - goto _err; } - - pFileH->pFGroup[pFileH->nFGroups++] = fileGroup; - qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); - tsdbDebug("vgId:%d file group %d is restored, nFGroups %d", REPO_ID(pRepo), fileGroup.fileId, pFileH->nFGroups); } - regfree(®ex1); - regfree(®ex2); - tfree(tDataDir); - closedir(dir); return 0; - -_err: - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&fileGroup.files[type]); - - regfree(®ex1); - regfree(®ex2); - - tfree(tDataDir); - if (dir != NULL) closedir(dir); - tsdbCloseFileH(pRepo); - return -1; } -void tsdbCloseFileH(STsdbRepo *pRepo) { - STsdbFileH *pFileH = pRepo->tsdbFileH; +int tsdbCreateMFile(SMFile *pMFile, bool updateHeader) { + ASSERT(pMFile->info.size == 0 && pMFile->info.magic == TSDB_FILE_INIT_MAGIC); - for (int i = 0; i < pFileH->nFGroups; i++) { - SFileGroup *pFGroup = pFileH->pFGroup + i; - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { - tsdbDestroyFile(&pFGroup->files[type]); - } - } -} - -SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) { - STsdbFileH *pFileH = pRepo->tsdbFileH; - STsdbCfg * pCfg = &(pRepo->config); - - if (pFileH->nFGroups >= pFileH->maxFGroups) { - int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); - if (pFileH->pFGroup[0].fileId < mfid) { - pthread_rwlock_wrlock(&pFileH->fhlock); - tsdbRemoveFileGroup(pRepo, &(pFileH->pFGroup[0])); - pthread_rwlock_unlock(&pFileH->fhlock); - } - } - - ASSERT(pFileH->nFGroups < pFileH->maxFGroups); - - SFileGroup fGroup; - SFileGroup *pFGroup = &fGroup; - - SFileGroup *pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ); - if (pGroup == NULL) { // if not exists, create one - pFGroup->fileId = fid; - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { - if (tsdbCreateFile(&pFGroup->files[type], pRepo, fid, type) < 0) { - for (int i = type; i >= 0; i--) { - remove(pFGroup->files[i].fname); - } - - return NULL; + pMFile->fd = open(TSDB_FILE_FULL_NAME(pMFile), O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0755); + if (pMFile->fd < 0) { + if (errno == ENOENT) { + // Try to create directory recursively + char *s = strdup(TFILE_REL_NAME(&(pMFile->f))); + if (tfsMkdirRecurAt(dirname(s), TSDB_FILE_LEVEL(pMFile), TSDB_FILE_ID(pMFile)) < 0) { + tfree(s); + return -1; } - } + tfree(s); - pthread_rwlock_wrlock(&pFileH->fhlock); - pFileH->pFGroup[pFileH->nFGroups++] = fGroup; - qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); - pthread_rwlock_unlock(&pFileH->fhlock); - pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ); - ASSERT(pGroup != NULL); - } - - return pGroup; -} - -void tsdbInitFileGroupIter(STsdbFileH *pFileH, SFileGroupIter *pIter, int direction) { - pIter->pFileH = pFileH; - pIter->direction = direction; - - if (pFileH->nFGroups == 0) { - pIter->index = -1; - pIter->fileId = -1; - } else { - if (direction == TSDB_FGROUP_ITER_FORWARD) { - pIter->index = 0; + pMFile->fd = open(TSDB_FILE_FULL_NAME(pMFile), O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0755); + if (pMFile->fd < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } } else { - pIter->index = pFileH->nFGroups - 1; + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; } - pIter->fileId = pFileH->pFGroup[pIter->index].fileId; - } -} - -void tsdbSeekFileGroupIter(SFileGroupIter *pIter, int fid) { - STsdbFileH *pFileH = pIter->pFileH; - - if (pFileH->nFGroups 
== 0) { - pIter->index = -1; - pIter->fileId = -1; - return; } - int flags = (pIter->direction == TSDB_FGROUP_ITER_FORWARD) ? TD_GE : TD_LE; - void *ptr = taosbsearch(&fid, (void *)pFileH->pFGroup, pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, flags); - if (ptr == NULL) { - pIter->index = -1; - pIter->fileId = -1; - } else { - pIter->index = (int)(POINTER_DISTANCE(ptr, pFileH->pFGroup) / sizeof(SFileGroup)); - pIter->fileId = ((SFileGroup *)ptr)->fileId; - } -} - -SFileGroup *tsdbGetFileGroupNext(SFileGroupIter *pIter) { - STsdbFileH *pFileH = pIter->pFileH; - SFileGroup *pFGroup = NULL; - - if (pIter->index < 0 || pIter->index >= pFileH->nFGroups || pIter->fileId < 0) return NULL; - - pFGroup = &pFileH->pFGroup[pIter->index]; - if (pFGroup->fileId != pIter->fileId) { - tsdbSeekFileGroupIter(pIter, pIter->fileId); + if (!updateHeader) { + return 0; } - if (pIter->index < 0) return NULL; - - pFGroup = &pFileH->pFGroup[pIter->index]; - ASSERT(pFGroup->fileId == pIter->fileId); - - if (pIter->direction == TSDB_FGROUP_ITER_FORWARD) { - pIter->index++; - } else { - pIter->index--; - } - - if (pIter->index >= 0 && pIter->index < pFileH->nFGroups) { - pIter->fileId = pFileH->pFGroup[pIter->index].fileId; - } else { - pIter->fileId = -1; - } - - return pFGroup; -} - -int tsdbOpenFile(SFile *pFile, int oflag) { - ASSERT(!TSDB_IS_FILE_OPENED(pFile)); - - pFile->fd = open(pFile->fname, oflag | O_BINARY, 0755); - if (pFile->fd < 0) { - tsdbError("failed to open file %s since %s", pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); + if (tsdbUpdateMFileHeader(pMFile) < 0) { + tsdbCloseMFile(pMFile); + tsdbRemoveMFile(pMFile); return -1; } - tsdbTrace("open file %s, fd %d", pFile->fname, pFile->fd); + pMFile->info.size += TSDB_FILE_HEAD_SIZE; return 0; } -void tsdbCloseFile(SFile *pFile) { - if (TSDB_IS_FILE_OPENED(pFile)) { - tsdbTrace("close file %s, fd %d", pFile->fname, pFile->fd); - close(pFile->fd); - pFile->fd = -1; - } -} - -int tsdbCreateFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) { - memset((void *)pFile, 0, sizeof(SFile)); - pFile->fd = -1; - - tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), fid, type, pFile->fname); - - if (access(pFile->fname, F_OK) == 0) { - tsdbError("vgId:%d file %s already exists", REPO_ID(pRepo), pFile->fname); - terrno = TSDB_CODE_TDB_FILE_ALREADY_EXISTS; - goto _err; - } - - if (tsdbOpenFile(pFile, O_RDWR | O_CREAT) < 0) { - goto _err; - } - - pFile->info.size = TSDB_FILE_HEAD_SIZE; - pFile->info.magic = TSDB_FILE_INIT_MAGIC; - - if (tsdbUpdateFileHeader(pFile) < 0) { - tsdbCloseFile(pFile); - return -1; - } - - tsdbCloseFile(pFile); - - return 0; - -_err: - return -1; -} - -SFileGroup *tsdbSearchFGroup(STsdbFileH *pFileH, int fid, int flags) { - void *ptr = - taosbsearch((void *)(&fid), (void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, flags); - if (ptr == NULL) return NULL; - return (SFileGroup *)ptr; -} - -void tsdbFitRetention(STsdbRepo *pRepo) { - STsdbCfg *pCfg = &(pRepo->config); - STsdbFileH *pFileH = pRepo->tsdbFileH; - SFileGroup *pGroup = pFileH->pFGroup; - - int mfid = tsdbGetCurrMinFid(pCfg->precision, pCfg->keep, pCfg->daysPerFile); - - pthread_rwlock_wrlock(&(pFileH->fhlock)); - - while (pFileH->nFGroups > 0 && pGroup[0].fileId < mfid) { - tsdbRemoveFileGroup(pRepo, pGroup); - } - - pthread_rwlock_unlock(&(pFileH->fhlock)); -} - -int tsdbUpdateFileHeader(SFile *pFile) { +int tsdbUpdateMFileHeader(SMFile *pMFile) { char buf[TSDB_FILE_HEAD_SIZE] = "\0"; - void *pBuf = (void 
*)buf; - taosEncodeFixedU32((void *)(&pBuf), TSDB_FILE_VERSION); - tsdbEncodeSFileInfo((void *)(&pBuf), &(pFile->info)); - - taosCalcChecksumAppend(0, (uint8_t *)buf, TSDB_FILE_HEAD_SIZE); - - if (lseek(pFile->fd, 0, SEEK_SET) < 0) { - tsdbError("failed to lseek file %s since %s", pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); + if (tsdbSeekMFile(pMFile, 0, SEEK_SET) < 0) { return -1; } - if (taosWrite(pFile->fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { - tsdbError("failed to write %d bytes to file %s since %s", TSDB_FILE_HEAD_SIZE, pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); + + void *ptr = buf; + tsdbEncodeMFInfo(&ptr, TSDB_FILE_INFO(pMFile)); + + taosCalcChecksumAppend(0, (uint8_t *)buf, TSDB_FILE_HEAD_SIZE); + if (tsdbWriteMFile(pMFile, buf, TSDB_FILE_HEAD_SIZE) < 0) { return -1; } return 0; } -int tsdbEncodeSFileInfo(void **buf, const STsdbFileInfo *pInfo) { +int tsdbLoadMFileHeader(SMFile *pMFile, SMFInfo *pInfo) { + char buf[TSDB_FILE_HEAD_SIZE] = "\0"; + + ASSERT(TSDB_FILE_OPENED(pMFile)); + + if (tsdbSeekMFile(pMFile, 0, SEEK_SET) < 0) { + return -1; + } + + if (tsdbReadMFile(pMFile, buf, TSDB_FILE_HEAD_SIZE) < 0) { + return -1; + } + + if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + return -1; + } + + tsdbDecodeMFInfo(buf, pInfo); + return 0; +} + +int tsdbScanAndTryFixMFile(STsdbRepo *pRepo) { + SMFile * pMFile = pRepo->fs->cstatus->pmf; + struct stat mfstat; + SMFile mf; + + if (pMFile == NULL) { + // No meta file, no need to scan + return 0; + } + + tsdbInitMFileEx(&mf, pMFile); + + if (access(TSDB_FILE_FULL_NAME(pMFile), F_OK) != 0) { + tsdbError("vgId:%d meta file %s not exit, report to upper layer to fix it", REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pMFile)); + pRepo->state |= TSDB_STATE_BAD_META; + TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_BAD); + return 0; + } + + if (stat(TSDB_FILE_FULL_NAME(&mf), &mfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + if (pMFile->info.size < mfstat.st_size) { + if (tsdbOpenMFile(&mf, O_WRONLY) < 0) { + return -1; + } + + if (taosFtruncate(mf.fd, mf.info.size) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseMFile(&mf); + return -1; + } + + if (tsdbUpdateMFileHeader(&mf) < 0) { + tsdbCloseMFile(&mf); + return -1; + } + + tsdbCloseMFile(&mf); + tsdbInfo("vgId:%d file %s is truncated from %" PRId64 " to %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), + mfstat.st_size, pMFile->info.size); + } else if (pMFile->info.size > mfstat.st_size) { + tsdbError("vgId:%d meta file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it", + REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), mfstat.st_size, pMFile->info.size); + pRepo->state |= TSDB_STATE_BAD_META; + TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_BAD); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + return 0; + } else { + tsdbDebug("vgId:%d meta file %s passes the scan", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile)); + } + + return 0; +} + +int tsdbEncodeMFInfo(void **buf, SMFInfo *pInfo) { int tlen = 0; + + tlen += taosEncodeVariantI64(buf, pInfo->size); + tlen += taosEncodeVariantI64(buf, pInfo->tombSize); + tlen += taosEncodeVariantI64(buf, pInfo->nRecords); + tlen += taosEncodeVariantI64(buf, pInfo->nDels); + tlen += taosEncodeFixedU32(buf, pInfo->magic); + + return tlen; +} + +void *tsdbDecodeMFInfo(void *buf, SMFInfo *pInfo) { + buf = taosDecodeVariantI64(buf, &(pInfo->size)); + buf = taosDecodeVariantI64(buf, 
&(pInfo->tombSize)); + buf = taosDecodeVariantI64(buf, &(pInfo->nRecords)); + buf = taosDecodeVariantI64(buf, &(pInfo->nDels)); + buf = taosDecodeFixedU32(buf, &(pInfo->magic)); + + return buf; +} + +static int tsdbRollBackMFile(SMFile *pMFile) { + SMFile mf; + + tsdbInitMFileEx(&mf, pMFile); + + if (tsdbOpenMFile(&mf, O_WRONLY) < 0) { + return -1; + } + + if (taosFtruncate(TSDB_FILE_FD(&mf), pMFile->info.size) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseMFile(&mf); + return -1; + } + + if (tsdbUpdateMFileHeader(&mf) < 0) { + tsdbCloseMFile(&mf); + return -1; + } + + TSDB_FILE_FSYNC(&mf); + + tsdbCloseMFile(&mf); + return 0; +} + +// ============== Operations on SDFile +void tsdbInitDFile(SDFile *pDFile, SDiskID did, int vid, int fid, uint32_t ver, TSDB_FILE_T ftype) { + char fname[TSDB_FILENAME_LEN]; + + TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_OK); + + TSDB_FILE_SET_CLOSED(pDFile); + + memset(&(pDFile->info), 0, sizeof(pDFile->info)); + pDFile->info.magic = TSDB_FILE_INIT_MAGIC; + + tsdbGetFilename(vid, fid, ver, ftype, fname); + tfsInitFile(&(pDFile->f), did.level, did.id, fname); +} + +void tsdbInitDFileEx(SDFile *pDFile, SDFile *pODFile) { + *pDFile = *pODFile; + TSDB_FILE_SET_CLOSED(pDFile); +} + +int tsdbEncodeSDFile(void **buf, SDFile *pDFile) { + int tlen = 0; + + tlen += tsdbEncodeDFInfo(buf, &(pDFile->info)); + tlen += tfsEncodeFile(buf, &(pDFile->f)); + + return tlen; +} + +void *tsdbDecodeSDFile(void *buf, SDFile *pDFile) { + buf = tsdbDecodeDFInfo(buf, &(pDFile->info)); + buf = tfsDecodeFile(buf, &(pDFile->f)); + TSDB_FILE_SET_CLOSED(pDFile); + + return buf; +} + +static int tsdbEncodeSDFileEx(void **buf, SDFile *pDFile) { + int tlen = 0; + + tlen += tsdbEncodeDFInfo(buf, &(pDFile->info)); + tlen += taosEncodeString(buf, TSDB_FILE_FULL_NAME(pDFile)); + + return tlen; +} + +static void *tsdbDecodeSDFileEx(void *buf, SDFile *pDFile) { + char *aname; + + buf = tsdbDecodeDFInfo(buf, &(pDFile->info)); + buf = taosDecodeString(buf, &aname); + strncpy(TSDB_FILE_FULL_NAME(pDFile), aname, TSDB_FILENAME_LEN); + TSDB_FILE_SET_CLOSED(pDFile); + tfree(aname); + + return buf; +} + +int tsdbCreateDFile(SDFile *pDFile, bool updateHeader) { + ASSERT(pDFile->info.size == 0 && pDFile->info.magic == TSDB_FILE_INIT_MAGIC); + + pDFile->fd = open(TSDB_FILE_FULL_NAME(pDFile), O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0755); + if (pDFile->fd < 0) { + if (errno == ENOENT) { + // Try to create directory recursively + char *s = strdup(TFILE_REL_NAME(&(pDFile->f))); + if (tfsMkdirRecurAt(dirname(s), TSDB_FILE_LEVEL(pDFile), TSDB_FILE_ID(pDFile)) < 0) { + tfree(s); + return -1; + } + tfree(s); + + pDFile->fd = open(TSDB_FILE_FULL_NAME(pDFile), O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0755); + if (pDFile->fd < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + } else { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + } + + if (!updateHeader) { + return 0; + } + + if (tsdbUpdateDFileHeader(pDFile) < 0) { + tsdbCloseDFile(pDFile); + tsdbRemoveDFile(pDFile); + return -1; + } + + pDFile->info.size += TSDB_FILE_HEAD_SIZE; + + return 0; +} + +int tsdbUpdateDFileHeader(SDFile *pDFile) { + char buf[TSDB_FILE_HEAD_SIZE] = "\0"; + + if (tsdbSeekDFile(pDFile, 0, SEEK_SET) < 0) { + return -1; + } + + void *ptr = buf; + taosEncodeFixedU32(&ptr, TSDB_FS_VERSION); + tsdbEncodeDFInfo(&ptr, &(pDFile->info)); + + taosCalcChecksumAppend(0, (uint8_t *)buf, TSDB_FILE_HEAD_SIZE); + if (tsdbWriteDFile(pDFile, buf, TSDB_FILE_HEAD_SIZE) < 0) { + return -1; + } + + return 0; +} + +int 
tsdbLoadDFileHeader(SDFile *pDFile, SDFInfo *pInfo) { + char buf[TSDB_FILE_HEAD_SIZE] = "\0"; + uint32_t version; + + ASSERT(TSDB_FILE_OPENED(pDFile)); + + if (tsdbSeekDFile(pDFile, 0, SEEK_SET) < 0) { + return -1; + } + + if (tsdbReadDFile(pDFile, buf, TSDB_FILE_HEAD_SIZE) < 0) { + return -1; + } + + if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + return -1; + } + + void *pBuf = buf; + pBuf = taosDecodeFixedU32(pBuf, &version); + pBuf = tsdbDecodeDFInfo(pBuf, pInfo); + return 0; +} + +static int tsdbScanAndTryFixDFile(STsdbRepo *pRepo, SDFile *pDFile) { + struct stat dfstat; + SDFile df; + + tsdbInitDFileEx(&df, pDFile); + + if (access(TSDB_FILE_FULL_NAME(pDFile), F_OK) != 0) { + tsdbError("vgId:%d data file %s not exit, report to upper layer to fix it", REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pDFile)); + pRepo->state |= TSDB_STATE_BAD_DATA; + TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD); + return 0; + } + + if (stat(TSDB_FILE_FULL_NAME(&df), &dfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + if (pDFile->info.size < dfstat.st_size) { + if (tsdbOpenDFile(&df, O_WRONLY) < 0) { + return -1; + } + + if (taosFtruncate(df.fd, df.info.size) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseDFile(&df); + return -1; + } + + if (tsdbUpdateDFileHeader(&df) < 0) { + tsdbCloseDFile(&df); + return -1; + } + + tsdbCloseDFile(&df); + tsdbInfo("vgId:%d file %s is truncated from %" PRId64 " to %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), + dfstat.st_size, pDFile->info.size); + } else if (pDFile->info.size > dfstat.st_size) { + tsdbError("vgId:%d data file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it", + REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), dfstat.st_size, pDFile->info.size); + pRepo->state |= TSDB_STATE_BAD_DATA; + TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + return 0; + } else { + tsdbDebug("vgId:%d file %s passes the scan", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); + } + + return 0; +} + +static int tsdbEncodeDFInfo(void **buf, SDFInfo *pInfo) { + int tlen = 0; + tlen += taosEncodeFixedU32(buf, pInfo->magic); tlen += taosEncodeFixedU32(buf, pInfo->len); tlen += taosEncodeFixedU32(buf, pInfo->totalBlocks); @@ -450,7 +499,7 @@ int tsdbEncodeSFileInfo(void **buf, const STsdbFileInfo *pInfo) { return tlen; } -void *tsdbDecodeSFileInfo(void *buf, STsdbFileInfo *pInfo) { +static void *tsdbDecodeDFInfo(void *buf, SDFInfo *pInfo) { buf = taosDecodeFixedU32(buf, &(pInfo->magic)); buf = taosDecodeFixedU32(buf, &(pInfo->len)); buf = taosDecodeFixedU32(buf, &(pInfo->totalBlocks)); @@ -462,156 +511,186 @@ void *tsdbDecodeSFileInfo(void *buf, STsdbFileInfo *pInfo) { return buf; } -void tsdbRemoveFileGroup(STsdbRepo *pRepo, SFileGroup *pFGroup) { - ASSERT(pFGroup != NULL); - STsdbFileH *pFileH = pRepo->tsdbFileH; +static int tsdbApplyDFileChange(SDFile *from, SDFile *to) { + ASSERT(from != NULL || to != NULL); - SFileGroup fileGroup = *pFGroup; - - int nFilesLeft = pFileH->nFGroups - (int)(POINTER_DISTANCE(pFGroup, pFileH->pFGroup) / sizeof(SFileGroup) + 1); - if (nFilesLeft > 0) { - memmove((void *)pFGroup, POINTER_SHIFT(pFGroup, sizeof(SFileGroup)), sizeof(SFileGroup) * nFilesLeft); - } - - pFileH->nFGroups--; - ASSERT(pFileH->nFGroups >= 0); - - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { - if (remove(fileGroup.files[type].fname) < 0) { - tsdbError("vgId:%d failed to remove file %s", REPO_ID(pRepo), 
fileGroup.files[type].fname); + if (from != NULL) { + if (to == NULL) { + tsdbRemoveDFile(from); + } else { + if (tfsIsSameFile(TSDB_FILE_F(from), TSDB_FILE_F(to))) { + if (from->info.size > to->info.size) { + tsdbRollBackDFile(to); + } + } else { + tsdbRemoveDFile(from); + } } - tsdbDestroyFile(&fileGroup.files[type]); } + + return 0; } -int tsdbLoadFileHeader(SFile *pFile, uint32_t *version) { - char buf[TSDB_FILE_HEAD_SIZE] = "\0"; +static int tsdbRollBackDFile(SDFile *pDFile) { + SDFile df = *pDFile; - if (lseek(pFile->fd, 0, SEEK_SET) < 0) { - tsdbError("failed to lseek file %s to start since %s", pFile->fname, strerror(errno)); + if (tsdbOpenDFile(&df, O_WRONLY) < 0) { + return -1; + } + + if (taosFtruncate(TSDB_FILE_FD(&df), pDFile->info.size) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseDFile(&df); return -1; } - if (taosRead(pFile->fd, buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { - tsdbError("failed to read file %s header part with %d bytes, reason:%s", pFile->fname, TSDB_FILE_HEAD_SIZE, - strerror(errno)); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + if (tsdbUpdateDFileHeader(&df) < 0) { + tsdbCloseDFile(&df); return -1; } - if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) { - tsdbError("file %s header part is corrupted with failed checksum", pFile->fname); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - - void *pBuf = (void *)buf; - pBuf = taosDecodeFixedU32(pBuf, version); - pBuf = tsdbDecodeSFileInfo(pBuf, &(pFile->info)); + TSDB_FILE_FSYNC(&df); + tsdbCloseDFile(&df); return 0; } -void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int64_t *size) { - uint32_t version = 0; - SFile file; - SFile * pFile = &file; +// ============== Operations on SDFileSet +void tsdbInitDFileSet(SDFileSet *pSet, SDiskID did, int vid, int fid, uint32_t ver) { + pSet->fid = fid; + pSet->state = 0; - strncpy(pFile->fname, fname, TSDB_FILENAME_LEN - 1); - pFile->fd = -1; - - if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err; - if (tsdbLoadFileHeader(pFile, &version) < 0) goto _err; - - off_t offset = lseek(pFile->fd, 0, SEEK_END); - if (offset < 0) goto _err; - tsdbCloseFile(pFile); - - *magic = pFile->info.magic; - *size = offset; - - return; - -_err: - tsdbCloseFile(pFile); - *magic = TSDB_FILE_INIT_MAGIC; - *size = 0; -} - -// ---------------- LOCAL FUNCTIONS ---------------- -static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) { - uint32_t version; - - tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), fid, type, pFile->fname); - - pFile->fd = -1; - if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err; - - if (tsdbLoadFileHeader(pFile, &version) < 0) { - tsdbError("vgId:%d failed to load file %s header part since %s", REPO_ID(pRepo), pFile->fname, tstrerror(terrno)); - goto _err; - } - - if (pFile->info.size == TSDB_FILE_HEAD_SIZE) { - pFile->info.size = lseek(pFile->fd, 0, SEEK_END); - } - - if (version != TSDB_FILE_VERSION) { - // TODO: deal with error - tsdbError("vgId:%d file %s version %u is not the same as program version %u which may cause problem", - REPO_ID(pRepo), pFile->fname, version, TSDB_FILE_VERSION); - } - - tsdbCloseFile(pFile); - - return 0; -_err: - tsdbDestroyFile(pFile); - return -1; -} - -static void tsdbDestroyFile(SFile *pFile) { tsdbCloseFile(pFile); } - -static int compFGroup(const void *arg1, const void *arg2) { - int val1 = ((SFileGroup *)arg1)->fileId; - int val2 = ((SFileGroup *)arg2)->fileId; - - if (val1 < val2) { - return -1; - } else if (val1 > val2) { - return 1; - } else { - return 0; + for 
(TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile *pDFile = TSDB_DFILE_IN_SET(pSet, ftype); + tsdbInitDFile(pDFile, did, vid, fid, ver, ftype); } } -static int keyFGroupCompFunc(const void *key, const void *fgroup) { - int fid = *(int *)key; - SFileGroup *pFGroup = (SFileGroup *)fgroup; - if (fid == pFGroup->fileId) { - return 0; - } else { - return fid > pFGroup->fileId ? 1 : -1; +void tsdbInitDFileSetEx(SDFileSet *pSet, SDFileSet *pOSet) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + tsdbInitDFileEx(TSDB_DFILE_IN_SET(pSet, ftype), TSDB_DFILE_IN_SET(pOSet, ftype)); } } -static void tsdbInitFileGroup(SFileGroup *pFGroup, STsdbRepo *pRepo) { - for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) { - if (tsdbInitFile(&pFGroup->files[type], pRepo, pFGroup->fileId, type) < 0) { - memset(&pFGroup->files[type].info, 0, sizeof(STsdbFileInfo)); - pFGroup->files[type].info.magic = TSDB_FILE_INIT_MAGIC; - pFGroup->state = 1; - pRepo->state = TSDB_STATE_BAD_FILE; - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; +int tsdbEncodeDFileSet(void **buf, SDFileSet *pSet) { + int tlen = 0; + + tlen += taosEncodeFixedI32(buf, pSet->fid); + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + tlen += tsdbEncodeSDFile(buf, TSDB_DFILE_IN_SET(pSet, ftype)); + } + + return tlen; +} + +void *tsdbDecodeDFileSet(void *buf, SDFileSet *pSet) { + int32_t fid; + + buf = taosDecodeFixedI32(buf, &(fid)); + pSet->state = 0; + pSet->fid = fid; + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + buf = tsdbDecodeSDFile(buf, TSDB_DFILE_IN_SET(pSet, ftype)); + } + return buf; +} + +int tsdbEncodeDFileSetEx(void **buf, SDFileSet *pSet) { + int tlen = 0; + + tlen += taosEncodeFixedI32(buf, pSet->fid); + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + tlen += tsdbEncodeSDFileEx(buf, TSDB_DFILE_IN_SET(pSet, ftype)); + } + + return tlen; +} + +void *tsdbDecodeDFileSetEx(void *buf, SDFileSet *pSet) { + int32_t fid; + + buf = taosDecodeFixedI32(buf, &(fid)); + pSet->fid = fid; + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + buf = tsdbDecodeSDFileEx(buf, TSDB_DFILE_IN_SET(pSet, ftype)); + } + return buf; +} + +int tsdbApplyDFileSetChange(SDFileSet *from, SDFileSet *to) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile *pDFileFrom = (from) ? TSDB_DFILE_IN_SET(from, ftype) : NULL; + SDFile *pDFileTo = (to) ? 
TSDB_DFILE_IN_SET(to, ftype) : NULL;
+    if (tsdbApplyDFileChange(pDFileFrom, pDFileTo) < 0) {
+      return -1;
    }
  }
+
+  return 0;
 }
-static TSKEY tsdbGetCurrMinKey(int8_t precision, int32_t keep) {
-  return (TSKEY)(taosGetTimestamp(precision) - keep * tsMsPerDay[precision]);
+int tsdbCreateDFileSet(SDFileSet *pSet, bool updateHeader) {
+  for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) {
+    if (tsdbCreateDFile(TSDB_DFILE_IN_SET(pSet, ftype), updateHeader) < 0) {
+      tsdbCloseDFileSet(pSet);
+      tsdbRemoveDFileSet(pSet);
+      return -1;
+    }
+  }
+
+  return 0;
 }
-static int tsdbGetCurrMinFid(int8_t precision, int32_t keep, int32_t days) {
-  return (int)(TSDB_KEY_FILEID(tsdbGetCurrMinKey(precision, keep), days, precision));
+int tsdbUpdateDFileSetHeader(SDFileSet *pSet) {
+  for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) {
+    if (tsdbUpdateDFileHeader(TSDB_DFILE_IN_SET(pSet, ftype)) < 0) {
+      return -1;
+    }
+  }
+  return 0;
+}
+
+int tsdbScanAndTryFixDFileSet(STsdbRepo *pRepo, SDFileSet *pSet) {
+  for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) {
+    if (tsdbScanAndTryFixDFile(pRepo, TSDB_DFILE_IN_SET(pSet, ftype)) < 0) {
+      return -1;
+    }
+  }
+  return 0;
+}
+
+int tsdbParseDFilename(const char *fname, int *vid, int *fid, TSDB_FILE_T *ftype, uint32_t *version) {
+  char *p = NULL;
+  *version = 0;
+  *ftype = TSDB_FILE_MAX;
+
+  sscanf(fname, "v%df%d.%m[a-z]-ver%" PRIu32, vid, fid, &p, version);
+  for (TSDB_FILE_T i = 0; i < TSDB_FILE_MAX; i++) {
+    if (strcmp(p, TSDB_FNAME_SUFFIX[i]) == 0) {
+      *ftype = i;
+      break;
+    }
+  }
+
+  tfree(p);
+  return 0;
+}
+
+static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname) {
+  ASSERT(ftype != TSDB_FILE_MAX);
+
+  if (ftype < TSDB_FILE_MAX) {
+    if (ver == 0) {
+      snprintf(fname, TSDB_FILENAME_LEN, "vnode/vnode%d/tsdb/data/v%df%d.%s", vid, vid, fid, TSDB_FNAME_SUFFIX[ftype]);
+    } else {
+      snprintf(fname, TSDB_FILENAME_LEN, "vnode/vnode%d/tsdb/data/v%df%d.%s-ver%" PRIu32, vid, vid, fid,
+               TSDB_FNAME_SUFFIX[ftype], ver);
+    }
+  } else {
+    if (ver == 0) {
+      snprintf(fname, TSDB_FILENAME_LEN, "vnode/vnode%d/tsdb/%s", vid, TSDB_FNAME_SUFFIX[ftype]);
+    } else {
+      snprintf(fname, TSDB_FILENAME_LEN, "vnode/vnode%d/tsdb/%s-ver%" PRIu32, vid, TSDB_FNAME_SUFFIX[ftype], ver);
+    }
+  }
 }
\ No newline at end of file
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index cadbfa91cf..69a35b3d78 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -14,130 +14,108 @@
  */
-#include "tsdbMain.h"
-#include "os.h"
-#include "talgo.h"
-#include "taosdef.h"
-#include "tchecksum.h"
-#include "tscompression.h"
-#include "tsdb.h"
-#include "tulog.h"
+#include "tsdbint.h"
-#define TSDB_CFG_FILE_NAME "config"
-#define TSDB_DATA_DIR_NAME "data"
-#define TSDB_META_FILE_NAME "meta"
-#define TSDB_META_FILE_INDEX 10000000
 #define IS_VALID_PRECISION(precision) \
   (((precision) >= TSDB_TIME_PRECISION_MILLI) && ((precision) <= TSDB_TIME_PRECISION_NANO))
 #define TSDB_DEFAULT_COMPRESSION TWO_STAGE_COMP
 #define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP))
-static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg);
-static int32_t tsdbSetRepoEnv(char *rootDir, STsdbCfg *pCfg);
-static int32_t tsdbUnsetRepoEnv(char *rootDir);
-static int32_t tsdbSaveConfig(char *rootDir, STsdbCfg *pCfg);
-static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg);
-static char * tsdbGetCfgFname(char *rootDir);
-static STsdbRepo * tsdbNewRepo(char *rootDir,
STsdbAppH *pAppH, STsdbCfg *pCfg); -static void tsdbFreeRepo(STsdbRepo *pRepo); -static int tsdbRestoreInfo(STsdbRepo *pRepo); -static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression); -static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep); -static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks); -static int keyFGroupCompFunc(const void *key, const void *fgroup); -static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg); -static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg); -static void tsdbStartStream(STsdbRepo *pRepo); -static void tsdbStopStream(STsdbRepo *pRepo); +static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg); +static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH); +static void tsdbFreeRepo(STsdbRepo *pRepo); +static void tsdbStartStream(STsdbRepo *pRepo); +static void tsdbStopStream(STsdbRepo *pRepo); // Function declaration -int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) { - DIR *dir = opendir(rootDir); - if (dir) { - tsdbDebug("repository %s already exists", rootDir); - closedir(dir); - return 0; - } else { - if (ENOENT != errno) { - tsdbError("failed to open directory %s since %s", rootDir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } +int32_t tsdbCreateRepo(int repoid) { + char tsdbDir[TSDB_FILENAME_LEN] = "\0"; + char dataDir[TSDB_FILENAME_LEN] = "\0"; + + tsdbGetRootDir(repoid, tsdbDir); + if (tfsMkdir(tsdbDir) < 0) { + goto _err; } - if (mkdir(rootDir, 0755) < 0) { - tsdbError("vgId:%d failed to create rootDir %s since %s", pCfg->tsdbId, rootDir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; + tsdbGetDataDir(repoid, dataDir); + if (tfsMkdir(dataDir) < 0) { + goto _err; } - if (tsdbCheckAndSetDefaultCfg(pCfg) < 0) return -1; + // TODO: need to create current file with nothing in - if (tsdbSetRepoEnv(rootDir, pCfg) < 0) return -1; - - tsdbDebug( - "vgId:%d tsdb env create succeed! 
cacheBlockSize %d totalBlocks %d daysPerFile %d keep " - "%d minRowsPerFileBlock %d maxRowsPerFileBlock %d precision %d compression %d update %d cacheLastRow %d", - pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->daysPerFile, pCfg->keep, pCfg->minRowsPerFileBlock, - pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression, pCfg->update, pCfg->cacheLastRow); return 0; + +_err: + tsdbError("vgId:%d failed to create TSDB repository since %s", repoid, tstrerror(terrno)); + return -1; } -int32_t tsdbDropRepo(char *rootDir) { return tsdbUnsetRepoEnv(rootDir); } +int32_t tsdbDropRepo(int repoid) { + char tsdbDir[TSDB_FILENAME_LEN] = "\0"; -TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH) { - STsdbCfg config = {0}; - STsdbRepo *pRepo = NULL; + tsdbGetRootDir(repoid, tsdbDir); + return tfsRmdir(tsdbDir); +} + +STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { + STsdbRepo *pRepo; + STsdbCfg config = *pCfg; terrno = TSDB_CODE_SUCCESS; - if (tsdbLoadConfig(rootDir, &config) < 0) { - tsdbError("failed to open repo in rootDir %s since %s", rootDir, tstrerror(terrno)); + // Check and set default configurations + if (tsdbCheckAndSetDefaultCfg(&config) < 0) { + tsdbError("vgId:%d failed to open TSDB repository since %s", config.tsdbId, tstrerror(terrno)); return NULL; } - pRepo = tsdbNewRepo(rootDir, pAppH, &config); - if (pRepo == NULL) { - tsdbError("failed to open repo in rootDir %s since %s", rootDir, tstrerror(terrno)); + // Create new TSDB object + if ((pRepo = tsdbNewRepo(&config, pAppH)) == NULL) { + tsdbError("vgId:%d failed to open TSDB repository while creating TSDB object since %s", config.tsdbId, + tstrerror(terrno)); return NULL; } + // Open meta if (tsdbOpenMeta(pRepo) < 0) { - tsdbError("vgId:%d failed to open meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + tsdbError("vgId:%d failed to open TSDB repository while opening Meta since %s", config.tsdbId, tstrerror(terrno)); + tsdbCloseRepo(pRepo, false); + return NULL; } if (tsdbOpenBufPool(pRepo) < 0) { - tsdbError("vgId:%d failed to open buffer pool since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + tsdbError("vgId:%d failed to open TSDB repository while opening buffer pool since %s", config.tsdbId, + tstrerror(terrno)); + tsdbCloseRepo(pRepo, false); + return NULL; } - if (tsdbOpenFileH(pRepo) < 0) { - tsdbError("vgId:%d failed to open file handle since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + if (tsdbOpenFS(pRepo) < 0) { + tsdbError("vgId:%d failed to open TSDB repository while opening FS since %s", config.tsdbId, tstrerror(terrno)); + tsdbCloseRepo(pRepo, false); + return NULL; } - if (tsdbRestoreInfo(pRepo) < 0) { - tsdbError("vgId:%d failed to restore info from file since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + // TODO: Restore information from data + if ((!(pRepo->state & TSDB_STATE_BAD_DATA)) && tsdbRestoreInfo(pRepo) < 0) { + tsdbError("vgId:%d failed to open TSDB repository while restore info since %s", config.tsdbId, tstrerror(terrno)); + tsdbCloseRepo(pRepo, false); + return NULL; } tsdbStartStream(pRepo); - tsdbDebug("vgId:%d open tsdb repository succeed!", REPO_ID(pRepo)); + tsdbDebug("vgId:%d, TSDB repository opened", REPO_ID(pRepo)); - return (TSDB_REPO_T *)pRepo; - -_err: - tsdbCloseRepo(pRepo, false); - return NULL; + return pRepo; } // Note: all working thread and query thread must stopped when calling this function -int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) { +int tsdbCloseRepo(STsdbRepo *repo, int toCommit) { if 
(repo == NULL) return 0; - STsdbRepo *pRepo = (STsdbRepo *)repo; + STsdbRepo *pRepo = repo; int vgId = REPO_ID(pRepo); terrno = TSDB_CODE_SUCCESS; @@ -145,16 +123,15 @@ int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) { tsdbStopStream(pRepo); if (toCommit) { - tsdbAsyncCommit(pRepo); - tsem_wait(&(pRepo->readyToCommit)); - terrno = pRepo->code; + tsdbSyncCommit(repo); } + tsdbUnRefMemTable(pRepo, pRepo->mem); tsdbUnRefMemTable(pRepo, pRepo->imem); pRepo->mem = NULL; pRepo->imem = NULL; - tsdbCloseFileH(pRepo); + tsdbCloseFS(pRepo); tsdbCloseBufPool(pRepo); tsdbCloseMeta(pRepo); tsdbFreeRepo(pRepo); @@ -167,88 +144,67 @@ int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) { } } -uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) { - STsdbRepo *pRepo = (STsdbRepo *)repo; - // STsdbMeta *pMeta = pRepo->tsdbMeta; - STsdbFileH *pFileH = pRepo->tsdbFileH; - uint32_t magic = 0; - char * fname = NULL; - - struct stat fState; - - tsdbDebug("vgId:%d name:%s index:%d eindex:%d", pRepo->config.tsdbId, name, *index, eindex); - ASSERT(*index <= eindex); - - char *sdup = strdup(pRepo->rootDir); - char *prefix = dirname(sdup); - int prefixLen = (int)strlen(prefix); - - if (name[0] == 0) { // get the file from index or after, but not larger than eindex - tfree(sdup); - int fid = (*index) / TSDB_FILE_TYPE_MAX; - - if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) { - if (*index <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) { - fname = tsdbGetMetaFileName(pRepo->rootDir); - *index = TSDB_META_FILE_INDEX; - magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta); - } else { - return 0; - } - } else { - SFileGroup *pFGroup = - taosbsearch(&fid, pFileH->pFGroup, pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, TD_GE); - if (pFGroup->fileId == fid) { - fname = strdup(pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX].fname); - magic = pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX].info.magic; - } else { - if ((pFGroup->fileId + 1) * TSDB_FILE_TYPE_MAX - 1 < (int)eindex) { - fname = strdup(pFGroup->files[0].fname); - *index = pFGroup->fileId * TSDB_FILE_TYPE_MAX; - magic = pFGroup->files[0].info.magic; - } else { - return 0; - } - } - } - strcpy(name, fname + prefixLen); - } else { // get the named file at the specified index. 
If not there, return 0 - fname = malloc(prefixLen + strlen(name) + 2); - sprintf(fname, "%s/%s", prefix, name); - if (access(fname, F_OK) != 0) { - tfree(fname); - tfree(sdup); - return 0; - } - if (*index == TSDB_META_FILE_INDEX) { // get meta file - tsdbGetStoreInfo(fname, &magic, size); - } else { - tsdbGetFileInfoImpl(fname, &magic, size); - } - tfree(fname); - tfree(sdup); - return magic; - } - - if (stat(fname, &fState) < 0) { - tfree(fname); - return 0; - } - - *size = fState.st_size; - // magic = *size; - - tfree(fname); - return magic; -} - -STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo) { +STsdbCfg *tsdbGetCfg(const STsdbRepo *repo) { ASSERT(repo != NULL); return &((STsdbRepo *)repo)->config; } -int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) { +int tsdbLockRepo(STsdbRepo *pRepo) { + int code = pthread_mutex_lock(&pRepo->mutex); + if (code != 0) { + tsdbError("vgId:%d failed to lock tsdb since %s", REPO_ID(pRepo), strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(code); + return -1; + } + pRepo->repoLocked = true; + return 0; +} + +int tsdbUnlockRepo(STsdbRepo *pRepo) { + ASSERT(IS_REPO_LOCKED(pRepo)); + pRepo->repoLocked = false; + int code = pthread_mutex_unlock(&pRepo->mutex); + if (code != 0) { + tsdbError("vgId:%d failed to unlock tsdb since %s", REPO_ID(pRepo), strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(code); + return -1; + } + return 0; +} + +int tsdbCheckCommit(STsdbRepo *pRepo) { + ASSERT(pRepo->mem != NULL); + STsdbCfg *pCfg = &(pRepo->config); + + STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); + ASSERT(pBufBlock != NULL); + if ((pRepo->mem->extraBuffList != NULL) || + ((listNEles(pRepo->mem->bufBlockList) >= pCfg->totalBlocks / 3) && (pBufBlock->remain < TSDB_BUFFER_RESERVE))) { + // trigger commit + if (tsdbAsyncCommit(pRepo) < 0) return -1; + } + + return 0; +} + +STsdbMeta *tsdbGetMeta(STsdbRepo *pRepo) { return pRepo->tsdbMeta; } + +STsdbRepoInfo *tsdbGetStatus(STsdbRepo *pRepo) { return NULL; } + +int tsdbGetState(STsdbRepo *repo) { return repo->state; } + +void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage) { + ASSERT(repo != NULL); + STsdbRepo *pRepo = repo; + *totalPoints = pRepo->stat.pointsWritten; + *totalStorage = pRepo->stat.totalStorage; + *compStorage = pRepo->stat.compStorage; +} + +int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg) { // TODO: think about multithread cases + return 0; +#if 0 STsdbRepo *pRepo = (STsdbRepo *)repo; STsdbCfg config = pRepo->config; STsdbCfg * pRCfg = &pRepo->config; @@ -294,112 +250,115 @@ int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) { } return 0; +#endif } -void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage) { - ASSERT(repo != NULL); - STsdbRepo *pRepo = repo; - *totalPoints = pRepo->stat.pointsWritten; - *totalStorage = pRepo->stat.totalStorage; - *compStorage = pRepo->stat.compStorage; -} - -int tsdbGetState(TSDB_REPO_T *repo) { - return ((STsdbRepo *)repo)->state; -} - -// ----------------- INTERNAL FUNCTIONS ----------------- -char *tsdbGetMetaFileName(char *rootDir) { - int tlen = (int)(strlen(rootDir) + strlen(TSDB_META_FILE_NAME) + 2); - char *fname = calloc(1, tlen); - if (fname == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return NULL; - } - - snprintf(fname, tlen, "%s/%s", rootDir, TSDB_META_FILE_NAME); - return fname; -} - -void tsdbGetDataFileName(char *rootDir, int vid, int fid, int type, char *fname) { - snprintf(fname, TSDB_FILENAME_LEN, "%s/%s/v%df%d%s", rootDir, 
TSDB_DATA_DIR_NAME, vid, fid, tsdbFileSuffix[type]); -} - -int tsdbLockRepo(STsdbRepo *pRepo) { - int code = pthread_mutex_lock(&pRepo->mutex); - if (code != 0) { - tsdbError("vgId:%d failed to lock tsdb since %s", REPO_ID(pRepo), strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(code); - return -1; - } - pRepo->repoLocked = true; +uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) { + // TODO return 0; -} +#if 0 + STsdbRepo *pRepo = (STsdbRepo *)repo; + // STsdbMeta *pMeta = pRepo->tsdbMeta; + STsdbFileH *pFileH = pRepo->tsdbFileH; + uint32_t magic = 0; + char * fname = NULL; -int tsdbUnlockRepo(STsdbRepo *pRepo) { - ASSERT(IS_REPO_LOCKED(pRepo)); - pRepo->repoLocked = false; - int code = pthread_mutex_unlock(&pRepo->mutex); - if (code != 0) { - tsdbError("vgId:%d failed to unlock tsdb since %s", REPO_ID(pRepo), strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(code); - return -1; - } - return 0; -} + struct stat fState; -char *tsdbGetDataDirName(char *rootDir) { - int tlen = (int)(strlen(rootDir) + strlen(TSDB_DATA_DIR_NAME) + 2); - char *fname = calloc(1, tlen); - if (fname == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return NULL; + tsdbDebug("vgId:%d name:%s index:%d eindex:%d", pRepo->config.tsdbId, name, *index, eindex); + ASSERT(*index <= eindex); + + if (name[0] == 0) { // get the file from index or after, but not larger than eindex + int fid = (*index) / TSDB_FILE_TYPE_MAX; + + if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) { + if (*index <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) { + fname = tsdbGetMetaFileName(pRepo->rootDir); + *index = TSDB_META_FILE_INDEX; + magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta); + sprintf(name, "tsdb/%s", TSDB_META_FILE_NAME); + } else { + return 0; + } + } else { + SFileGroup *pFGroup = + taosbsearch(&fid, pFileH->pFGroup, pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, TD_GE); + if (pFGroup->fileId == fid) { + SFile *pFile = &pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX]; + fname = strdup(TSDB_FILE_NAME(pFile)); + magic = pFile->info.magic; + char *tfname = strdup(fname); + sprintf(name, "tsdb/%s/%s", TSDB_DATA_DIR_NAME, basename(tfname)); + tfree(tfname); + } else { + if ((pFGroup->fileId + 1) * TSDB_FILE_TYPE_MAX - 1 < (int)eindex) { + SFile *pFile = &pFGroup->files[0]; + fname = strdup(TSDB_FILE_NAME(pFile)); + *index = pFGroup->fileId * TSDB_FILE_TYPE_MAX; + magic = pFile->info.magic; + char *tfname = strdup(fname); + sprintf(name, "tsdb/%s/%s", TSDB_DATA_DIR_NAME, basename(tfname)); + tfree(tfname); + } else { + return 0; + } + } + } + } else { // get the named file at the specified index. 
If not there, return 0 + fname = malloc(256); + sprintf(fname, "%s/vnode/vnode%d/%s", TFS_PRIMARY_PATH(), REPO_ID(pRepo), name); + if (access(fname, F_OK) != 0) { + tfree(fname); + return 0; + } + if (*index == TSDB_META_FILE_INDEX) { // get meta file + tsdbGetStoreInfo(fname, &magic, size); + } else { + char tfname[TSDB_FILENAME_LEN] = "\0"; + sprintf(tfname, "vnode/vnode%d/tsdb/%s/%s", REPO_ID(pRepo), TSDB_DATA_DIR_NAME, basename(name)); + tsdbGetFileInfoImpl(tfname, &magic, size); + } + tfree(fname); + return magic; } - snprintf(fname, tlen, "%s/%s", rootDir, TSDB_DATA_DIR_NAME); - return fname; -} - -int tsdbGetNextMaxTables(int tid) { - ASSERT(tid >= 1 && tid <= TSDB_MAX_TABLES); - int maxTables = TSDB_INIT_NTABLES; - while (true) { - maxTables = MIN(maxTables, TSDB_MAX_TABLES); - if (tid <= maxTables) break; - maxTables *= 2; + if (stat(fname, &fState) < 0) { + tfree(fname); + return 0; } - return maxTables + 1; + *size = fState.st_size; + // magic = *size; + + tfree(fname); + return magic; +#endif } -int tsdbCheckCommit(STsdbRepo *pRepo) { - ASSERT(pRepo->mem != NULL); - STsdbCfg *pCfg = &(pRepo->config); - - STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); - ASSERT(pBufBlock != NULL); - if ((pRepo->mem->extraBuffList != NULL) || - ((listNEles(pRepo->mem->bufBlockList) >= pCfg->totalBlocks / 3) && (pBufBlock->remain < TSDB_BUFFER_RESERVE))) { - // trigger commit - if (tsdbAsyncCommit(pRepo) < 0) return -1; - } - - return 0; +void tsdbGetRootDir(int repoid, char dirName[]) { + snprintf(dirName, TSDB_FILENAME_LEN, "vnode/vnode%d/tsdb", repoid); } -STsdbMeta * tsdbGetMeta(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbMeta; } -STsdbFileH * tsdbGetFile(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbFileH; } -STsdbRepoInfo *tsdbGetStatus(TSDB_REPO_T *pRepo) { return NULL; } +void tsdbGetDataDir(int repoid, char dirName[]) { + snprintf(dirName, TSDB_FILENAME_LEN, "vnode/vnode%d/tsdb/data", repoid); +} -// ----------------- LOCAL FUNCTIONS ----------------- static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { + // Check tsdbId + if (pCfg->tsdbId < 0) { + tsdbError("vgId:%d invalid vgroup ID", pCfg->tsdbId); + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; + } + // Check precision if (pCfg->precision == -1) { pCfg->precision = TSDB_DEFAULT_PRECISION; } else { if (!IS_VALID_PRECISION(pCfg->precision)) { tsdbError("vgId:%d invalid precision configuration %d", pCfg->tsdbId, pCfg->precision); - goto _err; + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; } } @@ -409,16 +368,11 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { } else { if (!IS_VALID_COMPRESSION(pCfg->compression)) { tsdbError("vgId:%d invalid compression configuration %d", pCfg->tsdbId, pCfg->precision); - goto _err; + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; } } - // Check tsdbId - if (pCfg->tsdbId < 0) { - tsdbError("vgId:%d invalid vgroup ID", pCfg->tsdbId); - goto _err; - } - // Check daysPerFile if (pCfg->daysPerFile == -1) { pCfg->daysPerFile = TSDB_DEFAULT_DAYS_PER_FILE; @@ -428,7 +382,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { "vgId:%d invalid daysPerFile configuration! daysPerFile %d TSDB_MIN_DAYS_PER_FILE %d TSDB_MAX_DAYS_PER_FILE " "%d", pCfg->tsdbId, pCfg->daysPerFile, TSDB_MIN_DAYS_PER_FILE, TSDB_MAX_DAYS_PER_FILE); - goto _err; + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; } } @@ -441,7 +396,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { "vgId:%d invalid minRowsPerFileBlock configuration! 
minRowsPerFileBlock %d TSDB_MIN_MIN_ROW_FBLOCK %d " "TSDB_MAX_MIN_ROW_FBLOCK %d", pCfg->tsdbId, pCfg->minRowsPerFileBlock, TSDB_MIN_MIN_ROW_FBLOCK, TSDB_MAX_MIN_ROW_FBLOCK); - goto _err; + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; } } @@ -453,14 +409,16 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { "vgId:%d invalid maxRowsPerFileBlock configuration! maxRowsPerFileBlock %d TSDB_MIN_MAX_ROW_FBLOCK %d " "TSDB_MAX_MAX_ROW_FBLOCK %d", pCfg->tsdbId, pCfg->maxRowsPerFileBlock, TSDB_MIN_MIN_ROW_FBLOCK, TSDB_MAX_MIN_ROW_FBLOCK); - goto _err; + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; } } if (pCfg->minRowsPerFileBlock > pCfg->maxRowsPerFileBlock) { tsdbError("vgId:%d invalid configuration! minRowsPerFileBlock %d maxRowsPerFileBlock %d", pCfg->tsdbId, pCfg->minRowsPerFileBlock, pCfg->maxRowsPerFileBlock); - goto _err; + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; } // Check keep @@ -472,10 +430,19 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { "vgId:%d invalid keep configuration! keep %d TSDB_MIN_KEEP %d " "TSDB_MAX_KEEP %d", pCfg->tsdbId, pCfg->keep, TSDB_MIN_KEEP, TSDB_MAX_KEEP); - goto _err; + terrno = TSDB_CODE_TDB_INVALID_CONFIG; + return -1; } } + if (pCfg->keep1 == 0) { + pCfg->keep1 = pCfg->keep; + } + + if (pCfg->keep2 == 0) { + pCfg->keep2 = pCfg->keep; + } + // update check if (pCfg->update != 0) pCfg->update = 1; @@ -483,426 +450,75 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) { if (pCfg->cacheLastRow != 0) pCfg->cacheLastRow = 1; return 0; - -_err: - terrno = TSDB_CODE_TDB_INVALID_CONFIG; - return -1; } -static int32_t tsdbSetRepoEnv(char *rootDir, STsdbCfg *pCfg) { - if (tsdbSaveConfig(rootDir, pCfg) < 0) { - tsdbError("vgId:%d failed to set TSDB environment since %s", pCfg->tsdbId, tstrerror(terrno)); - return -1; - } - - char *dirName = tsdbGetDataDirName(rootDir); - if (dirName == NULL) return -1; - - if (mkdir(dirName, 0755) < 0) { - tsdbError("vgId:%d failed to create directory %s since %s", pCfg->tsdbId, dirName, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - free(dirName); - return -1; - } - - free(dirName); - - char *fname = tsdbGetMetaFileName(rootDir); - if (fname == NULL) return -1; - if (tdCreateKVStore(fname) < 0) { - tsdbError("vgId:%d failed to open KV store since %s", pCfg->tsdbId, tstrerror(terrno)); - free(fname); - return -1; - } - - free(fname); - return 0; -} - -static int32_t tsdbUnsetRepoEnv(char *rootDir) { - taosRemoveDir(rootDir); - tsdbDebug("repository %s is removed", rootDir); - return 0; -} - -static int32_t tsdbSaveConfig(char *rootDir, STsdbCfg *pCfg) { - int fd = -1; - char *fname = NULL; - char buf[TSDB_FILE_HEAD_SIZE] = "\0"; - char *pBuf = buf; - - fname = tsdbGetCfgFname(rootDir); - if (fname == NULL) { - tsdbError("vgId:%d failed to save configuration since %s", pCfg->tsdbId, tstrerror(terrno)); - goto _err; - } - - fd = open(fname, O_WRONLY | O_CREAT | O_BINARY, 0755); - if (fd < 0) { - tsdbError("vgId:%d failed to open file %s since %s", pCfg->tsdbId, fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - int tlen = tsdbEncodeCfg((void *)(&pBuf), pCfg); - ASSERT((tlen + sizeof(TSCKSUM) <= TSDB_FILE_HEAD_SIZE) && (POINTER_DISTANCE(pBuf, buf) == tlen)); - - taosCalcChecksumAppend(0, (uint8_t *)buf, TSDB_FILE_HEAD_SIZE); - - if (taosWrite(fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { - tsdbError("vgId:%d failed to write %d bytes to file %s since %s", pCfg->tsdbId, TSDB_FILE_HEAD_SIZE, fname, - strerror(errno)); - terrno = 
TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (fsync(fd) < 0) { - tsdbError("vgId:%d failed to fsync file %s since %s", pCfg->tsdbId, fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - free(fname); - close(fd); - return 0; - -_err: - tfree(fname); - if (fd >= 0) close(fd); - return -1; -} - -static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg) { - char *fname = NULL; - int fd = -1; - char buf[TSDB_FILE_HEAD_SIZE] = "\0"; - - fname = tsdbGetCfgFname(rootDir); - if (fname == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - fd = open(fname, O_RDONLY | O_BINARY); - if (fd < 0) { - tsdbError("failed to open file %s since %s", fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (taosRead(fd, (void *)buf, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) { - tsdbError("failed to read %d bytes from file %s since %s", TSDB_FILE_HEAD_SIZE, fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (!taosCheckChecksumWhole((uint8_t *)buf, TSDB_FILE_HEAD_SIZE)) { - tsdbError("file %s is corrupted", fname); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - goto _err; - } - - tsdbDecodeCfg(buf, pCfg); - - tfree(fname); - close(fd); - - return 0; - -_err: - tfree(fname); - if (fd >= 0) close(fd); - return -1; -} - -static char *tsdbGetCfgFname(char *rootDir) { - int tlen = (int)(strlen(rootDir) + strlen(TSDB_CFG_FILE_NAME) + 2); - char *fname = calloc(1, tlen); - if (fname == NULL) { +static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { + STsdbRepo *pRepo = (STsdbRepo *)calloc(1, sizeof(*pRepo)); + if (pRepo == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return NULL; } - snprintf(fname, tlen, "%s/%s", rootDir, TSDB_CFG_FILE_NAME); - return fname; -} - -static STsdbRepo *tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg) { - STsdbRepo *pRepo = (STsdbRepo *)calloc(1, sizeof(STsdbRepo)); - if (pRepo == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - pRepo->state = TSDB_STATE_OK; pRepo->code = TSDB_CODE_SUCCESS; + pRepo->config = *pCfg; + if (pAppH) { + pRepo->appH = *pAppH; + } + pRepo->repoLocked = false; - int code = pthread_mutex_init(&pRepo->mutex, NULL); + int code = pthread_mutex_init(&(pRepo->mutex), NULL); if (code != 0) { terrno = TAOS_SYSTEM_ERROR(code); - goto _err; + tsdbFreeRepo(pRepo); + return NULL; } code = tsem_init(&(pRepo->readyToCommit), 0, 1); if (code != 0) { code = errno; terrno = TAOS_SYSTEM_ERROR(code); - goto _err; + tsdbFreeRepo(pRepo); + return NULL; } - pRepo->repoLocked = false; - - pRepo->rootDir = strdup(rootDir); - if (pRepo->rootDir == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - pRepo->config = *pCfg; - if (pAppH) pRepo->appH = *pAppH; - pRepo->tsdbMeta = tsdbNewMeta(pCfg); if (pRepo->tsdbMeta == NULL) { tsdbError("vgId:%d failed to create meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + tsdbFreeRepo(pRepo); + return NULL; } pRepo->pPool = tsdbNewBufPool(pCfg); if (pRepo->pPool == NULL) { tsdbError("vgId:%d failed to create buffer pool since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + tsdbFreeRepo(pRepo); + return NULL; } - pRepo->tsdbFileH = tsdbNewFileH(pCfg); - if (pRepo->tsdbFileH == NULL) { - tsdbError("vgId:%d failed to create file handle since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; + pRepo->fs = tsdbNewFS(pCfg); + if (pRepo->fs == NULL) { + tsdbError("vgId:%d failed to TSDB file system since %s", REPO_ID(pRepo), tstrerror(terrno)); + 
tsdbFreeRepo(pRepo); + return NULL; } return pRepo; - -_err: - tsdbFreeRepo(pRepo); - return NULL; } static void tsdbFreeRepo(STsdbRepo *pRepo) { if (pRepo) { - tsdbFreeFileH(pRepo->tsdbFileH); + tsdbFreeFS(pRepo->fs); tsdbFreeBufPool(pRepo->pPool); tsdbFreeMeta(pRepo->tsdbMeta); // tsdbFreeMemTable(pRepo->mem); // tsdbFreeMemTable(pRepo->imem); - tfree(pRepo->rootDir); tsem_destroy(&(pRepo->readyToCommit)); pthread_mutex_destroy(&pRepo->mutex); free(pRepo); } } -static int tsdbRestoreInfo(STsdbRepo *pRepo) { // TODO - STsdbMeta * pMeta = pRepo->tsdbMeta; - STsdbFileH *pFileH = pRepo->tsdbFileH; - SFileGroup *pFGroup = NULL; - STsdbCfg * pCfg = &(pRepo->config); - SCompBlock *pBlock = NULL; - - SFileGroupIter iter; - SRWHelper rhelper = {0}; - - if (tsdbInitReadHelper(&rhelper, pRepo) < 0) goto _err; - - tsdbInitFileGroupIter(pFileH, &iter, TSDB_ORDER_DESC); - while ((pFGroup = tsdbGetFileGroupNext(&iter)) != NULL) { - if (pFGroup->state) continue; - if (tsdbSetAndOpenHelperFile(&rhelper, pFGroup) < 0) goto _err; - if (tsdbLoadCompIdx(&rhelper, NULL) < 0) goto _err; - for (int i = 1; i < pMeta->maxTables; i++) { - STable *pTable = pMeta->tables[i]; - if (pTable == NULL) continue; - if (tsdbSetHelperTable(&rhelper, pTable, pRepo) < 0) goto _err; - SCompIdx *pIdx = &(rhelper.curCompIdx); - - TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable); - if (pIdx->offset > 0 && lastKey < pIdx->maxKey) { - pTable->lastKey = pIdx->maxKey; - if (pCfg->cacheLastRow) { // load the block of data - if (tsdbLoadCompInfo(&rhelper, NULL) < 0) goto _err; - - pBlock = rhelper.pCompInfo->blocks + pIdx->numOfBlocks - 1; - if (tsdbLoadBlockData(&rhelper, pBlock, NULL) < 0) goto _err; - - // construct the data row - ASSERT(pTable->lastRow == NULL); - STSchema *pSchema = tsdbGetTableSchema(pTable); - pTable->lastRow = taosTMalloc(schemaTLen(pSchema)); - if (pTable->lastRow == NULL) { - goto _err; - } - - tdInitDataRow(pTable->lastRow, pSchema); - for (int icol = 0; icol < schemaNCols(pSchema); icol++) { - STColumn *pCol = schemaColAt(pSchema, icol); - SDataCol *pDataCol = rhelper.pDataCols[0]->cols + icol; - tdAppendColVal(pTable->lastRow, tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type, pCol->bytes, - pCol->offset); - } - } - } - } - } - - tsdbDestroyHelper(&rhelper); - return 0; - -_err: - tsdbDestroyHelper(&rhelper); - return -1; -} - -static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression) { - int8_t ocompression = pRepo->config.compression; - pRepo->config.compression = compression; - tsdbDebug("vgId:%d tsdb compression is changed from %d to %d", REPO_ID(pRepo), ocompression, compression); -} - -static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) { - STsdbCfg * pCfg = &pRepo->config; - STsdbFileH *pFileH = pRepo->tsdbFileH; - int okeep = pCfg->keep; - SFileGroup *pFGroup = NULL; - - ASSERT(pCfg->keep != keep); - int maxFiles = TSDB_MAX_FILE(keep, pCfg->daysPerFile); - - if (maxFiles != pFileH->maxFGroups) { - pthread_rwlock_wrlock(&(pFileH->fhlock)); - - pCfg->keep = keep; - pFGroup = (SFileGroup *)calloc(maxFiles, sizeof(SFileGroup)); - if (pFGroup == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - pthread_rwlock_unlock(&(pFileH->fhlock)); - return -1; - } - - int mfid = (int)(TSDB_KEY_FILEID(taosGetTimestamp(pCfg->precision), pCfg->daysPerFile, pCfg->precision) - - TSDB_MAX_FILE(keep, pCfg->daysPerFile)); - - int i = 0; - for (; i < pFileH->nFGroups; i++) { - if (pFileH->pFGroup[i].fileId >= mfid) break; - tsdbRemoveFileGroup(pRepo, &(pFileH->pFGroup[i])); - } - - for (int j = 
0; i < pFileH->nFGroups; i++, j++) { - pFGroup[j] = pFileH->pFGroup[i]; - } - - free(pFileH->pFGroup); - pFileH->pFGroup = pFGroup; - - pthread_rwlock_unlock(&(pFileH->fhlock)); - } - - tsdbDebug("vgId:%d keep is changed from %d to %d", REPO_ID(pRepo), okeep, keep); - - return 0; -} - -static int keyFGroupCompFunc(const void *key, const void *fgroup) { - int fid = *(int *)key; - SFileGroup *pFGroup = (SFileGroup *)fgroup; - if (fid == pFGroup->fileId) { - return 0; - } else { - return fid > pFGroup->fileId ? 1 : -1; - } -} - -static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg) { - int tlen = 0; - - tlen += taosEncodeVariantI32(buf, pCfg->tsdbId); - tlen += taosEncodeFixedI32(buf, pCfg->cacheBlockSize); - tlen += taosEncodeVariantI32(buf, pCfg->totalBlocks); - tlen += taosEncodeVariantI32(buf, pCfg->daysPerFile); - tlen += taosEncodeVariantI32(buf, pCfg->keep); - tlen += taosEncodeVariantI32(buf, pCfg->keep1); - tlen += taosEncodeVariantI32(buf, pCfg->keep2); - tlen += taosEncodeVariantI32(buf, pCfg->minRowsPerFileBlock); - tlen += taosEncodeVariantI32(buf, pCfg->maxRowsPerFileBlock); - tlen += taosEncodeFixedI8(buf, pCfg->precision); - tlen += taosEncodeFixedI8(buf, pCfg->compression); - tlen += taosEncodeFixedI8(buf, pCfg->update); - tlen += taosEncodeFixedI8(buf, pCfg->cacheLastRow); - - return tlen; -} - -static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) { - buf = taosDecodeVariantI32(buf, &(pCfg->tsdbId)); - buf = taosDecodeFixedI32(buf, &(pCfg->cacheBlockSize)); - buf = taosDecodeVariantI32(buf, &(pCfg->totalBlocks)); - buf = taosDecodeVariantI32(buf, &(pCfg->daysPerFile)); - buf = taosDecodeVariantI32(buf, &(pCfg->keep)); - buf = taosDecodeVariantI32(buf, &(pCfg->keep1)); - buf = taosDecodeVariantI32(buf, &(pCfg->keep2)); - buf = taosDecodeVariantI32(buf, &(pCfg->minRowsPerFileBlock)); - buf = taosDecodeVariantI32(buf, &(pCfg->maxRowsPerFileBlock)); - buf = taosDecodeFixedI8(buf, &(pCfg->precision)); - buf = taosDecodeFixedI8(buf, &(pCfg->compression)); - buf = taosDecodeFixedI8(buf, &(pCfg->update)); - buf = taosDecodeFixedI8(buf, &(pCfg->cacheLastRow)); - - return buf; -} - -static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) { - // TODO - // STsdbCache *pCache = pRepo->tsdbCache; - // int oldNumOfBlocks = pCache->totalCacheBlocks; - - // tsdbLockRepo((TsdbRepoT *)pRepo); - - // ASSERT(pCache->totalCacheBlocks != totalBlocks); - - // if (pCache->totalCacheBlocks < totalBlocks) { - // ASSERT(pCache->totalCacheBlocks == pCache->pool.numOfCacheBlocks); - // int blocksToAdd = pCache->totalCacheBlocks - totalBlocks; - // pCache->totalCacheBlocks = totalBlocks; - // for (int i = 0; i < blocksToAdd; i++) { - // if (tsdbAddCacheBlockToPool(pCache) < 0) { - // tsdbUnLockRepo((TsdbRepoT *)pRepo); - // tsdbError("tsdbId:%d, failed to add cache block to cache pool", pRepo->config.tsdbId); - // return -1; - // } - // } - // } else { - // pCache->totalCacheBlocks = totalBlocks; - // tsdbAdjustCacheBlocks(pCache); - // } - // pRepo->config.totalBlocks = totalBlocks; - - // tsdbUnLockRepo((TsdbRepoT *)pRepo); - // tsdbDebug("vgId:%d, tsdb total cache blocks changed from %d to %d", pRepo->config.tsdbId, oldNumOfBlocks, - // totalBlocks); - return 0; -} - -#if 0 - -TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid) { - STsdbRepo *pRepo = (STsdbRepo *)repo; - - STable *pTable = tsdbGetTableByUid(pRepo->tsdbMeta, uid); - if (pTable == NULL) return -1; - - return TSDB_GET_TABLE_LAST_KEY(pTable); -} - -#endif - static void tsdbStartStream(STsdbRepo *pRepo) { STsdbMeta 
*pMeta = pRepo->tsdbMeta; @@ -915,7 +531,6 @@ static void tsdbStartStream(STsdbRepo *pRepo) { } } - static void tsdbStopStream(STsdbRepo *pRepo) { STsdbMeta *pMeta = pRepo->tsdbMeta; @@ -926,3 +541,82 @@ static void tsdbStopStream(STsdbRepo *pRepo) { } } } + +int tsdbRestoreInfo(STsdbRepo *pRepo) { + SFSIter fsiter; + SReadH readh; + SDFileSet *pSet; + STsdbMeta *pMeta = pRepo->tsdbMeta; + STsdbCfg * pCfg = REPO_CFG(pRepo); + SBlock * pBlock; + + if (tsdbInitReadH(&readh, pRepo) < 0) { + return -1; + } + + tsdbFSIterInit(&fsiter, REPO_FS(pRepo), TSDB_FS_ITER_BACKWARD); + + while ((pSet = tsdbFSIterNext(&fsiter)) != NULL) { + if (tsdbSetAndOpenReadFSet(&readh, pSet) < 0) { + tsdbDestroyReadH(&readh); + return -1; + } + + if (tsdbLoadBlockIdx(&readh) < 0) { + tsdbDestroyReadH(&readh); + return -1; + } + + for (int i = 1; i < pMeta->maxTables; i++) { + STable *pTable = pMeta->tables[i]; + if (pTable == NULL) continue; + + if (tsdbSetReadTable(&readh, pTable) < 0) { + tsdbDestroyReadH(&readh); + return -1; + } + + TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable); + SBlockIdx *pIdx = readh.pBlkIdx; + if (pIdx && lastKey < pIdx->maxKey) { + pTable->lastKey = pIdx->maxKey; + + if (pCfg->cacheLastRow) { + if (tsdbLoadBlockInfo(&readh, NULL) < 0) { + tsdbDestroyReadH(&readh); + return -1; + } + + pBlock = readh.pBlkInfo->blocks + pIdx->numOfBlocks - 1; + + if (tsdbLoadBlockData(&readh, pBlock, NULL) < 0) { + tsdbDestroyReadH(&readh); + return -1; + } + + // Get the data in row + ASSERT(pTable->lastRow == NULL); + STSchema *pSchema = tsdbGetTableSchema(pTable); + pTable->lastRow = taosTMalloc(schemaTLen(pSchema)); + if (pTable->lastRow == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbDestroyReadH(&readh); + return -1; + } + + tdInitDataRow(pTable->lastRow, pSchema); + for (int icol = 0; icol < schemaNCols(pSchema); icol++) { + STColumn *pCol = schemaColAt(pSchema, icol); + SDataCol *pDataCol = readh.pDCols[0]->cols + icol; + tdAppendColVal(pTable->lastRow, tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type, pCol->bytes, + pCol->offset); + } + } + } + + } + } + + tsdbDestroyReadH(&readh); + return 0; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 42bbebe5f7..73a1270799 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -13,12 +13,23 @@ * along with this program. If not, see . 
*/ -#include "tsdb.h" -#include "tsdbMain.h" +#include "tsdbint.h" #define TSDB_DATA_SKIPLIST_LEVEL 5 #define TSDB_MAX_INSERT_BATCH 512 +typedef struct { + int32_t totalLen; + int32_t len; + SDataRow row; +} SSubmitBlkIter; + +typedef struct { + int32_t totalLen; + int32_t len; + void * pMsg; +} SSubmitMsgIter; + static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo); static void tsdbFreeMemTable(SMemTable *pMemTable); static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); @@ -41,8 +52,8 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, TSKEY now); -int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { - STsdbRepo * pRepo = (STsdbRepo *)repo; +int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { + STsdbRepo * pRepo = repo; SSubmitMsgIter msgIter = {0}; SSubmitBlk * pBlock = NULL; int32_t affectedrows = 0; @@ -205,11 +216,13 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { } int tsdbAsyncCommit(STsdbRepo *pRepo) { - if (pRepo->mem == NULL) return 0; - tsem_wait(&(pRepo->readyToCommit)); ASSERT(pRepo->imem == NULL); + if (pRepo->mem == NULL) { + tsem_post(&(pRepo->readyToCommit)); + return 0; + } if (pRepo->code != TSDB_CODE_SUCCESS) { tsdbWarn("vgId:%d try to commit when TSDB not in good state: %s", REPO_ID(pRepo), tstrerror(terrno)); @@ -225,8 +238,8 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) { return 0; } -int tsdbSyncCommit(TSDB_REPO_T *repo) { - STsdbRepo *pRepo = (STsdbRepo *)repo; +int tsdbSyncCommit(STsdbRepo *repo) { + STsdbRepo *pRepo = repo; tsdbAsyncCommit(pRepo); tsem_wait(&(pRepo->readyToCommit)); @@ -254,14 +267,17 @@ int tsdbSyncCommit(TSDB_REPO_T *repo) { */ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) { - ASSERT(maxRowsToRead > 0 && nFilterKeys >= 0 && pMergeInfo != NULL); + ASSERT(maxRowsToRead > 0 && nFilterKeys >= 0); if (pIter == NULL) return 0; - STSchema *pSchema = NULL; - TSKEY rowKey = 0; - TSKEY fKey = 0; - bool isRowDel = false; - int filterIter = 0; - SDataRow row = NULL; + STSchema * pSchema = NULL; + TSKEY rowKey = 0; + TSKEY fKey = 0; + bool isRowDel = false; + int filterIter = 0; + SDataRow row = NULL; + SMergeInfo mInfo; + + if (pMergeInfo == NULL) pMergeInfo = &mInfo; memset(pMergeInfo, 0, sizeof(*pMergeInfo)); pMergeInfo->keyFirst = INT64_MAX; @@ -452,11 +468,6 @@ static void tsdbFreeTableData(STableData *pTableData) { static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); } -void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) { - *minKey = fileId * daysPerFile * tsMsPerDay[precision]; - *maxKey = *minKey + daysPerFile * tsMsPerDay[precision] - 1; -} - static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { ASSERT(pMemTable->maxTables < maxTables); diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 2bc387c3cd..9b47023ecf 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -12,20 +12,12 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ -#include -#include "hash.h" -#include "taosdef.h" -#include "tchecksum.h" -#include "tsdb.h" -#include "tsdbMain.h" -#include "tskiplist.h" +#include "tsdbint.h" #define TSDB_SUPER_TABLE_SL_LEVEL 5 #define DEFAULT_TAG_INDEX_COLUMN 0 static int tsdbCompareSchemaVersion(const void *key1, const void *key2); -static int tsdbRestoreTable(void *pHandle, void *cont, int contLen); -static void tsdbOrgMeta(void *pHandle); static char * getTagIndexKey(const void *pData); static STable *tsdbNewTable(); static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper); @@ -53,7 +45,7 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable); static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid); // ------------------ OUTER FUNCTIONS ------------------ -int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) { +int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { STsdbRepo *pRepo = (STsdbRepo *)repo; STsdbMeta *pMeta = pRepo->tsdbMeta; STable * super = NULL; @@ -148,7 +140,7 @@ _err: return -1; } -int tsdbDropTable(TSDB_REPO_T *repo, STableId tableId) { +int tsdbDropTable(STsdbRepo *repo, STableId tableId) { STsdbRepo *pRepo = (STsdbRepo *)repo; STsdbMeta *pMeta = pRepo->tsdbMeta; uint64_t uid = tableId.uid; @@ -301,7 +293,7 @@ static UNUSED_FUNC int32_t colIdCompar(const void* left, const void* right) { return (colId < p2->colId)? -1:1; } -int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) { +int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) { STsdbRepo *pRepo = (STsdbRepo *)repo; STsdbMeta *pMeta = pRepo->tsdbMeta; STSchema * pNewSchema = NULL; @@ -469,6 +461,8 @@ void tsdbFreeMeta(STsdbMeta *pMeta) { } int tsdbOpenMeta(STsdbRepo *pRepo) { + return 0; +#if 0 char * fname = NULL; STsdbMeta *pMeta = pRepo->tsdbMeta; ASSERT(pMeta != NULL); @@ -479,11 +473,11 @@ int tsdbOpenMeta(STsdbRepo *pRepo) { goto _err; } - pMeta->pStore = tdOpenKVStore(fname, tsdbRestoreTable, tsdbOrgMeta, (void *)pRepo); - if (pMeta->pStore == NULL) { - tsdbError("vgId:%d failed to open TSDB meta while open the kv store since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } + // pMeta->pStore = tdOpenKVStore(fname, tsdbRestoreTable, tsdbOrgMeta, (void *)pRepo); + // if (pMeta->pStore == NULL) { + // tsdbError("vgId:%d failed to open TSDB meta while open the kv store since %s", REPO_ID(pRepo), tstrerror(terrno)); + // goto _err; + // } tsdbDebug("vgId:%d open TSDB meta succeed", REPO_ID(pRepo)); tfree(fname); @@ -492,6 +486,7 @@ int tsdbOpenMeta(STsdbRepo *pRepo) { _err: tfree(fname); return -1; +#endif } int tsdbCloseMeta(STsdbRepo *pRepo) { @@ -500,7 +495,7 @@ int tsdbCloseMeta(STsdbRepo *pRepo) { STable * pTable = NULL; if (pMeta == NULL) return 0; - tdCloseKVStore(pMeta->pStore); + // tdCloseKVStore(pMeta->pStore); for (int i = 1; i < pMeta->maxTables; i++) { tsdbFreeTable(pMeta->tables[i]); } @@ -567,12 +562,13 @@ void tsdbRefTable(STable *pTable) { } void tsdbUnRefTable(STable *pTable) { - int32_t ref = T_REF_DEC(pTable); - tsdbDebug("unref table %s uid:%"PRIu64" tid:%d, refCount:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), ref); + uint64_t uid = TABLE_UID(pTable); + int32_t tid = TABLE_TID(pTable); + int32_t ref = T_REF_DEC(pTable); + + tsdbDebug("unref table, uid:%" PRIu64 " tid:%d, refCount:%d", uid, tid, ref); if (ref == 0) { - // tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable)); - if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) { 
tsdbUnRefTable(pTable->pSuper); } @@ -609,10 +605,8 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, } } -// ------------------ LOCAL FUNCTIONS ------------------ -static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) { - STsdbRepo *pRepo = (STsdbRepo *)pHandle; - STable * pTable = NULL; +int tsdbRestoreTable(STsdbRepo *pRepo, void *cont, int contLen) { + STable *pTable = NULL; if (!taosCheckChecksumWhole((uint8_t *)cont, contLen)) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; @@ -631,8 +625,7 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) { return 0; } -static void tsdbOrgMeta(void *pHandle) { - STsdbRepo *pRepo = (STsdbRepo *)pHandle; +void tsdbOrgMeta(STsdbRepo *pRepo) { STsdbMeta *pMeta = pRepo->tsdbMeta; for (int i = 1; i < pMeta->maxTables; i++) { @@ -643,6 +636,7 @@ static void tsdbOrgMeta(void *pHandle) { } } +// ------------------ LOCAL FUNCTIONS ------------------ static char *getTagIndexKey(const void *pData) { STable *pTable = (STable *)pData; diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c deleted file mode 100644 index 4a44784cc2..0000000000 --- a/src/tsdb/src/tsdbRWHelper.c +++ /dev/null @@ -1,1753 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#define _DEFAULT_SOURCE -#define TAOS_RANDOM_FILE_FAIL_TEST -#include "os.h" -#include "talgo.h" -#include "tchecksum.h" -#include "tcoding.h" -#include "tscompression.h" -#include "tsdbMain.h" - -#define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM)) -#define TSDB_KEY_COL_OFFSET 0 -#define TSDB_GET_COMPBLOCK_IDX(h, b) (POINTER_DISTANCE(b, (h)->pCompInfo->blocks)/sizeof(SCompBlock)) -#define TSDB_IS_LAST_BLOCK(pb) ((pb)->last) - -static bool tsdbShouldCreateNewLast(SRWHelper *pHelper); -static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDataCols, SCompBlock *pCompBlock, - bool isLast, bool isSuperBlock); -static int compareKeyBlock(const void *arg1, const void *arg2); -static int tsdbAdjustInfoSizeIfNeeded(SRWHelper *pHelper, size_t esize); -static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx); -static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, SMergeInfo *pMergeInfo); -static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx); -static void tsdbResetHelperFileImpl(SRWHelper *pHelper); -static int tsdbInitHelperFile(SRWHelper *pHelper); -static void tsdbDestroyHelperFile(SRWHelper *pHelper); -static void tsdbResetHelperTableImpl(SRWHelper *pHelper); -static void tsdbResetHelperTable(SRWHelper *pHelper); -static void tsdbInitHelperTable(SRWHelper *pHelper); -static void tsdbDestroyHelperTable(SRWHelper *pHelper); -static void tsdbResetHelperBlockImpl(SRWHelper *pHelper); -static void tsdbResetHelperBlock(SRWHelper *pHelper); -static int tsdbInitHelperBlock(SRWHelper *pHelper); -static int tsdbInitHelper(SRWHelper *pHelper, STsdbRepo *pRepo, tsdb_rw_helper_t type); -static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32_t len, int8_t comp, int numOfRows, - int maxPoints, char *buffer, int bufferSize); -static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *pDataCols, int16_t *colIds, - int numOfColIds); -static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *pDataCols); -static int tsdbEncodeSCompIdx(void **buf, SCompIdx *pIdx); -static void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx); -static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey); -static void tsdbDestroyHelperBlock(SRWHelper *pHelper); -static int tsdbLoadColData(SRWHelper *pHelper, SFile *pFile, SCompBlock *pCompBlock, SCompCol *pCompCol, - SDataCol *pDataCol); -static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols, SCompBlock *pCompBlock); -static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey, - int *blkIdx); -static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update); -static bool tsdbCheckAddSubBlockCond(SRWHelper *pHelper, SCompBlock *pCompBlock, SMergeInfo *pMergeInfo, int maxOps); -static int tsdbDeleteSuperBlock(SRWHelper *pHelper, int blkIdx); - -// ---------------------- INTERNAL FUNCTIONS ---------------------- -int tsdbInitReadHelper(SRWHelper *pHelper, STsdbRepo *pRepo) { - return tsdbInitHelper(pHelper, pRepo, TSDB_READ_HELPER); -} - -int tsdbInitWriteHelper(SRWHelper *pHelper, STsdbRepo *pRepo) { - return tsdbInitHelper(pHelper, pRepo, TSDB_WRITE_HELPER); -} - -void tsdbDestroyHelper(SRWHelper 
*pHelper) { - if (pHelper) { - taosTZfree(pHelper->pBuffer); - taosTZfree(pHelper->compBuffer); - tsdbDestroyHelperFile(pHelper); - tsdbDestroyHelperTable(pHelper); - tsdbDestroyHelperBlock(pHelper); - memset((void *)pHelper, 0, sizeof(*pHelper)); - } -} - -void tsdbResetHelper(SRWHelper *pHelper) { - if (pHelper) { - // Reset the block part - tsdbResetHelperBlockImpl(pHelper); - - // Reset the table part - tsdbResetHelperTableImpl(pHelper); - - // Reset the file part - tsdbCloseHelperFile(pHelper, false, NULL); - tsdbResetHelperFileImpl(pHelper); - - pHelper->state = TSDB_HELPER_CLEAR_STATE; - } -} - -int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) { - ASSERT(pHelper != NULL && pGroup != NULL); - SFile * pFile = NULL; - STsdbRepo *pRepo = pHelper->pRepo; - - // Clear the helper object - tsdbResetHelper(pHelper); - - ASSERT(pHelper->state == TSDB_HELPER_CLEAR_STATE); - - // Set the files - pHelper->files.fGroup = *pGroup; - if (helperType(pHelper) == TSDB_WRITE_HELPER) { - tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), pGroup->fileId, TSDB_FILE_TYPE_NHEAD, - helperNewHeadF(pHelper)->fname); - tsdbGetDataFileName(pRepo->rootDir, REPO_ID(pRepo), pGroup->fileId, TSDB_FILE_TYPE_NLAST, - helperNewLastF(pHelper)->fname); - } - - // Open the files - if (tsdbOpenFile(helperHeadF(pHelper), O_RDONLY) < 0) return -1; - if (helperType(pHelper) == TSDB_WRITE_HELPER) { - if (tsdbOpenFile(helperDataF(pHelper), O_RDWR) < 0) return -1; - if (tsdbOpenFile(helperLastF(pHelper), O_RDWR) < 0) return -1; - - // Create and open .h - pFile = helperNewHeadF(pHelper); - if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; - pFile->info.size = TSDB_FILE_HEAD_SIZE; - pFile->info.magic = TSDB_FILE_INIT_MAGIC; - if (tsdbUpdateFileHeader(pFile) < 0) return -1; - - // Create and open .l file if should - if (tsdbShouldCreateNewLast(pHelper)) { - pFile = helperNewLastF(pHelper); - if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1; - pFile->info.size = TSDB_FILE_HEAD_SIZE; - pFile->info.magic = TSDB_FILE_INIT_MAGIC; - pFile->info.len = 0; - if (tsdbUpdateFileHeader(pFile) < 0) return -1; - } - } else { - if (tsdbOpenFile(helperDataF(pHelper), O_RDONLY) < 0) return -1; - if (tsdbOpenFile(helperLastF(pHelper), O_RDONLY) < 0) return -1; - } - - helperSetState(pHelper, TSDB_HELPER_FILE_SET_AND_OPEN); - - return 0; -} - -int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError, SFileGroup *pGroup) { - SFile *pFile = NULL; - - pFile = helperHeadF(pHelper); - tsdbCloseFile(pFile); - - pFile = helperDataF(pHelper); - if (pFile->fd > 0) { - if (helperType(pHelper) == TSDB_WRITE_HELPER) { - if (!hasError) { - tsdbUpdateFileHeader(pFile); - } else { - ASSERT(pGroup != NULL); - taosFtruncate(pFile->fd, pGroup->files[TSDB_FILE_TYPE_DATA].info.size); - } - fsync(pFile->fd); - } - tsdbCloseFile(pFile); - } - - pFile = helperLastF(pHelper); - if (pFile->fd > 0) { - if (helperType(pHelper) == TSDB_WRITE_HELPER && !TSDB_NLAST_FILE_OPENED(pHelper)) { - if (!hasError) { - tsdbUpdateFileHeader(pFile); - } else { - ASSERT(pGroup != NULL); - taosFtruncate(pFile->fd, pGroup->files[TSDB_FILE_TYPE_LAST].info.size); - } - fsync(pFile->fd); - } - tsdbCloseFile(pFile); - } - - if (helperType(pHelper) == TSDB_WRITE_HELPER) { - pFile = helperNewHeadF(pHelper); - if (pFile->fd > 0) { - if (!hasError) { - tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); - } - tsdbCloseFile(pFile); - if (hasError) (void)remove(pFile->fname); - } - - pFile = helperNewLastF(pHelper); - if (pFile->fd > 0) { - if (!hasError) { - 
tsdbUpdateFileHeader(pFile); - fsync(pFile->fd); - } - tsdbCloseFile(pFile); - if (hasError) (void)remove(pFile->fname); - } - } - return 0; -} - -int tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) { - ASSERT(helperHasState(pHelper, TSDB_HELPER_FILE_SET_AND_OPEN | TSDB_HELPER_IDX_LOAD)); - - // Clear members and state used by previous table - tsdbResetHelperTable(pHelper); - ASSERT(helperHasState(pHelper, (TSDB_HELPER_FILE_SET_AND_OPEN | TSDB_HELPER_IDX_LOAD))); - - pHelper->tableInfo.tid = pTable->tableId.tid; - pHelper->tableInfo.uid = pTable->tableId.uid; - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); - - if (tdInitDataCols(pHelper->pDataCols[0], pSchema) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - if (tdInitDataCols(pHelper->pDataCols[1], pSchema) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - if (pHelper->idxH.numOfIdx > 0) { - while (true) { - if (pHelper->idxH.curIdx >= pHelper->idxH.numOfIdx) { - memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx)); - break; - } - - SCompIdx *pIdx = &(pHelper->idxH.pIdxArray[pHelper->idxH.curIdx]); - if (pIdx->tid == TABLE_TID(pTable)) { - if (pIdx->uid == TABLE_UID(pTable)) { - pHelper->curCompIdx = *pIdx; - } else { - memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx)); - } - pHelper->idxH.curIdx++; - break; - } else if (pIdx->tid > TABLE_TID(pTable)) { - memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx)); - break; - } else { - pHelper->idxH.curIdx++; - } - } - } else { - memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx)); - } - - if (helperType(pHelper) == TSDB_WRITE_HELPER && pHelper->curCompIdx.hasLast) { - pHelper->hasOldLastBlock = true; - } - - helperSetState(pHelper, TSDB_HELPER_TABLE_SET); - ASSERT(pHelper->state == ((TSDB_HELPER_TABLE_SET << 1) - 1)); - - return 0; -} - -int tsdbCommitTableData(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey) { - ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER); - - SCompIdx *pIdx = &(pHelper->curCompIdx); - int blkIdx = 0; - - ASSERT(pIdx->offset == 0 || pIdx->uid == TABLE_UID(pCommitIter->pTable)); - if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1; - - while (true) { - ASSERT(blkIdx <= (int)pIdx->numOfBlocks); - TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); - if (keyFirst == TSDB_DATA_TIMESTAMP_NULL || keyFirst > maxKey) break; // iter over - - if (pIdx->len <= 0 || keyFirst > pIdx->maxKey) { - if (tsdbProcessAppendCommit(pHelper, pCommitIter, pDataCols, maxKey) < 0) return -1; - blkIdx = pIdx->numOfBlocks; - } else { - if (tsdbProcessMergeCommit(pHelper, pCommitIter, pDataCols, maxKey, &blkIdx) < 0) return -1; - } - } - - return 0; -} - -int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) { - STsdbCfg *pCfg = &pHelper->pRepo->config; - - ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER); - SCompIdx * pIdx = &(pHelper->curCompIdx); - SCompBlock compBlock = {0}; - if (TSDB_NLAST_FILE_OPENED(pHelper) && (pHelper->hasOldLastBlock)) { - if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1; - - SCompBlock *pCompBlock = blockAtIdx(pHelper, pIdx->numOfBlocks - 1); - ASSERT(pCompBlock->last); - if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1; - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows && - pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock); - if (tsdbWriteBlockToFile(pHelper, helperNewLastF(pHelper), pHelper->pDataCols[0], &compBlock, true, true) < 0) - return -1; - - if (tsdbUpdateSuperBlock(pHelper, &compBlock, 
pIdx->numOfBlocks - 1) < 0) return -1; - -#if 0 - if (pCompBlock->numOfSubBlocks > 1) { - if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1; - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows && - pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock); - if (tsdbWriteBlockToFile(pHelper, helperNewLastF(pHelper), pHelper->pDataCols[0], &compBlock, true, true) < 0) - return -1; - - if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1; - } else { - if (lseek(helperLastF(pHelper)->fd, pCompBlock->offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperLastF(pHelper)->fname, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - pCompBlock->offset = lseek(helperNewLastF(pHelper)->fd, 0, SEEK_END); - if (pCompBlock->offset < 0) { - tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperNewLastF(pHelper)->fname, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (taosSendFile(helperNewLastF(pHelper)->fd, helperLastF(pHelper)->fd, NULL, pCompBlock->len) < pCompBlock->len) { - tsdbError("vgId:%d failed to sendfile from file %s to file %s since %s", REPO_ID(pHelper->pRepo), - helperLastF(pHelper)->fname, helperNewLastF(pHelper)->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - } -#endif - - pHelper->hasOldLastBlock = false; - } - - return 0; -} - -int tsdbWriteCompInfo(SRWHelper *pHelper) { - SCompIdx *pIdx = &(pHelper->curCompIdx); - off_t offset = 0; - SFile * pFile = helperNewHeadF(pHelper); - - if (pIdx->len > 0) { - if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) { - if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1; - } else { - pHelper->pCompInfo->delimiter = TSDB_FILE_DELIMITER; - pHelper->pCompInfo->uid = pHelper->tableInfo.uid; - pHelper->pCompInfo->tid = pHelper->tableInfo.tid; - ASSERT(pIdx->len > sizeof(SCompInfo) + sizeof(TSCKSUM) && - (pIdx->len - sizeof(SCompInfo) - sizeof(TSCKSUM)) % sizeof(SCompBlock) == 0); - taosCalcChecksumAppend(0, (uint8_t *)pHelper->pCompInfo, pIdx->len); - } - - pFile->info.magic = taosCalcChecksum( - pFile->info.magic, (uint8_t *)POINTER_SHIFT(pHelper->pCompInfo, pIdx->len - sizeof(TSCKSUM)), sizeof(TSCKSUM)); - offset = lseek(pFile->fd, 0, SEEK_END); - if (offset < 0) { - tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - pIdx->offset = offset; - pIdx->uid = pHelper->tableInfo.uid; - pIdx->tid = pHelper->tableInfo.tid; - ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE); - - if (taosWrite(pFile->fd, (void *)(pHelper->pCompInfo), pIdx->len) < (int)pIdx->len) { - tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len, - pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (taosTSizeof(pHelper->pWIdx) < pFile->info.len + sizeof(SCompIdx) + 12) { - pHelper->pWIdx = taosTRealloc(pHelper->pWIdx, taosTSizeof(pHelper->pWIdx) == 0 ? 
1024 : taosTSizeof(pHelper->pWIdx) * 2); - if (pHelper->pWIdx == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - } - - void *pBuf = POINTER_SHIFT(pHelper->pWIdx, pFile->info.len); - pFile->info.len += tsdbEncodeSCompIdx(&pBuf, &(pHelper->curCompIdx)); - - pFile->info.size += pIdx->len; - // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); - } - - return 0; -} - -int tsdbWriteCompIdx(SRWHelper *pHelper) { - ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER); - off_t offset = 0; - - SFile *pFile = helperNewHeadF(pHelper); - - pFile->info.len += sizeof(TSCKSUM); - if (taosTSizeof(pHelper->pWIdx) < pFile->info.len) { - pHelper->pWIdx = taosTRealloc(pHelper->pWIdx, pFile->info.len); - if (pHelper->pWIdx == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - } - taosCalcChecksumAppend(0, (uint8_t *)pHelper->pWIdx, pFile->info.len); - pFile->info.magic = taosCalcChecksum( - pFile->info.magic, (uint8_t *)POINTER_SHIFT(pHelper->pWIdx, pFile->info.len - sizeof(TSCKSUM)), sizeof(TSCKSUM)); - - offset = lseek(pFile->fd, 0, SEEK_END); - if (offset < 0) { - tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - ASSERT(offset == pFile->info.size); - - if (taosWrite(pFile->fd, (void *)pHelper->pWIdx, pFile->info.len) < (int)pFile->info.len) { - tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len, - pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - pFile->info.offset = offset; - pFile->info.size += pFile->info.len; - // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); - - return 0; -} - -int tsdbLoadCompIdxImpl(SFile *pFile, uint32_t offset, uint32_t len, void *buffer) { - const char *prefixMsg = "failed to load SCompIdx part"; - if (lseek(pFile->fd, offset, SEEK_SET) < 0) { - tsdbError("%s: seek to file %s offset %u failed since %s", prefixMsg, pFile->fname, offset, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (taosRead(pFile->fd, buffer, len) < len) { - tsdbError("%s: read file %s offset %u len %u failed since %s", prefixMsg, pFile->fname, offset, len, - strerror(errno)); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - - if (!taosCheckChecksumWhole((uint8_t *)buffer, len)) { - tsdbError("%s: file %s corrupted, offset %u len %u", prefixMsg, pFile->fname, offset, len); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - - return 0; -} - -int tsdbDecodeSCompIdxImpl(void *buffer, uint32_t len, SCompIdx **ppCompIdx, int *numOfIdx) { - int nIdx = 0; - void *pPtr = buffer; - - while (POINTER_DISTANCE(pPtr, buffer) < (int)(len - sizeof(TSCKSUM))) { - size_t tlen = taosTSizeof(*ppCompIdx); - if (tlen < sizeof(SCompIdx) * (nIdx + 1)) { - *ppCompIdx = (SCompIdx *)taosTRealloc(*ppCompIdx, (tlen == 0) ? 
1024 : tlen * 2); - if (*ppCompIdx == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - } - - pPtr = tsdbDecodeSCompIdx(pPtr, &((*ppCompIdx)[nIdx])); - if (pPtr == NULL) { - tsdbError("failed to decode SCompIdx part, idx:%d", nIdx); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - - nIdx++; - - ASSERT(nIdx == 1 || (*ppCompIdx)[nIdx - 1].tid > (*ppCompIdx)[nIdx - 2].tid); - ASSERT(POINTER_DISTANCE(pPtr, buffer) <= (int)(len - sizeof(TSCKSUM))); - } - - *numOfIdx = nIdx; - return 0; -} - -int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) { - ASSERT(pHelper->state == TSDB_HELPER_FILE_SET_AND_OPEN); - SFile *pFile = helperHeadF(pHelper); - - if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) { - // If not load from file, just load it in object - if (pFile->info.len > 0) { - if ((pHelper->pBuffer = taosTRealloc(pHelper->pBuffer, pFile->info.len)) == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - // Load SCompIdx binary from file - if (tsdbLoadCompIdxImpl(pFile, pFile->info.offset, pFile->info.len, (void *)(pHelper->pBuffer)) < 0) { - return -1; - } - - // Decode the SCompIdx part - if (tsdbDecodeSCompIdxImpl(pHelper->pBuffer, pFile->info.len, &(pHelper->idxH.pIdxArray), - &(pHelper->idxH.numOfIdx)) < 0) { - tsdbError("vgId:%d failed to decode SCompIdx part from file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, - tstrerror(errno)); - return -1; - } - } - } - helperSetState(pHelper, TSDB_HELPER_IDX_LOAD); - - // Copy the memory for outside usage - if (target && pHelper->idxH.numOfIdx > 0) - memcpy(target, pHelper->idxH.pIdxArray, sizeof(SCompIdx) * pHelper->idxH.numOfIdx); - - return 0; -} - -int tsdbLoadCompInfoImpl(SFile *pFile, SCompIdx *pIdx, SCompInfo **ppCompInfo) { - const char *prefixMsg = "failed to load SCompInfo/SCompBlock part"; - - if (lseek(pFile->fd, pIdx->offset, SEEK_SET) < 0) { - tsdbError("%s: seek to file %s offset %u failed since %s", prefixMsg, pFile->fname, pIdx->offset, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - *ppCompInfo = taosTRealloc((void *)(*ppCompInfo), pIdx->len); - if (*ppCompInfo == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - if (taosRead(pFile->fd, (void *)(*ppCompInfo), pIdx->len) < (int)pIdx->len) { - tsdbError("%s: read file %s offset %u len %u failed since %s", prefixMsg, pFile->fname, pIdx->offset, pIdx->len, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (!taosCheckChecksumWhole((uint8_t *)(*ppCompInfo), pIdx->len)) { - tsdbError("%s: file %s corrupted, offset %u len %u", prefixMsg, pFile->fname, pIdx->offset, pIdx->len); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - - return 0; -} - -int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) { - ASSERT(helperHasState(pHelper, TSDB_HELPER_TABLE_SET)); - - SCompIdx *pIdx = &(pHelper->curCompIdx); - - SFile *pFile = helperHeadF(pHelper); - - if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) { - if (pIdx->offset > 0) { - ASSERT(pIdx->uid == pHelper->tableInfo.uid); - - if (tsdbLoadCompInfoImpl(pFile, pIdx, &(pHelper->pCompInfo)) < 0) return -1; - - ASSERT(pIdx->uid == pHelper->pCompInfo->uid && pIdx->tid == pHelper->pCompInfo->tid); - } - - helperSetState(pHelper, TSDB_HELPER_INFO_LOAD); - } - - if (target) memcpy(target, (void *)(pHelper->pCompInfo), pIdx->len); - - return 0; -} - -int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target) { - ASSERT(pCompBlock->numOfSubBlocks <= 1); - SFile *pFile = (pCompBlock->last) ? 
helperLastF(pHelper) : helperDataF(pHelper); - - if (lseek(pFile->fd, (off_t)pCompBlock->offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - size_t tsize = TSDB_GET_COMPCOL_LEN(pCompBlock->numOfCols); - pHelper->pCompData = taosTRealloc((void *)pHelper->pCompData, tsize); - if (pHelper->pCompData == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - if (taosRead(pFile->fd, (void *)pHelper->pCompData, tsize) < tsize) { - tsdbError("vgId:%d failed to read %" PRIzu " bytes from file %s since %s", REPO_ID(pHelper->pRepo), tsize, pFile->fname, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (!taosCheckChecksumWhole((uint8_t *)pHelper->pCompData, (uint32_t)tsize)) { - tsdbError("vgId:%d file %s is broken, offset %" PRId64 " size %" PRIzu "", REPO_ID(pHelper->pRepo), pFile->fname, - (int64_t)pCompBlock->offset, tsize); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - - ASSERT(pCompBlock->numOfCols == pHelper->pCompData->numOfCols); - - if (target) memcpy(target, pHelper->pCompData, tsize); - - return 0; -} - -void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols) { - SCompData *pCompData = pHelper->pCompData; - - for (int i = 0, j = 0; i < numOfCols;) { - if (j >= pCompData->numOfCols) { - pStatis[i].numOfNull = -1; - i++; - continue; - } - - if (pStatis[i].colId == pCompData->cols[j].colId) { - pStatis[i].sum = pCompData->cols[j].sum; - pStatis[i].max = pCompData->cols[j].max; - pStatis[i].min = pCompData->cols[j].min; - pStatis[i].maxIndex = pCompData->cols[j].maxIndex; - pStatis[i].minIndex = pCompData->cols[j].minIndex; - pStatis[i].numOfNull = pCompData->cols[j].numOfNull; - i++; - j++; - } else if (pStatis[i].colId < pCompData->cols[j].colId) { - pStatis[i].numOfNull = -1; - i++; - } else { - j++; - } - } -} - -int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo, int16_t *colIds, int numOfColIds) { - ASSERT(pCompBlock->numOfSubBlocks >= 1); // Must be super block - SCompBlock *pTCompBlock = pCompBlock; - - int numOfSubBlocks = pCompBlock->numOfSubBlocks; - if (numOfSubBlocks > 1) - pTCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset); - - tdResetDataCols(pHelper->pDataCols[0]); - if (tsdbLoadBlockDataColsImpl(pHelper, pTCompBlock, pHelper->pDataCols[0], colIds, numOfColIds) < 0) goto _err; - for (int i = 1; i < numOfSubBlocks; i++) { - tdResetDataCols(pHelper->pDataCols[1]); - pTCompBlock++; - if (tsdbLoadBlockDataColsImpl(pHelper, pTCompBlock, pHelper->pDataCols[1], colIds, numOfColIds) < 0) goto _err; - if (tdMergeDataCols(pHelper->pDataCols[0], pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows) < 0) goto _err; - } - - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows && - dataColsKeyFirst(pHelper->pDataCols[0]) == pCompBlock->keyFirst && - dataColsKeyLast(pHelper->pDataCols[0]) == pCompBlock->keyLast); - - return 0; - -_err: - return -1; -} - -int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo) { - SCompBlock *pTCompBlock = pCompBlock; - - int numOfSubBlock = pCompBlock->numOfSubBlocks; - if (numOfSubBlock > 1) - pTCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? 
pHelper->pCompInfo : pCompInfo, pCompBlock->offset); - - tdResetDataCols(pHelper->pDataCols[0]); - if (tsdbLoadBlockDataImpl(pHelper, pTCompBlock, pHelper->pDataCols[0]) < 0) goto _err; - for (int i = 1; i < numOfSubBlock; i++) { - tdResetDataCols(pHelper->pDataCols[1]); - pTCompBlock++; - if (tsdbLoadBlockDataImpl(pHelper, pTCompBlock, pHelper->pDataCols[1]) < 0) goto _err; - if (tdMergeDataCols(pHelper->pDataCols[0], pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows) < 0) goto _err; - } - - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows && - dataColsKeyFirst(pHelper->pDataCols[0]) == pCompBlock->keyFirst && - dataColsKeyLast(pHelper->pDataCols[0]) == pCompBlock->keyLast); - - return 0; - -_err: - return -1; -} - -// ---------------------- INTERNAL FUNCTIONS ---------------------- -static bool tsdbShouldCreateNewLast(SRWHelper *pHelper) { - ASSERT(helperLastF(pHelper)->fd > 0); - struct stat st; - if (fstat(helperLastF(pHelper)->fd, &st) < 0) return true; - if (st.st_size > 32 * 1024 + TSDB_FILE_HEAD_SIZE) return true; - return false; -} - -static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDataCols, SCompBlock *pCompBlock, - bool isLast, bool isSuperBlock) { - STsdbCfg * pCfg = &(pHelper->pRepo->config); - SCompData *pCompData = (SCompData *)(pHelper->pBuffer); - int64_t offset = 0; - int rowsToWrite = pDataCols->numOfRows; - - ASSERT(rowsToWrite > 0 && rowsToWrite <= pCfg->maxRowsPerFileBlock); - ASSERT(isLast ? rowsToWrite < pCfg->minRowsPerFileBlock : true); - - offset = lseek(pFile->fd, 0, SEEK_END); - if (offset < 0) { - tsdbError("vgId:%d failed to write block to file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - int nColsNotAllNull = 0; - for (int ncol = 1; ncol < pDataCols->numOfCols; ncol++) { // ncol from 1, we skip the timestamp column - SDataCol *pDataCol = pDataCols->cols + ncol; - SCompCol *pCompCol = pCompData->cols + nColsNotAllNull; - - if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it - continue; - } - - memset(pCompCol, 0, sizeof(*pCompCol)); - - pCompCol->colId = pDataCol->colId; - pCompCol->type = pDataCol->type; - if (tDataTypes[pDataCol->type].statisFunc) { - (*tDataTypes[pDataCol->type].statisFunc)( - pDataCol->pData, rowsToWrite, &(pCompCol->min), &(pCompCol->max), &(pCompCol->sum), &(pCompCol->minIndex), - &(pCompCol->maxIndex), &(pCompCol->numOfNull)); - } - nColsNotAllNull++; - } - - ASSERT(nColsNotAllNull >= 0 && nColsNotAllNull <= pDataCols->numOfCols); - - // Compress the data if neccessary - int tcol = 0; - int32_t toffset = 0; - int32_t tsize = TSDB_GET_COMPCOL_LEN(nColsNotAllNull); - int32_t lsize = tsize; - int32_t keyLen = 0; - for (int ncol = 0; ncol < pDataCols->numOfCols; ncol++) { - if (ncol != 0 && tcol >= nColsNotAllNull) break; - - SDataCol *pDataCol = pDataCols->cols + ncol; - SCompCol *pCompCol = pCompData->cols + tcol; - - if (ncol != 0 && (pDataCol->colId != pCompCol->colId)) continue; - void *tptr = POINTER_SHIFT(pCompData, lsize); - - int32_t flen = 0; // final length - int32_t tlen = dataColGetNEleLen(pDataCol, rowsToWrite); - - if (pCfg->compression) { - if (pCfg->compression == TWO_STAGE_COMP) { - pHelper->compBuffer = taosTRealloc(pHelper->compBuffer, tlen + COMP_OVERFLOW_BYTES); - if (pHelper->compBuffer == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - } - - flen = (*(tDataTypes[pDataCol->type].compFunc))((char *)pDataCol->pData, tlen, 
rowsToWrite, tptr, - (int32_t)taosTSizeof(pHelper->pBuffer) - lsize, pCfg->compression, - pHelper->compBuffer, (int32_t)taosTSizeof(pHelper->compBuffer)); - } else { - flen = tlen; - memcpy(tptr, pDataCol->pData, flen); - } - - // Add checksum - ASSERT(flen > 0); - flen += sizeof(TSCKSUM); - taosCalcChecksumAppend(0, (uint8_t *)tptr, flen); - pFile->info.magic = - taosCalcChecksum(pFile->info.magic, (uint8_t *)POINTER_SHIFT(tptr, flen - sizeof(TSCKSUM)), sizeof(TSCKSUM)); - - if (ncol != 0) { - pCompCol->offset = toffset; - pCompCol->len = flen; - tcol++; - } else { - keyLen = flen; - } - - toffset += flen; - lsize += flen; - } - - pCompData->delimiter = TSDB_FILE_DELIMITER; - pCompData->uid = pHelper->tableInfo.uid; - pCompData->numOfCols = nColsNotAllNull; - - taosCalcChecksumAppend(0, (uint8_t *)pCompData, tsize); - pFile->info.magic = taosCalcChecksum(pFile->info.magic, (uint8_t *)POINTER_SHIFT(pCompData, tsize - sizeof(TSCKSUM)), - sizeof(TSCKSUM)); - - // Write the whole block to file - if (taosWrite(pFile->fd, (void *)pCompData, lsize) < lsize) { - tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(helperRepo(pHelper)), lsize, pFile->fname, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - // Update pCompBlock membership vairables - pCompBlock->last = isLast; - pCompBlock->offset = offset; - pCompBlock->algorithm = pCfg->compression; - pCompBlock->numOfRows = rowsToWrite; - pCompBlock->len = lsize; - pCompBlock->keyLen = keyLen; - pCompBlock->numOfSubBlocks = isSuperBlock ? 1 : 0; - pCompBlock->numOfCols = nColsNotAllNull; - pCompBlock->keyFirst = dataColsKeyFirst(pDataCols); - pCompBlock->keyLast = dataColsKeyAt(pDataCols, rowsToWrite - 1); - - tsdbDebug("vgId:%d tid:%d a block of data is written to file %s, offset %" PRId64 - " numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64, - REPO_ID(helperRepo(pHelper)), pHelper->tableInfo.tid, pFile->fname, (int64_t)(pCompBlock->offset), - (int)(pCompBlock->numOfRows), pCompBlock->len, pCompBlock->numOfCols, pCompBlock->keyFirst, - pCompBlock->keyLast); - - pFile->info.size += pCompBlock->len; - // ASSERT(pFile->info.size == lseek(pFile->fd, 0, SEEK_CUR)); - - return 0; - -_err: - return -1; -} - -static int compareKeyBlock(const void *arg1, const void *arg2) { - TSKEY key = *(TSKEY *)arg1; - SCompBlock *pBlock = (SCompBlock *)arg2; - - if (key < pBlock->keyFirst) { - return -1; - } else if (key > pBlock->keyLast) { - return 1; - } - - return 0; -} - -static int tsdbAdjustInfoSizeIfNeeded(SRWHelper *pHelper, size_t esize) { - if (taosTSizeof((void *)pHelper->pCompInfo) <= esize) { - size_t tsize = esize + sizeof(SCompBlock) * 16; - pHelper->pCompInfo = (SCompInfo *)taosTRealloc(pHelper->pCompInfo, tsize); - if (pHelper->pCompInfo == NULL) return -1; - } - - return 0; -} - -static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx) { - SCompIdx *pIdx = &(pHelper->curCompIdx); - - ASSERT(blkIdx >= 0 && blkIdx <= (int)pIdx->numOfBlocks); - ASSERT(pCompBlock->numOfSubBlocks == 1); - - // Adjust memory if no more room - if (pIdx->len == 0) pIdx->len = sizeof(SCompInfo) + sizeof(TSCKSUM); - if (tsdbAdjustInfoSizeIfNeeded(pHelper, pIdx->len + sizeof(SCompInfo)) < 0) goto _err; - - // Change the offset - for (uint32_t i = 0; i < pIdx->numOfBlocks; i++) { - SCompBlock *pTCompBlock = &pHelper->pCompInfo->blocks[i]; - if (pTCompBlock->numOfSubBlocks > 1) pTCompBlock->offset += sizeof(SCompBlock); - } - - // Memmove if needed - int tsize = 
pIdx->len - (sizeof(SCompInfo) + sizeof(SCompBlock) * blkIdx); - if (tsize > 0) { - ASSERT(sizeof(SCompInfo) + sizeof(SCompBlock) * (blkIdx + 1) < taosTSizeof(pHelper->pCompInfo)); - ASSERT(sizeof(SCompInfo) + sizeof(SCompBlock) * (blkIdx + 1) + tsize <= taosTSizeof(pHelper->pCompInfo)); - memmove(POINTER_SHIFT(pHelper->pCompInfo, sizeof(SCompInfo) + sizeof(SCompBlock) * (blkIdx + 1)), - POINTER_SHIFT(pHelper->pCompInfo, sizeof(SCompInfo) + sizeof(SCompBlock) * blkIdx), tsize); - } - pHelper->pCompInfo->blocks[blkIdx] = *pCompBlock; - - pIdx->numOfBlocks++; - pIdx->len += sizeof(SCompBlock); - ASSERT(pIdx->len <= taosTSizeof(pHelper->pCompInfo)); - pIdx->maxKey = blockAtIdx(pHelper, pIdx->numOfBlocks - 1)->keyLast; - pIdx->hasLast = (uint32_t)blockAtIdx(pHelper, pIdx->numOfBlocks - 1)->last; - - if (pIdx->numOfBlocks > 1) { - ASSERT(pHelper->pCompInfo->blocks[0].keyLast < pHelper->pCompInfo->blocks[1].keyFirst); - } - - ASSERT((blkIdx == pIdx->numOfBlocks -1) || (!pCompBlock->last)); - - tsdbDebug("vgId:%d tid:%d a super block is inserted at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid, - blkIdx); - - return 0; - -_err: - return -1; -} - -static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, SMergeInfo *pMergeInfo) { - ASSERT(pCompBlock->numOfSubBlocks == 0); - - SCompIdx *pIdx = &(pHelper->curCompIdx); - ASSERT(blkIdx >= 0 && blkIdx < (int)pIdx->numOfBlocks); - - SCompBlock *pSCompBlock = pHelper->pCompInfo->blocks + blkIdx; - ASSERT(pSCompBlock->numOfSubBlocks >= 1 && pSCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS); - - size_t spaceNeeded = - (pSCompBlock->numOfSubBlocks == 1) ? pIdx->len + sizeof(SCompBlock) * 2 : pIdx->len + sizeof(SCompBlock); - if (tsdbAdjustInfoSizeIfNeeded(pHelper, spaceNeeded) < 0) goto _err; - - pSCompBlock = pHelper->pCompInfo->blocks + blkIdx; - - // Add the sub-block - if (pSCompBlock->numOfSubBlocks > 1) { - size_t tsize = (size_t)(pIdx->len - (pSCompBlock->offset + pSCompBlock->len)); - if (tsize > 0) { - memmove((void *)((char *)(pHelper->pCompInfo) + pSCompBlock->offset + pSCompBlock->len + sizeof(SCompBlock)), - (void *)((char *)(pHelper->pCompInfo) + pSCompBlock->offset + pSCompBlock->len), tsize); - - for (uint32_t i = blkIdx + 1; i < pIdx->numOfBlocks; i++) { - SCompBlock *pTCompBlock = &pHelper->pCompInfo->blocks[i]; - if (pTCompBlock->numOfSubBlocks > 1) pTCompBlock->offset += sizeof(SCompBlock); - } - } - - *(SCompBlock *)((char *)(pHelper->pCompInfo) + pSCompBlock->offset + pSCompBlock->len) = *pCompBlock; - - pSCompBlock->numOfSubBlocks++; - ASSERT(pSCompBlock->numOfSubBlocks <= TSDB_MAX_SUBBLOCKS); - pSCompBlock->len += sizeof(SCompBlock); - pSCompBlock->numOfRows = pSCompBlock->numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed; - pSCompBlock->keyFirst = pMergeInfo->keyFirst; - pSCompBlock->keyLast = pMergeInfo->keyLast; - pIdx->len += sizeof(SCompBlock); - } else { // Need to create two sub-blocks - void *ptr = NULL; - for (uint32_t i = blkIdx + 1; i < pIdx->numOfBlocks; i++) { - SCompBlock *pTCompBlock = pHelper->pCompInfo->blocks + i; - if (pTCompBlock->numOfSubBlocks > 1) { - ptr = POINTER_SHIFT(pHelper->pCompInfo, pTCompBlock->offset); - break; - } - } - - if (ptr == NULL) ptr = POINTER_SHIFT(pHelper->pCompInfo, pIdx->len - sizeof(TSCKSUM)); - - size_t tsize = pIdx->len - ((char *)ptr - (char *)(pHelper->pCompInfo)); - if (tsize > 0) { - memmove(POINTER_SHIFT(ptr, sizeof(SCompBlock) * 2), ptr, tsize); - for (uint32_t i = blkIdx + 1; i < pIdx->numOfBlocks; i++) { - SCompBlock 
*pTCompBlock = pHelper->pCompInfo->blocks + i; - if (pTCompBlock->numOfSubBlocks > 1) pTCompBlock->offset += (sizeof(SCompBlock) * 2); - } - } - - ((SCompBlock *)ptr)[0] = *pSCompBlock; - ((SCompBlock *)ptr)[0].numOfSubBlocks = 0; - - ((SCompBlock *)ptr)[1] = *pCompBlock; - - pSCompBlock->numOfSubBlocks = 2; - pSCompBlock->numOfRows = pSCompBlock->numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed; - pSCompBlock->offset = ((char *)ptr) - ((char *)pHelper->pCompInfo); - pSCompBlock->len = sizeof(SCompBlock) * 2; - pSCompBlock->keyFirst = pMergeInfo->keyFirst; - pSCompBlock->keyLast = pMergeInfo->keyLast; - - pIdx->len += (sizeof(SCompBlock) * 2); - } - - pIdx->maxKey = pHelper->pCompInfo->blocks[pIdx->numOfBlocks - 1].keyLast; - pIdx->hasLast = (uint32_t)pHelper->pCompInfo->blocks[pIdx->numOfBlocks - 1].last; - - tsdbDebug("vgId:%d tid:%d a subblock is added at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid, blkIdx); - - return 0; - -_err: - return -1; -} - -static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx) { - ASSERT(pCompBlock->numOfSubBlocks == 1); - - SCompIdx *pIdx = &(pHelper->curCompIdx); - - ASSERT(blkIdx >= 0 && blkIdx < (int)pIdx->numOfBlocks); - - SCompBlock *pSCompBlock = pHelper->pCompInfo->blocks + blkIdx; - - ASSERT(pSCompBlock->numOfSubBlocks >= 1); - - // Delete the sub blocks it has - if (pSCompBlock->numOfSubBlocks > 1) { - size_t tsize = (size_t)(pIdx->len - (pSCompBlock->offset + pSCompBlock->len)); - if (tsize > 0) { - memmove(POINTER_SHIFT(pHelper->pCompInfo, pSCompBlock->offset), - POINTER_SHIFT(pHelper->pCompInfo, pSCompBlock->offset + pSCompBlock->len), tsize); - } - - for (uint32_t i = blkIdx + 1; i < pIdx->numOfBlocks; i++) { - SCompBlock *pTCompBlock = &pHelper->pCompInfo->blocks[i]; - if (pTCompBlock->numOfSubBlocks > 1) pTCompBlock->offset -= (sizeof(SCompBlock) * pSCompBlock->numOfSubBlocks); - } - - pIdx->len -= (sizeof(SCompBlock) * pSCompBlock->numOfSubBlocks); - } - - *pSCompBlock = *pCompBlock; - - pIdx->maxKey = blockAtIdx(pHelper, pIdx->numOfBlocks - 1)->keyLast; - pIdx->hasLast = (uint32_t)blockAtIdx(pHelper, pIdx->numOfBlocks - 1)->last; - - ASSERT((blkIdx == pIdx->numOfBlocks-1) || (!pCompBlock->last)); - - tsdbDebug("vgId:%d tid:%d a super block is updated at index %d", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid, - blkIdx); - - return 0; -} - -static int tsdbDeleteSuperBlock(SRWHelper *pHelper, int blkIdx) { - SCompIdx *pCompIdx = &(pHelper->curCompIdx); - - ASSERT(pCompIdx->numOfBlocks > 0 && blkIdx < pCompIdx->numOfBlocks); - - SCompBlock *pCompBlock= blockAtIdx(pHelper, blkIdx); - SCompBlock compBlock = *pCompBlock; - ASSERT(pCompBlock->numOfSubBlocks > 0 && pCompBlock->numOfSubBlocks <= TSDB_MAX_SUBBLOCKS); - - if (pCompIdx->numOfBlocks == 1) { - memset(pCompIdx, 0, sizeof(*pCompIdx)); - } else { - int tsize = 0; - - if (compBlock.numOfSubBlocks > 1) { - tsize = (int)(pCompIdx->len - (compBlock.offset + sizeof(SCompBlock) * compBlock.numOfSubBlocks)); - - ASSERT(tsize > 0); - memmove(POINTER_SHIFT(pHelper->pCompInfo, compBlock.offset), - POINTER_SHIFT(pHelper->pCompInfo, compBlock.offset + sizeof(SCompBlock) * compBlock.numOfSubBlocks), - tsize); - - pCompIdx->len = pCompIdx->len - sizeof(SCompBlock) * compBlock.numOfSubBlocks; - } - - tsize = (int)(pCompIdx->len - POINTER_DISTANCE(blockAtIdx(pHelper, blkIdx + 1), pHelper->pCompInfo)); - ASSERT(tsize > 0); - memmove((void *)blockAtIdx(pHelper, blkIdx), (void *)blockAtIdx(pHelper, blkIdx + 1), tsize); - - pCompIdx->len -= 
sizeof(SCompBlock); - - pCompIdx->numOfBlocks--; - pCompIdx->hasLast = (uint32_t)(blockAtIdx(pHelper, pCompIdx->numOfBlocks - 1)->last); - pCompIdx->maxKey = blockAtIdx(pHelper, pCompIdx->numOfBlocks - 1)->keyLast; - } - - return 0; -} - -static void tsdbResetHelperFileImpl(SRWHelper *pHelper) { - pHelper->idxH.numOfIdx = 0; - pHelper->idxH.curIdx = 0; - memset((void *)&pHelper->files, 0, sizeof(pHelper->files)); - helperHeadF(pHelper)->fd = -1; - helperDataF(pHelper)->fd = -1; - helperLastF(pHelper)->fd = -1; - helperNewHeadF(pHelper)->fd = -1; - helperNewLastF(pHelper)->fd = -1; -} - -static int tsdbInitHelperFile(SRWHelper *pHelper) { - tsdbResetHelperFileImpl(pHelper); - return 0; -} - -static void tsdbDestroyHelperFile(SRWHelper *pHelper) { - tsdbCloseHelperFile(pHelper, false, NULL); - tsdbResetHelperFileImpl(pHelper); - taosTZfree(pHelper->idxH.pIdxArray); - taosTZfree(pHelper->pWIdx); -} - -// ---------- Operations on Helper Table part -static void tsdbResetHelperTableImpl(SRWHelper *pHelper) { - memset((void *)&pHelper->tableInfo, 0, sizeof(SHelperTable)); - pHelper->hasOldLastBlock = false; -} - -static void tsdbResetHelperTable(SRWHelper *pHelper) { - tsdbResetHelperBlock(pHelper); - tsdbResetHelperTableImpl(pHelper); - helperClearState(pHelper, (TSDB_HELPER_TABLE_SET | TSDB_HELPER_INFO_LOAD)); -} - -static void tsdbInitHelperTable(SRWHelper *pHelper) { tsdbResetHelperTableImpl(pHelper); } - -static void tsdbDestroyHelperTable(SRWHelper *pHelper) { taosTZfree((void *)pHelper->pCompInfo); } - -// ---------- Operations on Helper Block part -static void tsdbResetHelperBlockImpl(SRWHelper *pHelper) { - tdResetDataCols(pHelper->pDataCols[0]); - tdResetDataCols(pHelper->pDataCols[1]); -} - -static void tsdbResetHelperBlock(SRWHelper *pHelper) { - tsdbResetHelperBlockImpl(pHelper); - // helperClearState(pHelper, TSDB_HELPER_) -} - -static int tsdbInitHelperBlock(SRWHelper *pHelper) { - STsdbRepo *pRepo = helperRepo(pHelper); - STsdbMeta *pMeta = pHelper->pRepo->tsdbMeta; - - pHelper->pDataCols[0] = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); - pHelper->pDataCols[1] = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); - if (pHelper->pDataCols[0] == NULL || pHelper->pDataCols[1] == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - tsdbResetHelperBlockImpl(pHelper); - - return 0; -} - -static void tsdbDestroyHelperBlock(SRWHelper *pHelper) { - taosTZfree(pHelper->pCompData); - tdFreeDataCols(pHelper->pDataCols[0]); - tdFreeDataCols(pHelper->pDataCols[1]); -} - -static int tsdbInitHelper(SRWHelper *pHelper, STsdbRepo *pRepo, tsdb_rw_helper_t type) { - STsdbCfg *pCfg = &pRepo->config; - memset((void *)pHelper, 0, sizeof(*pHelper)); - STsdbMeta *pMeta = pRepo->tsdbMeta; - - helperType(pHelper) = type; - helperRepo(pHelper) = pRepo; - helperState(pHelper) = TSDB_HELPER_CLEAR_STATE; - - // Init file part - if (tsdbInitHelperFile(pHelper) < 0) goto _err; - - // Init table part - tsdbInitHelperTable(pHelper); - - // Init block part - if (tsdbInitHelperBlock(pHelper) < 0) goto _err; - - // TODO: pMeta->maxRowBytes and pMeta->maxCols may change here causing invalid write - pHelper->pBuffer = - taosTMalloc(sizeof(SCompData) + (sizeof(SCompCol) + sizeof(TSCKSUM) + COMP_OVERFLOW_BYTES) * pMeta->maxCols + - pMeta->maxRowBytes * pCfg->maxRowsPerFileBlock + sizeof(TSCKSUM)); - if (pHelper->pBuffer == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - return 0; - -_err: - 
tsdbDestroyHelper(pHelper); - return -1; -} - -static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32_t len, int8_t comp, int numOfRows, - int maxPoints, char *buffer, int bufferSize) { - // Verify by checksum - if (!taosCheckChecksumWhole((uint8_t *)content, len)) { - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - - // Decode the data - if (comp) { - // // Need to decompress - int tlen = (*(tDataTypes[pDataCol->type].decompFunc))(content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData, - pDataCol->spaceSize, comp, buffer, bufferSize); - if (tlen <= 0) { - tsdbError("Failed to decompress column, file corrupted, len:%d comp:%d numOfRows:%d maxPoints:%d bufferSize:%d", - len, comp, numOfRows, maxPoints, bufferSize); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - return -1; - } - pDataCol->len = tlen; - if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) { - dataColSetOffset(pDataCol, numOfRows); - } - } else { - // No need to decompress, just memcpy it - pDataCol->len = len - sizeof(TSCKSUM); - memcpy(pDataCol->pData, content, pDataCol->len); - if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) { - dataColSetOffset(pDataCol, numOfRows); - } - } - return 0; -} - -static int tsdbLoadColData(SRWHelper *pHelper, SFile *pFile, SCompBlock *pCompBlock, SCompCol *pCompCol, - SDataCol *pDataCol) { - ASSERT(pDataCol->colId == pCompCol->colId); - int tsize = pDataCol->bytes * pCompBlock->numOfRows + COMP_OVERFLOW_BYTES; - pHelper->pBuffer = taosTRealloc(pHelper->pBuffer, pCompCol->len); - if (pHelper->pBuffer == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - pHelper->compBuffer = taosTRealloc(pHelper->compBuffer, tsize); - if (pHelper->compBuffer == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - int64_t offset = pCompBlock->offset + TSDB_GET_COMPCOL_LEN(pCompBlock->numOfCols) + pCompCol->offset; - if (lseek(pFile->fd, (off_t)offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (taosRead(pFile->fd, pHelper->pBuffer, pCompCol->len) < pCompCol->len) { - tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pCompCol->len, pFile->fname, - strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (tsdbCheckAndDecodeColumnData(pDataCol, pHelper->pBuffer, pCompCol->len, pCompBlock->algorithm, - pCompBlock->numOfRows, pHelper->pRepo->config.maxRowsPerFileBlock, - pHelper->compBuffer, (int32_t)taosTSizeof(pHelper->compBuffer)) < 0) { - tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pHelper->pRepo), pFile->fname, - pCompCol->colId, offset); - return -1; - } - - return 0; -} - -static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *pDataCols, int16_t *colIds, int numOfColIds) { - ASSERT(pCompBlock->numOfSubBlocks <= 1); - ASSERT(colIds[0] == 0); - - SFile * pFile = (pCompBlock->last) ? 
helperLastF(pHelper) : helperDataF(pHelper); - SCompCol compCol = {0}; - - // If only load timestamp column, no need to load SCompData part - if (numOfColIds > 1 && tsdbLoadCompData(pHelper, pCompBlock, NULL) < 0) goto _err; - - pDataCols->numOfRows = pCompBlock->numOfRows; - - int dcol = 0; - int ccol = 0; - for (int i = 0; i < numOfColIds; i++) { - int16_t colId = colIds[i]; - SDataCol *pDataCol = NULL; - SCompCol *pCompCol = NULL; - - while (true) { - if (dcol >= pDataCols->numOfCols) { - pDataCol = NULL; - break; - } - pDataCol = &pDataCols->cols[dcol]; - if (pDataCol->colId > colId) { - pDataCol = NULL; - break; - } else { - dcol++; - if (pDataCol->colId == colId) break; - } - } - - if (pDataCol == NULL) continue; - ASSERT(pDataCol->colId == colId); - - if (colId == 0) { // load the key row - compCol.colId = colId; - compCol.len = pCompBlock->keyLen; - compCol.type = pDataCol->type; - compCol.offset = TSDB_KEY_COL_OFFSET; - pCompCol = &compCol; - } else { // load non-key rows - while (true) { - if (ccol >= pCompBlock->numOfCols) { - pCompCol = NULL; - break; - } - - pCompCol = &(pHelper->pCompData->cols[ccol]); - if (pCompCol->colId > colId) { - pCompCol = NULL; - break; - } else { - ccol++; - if (pCompCol->colId == colId) break; - } - } - - if (pCompCol == NULL) { - dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints); - continue; - } - - ASSERT(pCompCol->colId == pDataCol->colId); - } - - if (tsdbLoadColData(pHelper, pFile, pCompBlock, pCompCol, pDataCol) < 0) goto _err; - } - - return 0; - -_err: - return -1; -} - -static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *pDataCols) { - ASSERT(pCompBlock->numOfSubBlocks <= 1); - - SFile *pFile = (pCompBlock->last) ? helperLastF(pHelper) : helperDataF(pHelper); - - pHelper->pBuffer = taosTRealloc(pHelper->pBuffer, pCompBlock->len); - if (pHelper->pBuffer == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - SCompData *pCompData = (SCompData *)pHelper->pBuffer; - - int fd = pFile->fd; - if (lseek(fd, (off_t)pCompBlock->offset, SEEK_SET) < 0) { - tsdbError("vgId:%d tid:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->tableInfo.tid, - pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - if (taosRead(fd, (void *)pCompData, pCompBlock->len) < pCompBlock->len) { - tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pCompBlock->len, - pFile->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - int32_t tsize = TSDB_GET_COMPCOL_LEN(pCompBlock->numOfCols); - if (!taosCheckChecksumWhole((uint8_t *)pCompData, tsize)) { - tsdbError("vgId:%d file %s block data is corrupted offset %" PRId64 " len %d", REPO_ID(pHelper->pRepo), - pFile->fname, (int64_t)(pCompBlock->offset), pCompBlock->len); - terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - goto _err; - } - ASSERT(pCompData->numOfCols == pCompBlock->numOfCols); - - pDataCols->numOfRows = pCompBlock->numOfRows; - - // Recover the data - int ccol = 0; // loop iter for SCompCol object - int dcol = 0; // loop iter for SDataCols object - while (dcol < pDataCols->numOfCols) { - SDataCol *pDataCol = &(pDataCols->cols[dcol]); - if (dcol != 0 && ccol >= pCompData->numOfCols) { - // Set current column as NULL and forward - dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints); - dcol++; - continue; - } - - int16_t tcolId = 0; - int32_t toffset = TSDB_KEY_COL_OFFSET; - int32_t tlen = pCompBlock->keyLen; 
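For orientation while reading this decode loop: each on-disk data block written by the removed helper begins with an SCompData header, one SCompCol entry per stored column, and a trailing checksum (the area sized by TSDB_GET_COMPCOL_LEN above), followed by the key column at column offset 0 (length keyLen) and each remaining column's compressed payload at its recorded offset. A minimal sketch of that offset arithmetic, with cut-down stand-in structs (the real SCompBlock/SCompCol definitions live in the removed headers, not shown here):

#include <stdint.h>
#include <stddef.h>

/* Cut-down stand-ins for SCompBlock and SCompCol; field names follow the removed code. */
typedef struct { int64_t offset; int32_t numOfCols; int32_t keyLen; } BlockMetaSketch;
typedef struct { int16_t colId; int32_t len; int32_t offset; } ColMetaSketch;

/* Size of the per-block metadata area: header + one entry per stored column + checksum
 * (this is what TSDB_GET_COMPCOL_LEN computes). */
static size_t blockMetaLen(int32_t nCols, size_t hdrSize, size_t entrySize, size_t cksumSize) {
  return hdrSize + entrySize * (size_t)nCols + cksumSize;
}

/* File offset of one column's payload: block start + metadata area + per-column offset.
 * The key (timestamp) column is stored first, at column offset 0, with length keyLen. */
static int64_t colPayloadOffset(const BlockMetaSketch *blk, const ColMetaSketch *col,
                                size_t hdrSize, size_t entrySize, size_t cksumSize) {
  return blk->offset + (int64_t)blockMetaLen(blk->numOfCols, hdrSize, entrySize, cksumSize) + col->offset;
}

This is the same arithmetic tsdbLoadColData() uses when it seeks to pCompBlock->offset + TSDB_GET_COMPCOL_LEN(numOfCols) + pCompCol->offset before reading and checksum-verifying a single column.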
- - if (dcol != 0) { - SCompCol *pCompCol = &(pCompData->cols[ccol]); - tcolId = pCompCol->colId; - toffset = pCompCol->offset; - tlen = pCompCol->len; - } else { - ASSERT(pDataCol->colId == tcolId); - } - - if (tcolId == pDataCol->colId) { - if (pCompBlock->algorithm == TWO_STAGE_COMP) { - int zsize = pDataCol->bytes * pCompBlock->numOfRows + COMP_OVERFLOW_BYTES; - if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) { - zsize += (sizeof(VarDataLenT) * pCompBlock->numOfRows); - } - pHelper->compBuffer = taosTRealloc(pHelper->compBuffer, zsize); - if (pHelper->compBuffer == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - } - if (tsdbCheckAndDecodeColumnData(pDataCol, (char *)pCompData + tsize + toffset, tlen, pCompBlock->algorithm, - pCompBlock->numOfRows, pDataCols->maxPoints, pHelper->compBuffer, - (int32_t)taosTSizeof(pHelper->compBuffer)) < 0) { - tsdbError("vgId:%d file %s is broken at column %d block offset %" PRId64 " column offset %d", - REPO_ID(pHelper->pRepo), pFile->fname, tcolId, (int64_t)pCompBlock->offset, toffset); - goto _err; - } - if (dcol != 0) ccol++; - dcol++; - } else if (tcolId < pDataCol->colId) { - ccol++; - } else { - // Set current column as NULL and forward - dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints); - dcol++; - } - } - - return 0; - -_err: - return -1; -} - -static int tsdbEncodeSCompIdx(void **buf, SCompIdx *pIdx) { - int tlen = 0; - - tlen += taosEncodeVariantI32(buf, pIdx->tid); - tlen += taosEncodeVariantU32(buf, pIdx->len); - tlen += taosEncodeVariantU32(buf, pIdx->offset); - tlen += taosEncodeFixedU8(buf, pIdx->hasLast); - tlen += taosEncodeVariantU32(buf, pIdx->numOfBlocks); - tlen += taosEncodeFixedU64(buf, pIdx->uid); - tlen += taosEncodeFixedU64(buf, pIdx->maxKey); - - return tlen; -} - -static void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx) { - uint8_t hasLast = 0; - uint32_t numOfBlocks = 0; - uint64_t value = 0; - - if ((buf = taosDecodeVariantI32(buf, &(pIdx->tid))) == NULL) return NULL; - if ((buf = taosDecodeVariantU32(buf, &(pIdx->len))) == NULL) return NULL; - if ((buf = taosDecodeVariantU32(buf, &(pIdx->offset))) == NULL) return NULL; - if ((buf = taosDecodeFixedU8(buf, &(hasLast))) == NULL) return NULL; - pIdx->hasLast = hasLast; - if ((buf = taosDecodeVariantU32(buf, &(numOfBlocks))) == NULL) return NULL; - pIdx->numOfBlocks = numOfBlocks; - if ((buf = taosDecodeFixedU64(buf, &value)) == NULL) return NULL; - pIdx->uid = (int64_t)value; - if ((buf = taosDecodeFixedU64(buf, &value)) == NULL) return NULL; - pIdx->maxKey = (TSKEY)value; - - return buf; -} - -static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey) { - STsdbCfg * pCfg = &(pHelper->pRepo->config); - STable * pTable = pCommitIter->pTable; - SCompIdx * pIdx = &(pHelper->curCompIdx); - TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); - int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5; - SCompBlock compBlock = {0}; - SMergeInfo mergeInfo = {0}; - SMergeInfo *pMergeInfo = &mergeInfo; - - ASSERT(pIdx->len <= 0 || keyFirst > pIdx->maxKey); - if (pIdx->hasLast) { // append to with last block - ASSERT(pIdx->len > 0); - SCompBlock *pCompBlock = blockAtIdx(pHelper, pIdx->numOfBlocks - 1); - ASSERT(pCompBlock->last && pCompBlock->numOfRows < pCfg->minRowsPerFileBlock); - tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, defaultRowsInBlock - pCompBlock->numOfRows, pDataCols, - NULL, 0, pCfg->update, pMergeInfo); - - 
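The branch that follows decides whether the rows just pulled from the commit iterator become a sub-block of the existing last block or force a load-merge-rewrite of that block. A minimal sketch of the decision predicate, using a stand-in constant in place of TSDB_MAX_SUBBLOCKS (whose real value is defined in the removed headers) and plain parameters instead of the helper structs:

#include <stdbool.h>

/* Stand-in for TSDB_MAX_SUBBLOCKS; the real limit is defined elsewhere in the removed code. */
#define MAX_SUBBLOCKS_SKETCH 8

/* Mirrors the condition used below: keep appending sub-blocks to the last block only while
 * the merged block would still be smaller than minRowsPerFileBlock, the sub-block slots are
 * not exhausted, and no new .last file was opened for this commit; otherwise the old block
 * is loaded, merged in memory, and rewritten as a single super block. */
static bool appendAsSubBlock(int newRows, int lastBlockRows, int numOfSubBlocks,
                             bool newLastFileOpened, int minRowsPerFileBlock) {
  return (newRows + lastBlockRows < minRowsPerFileBlock) &&
         (numOfSubBlocks < MAX_SUBBLOCKS_SKETCH) &&
         !newLastFileOpened;
}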
ASSERT(pMergeInfo->rowsInserted == pMergeInfo->nOperations && pMergeInfo->nOperations == pDataCols->numOfRows); - - if (pDataCols->numOfRows > 0) { - ASSERT((pMergeInfo->keyFirst == dataColsKeyFirst(pDataCols)) && (pMergeInfo->keyLast == dataColsKeyLast(pDataCols))); - - if (pDataCols->numOfRows + pCompBlock->numOfRows < pCfg->minRowsPerFileBlock && - pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) { - if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1; - pMergeInfo->keyFirst = MIN(pMergeInfo->keyFirst, pCompBlock->keyFirst); - pMergeInfo->keyLast = MAX(pMergeInfo->keyLast, pCompBlock->keyLast); - if (tsdbAddSubBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1, pMergeInfo) < 0) return -1; - } else { - if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1; - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows); - - if (tdMergeDataCols(pHelper->pDataCols[0], pDataCols, pDataCols->numOfRows) < 0) return -1; - ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows + pDataCols->numOfRows); - - if (tsdbWriteBlockToProperFile(pHelper, pHelper->pDataCols[0], &compBlock) < 0) return -1; - if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1; - } - - if (pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; - } - } else { - ASSERT(!pHelper->hasOldLastBlock); - tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, defaultRowsInBlock, pDataCols, NULL, 0, pCfg->update, pMergeInfo); - ASSERT(pMergeInfo->rowsInserted == pMergeInfo->nOperations && pMergeInfo->nOperations == pDataCols->numOfRows); - - if (pDataCols->numOfRows > 0) { - ASSERT((pMergeInfo->keyFirst == dataColsKeyFirst(pDataCols)) && (pMergeInfo->keyLast == dataColsKeyLast(pDataCols))); - if (tsdbWriteBlockToProperFile(pHelper, pDataCols, &compBlock) < 0) return -1; - if (tsdbInsertSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks) < 0) return -1; - } - } - -#ifndef NDEBUG - TSKEY keyNext = tsdbNextIterKey(pCommitIter->pIter); - ASSERT(keyNext == TSDB_DATA_TIMESTAMP_NULL || keyNext > pIdx->maxKey); -#endif - - return 0; -} - -static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey, - int *blkIdx) { - STsdbCfg * pCfg = &(pHelper->pRepo->config); - STable * pTable = pCommitIter->pTable; - SCompIdx * pIdx = &(pHelper->curCompIdx); - SCompBlock compBlock = {0}; - TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter); - int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5; - SDataCols * pDataCols0 = pHelper->pDataCols[0]; - SMergeInfo mergeInfo = {0}; - SMergeInfo *pMergeInfo = &mergeInfo; - SCompBlock oBlock = {0}; - - SSkipListIterator slIter = {0}; - - ASSERT(keyFirst <= pIdx->maxKey); - - SCompBlock *pCompBlock = taosbsearch((void *)(&keyFirst), (void *)blockAtIdx(pHelper, *blkIdx), - pIdx->numOfBlocks - *blkIdx, sizeof(SCompBlock), compareKeyBlock, TD_GE); - ASSERT(pCompBlock != NULL); - int tblkIdx = (int32_t)(TSDB_GET_COMPBLOCK_IDX(pHelper, pCompBlock)); - oBlock = *pCompBlock; - - ASSERT((!TSDB_IS_LAST_BLOCK(&oBlock)) || (tblkIdx == pIdx->numOfBlocks - 1)); - - if ((!TSDB_IS_LAST_BLOCK(&oBlock)) && keyFirst < pCompBlock->keyFirst) { - while (true) { - tsdbLoadDataFromCache(pTable, pCommitIter->pIter, oBlock.keyFirst-1, defaultRowsInBlock, pDataCols, NULL, 0, - pCfg->update, pMergeInfo); - ASSERT(pMergeInfo->rowsInserted == pMergeInfo->nOperations && pMergeInfo->nOperations == pDataCols->numOfRows); - if 
(pDataCols->numOfRows == 0) break; - - if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) return -1; - if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - tblkIdx++; - } - ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) == TSDB_DATA_TIMESTAMP_NULL || - tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast)); - } else { - int16_t colId = 0; - if (tsdbLoadBlockDataCols(pHelper, &oBlock, NULL, &colId, 1) < 0) return -1; - - TSKEY keyLimit = (tblkIdx == pIdx->numOfBlocks - 1) ? maxKey : (blockAtIdx(pHelper, tblkIdx + 1)->keyFirst - 1); - - slIter = *(pCommitIter->pIter); - tsdbLoadDataFromCache(pTable, &slIter, keyLimit, INT_MAX, NULL, pDataCols0->cols[0].pData, pDataCols0->numOfRows, - pCfg->update, pMergeInfo); - - if (pMergeInfo->nOperations == 0) { - // Do nothing - ASSERT(pMergeInfo->rowsDeleteFailed >= 0); - *(pCommitIter->pIter) = slIter; - tblkIdx++; - } else if (oBlock.numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed == 0) { - // Delete the block and do some stuff - // ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN); - if (tsdbDeleteSuperBlock(pHelper, tblkIdx) < 0) return -1; - *pCommitIter->pIter = slIter; - if (oBlock.last && pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; - } else if (tsdbCheckAddSubBlockCond(pHelper, &oBlock, pMergeInfo, pDataCols->maxPoints)) { - // Append as a sub-block of the searched block - tsdbLoadDataFromCache(pTable, pCommitIter->pIter, keyLimit, INT_MAX, pDataCols, pDataCols0->cols[0].pData, - pDataCols0->numOfRows, pCfg->update, pMergeInfo); - ASSERT(memcmp(pCommitIter->pIter, &slIter, sizeof(slIter)) == 0); - if (tsdbWriteBlockToFile(pHelper, oBlock.last ? helperLastF(pHelper) : helperDataF(pHelper), pDataCols, - &compBlock, oBlock.last, false) < 0) { - return -1; - } - if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, pMergeInfo) < 0) { - return -1; - } - tblkIdx++; - } else { - // load the block data, merge with the memory data - if (tsdbLoadBlockData(pHelper, &oBlock, NULL) < 0) return -1; - int round = 0; - int dIter = 0; - while (true) { - tsdbLoadAndMergeFromCache(pDataCols0, &dIter, pCommitIter, pDataCols, keyLimit, defaultRowsInBlock, - pCfg->update); - - if (pDataCols->numOfRows == 0) break; - if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) return -1; - - if (round == 0) { - if (oBlock.last && pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; - if (tsdbUpdateSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - } else { - if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1; - } - - round++; - tblkIdx++; - } - } - } - - *blkIdx = tblkIdx; - return 0; -} - -static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update) { - TSKEY key1 = INT64_MAX; - TSKEY key2 = INT64_MAX; - STSchema *pSchema = NULL; - - ASSERT(maxRows > 0 && dataColsKeyLast(pDataCols) <= maxKey); - tdResetDataCols(pTarget); - - while (true) { - key1 = (*iter >= pDataCols->numOfRows) ? 
INT64_MAX : dataColsKeyAt(pDataCols, *iter); - bool isRowDel = false; - SDataRow row = tsdbNextIterRow(pCommitIter->pIter); - if (row == NULL || dataRowKey(row) > maxKey) { - key2 = INT64_MAX; - } else { - key2 = dataRowKey(row); - isRowDel = dataRowDeleted(row); - } - - if (key1 == INT64_MAX && key2 == INT64_MAX) break; - - if (key1 < key2) { - for (int i = 0; i < pDataCols->numOfCols; i++) { - dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, - pTarget->maxPoints); - } - - pTarget->numOfRows++; - (*iter)++; - } else if (key1 > key2) { - if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); - ASSERT(pSchema != NULL); - } - - tdAppendDataRowToDataCol(row, pSchema, pTarget); - } - - tSkipListIterNext(pCommitIter->pIter); - } else { - if (update) { - if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); - ASSERT(pSchema != NULL); - } - - tdAppendDataRowToDataCol(row, pSchema, pTarget); - } - } else { - ASSERT(!isRowDel); - - for (int i = 0; i < pDataCols->numOfCols; i++) { - dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, - pTarget->maxPoints); - } - - pTarget->numOfRows++; - } - (*iter)++; - tSkipListIterNext(pCommitIter->pIter); - } - - if (pTarget->numOfRows >= maxRows) break; - } -} - -static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols, SCompBlock *pCompBlock) { - STsdbCfg *pCfg = &(pHelper->pRepo->config); - SFile * pFile = NULL; - bool isLast = false; - - ASSERT(pDataCols->numOfRows > 0); - - if (pDataCols->numOfRows >= pCfg->minRowsPerFileBlock) { - pFile = helperDataF(pHelper); - } else { - isLast = true; - pFile = TSDB_NLAST_FILE_OPENED(pHelper) ? 
helperNewLastF(pHelper) : helperLastF(pHelper); - } - - ASSERT(pFile->fd > 0); - - if (tsdbWriteBlockToFile(pHelper, pFile, pDataCols, pCompBlock, isLast, true) < 0) return -1; - - return 0; -} - -static bool tsdbCheckAddSubBlockCond(SRWHelper *pHelper, SCompBlock *pCompBlock, SMergeInfo *pMergeInfo, int maxOps) { - STsdbCfg *pCfg = &(pHelper->pRepo->config); - int mergeRows = pCompBlock->numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed; - - ASSERT(mergeRows > 0); - - if (pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && pMergeInfo->nOperations <= maxOps) { - if (pCompBlock->last) { - if (!TSDB_NLAST_FILE_OPENED(pHelper) && mergeRows < pCfg->minRowsPerFileBlock) return true; - } else { - if (mergeRows < pCfg->maxRowsPerFileBlock) return true; - } - } - - return false; -} \ No newline at end of file diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 4045e302c7..7162f74d3e 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -20,8 +20,7 @@ #include "exception.h" #include "tlosertree.h" -#include "tsdb.h" -#include "tsdbMain.h" +#include "tsdbint.h" #include "texpr.h" #define EXTRA_BYTES 2 @@ -54,7 +53,7 @@ typedef struct SQueryFilePos { } SQueryFilePos; typedef struct SDataBlockLoadInfo { - SFileGroup* fileGroup; + SDFileSet* fileGroup; int32_t slot; int32_t tid; SArray* pLoadedCols; @@ -69,7 +68,7 @@ typedef struct STableCheckInfo { STableId tableId; TSKEY lastKey; STable* pTableObj; - SCompInfo* pCompInfo; + SBlockInfo* pCompInfo; int32_t compSize; int32_t numOfBlocks:29; // number of qualified data blocks not the original blocks int8_t chosen:2; // indicate which iterator should move forward @@ -79,7 +78,7 @@ typedef struct STableCheckInfo { } STableCheckInfo; typedef struct STableBlockInfo { - SCompBlock* compBlock; + SBlock* compBlock; STableCheckInfo* pTableCheckInfo; } STableBlockInfo; @@ -114,9 +113,9 @@ typedef struct STsdbQueryHandle { bool loadExternalRow; // load time window external data rows void* qinfo; // query info handle, for debug purpose int32_t type; // query type: retrieve all data blocks, 2. retrieve only last row, 3. 
retrieve direct prev|next rows - SFileGroup* pFileGroup; - SFileGroupIter fileIter; - SRWHelper rhelper; + SDFileSet* pFileGroup; + SFSIter fileIter; + SReadH rhelper; STableBlockInfo* pDataBlockInfo; SDataCols *pDataCols; // in order to hold current file data block @@ -142,7 +141,7 @@ static int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroup static int32_t tsdbGetCachedLastRow(STable* pTable, SDataRow* pRes, TSKEY* lastKey); static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle); -static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock); +static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SBlock* pBlock); static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); static int32_t tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win, STsdbQueryHandle* pQueryHandle); static int32_t tsdbCheckInfoCompar(const void* key1, const void* key2); @@ -308,7 +307,7 @@ static SArray* createCheckInfoFromCheckInfo(SArray* pTableCheckInfo, TSKEY skey) return pNew; } -static STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, void* qinfo, SMemRef* pMemRef) { +static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pCond, void* qinfo, SMemRef* pMemRef) { STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); if (pQueryHandle == NULL) { goto out_of_memory; @@ -318,7 +317,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* pQueryHandle->window = pCond->twindow; pQueryHandle->pTsdb = tsdb; pQueryHandle->type = TSDB_QUERY_TYPE_ALL; - pQueryHandle->cur.fid = -1; + pQueryHandle->cur.fid = INT32_MIN; pQueryHandle->cur.win = TSWINDOW_INITIALIZER; pQueryHandle->checkFiles = true; pQueryHandle->activeIndex = 0; // current active table index @@ -329,7 +328,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* pQueryHandle->pMemRef = pMemRef; pQueryHandle->loadExternalRow = pCond->loadExternalRows; - if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) { + if (tsdbInitReadH(&pQueryHandle->rhelper, (STsdbRepo*)tsdb) != 0) { goto out_of_memory; } @@ -388,7 +387,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(TSDB_REPO_T* tsdb, STsdbQueryCond* return NULL; } -TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo, SMemRef* pRef) { +TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo, SMemRef* pRef) { STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qinfo, pRef); STsdbMeta* pMeta = tsdbGetMeta(tsdb); @@ -406,7 +405,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab return (TsdbQueryHandleT) pQueryHandle; } -TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pMemRef) { +TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pMemRef) { pCond->twindow = updateLastrowForEachGroup(groupList); // no qualified table @@ -442,7 +441,7 @@ SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle) { return res; } -TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TSDB_REPO_T *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pRef) { 
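The rest of this hunk is largely a mechanical rename from the removed read/write helper API to the new file-set read API; the correspondence below is inferred from the one-for-one substitutions visible in this file (the new headers themselves are outside this diff):

/* Old read-path name           New name in this diff
 * SRWHelper (read mode)     -> SReadH
 * SFileGroup                -> SDFileSet
 * SFileGroupIter            -> SFSIter
 * SCompIdx                  -> SBlockIdx
 * SCompInfo                 -> SBlockInfo
 * SCompBlock                -> SBlock
 * TSDB_REPO_T*              -> STsdbRepo*
 * tsdbInitReadHelper()      -> tsdbInitReadH()
 * tsdbSetHelperTable()      -> tsdbSetReadTable()
 * tsdbLoadCompInfo()        -> tsdbLoadBlockInfo()
 */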
+TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pRef) { STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo, pRef); pQueryHandle->loadExternalRow = true; if (pQueryHandle != NULL) { @@ -479,16 +478,18 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) { pMem = pMemT->tData[pCheckInfo->tableId.tid]; if (pMem != NULL && pMem->uid == pCheckInfo->tableId.uid) { // check uid + TKEY tLastKey = keyToTkey(pCheckInfo->lastKey); pCheckInfo->iter = - tSkipListCreateIterFromVal(pMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + tSkipListCreateIterFromVal(pMem->pData, (const char*)&tLastKey, TSDB_DATA_TYPE_TIMESTAMP, order); } } if (pIMemT && pCheckInfo->tableId.tid < pIMemT->maxTables) { pIMem = pIMemT->tData[pCheckInfo->tableId.tid]; if (pIMem != NULL && pIMem->uid == pCheckInfo->tableId.uid) { // check uid + TKEY tLastKey = keyToTkey(pCheckInfo->lastKey); pCheckInfo->iiter = - tSkipListCreateIterFromVal(pIMem->pData, (const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order); + tSkipListCreateIterFromVal(pIMem->pData, (const char*)&tLastKey, TSDB_DATA_TYPE_TIMESTAMP, order); } } @@ -653,7 +654,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { STsdbCfg *pCfg = &pHandle->pTsdb->config; size_t size = taosArrayGetSize(pHandle->pTableCheckInfo); assert(pHandle->activeIndex < size && pHandle->activeIndex >= 0 && size >= 1); - pHandle->cur.fid = -1; + pHandle->cur.fid = INT32_MIN; STableCheckInfo* pCheckInfo = taosArrayGet(pHandle->pTableCheckInfo, pHandle->activeIndex); @@ -713,7 +714,7 @@ static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile, int32_t precisio return (int32_t)fid; } -static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { +static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { int32_t firstSlot = 0; int32_t lastSlot = numOfBlocks - 1; @@ -751,16 +752,16 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i); pCheckInfo->numOfBlocks = 0; - if (tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb) != TSDB_CODE_SUCCESS) { + if (tsdbSetReadTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj) != TSDB_CODE_SUCCESS) { code = terrno; break; } - SCompIdx* compIndex = &pQueryHandle->rhelper.curCompIdx; + SBlockIdx* compIndex = pQueryHandle->rhelper.pBlkIdx; // no data block in this file, try next file - if (compIndex->len == 0 || compIndex->numOfBlocks == 0 || compIndex->uid != pCheckInfo->tableId.uid) { - continue; // no data blocks in the file belongs to pCheckInfo->pTable + if (compIndex == NULL || compIndex->uid != pCheckInfo->tableId.uid) { + continue; // no data blocks in the file belongs to pCheckInfo->pTable } if (pCheckInfo->compSize < (int32_t)compIndex->len) { @@ -773,12 +774,12 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo break; } - pCheckInfo->pCompInfo = (SCompInfo*) t; + pCheckInfo->pCompInfo = (SBlockInfo*) t; pCheckInfo->compSize = compIndex->len; } - tsdbLoadCompInfo(&(pQueryHandle->rhelper), (void *)(pCheckInfo->pCompInfo)); - SCompInfo* pCompInfo = pCheckInfo->pCompInfo; + tsdbLoadBlockInfo(&(pQueryHandle->rhelper), (void 
*)(pCheckInfo->pCompInfo)); + SBlockInfo* pCompInfo = pCheckInfo->pCompInfo; TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL; @@ -807,7 +808,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo pCheckInfo->numOfBlocks = (end - start); if (start > 0) { - memmove(pCompInfo->blocks, &pCompInfo->blocks[start], pCheckInfo->numOfBlocks * sizeof(SCompBlock)); + memmove(pCompInfo->blocks, &pCompInfo->blocks[start], pCheckInfo->numOfBlocks * sizeof(SBlock)); } (*numOfBlocks) += pCheckInfo->numOfBlocks; @@ -816,7 +817,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo return code; } -static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, int32_t slotIndex) { +static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo, int32_t slotIndex) { int64_t st = taosGetTimestampUs(); STSchema *pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); @@ -827,14 +828,14 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p goto _error; } - code = tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema); + code = tdInitDataCols(pQueryHandle->rhelper.pDCols[0], pSchema); if (code != TSDB_CODE_SUCCESS) { tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %p", pQueryHandle, pQueryHandle->qinfo); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _error; } - code = tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema); + code = tdInitDataCols(pQueryHandle->rhelper.pDCols[1], pSchema); if (code != TSDB_CODE_SUCCESS) { tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %p", pQueryHandle, pQueryHandle->qinfo); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -856,7 +857,7 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p pBlockLoadInfo->slot = pQueryHandle->cur.slot; pBlockLoadInfo->tid = pCheckInfo->pTableObj->tableId.tid; - SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; + SDataCols* pCols = pQueryHandle->rhelper.pDCols[0]; assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows); pBlock->numOfRows = pCols->numOfRows; @@ -882,7 +883,7 @@ static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, i static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle); static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SDataBlockInfo* pBlockInfo, int32_t endPos); -static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo){ +static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo){ SQueryFilePos* cur = &pQueryHandle->cur; STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); @@ -965,7 +966,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBloc return code; } -static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo, bool* exists) { +static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo, bool* exists) { SQueryFilePos* cur = &pQueryHandle->cur; int32_t code = TSDB_CODE_SUCCESS; @@ -977,7 +978,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBl return code; } - SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0]; + 
SDataCols* pTSCol = pQueryHandle->rhelper.pDCols[0]; assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfRows == pBlock->numOfRows); if (pCheckInfo->lastKey > pBlock->keyFirst) { @@ -1000,7 +1001,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBl return code; } - SDataCols* pTsCol = pQueryHandle->rhelper.pDataCols[0]; + SDataCols* pTsCol = pQueryHandle->rhelper.pDCols[0]; if (pCheckInfo->lastKey < pBlock->keyLast) { cur->pos = binarySearchForKey(pTsCol->cols[0].pData, pBlock->numOfRows, pCheckInfo->lastKey, pQueryHandle->order); } else { @@ -1085,7 +1086,7 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity char* pData = NULL; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1; - SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; + SDataCols* pCols = pQueryHandle->rhelper.pDCols[0]; TSKEY* tsArray = pCols->cols[0].pData; int32_t num = end - start + 1; @@ -1118,7 +1119,12 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity if (pColInfo->info.colId == src->colId) { - if (pColInfo->info.type != TSDB_DATA_TYPE_BINARY && pColInfo->info.type != TSDB_DATA_TYPE_NCHAR) { + if (pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + for (int32_t n = 0; n < num; n++) { + TKEY tkey = *(TKEY *)((char*)src->pData + bytes * start + n * sizeof(TKEY)); + *(TSKEY *)(pData + n * sizeof(TSKEY)) = tdGetKey(tkey); + } + } else if (pColInfo->info.type != TSDB_DATA_TYPE_BINARY && pColInfo->info.type != TSDB_DATA_TYPE_NCHAR) { memmove(pData, (char*)src->pData + bytes * start, bytes * num); } else { // handle the var-string char* dst = pData; @@ -1177,13 +1183,18 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity } static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SDataRow row, - int32_t numOfCols, STable* pTable) { + int32_t numOfCols, STable* pTable, STSchema* pSchema) { char* pData = NULL; // the schema version info is embeded in SDataRow - STSchema* pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); - int32_t numOfRowCols = schemaNCols(pSchema); - + int32_t numOfRowCols = 0; + if (pSchema == NULL) { + pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + numOfRowCols = schemaNCols(pSchema); + } else { + numOfRowCols = schemaNCols(pSchema); + } + int32_t i = 0, j = 0; while(i < numOfCols && j < numOfRowCols) { SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); @@ -1200,10 +1211,40 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, if (pSchema->columns[j].colId == pColInfo->info.colId) { void* value = tdGetRowDataOfCol(row, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); - if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - memcpy(pData, value, varDataTLen(value)); - } else { - memcpy(pData, value, pColInfo->info.bytes); + switch (pColInfo->info.type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t *)pData = *(uint8_t *)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t *)pData = *(uint16_t *)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t *)pData = *(uint32_t *)value; + 
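/*
 * Editor's aside (kept as a comment so the surrounding switch still parses): timestamp values
 * travel through the row/buffer path as TKEY which, as this patch suggests, carries extra flag
 * bits on top of the raw TSKEY, so they cannot be copied with a plain memcpy the way the other
 * fixed-width types above are. A minimal sketch of the assumed conversion, with a purely
 * hypothetical flag mask (the real definitions live in the data-format headers):
 *
 *   typedef uint64_t TKEY_x;                              // assumed representation
 *   typedef int64_t  TSKEY_x;
 *   #define TKEY_FLAG_BITS_x 0xC000000000000000ULL        // hypothetical flag mask
 *
 *   static TSKEY_x example_tdGetKey(TKEY_x tkey) {
 *     return (TSKEY_x)(tkey & ~TKEY_FLAG_BITS_x);         // strip flags, keep the raw key
 *   }
 *
 * This mirrors the per-element tdGetKey() loop added in doCopyRowsFromFileBlock() earlier in
 * this file.
 */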
break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t *)pData = *(uint64_t *)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + *(TSKEY *)pData = tdGetKey(*(TKEY *)value); + break; + default: + memcpy(pData, value, pColInfo->info.bytes); } j++; @@ -1309,7 +1350,7 @@ static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle) { static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SDataBlockInfo* pBlockInfo, int32_t endPos) { SQueryFilePos* cur = &pQueryHandle->cur; - SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; + SDataCols* pCols = pQueryHandle->rhelper.pDCols[0]; TSKEY* tsArray = pCols->cols[0].pData; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1; @@ -1352,7 +1393,7 @@ int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBl int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order)? TSDB_ORDER_DESC : TSDB_ORDER_ASC; SQueryFilePos* cur = &pQueryHandle->cur; - SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; + SDataCols* pCols = pQueryHandle->rhelper.pDCols[0]; if (ASCENDING_TRAVERSE(pQueryHandle->order) && pQueryHandle->window.ekey >= pBlockInfo->window.ekey) { endPos = pBlockInfo->rows - 1; @@ -1371,14 +1412,14 @@ int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBl // only return the qualified data to client in terms of query time window, data rows in the same block but do not // be included in the query time window will be discarded -static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock) { +static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SBlock* pBlock) { SQueryFilePos* cur = &pQueryHandle->cur; SDataBlockInfo blockInfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock); STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; initTableMemIterator(pQueryHandle, pCheckInfo); - SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0]; + SDataCols* pCols = pQueryHandle->rhelper.pDCols[0]; assert(pCols->cols[0].type == TSDB_DATA_TYPE_TIMESTAMP && pCols->cols[0].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX && cur->pos >= 0 && cur->pos < pBlock->numOfRows); @@ -1402,6 +1443,9 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; + int16_t rv = -1; + STSchema* pSchema = NULL; + int32_t pos = cur->pos; cur->win = TSWINDOW_INITIALIZER; @@ -1430,7 +1474,12 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable); + if (rv != dataRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + rv = dataRowVersion(row); + } + + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -1443,7 +1492,12 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data 
in buffer has the same timestamp of data in file block, ignore it if (pCfg->update) { - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable); + if (rv != dataRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + rv = dataRowVersion(row); + } + + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -1670,7 +1724,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO continue; } - SCompBlock* pBlock = pTableCheck->pCompInfo->blocks; + SBlock* pBlock = pTableCheck->pCompInfo->blocks; sup.numOfBlocksPerTable[numOfQualTables] = pTableCheck->numOfBlocks; char* buf = calloc(1, sizeof(STableBlockInfo) * pTableCheck->numOfBlocks); @@ -1786,19 +1840,19 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist STimeWindow win = TSWINDOW_INITIALIZER; while (true) { - pthread_rwlock_rdlock(&pQueryHandle->pTsdb->tsdbFileH->fhlock); + tsdbRLockFS(REPO_FS(pQueryHandle->pTsdb)); - if ((pQueryHandle->pFileGroup = tsdbGetFileGroupNext(&pQueryHandle->fileIter)) == NULL) { - pthread_rwlock_unlock(&pQueryHandle->pTsdb->tsdbFileH->fhlock); + if ((pQueryHandle->pFileGroup = tsdbFSIterNext(&pQueryHandle->fileIter)) == NULL) { + tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); break; } - tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, pQueryHandle->pFileGroup->fileId, &win.skey, &win.ekey); + tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, pQueryHandle->pFileGroup->fid, &win.skey, &win.ekey); // current file are not overlapped with query time window, ignore remain files if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) { - pthread_rwlock_unlock(&pQueryHandle->pTsdb->tsdbFileH->fhlock); + tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %p", pQueryHandle, pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qinfo); pQueryHandle->pFileGroup = NULL; @@ -1806,15 +1860,15 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist break; } - if (tsdbSetAndOpenHelperFile(&pQueryHandle->rhelper, pQueryHandle->pFileGroup) < 0) { - pthread_rwlock_unlock(&pQueryHandle->pTsdb->tsdbFileH->fhlock); + if (tsdbSetAndOpenReadFSet(&pQueryHandle->rhelper, pQueryHandle->pFileGroup) < 0) { + tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); code = terrno; break; } - pthread_rwlock_unlock(&pQueryHandle->pTsdb->tsdbFileH->fhlock); + tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); - if (tsdbLoadCompIdx(&pQueryHandle->rhelper, NULL) < 0) { + if (tsdbLoadBlockIdx(&pQueryHandle->rhelper) < 0) { code = terrno; break; } @@ -1824,7 +1878,7 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist } tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks, numOfTables, - pQueryHandle->pFileGroup->fileId, pQueryHandle->qinfo); + pQueryHandle->pFileGroup->fid, pQueryHandle->qinfo); assert(numOfBlocks >= 0); if (numOfBlocks == 0) { @@ -1848,14 +1902,14 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist assert(pQueryHandle->pFileGroup == NULL); } - cur->fid = -1; // denote that there are no data in file anymore + cur->fid = INT32_MIN; // denote 
that there are no data in file anymore *exists = false; return code; } assert(pQueryHandle->pFileGroup != NULL && pQueryHandle->numOfBlocks > 0); cur->slot = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:pQueryHandle->numOfBlocks-1; - cur->fid = pQueryHandle->pFileGroup->fileId; + cur->fid = pQueryHandle->pFileGroup->fid; STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot]; return getDataBlockRv(pQueryHandle, pBlockInfo, exists); @@ -1878,7 +1932,7 @@ static void moveToNextDataBlockInCurrentFile(STsdbQueryHandle* pQueryHandle) { } static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists) { - STsdbFileH* pFileHandle = tsdbGetFile(pQueryHandle->pTsdb); + STsdbFS* pFileHandle = REPO_FS(pQueryHandle->pTsdb); SQueryFilePos* cur = &pQueryHandle->cur; // find the start data block in file @@ -1887,10 +1941,10 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists STsdbCfg* pCfg = &pQueryHandle->pTsdb->config; int32_t fid = getFileIdFromKey(pQueryHandle->window.skey, pCfg->daysPerFile, pCfg->precision); - pthread_rwlock_rdlock(&pQueryHandle->pTsdb->tsdbFileH->fhlock); - tsdbInitFileGroupIter(pFileHandle, &pQueryHandle->fileIter, pQueryHandle->order); - tsdbSeekFileGroupIter(&pQueryHandle->fileIter, fid); - pthread_rwlock_unlock(&pQueryHandle->pTsdb->tsdbFileH->fhlock); + tsdbRLockFS(pFileHandle); + tsdbFSIterInit(&pQueryHandle->fileIter, pFileHandle, pQueryHandle->order); + tsdbFSIterSeek(&pQueryHandle->fileIter, fid); + tsdbUnLockFS(pFileHandle); return getFirstFileDataBlock(pQueryHandle, exists); } else { @@ -1988,6 +2042,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int int64_t st = taosGetTimestampUs(); STable* pTable = pCheckInfo->pTableObj; + int16_t rv = -1; + STSchema* pSchema = NULL; do { SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); @@ -2008,7 +2064,11 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } win->ekey = key; - copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable); + if (rv != dataRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + rv = dataRowVersion(row); + } + copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable, pSchema); if (++numOfRows >= maxRowsToRead) { moveToNextRowInMem(pCheckInfo); @@ -2091,7 +2151,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { return false; } - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj); + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL); tfree(pRow); // update the last key value @@ -2165,7 +2225,7 @@ bool tsdbNextDataBlockWithoutMerge(TsdbQueryHandleT* pHandle) { return false; } - copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj); + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL); tfree(pRow); // update the last key value @@ -2445,7 +2505,7 @@ void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle, SDataBlockInfo* p STable* pTable = NULL; // there are data in file - if (pHandle->cur.fid >= 0) { + if (pHandle->cur.fid != INT32_MIN) { STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[cur->slot]; pTable = pBlockInfo->pTableCheckInfo->pTableObj; } else { @@ -2482,7 +2542,7 @@ int32_t 
tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta } int64_t stime = taosGetTimestampUs(); - tsdbLoadCompData(&pHandle->rhelper, pBlockInfo->compBlock, NULL); + tsdbLoadBlockStatis(&pHandle->rhelper, pBlockInfo->compBlock); int16_t* colIds = pHandle->defaultLoadColumn->pData; @@ -2492,7 +2552,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta pHandle->statis[i].colId = colIds[i]; } - tsdbGetDataStatis(&pHandle->rhelper, pHandle->statis, (int)numOfCols); + tsdbGetBlockStatis(&pHandle->rhelper, pHandle->statis, (int)numOfCols); // always load the first primary timestamp column data SDataStatis* pPrimaryColStatis = &pHandle->statis[0]; @@ -2529,7 +2589,7 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) { */ STsdbQueryHandle* pHandle = (STsdbQueryHandle*)pQueryHandle; - if (pHandle->cur.fid < 0) { + if (pHandle->cur.fid == INT32_MIN) { return pHandle->pColumns; } else { STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[pHandle->cur.slot]; @@ -2544,11 +2604,11 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) { // data block has been loaded, todo extract method SDataBlockLoadInfo* pBlockLoadInfo = &pHandle->dataBlockLoadInfo; - if (pBlockLoadInfo->slot == pHandle->cur.slot && pBlockLoadInfo->fileGroup->fileId == pHandle->cur.fid && + if (pBlockLoadInfo->slot == pHandle->cur.slot && pBlockLoadInfo->fileGroup->fid == pHandle->cur.fid && pBlockLoadInfo->tid == pCheckInfo->pTableObj->tableId.tid) { return pHandle->pColumns; } else { // only load the file block - SCompBlock* pBlock = pBlockInfo->compBlock; + SBlock* pBlock = pBlockInfo->compBlock; if (doLoadFileDataBlock(pHandle, pBlock, pCheckInfo, pHandle->cur.slot) != TSDB_CODE_SUCCESS) { return NULL; } @@ -2624,7 +2684,7 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa f1 = (char*) TABLE_NAME(pTable1); f2 = (char*) TABLE_NAME(pTable2); type = TSDB_DATA_TYPE_BINARY; - bytes = tGetTableNameColumnSchema().bytes; + bytes = tGetTbnameColumnSchema()->bytes; } else { STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); bytes = pCol->bytes; @@ -2820,7 +2880,7 @@ static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) return TSDB_CODE_SUCCESS; } -int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, +int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; @@ -2915,7 +2975,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, co return terrno; } -int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) { +int32_t tsdbGetOneTableGroup(STsdbRepo* tsdb, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid); @@ -2945,7 +3005,7 @@ int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY startKey, ST return terrno; } -int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) { +int32_t tsdbGetTableGroupFromIdList(STsdbRepo* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) { if (tsdbRLockRepoMeta(tsdb) < 0) { return terrno; } @@ -3031,7 
+3091,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { // todo check error tsdbMayUnTakeMemSnapshot(pQueryHandle); - tsdbDestroyHelper(&pQueryHandle->rhelper); + tsdbDestroyReadH(&pQueryHandle->rhelper); tdFreeDataCols(pQueryHandle->pDataCols); pQueryHandle->pDataCols = NULL; diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c new file mode 100644 index 0000000000..312f1f9b20 --- /dev/null +++ b/src/tsdb/src/tsdbReadImpl.c @@ -0,0 +1,660 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "tsdbint.h" + +#define TSDB_KEY_COL_OFFSET 0 + +static void tsdbResetReadTable(SReadH *pReadh); +static void tsdbResetReadFile(SReadH *pReadh); +static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols); +static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, void *content, int32_t len, int8_t comp, int numOfRows, + int maxPoints, char *buffer, int bufferSize); +static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols, int16_t *colIds, + int numOfColIds); +static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBlockCol *pBlockCol, SDataCol *pDataCol); + +int tsdbInitReadH(SReadH *pReadh, STsdbRepo *pRepo) { + ASSERT(pReadh != NULL && pRepo != NULL); + + STsdbCfg *pCfg = REPO_CFG(pRepo); + + memset((void *)pReadh, 0, sizeof(*pReadh)); + pReadh->pRepo = pRepo; + + TSDB_FSET_SET_CLOSED(TSDB_READ_FSET(pReadh)); + + pReadh->aBlkIdx = taosArrayInit(1024, sizeof(SBlockIdx)); + if (pReadh->aBlkIdx == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + pReadh->pDCols[0] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + if (pReadh->pDCols[0] == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbDestroyReadH(pReadh); + return -1; + } + + pReadh->pDCols[1] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + if (pReadh->pDCols[1] == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbDestroyReadH(pReadh); + return -1; + } + + return 0; +} + +void tsdbDestroyReadH(SReadH *pReadh) { + if (pReadh == NULL) return; + + pReadh->pCBuf = taosTZfree(pReadh->pCBuf); + pReadh->pBuf = taosTZfree(pReadh->pBuf); + pReadh->pDCols[0] = tdFreeDataCols(pReadh->pDCols[0]); + pReadh->pDCols[1] = tdFreeDataCols(pReadh->pDCols[1]); + pReadh->pBlkData = taosTZfree(pReadh->pBlkData); + pReadh->pBlkInfo = taosTZfree(pReadh->pBlkInfo); + pReadh->cidx = 0; + pReadh->pBlkIdx = NULL; + pReadh->pTable = NULL; + pReadh->aBlkIdx = taosArrayDestroy(pReadh->aBlkIdx); + tsdbCloseDFileSet(TSDB_READ_FSET(pReadh)); + pReadh->pRepo = NULL; +} + +int tsdbSetAndOpenReadFSet(SReadH *pReadh, SDFileSet *pSet) { + ASSERT(pSet != NULL); + tsdbResetReadFile(pReadh); + + pReadh->rSet = *pSet; + TSDB_FSET_SET_CLOSED(TSDB_READ_FSET(pReadh)); + if (tsdbOpenDFileSet(TSDB_READ_FSET(pReadh), O_RDONLY) < 0) { + tsdbError("vgId:%d failed to open file set %d since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FSET_FID(pSet), + tstrerror(terrno)); + return -1; + } + + return 0; +} + +void 
tsdbCloseAndUnsetFSet(SReadH *pReadh) { tsdbResetReadFile(pReadh); } + +int tsdbLoadBlockIdx(SReadH *pReadh) { + SDFile * pHeadf = TSDB_READ_HEAD_FILE(pReadh); + SBlockIdx blkIdx; + + ASSERT(taosArrayGetSize(pReadh->aBlkIdx) == 0); + + // No data at all, just return + if (pHeadf->info.offset <= 0) return 0; + + if (tsdbSeekDFile(pHeadf, pHeadf->info.offset, SEEK_SET) < 0) { + tsdbError("vgId:%d failed to load SBlockIdx part while seek file %s since %s, offset:%u len :%u", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pHeadf->info.offset, + pHeadf->info.len); + return -1; + } + + if (tsdbMakeRoom((void **)(&TSDB_READ_BUF(pReadh)), pHeadf->info.len) < 0) return -1; + + int64_t nread = tsdbReadDFile(pHeadf, TSDB_READ_BUF(pReadh), pHeadf->info.len); + if (nread < 0) { + tsdbError("vgId:%d failed to load SBlockIdx part while read file %s since %s, offset:%u len :%u", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pHeadf->info.offset, + pHeadf->info.len); + return -1; + } + + if (nread < pHeadf->info.len) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d SBlockIdx part in file %s is corrupted, offset:%u expected bytes:%u read bytes: %" PRId64, + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pHeadf->info.offset, pHeadf->info.len, nread); + return -1; + } + + if (!taosCheckChecksumWhole((uint8_t *)TSDB_READ_BUF(pReadh), pHeadf->info.len)) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d SBlockIdx part in file %s is corrupted since wrong checksum, offset:%u len :%u", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pHeadf->info.offset, pHeadf->info.len); + return -1; + } + + void *ptr = TSDB_READ_BUF(pReadh); + int tsize = 0; + while (POINTER_DISTANCE(ptr, TSDB_READ_BUF(pReadh)) < (pHeadf->info.len - sizeof(TSCKSUM))) { + ptr = tsdbDecodeSBlockIdx(ptr, &blkIdx); + ASSERT(ptr != NULL); + + if (taosArrayPush(pReadh->aBlkIdx, (void *)(&blkIdx)) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + tsize++; + ASSERT(tsize == 1 || ((SBlockIdx *)taosArrayGet(pReadh->aBlkIdx, tsize - 2))->tid < + ((SBlockIdx *)taosArrayGet(pReadh->aBlkIdx, tsize - 1))->tid); + } + + return 0; +} + +int tsdbSetReadTable(SReadH *pReadh, STable *pTable) { + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + + pReadh->pTable = pTable; + + if (tdInitDataCols(pReadh->pDCols[0], pSchema) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + if (tdInitDataCols(pReadh->pDCols[1], pSchema) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + size_t size = taosArrayGetSize(pReadh->aBlkIdx); + if (size > 0) { + while (true) { + if (pReadh->cidx >= size) { + pReadh->pBlkIdx = NULL; + break; + } + + SBlockIdx *pBlkIdx = taosArrayGet(pReadh->aBlkIdx, pReadh->cidx); + if (pBlkIdx->tid == TABLE_TID(pTable)) { + if (pBlkIdx->uid == TABLE_UID(pTable)) { + pReadh->pBlkIdx = pBlkIdx; + } else { + pReadh->pBlkIdx = NULL; + } + pReadh->cidx++; + break; + } else if (pBlkIdx->tid > TABLE_TID(pTable)) { + pReadh->pBlkIdx = NULL; + break; + } else { + pReadh->cidx++; + } + } + } else { + pReadh->pBlkIdx = NULL; + } + + return 0; +} + +int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) { + ASSERT(pReadh->pBlkIdx != NULL); + + SDFile * pHeadf = TSDB_READ_HEAD_FILE(pReadh); + SBlockIdx *pBlkIdx = pReadh->pBlkIdx; + + if (tsdbSeekDFile(pHeadf, pBlkIdx->offset, SEEK_SET) < 0) { + tsdbError("vgId:%d failed to load SBlockInfo part while seek file %s since %s, offset:%u len:%u", + 
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pBlkIdx->offset, pBlkIdx->len); + return -1; + } + + if (tsdbMakeRoom((void **)(&(pReadh->pBlkInfo)), pBlkIdx->len) < 0) return -1; + + int64_t nread = tsdbReadDFile(pHeadf, (void *)(pReadh->pBlkInfo), pBlkIdx->len); + if (nread < 0) { + tsdbError("vgId:%d failed to load SBlockInfo part while read file %s since %s, offset:%u len :%u", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pBlkIdx->offset, pBlkIdx->len); + return -1; + } + + if (nread < pBlkIdx->len) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d SBlockInfo part in file %s is corrupted, offset:%u expected bytes:%u read bytes:%" PRId64, + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pBlkIdx->offset, pBlkIdx->len, nread); + return -1; + } + + if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pBlkInfo), pBlkIdx->len)) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d SBlockInfo part in file %s is corrupted since wrong checksum, offset:%u len :%u", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pBlkIdx->offset, pBlkIdx->len); + return -1; + } + + ASSERT(pBlkIdx->tid == pReadh->pBlkInfo->tid && pBlkIdx->uid == pReadh->pBlkInfo->uid); + + if (pTarget) { + memcpy(pTarget, (void *)(pReadh->pBlkInfo), pBlkIdx->len); + } + + return 0; +} + +int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { + ASSERT(pBlock->numOfSubBlocks > 0); + + SBlock *iBlock = pBlock; + if (pBlock->numOfSubBlocks > 1) { + if (pBlkInfo) { + iBlock = (SBlock *)POINTER_SHIFT(pBlkInfo, pBlock->offset); + } else { + iBlock = (SBlock *)POINTER_SHIFT(pReadh->pBlkInfo, pBlock->offset); + } + } + + if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[0]) < 0) return -1; + for (int i = 1; i < pBlock->numOfSubBlocks; i++) { + iBlock++; + if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1; + } + + ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); + ASSERT(dataColsKeyFirst(pReadh->pDCols[0]) == pBlock->keyFirst); + ASSERT(dataColsKeyLast(pReadh->pDCols[0]) == pBlock->keyLast); + + return 0; +} + +int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, int16_t *colIds, int numOfColsIds) { + ASSERT(pBlock->numOfSubBlocks > 0); + + SBlock *iBlock = pBlock; + if (pBlock->numOfSubBlocks > 1) { + if (pBlkInfo) { + iBlock = POINTER_SHIFT(pBlkInfo, pBlock->offset); + } else { + iBlock = POINTER_SHIFT(pReadh->pBlkInfo, pBlock->offset); + } + } + + if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[0], colIds, numOfColsIds) < 0) return -1; + for (int i = 1; i < pBlock->numOfSubBlocks; i++) { + iBlock++; + if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1; + } + + ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); + ASSERT(dataColsKeyFirst(pReadh->pDCols[0]) == pBlock->keyFirst); + ASSERT(dataColsKeyLast(pReadh->pDCols[0]) == pBlock->keyLast); + + return 0; +} + +int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) { + ASSERT(pBlock->numOfSubBlocks <= 1); + + SDFile *pDFile = (pBlock->last) ? 
TSDB_READ_LAST_FILE(pReadh) : TSDB_READ_DATA_FILE(pReadh); + + if (tsdbSeekDFile(pDFile, pBlock->offset, SEEK_SET) < 0) { + tsdbError("vgId:%d failed to load block statis part while seek file %s to offset %" PRId64 " since %s", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tstrerror(terrno)); + return -1; + } + + size_t size = TSDB_BLOCK_STATIS_SIZE(pBlock->numOfCols); + if (tsdbMakeRoom((void **)(&(pReadh->pBlkData)), size) < 0) return -1; + + int64_t nread = tsdbReadDFile(pDFile, (void *)(pReadh->pBlkData), size); + if (nread < 0) { + tsdbError("vgId:%d failed to load block statis part while read file %s since %s, offset:%" PRId64 " len :%" PRIzu, + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), (int64_t)pBlock->offset, size); + return -1; + } + + if (nread < size) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d block statis part in file %s is corrupted, offset:%" PRId64 " expected bytes:%" PRIzu + " read bytes: %" PRId64, + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, size, nread); + return -1; + } + + if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pBlkData), (uint32_t)size)) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d block statis part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%" PRIzu, + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, size); + return -1; + } + + return 0; +} + +int tsdbEncodeSBlockIdx(void **buf, SBlockIdx *pIdx) { + int tlen = 0; + + tlen += taosEncodeVariantI32(buf, pIdx->tid); + tlen += taosEncodeVariantU32(buf, pIdx->len); + tlen += taosEncodeVariantU32(buf, pIdx->offset); + tlen += taosEncodeFixedU8(buf, pIdx->hasLast); + tlen += taosEncodeVariantU32(buf, pIdx->numOfBlocks); + tlen += taosEncodeFixedU64(buf, pIdx->uid); + tlen += taosEncodeFixedU64(buf, pIdx->maxKey); + + return tlen; +} + +void *tsdbDecodeSBlockIdx(void *buf, SBlockIdx *pIdx) { + uint8_t hasLast = 0; + uint32_t numOfBlocks = 0; + uint64_t value = 0; + + if ((buf = taosDecodeVariantI32(buf, &(pIdx->tid))) == NULL) return NULL; + if ((buf = taosDecodeVariantU32(buf, &(pIdx->len))) == NULL) return NULL; + if ((buf = taosDecodeVariantU32(buf, &(pIdx->offset))) == NULL) return NULL; + if ((buf = taosDecodeFixedU8(buf, &(hasLast))) == NULL) return NULL; + pIdx->hasLast = hasLast; + if ((buf = taosDecodeVariantU32(buf, &(numOfBlocks))) == NULL) return NULL; + pIdx->numOfBlocks = numOfBlocks; + if ((buf = taosDecodeFixedU64(buf, &value)) == NULL) return NULL; + pIdx->uid = (int64_t)value; + if ((buf = taosDecodeFixedU64(buf, &value)) == NULL) return NULL; + pIdx->maxKey = (TSKEY)value; + + return buf; +} + +void tsdbGetBlockStatis(SReadH *pReadh, SDataStatis *pStatis, int numOfCols) { + SBlockData *pBlockData = pReadh->pBlkData; + + for (int i = 0, j = 0; i < numOfCols;) { + if (j >= pBlockData->numOfCols) { + pStatis[i].numOfNull = -1; + i++; + continue; + } + + if (pStatis[i].colId == pBlockData->cols[j].colId) { + pStatis[i].sum = pBlockData->cols[j].sum; + pStatis[i].max = pBlockData->cols[j].max; + pStatis[i].min = pBlockData->cols[j].min; + pStatis[i].maxIndex = pBlockData->cols[j].maxIndex; + pStatis[i].minIndex = pBlockData->cols[j].minIndex; + pStatis[i].numOfNull = pBlockData->cols[j].numOfNull; + i++; + j++; + } else if (pStatis[i].colId < pBlockData->cols[j].colId) { + pStatis[i].numOfNull = -1; + i++; + } else { + j++; + } + } +} + +static void tsdbResetReadTable(SReadH *pReadh) { + 
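/*
 * Editor's aside (kept as a comment so tsdbResetReadTable() still parses): the
 * tsdbEncodeSBlockIdx()/tsdbDecodeSBlockIdx() pair above is symmetric, so a round trip should
 * be loss-free. A minimal sketch, assuming the taosEncode* helpers follow the convention used
 * elsewhere in this patch (see tsdbSendMetaInfo): encoding into a NULL buffer only measures the
 * size, and the decoder returns the pointer advanced past the consumed bytes.
 * example_blockidx_roundtrip() is hypothetical and not part of the patch.
 *
 *   #include <assert.h>
 *   #include <stdlib.h>
 *
 *   static void example_blockidx_roundtrip(const SBlockIdx *pIn) {
 *     SBlockIdx out = {0};
 *     int   tlen = tsdbEncodeSBlockIdx(NULL, (SBlockIdx *)pIn);  // size-only pass (assumed)
 *     void *buf  = calloc(1, tlen);
 *     void *ptr  = buf;
 *     tsdbEncodeSBlockIdx(&ptr, (SBlockIdx *)pIn);               // write pass
 *     void *end  = tsdbDecodeSBlockIdx(buf, &out);               // read back
 *     assert(end != NULL && out.tid == pIn->tid && out.uid == pIn->uid);
 *     free(buf);
 *   }
 */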
tdResetDataCols(pReadh->pDCols[0]); + tdResetDataCols(pReadh->pDCols[1]); + pReadh->cidx = 0; + pReadh->pBlkIdx = NULL; + pReadh->pTable = NULL; +} + +static void tsdbResetReadFile(SReadH *pReadh) { + tsdbResetReadTable(pReadh); + taosArrayClear(pReadh->aBlkIdx); + tsdbCloseDFileSet(TSDB_READ_FSET(pReadh)); +} + +static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols) { + ASSERT(pBlock->numOfSubBlocks == 0 || pBlock->numOfSubBlocks == 1); + + SDFile *pDFile = (pBlock->last) ? TSDB_READ_LAST_FILE(pReadh) : TSDB_READ_DATA_FILE(pReadh); + + tdResetDataCols(pDataCols); + if (tsdbMakeRoom((void **)(&TSDB_READ_BUF(pReadh)), pBlock->len) < 0) return -1; + + SBlockData *pBlockData = (SBlockData *)TSDB_READ_BUF(pReadh); + + if (tsdbSeekDFile(pDFile, pBlock->offset, SEEK_SET) < 0) { + tsdbError("vgId:%d failed to load block data part while seek file %s to offset %" PRId64 " since %s", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tstrerror(terrno)); + return -1; + } + + int64_t nread = tsdbReadDFile(pDFile, TSDB_READ_BUF(pReadh), pBlock->len); + if (nread < 0) { + tsdbError("vgId:%d failed to load block data part while read file %s since %s, offset:%" PRId64 " len :%d", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), (int64_t)pBlock->offset, + pBlock->len); + return -1; + } + + if (nread < pBlock->len) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d block data part in file %s is corrupted, offset:%" PRId64 + " expected bytes:%d read bytes: %" PRId64, + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, pBlock->len, nread); + return -1; + } + + int32_t tsize = TSDB_BLOCK_STATIS_SIZE(pBlock->numOfCols); + if (!taosCheckChecksumWhole((uint8_t *)TSDB_READ_BUF(pReadh), tsize)) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d block statis part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%d", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tsize); + return -1; + } + + ASSERT(tsize < pBlock->len); + ASSERT(pBlockData->numOfCols == pBlock->numOfCols); + + pDataCols->numOfRows = pBlock->numOfRows; + + // Recover the data + int ccol = 0; // loop iter for SBlockCol object + int dcol = 0; // loop iter for SDataCols object + while (dcol < pDataCols->numOfCols) { + SDataCol *pDataCol = &(pDataCols->cols[dcol]); + if (dcol != 0 && ccol >= pBlockData->numOfCols) { + // Set current column as NULL and forward + dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + dcol++; + continue; + } + + int16_t tcolId = 0; + int32_t toffset = TSDB_KEY_COL_OFFSET; + int32_t tlen = pBlock->keyLen; + + if (dcol != 0) { + SBlockCol *pBlockCol = &(pBlockData->cols[ccol]); + tcolId = pBlockCol->colId; + toffset = pBlockCol->offset; + tlen = pBlockCol->len; + } else { + ASSERT(pDataCol->colId == tcolId); + } + + if (tcolId == pDataCol->colId) { + if (pBlock->algorithm == TWO_STAGE_COMP) { + int zsize = pDataCol->bytes * pBlock->numOfRows + COMP_OVERFLOW_BYTES; + if (tsdbMakeRoom((void **)(&TSDB_READ_COMP_BUF(pReadh)), zsize) < 0) return -1; + } + + if (tsdbCheckAndDecodeColumnData(pDataCol, POINTER_SHIFT(pBlockData, tsize + toffset), tlen, pBlock->algorithm, + pBlock->numOfRows, pDataCols->maxPoints, TSDB_READ_COMP_BUF(pReadh), + (int)taosTSizeof(TSDB_READ_COMP_BUF(pReadh))) < 0) { + tsdbError("vgId:%d file %s is broken at column %d block offset %" PRId64 " column offset %d", + 
TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tcolId, (int64_t)pBlock->offset, toffset); + return -1; + } + + if (dcol != 0) { + ccol++; + } + dcol++; + } else if (tcolId < pDataCol->colId) { + ccol++; + } else { + // Set current column as NULL and forward + dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + dcol++; + } + } + + return 0; +} + +static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, void *content, int32_t len, int8_t comp, int numOfRows, + int maxPoints, char *buffer, int bufferSize) { + if (!taosCheckChecksumWhole((uint8_t *)content, len)) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + return -1; + } + + // Decode the data + if (comp) { + // Need to decompress + int tlen = (*(tDataTypes[pDataCol->type].decompFunc))(content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData, + pDataCol->spaceSize, comp, buffer, bufferSize); + if (tlen <= 0) { + tsdbError("Failed to decompress column, file corrupted, len:%d comp:%d numOfRows:%d maxPoints:%d bufferSize:%d", + len, comp, numOfRows, maxPoints, bufferSize); + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + return -1; + } + pDataCol->len = tlen; + } else { + // No need to decompress, just memcpy it + pDataCol->len = len - sizeof(TSCKSUM); + memcpy(pDataCol->pData, content, pDataCol->len); + } + + if (IS_VAR_DATA_TYPE(pDataCol->type)) { + dataColSetOffset(pDataCol, numOfRows); + } + return 0; +} + +static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols, int16_t *colIds, + int numOfColIds) { + ASSERT(pBlock->numOfSubBlocks == 0 || pBlock->numOfSubBlocks == 1); + ASSERT(colIds[0] == 0); + + SDFile * pDFile = (pBlock->last) ? TSDB_READ_LAST_FILE(pReadh) : TSDB_READ_DATA_FILE(pReadh); + SBlockCol blockCol = {0}; + + tdResetDataCols(pDataCols); + + // If only load timestamp column, no need to load SBlockData part + if (numOfColIds > 1 && tsdbLoadBlockStatis(pReadh, pBlock) < 0) return -1; + + pDataCols->numOfRows = pBlock->numOfRows; + + int dcol = 0; + int ccol = 0; + for (int i = 0; i < numOfColIds; i++) { + int16_t colId = colIds[i]; + SDataCol * pDataCol = NULL; + SBlockCol *pBlockCol = NULL; + + while (true) { + if (dcol >= pDataCols->numOfCols) { + pDataCol = NULL; + break; + } + pDataCol = &pDataCols->cols[dcol]; + if (pDataCol->colId > colId) { + pDataCol = NULL; + break; + } else { + dcol++; + if (pDataCol->colId == colId) break; + } + } + + if (pDataCol == NULL) continue; + ASSERT(pDataCol->colId == colId); + + if (colId == 0) { // load the key row + blockCol.colId = colId; + blockCol.len = pBlock->keyLen; + blockCol.type = pDataCol->type; + blockCol.offset = TSDB_KEY_COL_OFFSET; + pBlockCol = &blockCol; + } else { // load non-key rows + while (true) { + if (ccol >= pBlock->numOfCols) { + pBlockCol = NULL; + break; + } + + pBlockCol = &(pReadh->pBlkData->cols[ccol]); + if (pBlockCol->colId > colId) { + pBlockCol = NULL; + break; + } else { + ccol++; + if (pBlockCol->colId == colId) break; + } + } + + if (pBlockCol == NULL) { + dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + continue; + } + + ASSERT(pBlockCol->colId == pDataCol->colId); + } + + if (tsdbLoadColData(pReadh, pDFile, pBlock, pBlockCol, pDataCol) < 0) return -1; + } + + return 0; +} + +static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBlockCol *pBlockCol, SDataCol *pDataCol) { + ASSERT(pDataCol->colId == pBlockCol->colId); + + STsdbRepo *pRepo = TSDB_READ_REPO(pReadh); + STsdbCfg * pCfg = REPO_CFG(pRepo); + int tsize = pDataCol->bytes * pBlock->numOfRows 
+ COMP_OVERFLOW_BYTES; + + if (tsdbMakeRoom((void **)(&TSDB_READ_BUF(pReadh)), pBlockCol->len) < 0) return -1; + if (tsdbMakeRoom((void **)(&TSDB_READ_COMP_BUF(pReadh)), tsize) < 0) return -1; + + int64_t offset = pBlock->offset + TSDB_BLOCK_STATIS_SIZE(pBlock->numOfCols) + pBlockCol->offset; + if (tsdbSeekDFile(pDFile, offset, SEEK_SET) < 0) { + tsdbError("vgId:%d failed to load block column data while seek file %s to offset %" PRId64 " since %s", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), offset, tstrerror(terrno)); + return -1; + } + + int64_t nread = tsdbReadDFile(pDFile, TSDB_READ_BUF(pReadh), pBlockCol->len); + if (nread < 0) { + tsdbError("vgId:%d failed to load block column data while read file %s since %s, offset:%" PRId64 " len :%d", + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), offset, pBlockCol->len); + return -1; + } + + if (nread < pBlockCol->len) { + terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + tsdbError("vgId:%d block column data in file %s is corrupted, offset:%" PRId64 " expected bytes:%d" PRIzu + " read bytes: %" PRId64, + TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), offset, pBlockCol->len, nread); + return -1; + } + + if (tsdbCheckAndDecodeColumnData(pDataCol, pReadh->pBuf, pBlockCol->len, pBlock->algorithm, pBlock->numOfRows, + pCfg->maxRowsPerFileBlock, pReadh->pCBuf, (int32_t)taosTSizeof(pReadh->pCBuf)) < 0) { + tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), + pBlockCol->colId, offset); + return -1; + } + + return 0; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbScan.c b/src/tsdb/src/tsdbScan.c index 91f6787874..382f7b11ae 100644 --- a/src/tsdb/src/tsdbScan.c +++ b/src/tsdb/src/tsdbScan.c @@ -13,8 +13,9 @@ * along with this program. If not, see . */ -#include "tsdbMain.h" +#include "tsdbint.h" +#if 0 #ifndef _TSDB_PLUGINS int tsdbScanFGroup(STsdbScanHandle* pScanHandle, char* rootDir, int fid) { return 0; } @@ -25,12 +26,13 @@ void tsdbSetScanLogStream(STsdbScanHandle* pScanHandle, FILE* fLogStream) {} int tsdbSetAndOpenScanFile(STsdbScanHandle* pScanHandle, char* rootDir, int fid) { return 0; } -int tsdbScanSCompIdx(STsdbScanHandle* pScanHandle) { return 0; } +int tsdbScanSBlockIdx(STsdbScanHandle* pScanHandle) { return 0; } -int tsdbScanSCompBlock(STsdbScanHandle* pScanHandle, int idx) { return 0; } +int tsdbScanSBlock(STsdbScanHandle* pScanHandle, int idx) { return 0; } int tsdbCloseScanFile(STsdbScanHandle* pScanHandle) { return 0; } void tsdbFreeScanHandle(STsdbScanHandle* pScanHandle) {} +#endif #endif \ No newline at end of file diff --git a/src/tsdb/src/tsdbSync.c b/src/tsdb/src/tsdbSync.c new file mode 100644 index 0000000000..6b8483e4a6 --- /dev/null +++ b/src/tsdb/src/tsdbSync.c @@ -0,0 +1,699 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "tsdbint.h" + +// Sync handle +typedef struct { + STsdbRepo *pRepo; + SRtn rtn; + SOCKET socketFd; + void * pBuf; + bool mfChanged; + SMFile * pmf; + SMFile mf; + SDFileSet df; + SDFileSet *pdf; +} SSyncH; + +#define SYNC_BUFFER(sh) ((sh)->pBuf) + +static void tsdbInitSyncH(SSyncH *pSyncH, STsdbRepo *pRepo, SOCKET socketFd); +static void tsdbDestroySyncH(SSyncH *pSyncH); +static int32_t tsdbSyncSendMeta(SSyncH *pSynch); +static int32_t tsdbSyncRecvMeta(SSyncH *pSynch); +static int32_t tsdbSendMetaInfo(SSyncH *pSynch); +static int32_t tsdbRecvMetaInfo(SSyncH *pSynch); +static int32_t tsdbSendDecision(SSyncH *pSynch, bool toSend); +static int32_t tsdbRecvDecision(SSyncH *pSynch, bool *toSend); +static int32_t tsdbSyncSendDFileSetArray(SSyncH *pSynch); +static int32_t tsdbSyncRecvDFileSetArray(SSyncH *pSynch); +static bool tsdbIsTowFSetSame(SDFileSet *pSet1, SDFileSet *pSet2); +static int32_t tsdbSyncSendDFileSet(SSyncH *pSynch, SDFileSet *pSet); +static int32_t tsdbSendDFileSetInfo(SSyncH *pSynch, SDFileSet *pSet); +static int32_t tsdbRecvDFileSetInfo(SSyncH *pSynch); +static int tsdbReload(STsdbRepo *pRepo, bool isMfChanged); + +int32_t tsdbSyncSend(void *tsdb, SOCKET socketFd) { + STsdbRepo *pRepo = (STsdbRepo *)tsdb; + SSyncH synch = {0}; + + tsdbInitSyncH(&synch, pRepo, socketFd); + // Disable TSDB commit + tsem_wait(&(pRepo->readyToCommit)); + + if (tsdbSyncSendMeta(&synch) < 0) { + tsdbError("vgId:%d, failed to send metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + if (tsdbSyncSendDFileSetArray(&synch) < 0) { + tsdbError("vgId:%d, failed to send filesets since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + // Enable TSDB commit + tsem_post(&(pRepo->readyToCommit)); + tsdbDestroySyncH(&synch); + return 0; + +_err: + tsem_post(&(pRepo->readyToCommit)); + tsdbDestroySyncH(&synch); + return -1; +} + +int32_t tsdbSyncRecv(void *tsdb, SOCKET socketFd) { + STsdbRepo *pRepo = (STsdbRepo *)tsdb; + SSyncH synch = {0}; + + pRepo->state = TSDB_STATE_OK; + + tsdbInitSyncH(&synch, pRepo, socketFd); + tsem_wait(&(pRepo->readyToCommit)); + tsdbStartFSTxn(pRepo, 0, 0); + + if (tsdbSyncRecvMeta(&synch) < 0) { + tsdbError("vgId:%d, failed to recv metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + if (tsdbSyncRecvDFileSetArray(&synch) < 0) { + tsdbError("vgId:%d, failed to recv filesets since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + tsdbEndFSTxn(pRepo); + tsem_post(&(pRepo->readyToCommit)); + tsdbDestroySyncH(&synch); + + // Reload file change + tsdbReload(pRepo, synch.mfChanged); + + return 0; + +_err: + tsdbEndFSTxnWithError(REPO_FS(pRepo)); + tsem_post(&(pRepo->readyToCommit)); + tsdbDestroySyncH(&synch); + return -1; +} + +static void tsdbInitSyncH(SSyncH *pSyncH, STsdbRepo *pRepo, SOCKET socketFd) { + pSyncH->pRepo = pRepo; + pSyncH->socketFd = socketFd; + tsdbGetRtnSnap(pRepo, &(pSyncH->rtn)); +} + +static void tsdbDestroySyncH(SSyncH *pSyncH) { taosTZfree(pSyncH->pBuf); } + +static int32_t tsdbSyncSendMeta(SSyncH *pSynch) { + STsdbRepo *pRepo = pSynch->pRepo; + bool toSendMeta = false; + SMFile mf; + + // Send meta info to remote + tsdbInfo("vgId:%d, metainfo will be sent", REPO_ID(pRepo)); + if (tsdbSendMetaInfo(pSynch) < 0) { + tsdbError("vgId:%d, failed to send metainfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + if (pRepo->fs->cstatus->pmf == NULL) { + // No meta file, not need to wait to retrieve meta 
file + tsdbInfo("vgId:%d, metafile not exist, no need to send", REPO_ID(pRepo)); + return 0; + } + + if (tsdbRecvDecision(pSynch, &toSendMeta) < 0) { + tsdbError("vgId:%d, failed to recv decision while send meta since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + if (toSendMeta) { + tsdbInitMFileEx(&mf, pRepo->fs->cstatus->pmf); + if (tsdbOpenMFile(&mf, O_RDONLY) < 0) { + tsdbError("vgId:%d, failed to open file while send metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + int32_t writeLen = (int32_t)mf.info.size; + tsdbInfo("vgId:%d, metafile:%s will be sent, size:%d", REPO_ID(pRepo), mf.f.aname, writeLen); + + int32_t ret = (int32_t)taosSendFile(pSynch->socketFd, TSDB_FILE_FD(&mf), 0, writeLen); + if (ret != writeLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to send metafile since %s, ret:%d writeLen:%d", REPO_ID(pRepo), tstrerror(terrno), ret, + writeLen); + tsdbCloseMFile(&mf); + return -1; + } + + tsdbCloseMFile(&mf); + tsdbInfo("vgId:%d, metafile is sent", REPO_ID(pRepo)); + } else { + tsdbInfo("vgId:%d, metafile is same, no need to send", REPO_ID(pRepo)); + } + + return 0; +} + +static int32_t tsdbSyncRecvMeta(SSyncH *pSynch) { + STsdbRepo *pRepo = pSynch->pRepo; + SMFile * pLMFile = pRepo->fs->cstatus->pmf; + + // Recv meta info from remote + if (tsdbRecvMetaInfo(pSynch) < 0) { + tsdbError("vgId:%d, failed to recv metainfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + // No meta file, do nothing (rm local meta file) + if (pSynch->pmf == NULL) { + if (pLMFile == NULL) { + pSynch->mfChanged = false; + } else { + pSynch->mfChanged = true; + } + tsdbInfo("vgId:%d, metafile not exist in remote, no need to recv", REPO_ID(pRepo)); + return 0; + } + + if (pLMFile == NULL || memcmp(&(pSynch->pmf->info), &(pLMFile->info), sizeof(SMFInfo)) != 0 || + TSDB_FILE_IS_BAD(pLMFile)) { + // Local has no meta file or has a different meta file, need to copy from remote + pSynch->mfChanged = true; + + if (tsdbSendDecision(pSynch, true) < 0) { + tsdbError("vgId:%d, failed to send decision while recv metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + tsdbInfo("vgId:%d, metafile will be received", REPO_ID(pRepo)); + + // Recv from remote + SMFile mf; + SDiskID did = {.level = TFS_PRIMARY_LEVEL, .id = TFS_PRIMARY_ID}; + tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo))); + if (tsdbCreateMFile(&mf, false) < 0) { + tsdbError("vgId:%d, failed to create file while recv metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + tsdbInfo("vgId:%d, metafile:%s is created", REPO_ID(pRepo), mf.f.aname); + + int32_t readLen = (int32_t)pSynch->pmf->info.size; + int32_t ret = taosCopyFds(pSynch->socketFd, TSDB_FILE_FD(&mf), readLen); + if (ret != readLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to recv metafile since %s, ret:%d readLen:%d", REPO_ID(pRepo), tstrerror(terrno), ret, + readLen); + tsdbCloseMFile(&mf); + tsdbRemoveMFile(&mf); + return -1; + } + + tsdbInfo("vgId:%d, metafile is received, size:%d", REPO_ID(pRepo), readLen); + + mf.info = pSynch->pmf->info; + tsdbCloseMFile(&mf); + tsdbUpdateMFile(REPO_FS(pRepo), &mf); + } else { + pSynch->mfChanged = false; + tsdbInfo("vgId:%d, metafile is same, no need to recv", REPO_ID(pRepo)); + if (tsdbSendDecision(pSynch, false) < 0) { + tsdbError("vgId:%d, failed to send decision while recv metafile since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + 
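/*
 * Editor's aside (kept as a comment so tsdbSyncRecvMeta() still parses): the sync code in this
 * file is a two-phase negotiation per file. The sender ships only the file's metadata, the
 * receiver compares it with its local copy and answers with a one-byte decision, and the raw
 * bytes are streamed (taosSendFile/taosCopyFds) only when the receiver asked for them. A
 * condensed sketch of the receiver side; example_negotiate_one_file() is hypothetical glue
 * around the real tsdbSendDecision()/taosCopyFds() calls shown in this patch.
 *
 *   static int example_negotiate_one_file(SSyncH *pSynch, bool localIsSame,
 *                                         int destFd, int32_t remoteSize) {
 *     bool wantCopy = !localIsSame;
 *     if (tsdbSendDecision(pSynch, wantCopy) < 0) return -1;   // tell the sender what we want
 *     if (!wantCopy) return 0;                                 // keep the local file as-is
 *     // the sender now streams exactly remoteSize bytes of the file body
 *     if (taosCopyFds(pSynch->socketFd, destFd, remoteSize) != remoteSize) return -1;
 *     return 0;
 *   }
 */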
tsdbUpdateMFile(REPO_FS(pRepo), pLMFile); + } + + return 0; +} + +static int32_t tsdbSendMetaInfo(SSyncH *pSynch) { + STsdbRepo *pRepo = pSynch->pRepo; + uint32_t tlen = 0; + SMFile * pMFile = pRepo->fs->cstatus->pmf; + + if (pMFile) { + tlen = tlen + tsdbEncodeSMFileEx(NULL, pMFile) + sizeof(TSCKSUM); + } + + if (tsdbMakeRoom((void **)(&SYNC_BUFFER(pSynch)), tlen + sizeof(tlen)) < 0) { + tsdbError("vgId:%d, failed to makeroom while send metainfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + void *ptr = SYNC_BUFFER(pSynch); + taosEncodeFixedU32(&ptr, tlen); + void *tptr = ptr; + if (pMFile) { + tsdbEncodeSMFileEx(&ptr, pMFile); + taosCalcChecksumAppend(0, (uint8_t *)tptr, tlen); + } + + int32_t writeLen = tlen + sizeof(uint32_t); + int32_t ret = taosWriteMsg(pSynch->socketFd, SYNC_BUFFER(pSynch), writeLen); + if (ret != writeLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to send metainfo since %s, ret:%d writeLen:%d", REPO_ID(pRepo), tstrerror(terrno), ret, + writeLen); + return -1; + } + + tsdbInfo("vgId:%d, metainfo is sent, tlen:%d, writeLen:%d", REPO_ID(pRepo), tlen, writeLen); + return 0; +} + +static int32_t tsdbRecvMetaInfo(SSyncH *pSynch) { + STsdbRepo *pRepo = pSynch->pRepo; + uint32_t tlen = 0; + char buf[64] = {0}; + + int32_t readLen = sizeof(uint32_t); + int32_t ret = taosReadMsg(pSynch->socketFd, buf, readLen); + if (ret != readLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to recv metalen, ret:%d readLen:%d", REPO_ID(pRepo), ret, readLen); + return -1; + } + + taosDecodeFixedU32(buf, &tlen); + + tsdbInfo("vgId:%d, metalen is received, readLen:%d, tlen:%d", REPO_ID(pRepo), readLen, tlen); + if (tlen == 0) { + pSynch->pmf = NULL; + return 0; + } + + if (tsdbMakeRoom((void **)(&SYNC_BUFFER(pSynch)), tlen) < 0) { + tsdbError("vgId:%d, failed to makeroom while recv metainfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + ret = taosReadMsg(pSynch->socketFd, SYNC_BUFFER(pSynch), tlen); + if (ret != tlen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to recv metainfo, ret:%d tlen:%d", REPO_ID(pRepo), ret, tlen); + return -1; + } + + tsdbInfo("vgId:%d, metainfo is received, tlen:%d", REPO_ID(pRepo), tlen); + if (!taosCheckChecksumWhole((uint8_t *)SYNC_BUFFER(pSynch), tlen)) { + terrno = TSDB_CODE_TDB_MESSED_MSG; + tsdbError("vgId:%d, failed to checksum while recv metainfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + pSynch->pmf = &(pSynch->mf); + tsdbDecodeSMFileEx(SYNC_BUFFER(pSynch), pSynch->pmf); + + return 0; +} + +static int32_t tsdbSendDecision(SSyncH *pSynch, bool toSend) { + STsdbRepo *pRepo = pSynch->pRepo; + uint8_t decision = toSend; + + int32_t writeLen = sizeof(uint8_t); + int32_t ret = taosWriteMsg(pSynch->socketFd, (void *)(&decision), writeLen); + if (ret != writeLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to send decison, ret:%d writeLen:%d", REPO_ID(pRepo), ret, writeLen); + return -1; + } + + return 0; +} + +static int32_t tsdbRecvDecision(SSyncH *pSynch, bool *toSend) { + STsdbRepo *pRepo = pSynch->pRepo; + uint8_t decision = 0; + + int32_t readLen = sizeof(uint8_t); + int32_t ret = taosReadMsg(pSynch->socketFd, (void *)(&decision), readLen); + if (ret != readLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to recv decison, ret:%d readLen:%d", REPO_ID(pRepo), ret, readLen); + return -1; + } + + *toSend = decision; + return 0; +} + +static int32_t 
tsdbSyncSendDFileSetArray(SSyncH *pSynch) { + STsdbRepo *pRepo = pSynch->pRepo; + STsdbFS * pfs = REPO_FS(pRepo); + SFSIter fsiter; + SDFileSet *pSet; + + tsdbFSIterInit(&fsiter, pfs, TSDB_FS_ITER_FORWARD); + + do { + pSet = tsdbFSIterNext(&fsiter); + if (tsdbSyncSendDFileSet(pSynch, pSet) < 0) { + tsdbError("vgId:%d, failed to send fileset:%d since %s", REPO_ID(pRepo), pSet ? pSet->fid : -1, + tstrerror(terrno)); + return -1; + } + + // No more file sets to send, just break + if (pSet == NULL) { + tsdbInfo("vgId:%d, no filesets any more", REPO_ID(pRepo)); + break; + } + } while (true); + + return 0; +} + +static int32_t tsdbSyncRecvDFileSetArray(SSyncH *pSynch) { + STsdbRepo *pRepo = pSynch->pRepo; + STsdbFS * pfs = REPO_FS(pRepo); + SFSIter fsiter; + SDFileSet *pLSet; // Local file set + + tsdbFSIterInit(&fsiter, pfs, TSDB_FS_ITER_FORWARD); + + pLSet = tsdbFSIterNext(&fsiter); + if (tsdbRecvDFileSetInfo(pSynch) < 0) { + tsdbError("vgId:%d, failed to recv fileset since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + while (true) { + if (pLSet == NULL && pSynch->pdf == NULL) { + tsdbInfo("vgId:%d, all filesets are disposed", REPO_ID(pRepo)); + break; + } else { + tsdbInfo("vgId:%d, fileset local:%d remote:%d, will be disposed", REPO_ID(pRepo), pLSet != NULL ? pLSet->fid : -1, + pSynch->pdf != NULL ? pSynch->pdf->fid : -1); + } + + if (pLSet && (pSynch->pdf == NULL || pLSet->fid < pSynch->pdf->fid)) { + // remote does not have this fileset, just drop the local one (nothing to request from remote) + tsdbInfo("vgId:%d, fileset:%d smaller than remote:%d, remove it", REPO_ID(pRepo), pLSet->fid, + pSynch->pdf != NULL ? pSynch->pdf->fid : -1); + pLSet = tsdbFSIterNext(&fsiter); + } else { + if (pLSet && pSynch->pdf && pLSet->fid == pSynch->pdf->fid && tsdbIsTowFSetSame(pLSet, pSynch->pdf) && + tsdbFSetIsOk(pLSet)) { + // Just keep local files and notify remote not to send + tsdbInfo("vgId:%d, fileset:%d is same and no need to recv", REPO_ID(pRepo), pLSet->fid); + + if (tsdbUpdateDFileSet(pfs, pLSet) < 0) { + tsdbError("vgId:%d, failed to update fileset since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + if (tsdbSendDecision(pSynch, false) < 0) { + tsdbError("vgId:%d, failed to send decision since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + } else { + // Need to copy from remote + tsdbInfo("vgId:%d, fileset:%d will be received", REPO_ID(pRepo), pSynch->pdf->fid); + + // Notify remote to send its files here + if (tsdbSendDecision(pSynch, true) < 0) { + tsdbError("vgId:%d, failed to send decision since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + // Create local files and copy from remote + SDiskID did; + SDFileSet fset; + + tfsAllocDisk(tsdbGetFidLevel(pSynch->pdf->fid, &(pSynch->rtn)), &(did.level), &(did.id)); + if (did.level == TFS_UNDECIDED_LEVEL) { + terrno = TSDB_CODE_TDB_NO_AVAIL_DISK; + tsdbError("vgId:%d, failed to allocate disk since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + tsdbInitDFileSet(&fset, did, REPO_ID(pRepo), pSynch->pdf->fid, FS_TXN_VERSION(pfs)); + + // Create new FSET + if (tsdbCreateDFileSet(&fset, false) < 0) { + tsdbError("vgId:%d, failed to create fileset since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile *pDFile = TSDB_DFILE_IN_SET(&fset, ftype); // local file + SDFile *pRDFile = TSDB_DFILE_IN_SET(pSynch->pdf, ftype); // remote file + + tsdbInfo("vgId:%d, file:%s will be received, osize:%" PRIu64 " rsize:%" PRIu64,
REPO_ID(pRepo), + pDFile->f.aname, pDFile->info.size, pRDFile->info.size); + + int32_t writeLen = (int32_t)pRDFile->info.size; + int32_t ret = taosCopyFds(pSynch->socketFd, pDFile->fd, writeLen); + if (ret != writeLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to recv file:%s since %s, ret:%d writeLen:%d", REPO_ID(pRepo), pDFile->f.aname, + tstrerror(terrno), ret, writeLen); + tsdbCloseDFileSet(&fset); + tsdbRemoveDFileSet(&fset); + return -1; + } + + // Update new file info + pDFile->info = pRDFile->info; + tsdbInfo("vgId:%d, file:%s is received, size:%d", REPO_ID(pRepo), pDFile->f.aname, writeLen); + } + + tsdbCloseDFileSet(&fset); + if (tsdbUpdateDFileSet(pfs, &fset) < 0) { + tsdbInfo("vgId:%d, fileset:%d failed to update since %s", REPO_ID(pRepo), fset.fid, tstrerror(terrno)); + return -1; + } + + tsdbInfo("vgId:%d, fileset:%d is received", REPO_ID(pRepo), pSynch->pdf->fid); + } + + // Move forward + if (tsdbRecvDFileSetInfo(pSynch) < 0) { + tsdbError("vgId:%d, failed to recv fileset since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + if (pLSet) { + pLSet = tsdbFSIterNext(&fsiter); + } + } + +#if 0 + if (pLSet == NULL) { + // Copy from remote >>>>>>>>>>> + } else { + if (pSynch->pdf == NULL) { + // Remove local file, just ignore ++++++++++++++ + pLSet = tsdbFSIterNext(&fsiter); + } else { + if (pLSet->fid < pSynch->pdf->fid) { + // Remove local file, just ignore ++++++++++++ + pLSet = tsdbFSIterNext(&fsiter); + } else if (pLSet->fid > pSynch->pdf->fid){ + // Copy from remote >>>>>>>>>>>>>> + if (tsdbRecvDFileSetInfo(pSynch) < 0) { + // TODO + return -1; + } + } else { + if (true/*TODO: is same fset*/) { + // No need to copy --------------------- + } else { + // copy from remote >>>>>>>>>>>>>. + } + } + } + } +#endif + } + + return 0; +} + +static bool tsdbIsTowFSetSame(SDFileSet *pSet1, SDFileSet *pSet2) { + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile *pDFile1 = TSDB_DFILE_IN_SET(pSet1, ftype); + SDFile *pDFile2 = TSDB_DFILE_IN_SET(pSet2, ftype); + + if (memcmp((void *)(TSDB_FILE_INFO(pDFile1)), (void *)(TSDB_FILE_INFO(pDFile2)), sizeof(SDFInfo)) != 0) { + return false; + } + } + + return true; +} + +static int32_t tsdbSyncSendDFileSet(SSyncH *pSynch, SDFileSet *pSet) { + STsdbRepo *pRepo = pSynch->pRepo; + bool toSend = false; + + if (tsdbSendDFileSetInfo(pSynch, pSet) < 0) { + tsdbError("vgId:%d, failed to send fileset:%d info since %s", REPO_ID(pRepo), pSet ? 
pSet->fid : -1, tstrerror(terrno)); + return -1; + } + + // No file any more, no need to send file, just return + if (pSet == NULL) { + return 0; + } + + if (tsdbRecvDecision(pSynch, &toSend) < 0) { + tsdbError("vgId:%d, failed to recv decision while send fileset:%d since %s", REPO_ID(pRepo), pSet->fid, + tstrerror(terrno)); + return -1; + } + + if (toSend) { + tsdbInfo("vgId:%d, fileset:%d will be sent", REPO_ID(pRepo), pSet->fid); + + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile df = *TSDB_DFILE_IN_SET(pSet, ftype); + + if (tsdbOpenDFile(&df, O_RDONLY) < 0) { + tsdbError("vgId:%d, failed to file:%s since %s", REPO_ID(pRepo), df.f.aname, tstrerror(terrno)); + return -1; + } + + int32_t writeLen = (int32_t)df.info.size; + tsdbInfo("vgId:%d, file:%s will be sent, size:%d", REPO_ID(pRepo), df.f.aname, writeLen); + + int32_t ret = (int32_t)taosSendFile(pSynch->socketFd, TSDB_FILE_FD(&df), 0, writeLen); + if (ret != writeLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to send file:%s since %s, ret:%d writeLen:%d", REPO_ID(pRepo), df.f.aname, + tstrerror(terrno), ret, writeLen); + tsdbCloseDFile(&df); + return -1; + } + + tsdbInfo("vgId:%d, file:%s is sent", REPO_ID(pRepo), df.f.aname); + tsdbCloseDFile(&df); + } + + tsdbInfo("vgId:%d, fileset:%d is sent", REPO_ID(pRepo), pSet->fid); + } else { + tsdbInfo("vgId:%d, fileset:%d is same, no need to send", REPO_ID(pRepo), pSet->fid); + } + + return 0; +} + +static int32_t tsdbSendDFileSetInfo(SSyncH *pSynch, SDFileSet *pSet) { + STsdbRepo *pRepo = pSynch->pRepo; + uint32_t tlen = 0; + + if (pSet) { + tlen = tsdbEncodeDFileSetEx(NULL, pSet) + sizeof(TSCKSUM); + } + + if (tsdbMakeRoom((void **)(&SYNC_BUFFER(pSynch)), tlen + sizeof(tlen)) < 0) { + tsdbError("vgId:%d, failed to makeroom while send fileinfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + void *ptr = SYNC_BUFFER(pSynch); + taosEncodeFixedU32(&ptr, tlen); + void *tptr = ptr; + if (pSet) { + tsdbEncodeDFileSetEx(&ptr, pSet); + taosCalcChecksumAppend(0, (uint8_t *)tptr, tlen); + } + + int32_t writeLen = tlen + sizeof(uint32_t); + int32_t ret = taosWriteMsg(pSynch->socketFd, SYNC_BUFFER(pSynch), writeLen); + if (ret != writeLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to send fileinfo, ret:%d writeLen:%d", REPO_ID(pRepo), ret, writeLen); + return -1; + } + + return 0; +} + +static int32_t tsdbRecvDFileSetInfo(SSyncH *pSynch) { + STsdbRepo *pRepo = pSynch->pRepo; + uint32_t tlen; + char buf[64] = {0}; + + int32_t readLen = sizeof(uint32_t); + int32_t ret = taosReadMsg(pSynch->socketFd, buf, readLen); + if (ret != readLen) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + taosDecodeFixedU32(buf, &tlen); + + tsdbInfo("vgId:%d, fileinfo len:%d is received", REPO_ID(pRepo), tlen); + if (tlen == 0) { + pSynch->pdf = NULL; + return 0; + } + + if (tsdbMakeRoom((void **)(&SYNC_BUFFER(pSynch)), tlen) < 0) { + tsdbError("vgId:%d, failed to makeroom while recv fileinfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + ret = taosReadMsg(pSynch->socketFd, SYNC_BUFFER(pSynch), tlen); + if (ret != tlen) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbError("vgId:%d, failed to recv fileinfo, ret:%d readLen:%d", REPO_ID(pRepo), ret, tlen); + return -1; + } + + if (!taosCheckChecksumWhole((uint8_t *)SYNC_BUFFER(pSynch), tlen)) { + terrno = TSDB_CODE_TDB_MESSED_MSG; + tsdbError("vgId:%d, failed to checksum while recv fileinfo since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } 
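+  // Wire format of every *Info message: a uint32 length prefix, then the encoded struct
+  // followed by a TSCKSUM (a zero length means "nothing more to send"). The checksum was
+  // verified above, so decode the remote SDFileSet into pSynch->df and expose it via pSynch->pdf.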
+ + pSynch->pdf = &(pSynch->df); + tsdbDecodeDFileSetEx(SYNC_BUFFER(pSynch), pSynch->pdf); + + return 0; +} + +static int tsdbReload(STsdbRepo *pRepo, bool isMfChanged) { + // TODO: may need to stop and restart stream + if (isMfChanged) { + tsdbCloseMeta(pRepo); + tsdbFreeMeta(pRepo->tsdbMeta); + pRepo->tsdbMeta = tsdbNewMeta(REPO_CFG(pRepo)); + tsdbOpenMeta(pRepo); + tsdbLoadMetaCache(pRepo, true); + } + + tsdbUnRefMemTable(pRepo, pRepo->mem); + tsdbUnRefMemTable(pRepo, pRepo->imem); + pRepo->mem = NULL; + pRepo->imem = NULL; + + if (tsdbRestoreInfo(pRepo) < 0) { + tsdbError("vgId:%d failed to restore info from file since %s", REPO_ID(pRepo), tstrerror(terrno)); + return -1; + } + + return 0; +} \ No newline at end of file diff --git a/src/tsdb/tests/tsdbTests.cpp b/src/tsdb/tests/tsdbTests.cpp index ef5ed6f044..ac254d6c34 100644 --- a/src/tsdb/tests/tsdbTests.cpp +++ b/src/tsdb/tests/tsdbTests.cpp @@ -12,7 +12,7 @@ static double getCurTime() { } typedef struct { - TSDB_REPO_T *pRepo; + STsdbRepo *pRepo; bool isAscend; int tid; uint64_t uid; @@ -143,7 +143,7 @@ TEST(TsdbTest, testInsertSpeed) { // Create and open repository tsdbSetCfg(&tsdbCfg, 1, 16, 4, -1, -1, -1, -1, -1, -1, -1); tsdbCreateRepo(rootDir, &tsdbCfg); - TSDB_REPO_T *repo = tsdbOpenRepo(rootDir, NULL); + STsdbRepo *repo = tsdbOpenRepo(rootDir, NULL); ASSERT_NE(repo, nullptr); // Create table diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 92e030ad81..a7f4f59e07 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -1,11 +1,12 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/sync/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/rmonotonic/inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tutil ${SRC}) -TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4 z) +TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4 z rmonotonic) IF (TD_LINUX) TARGET_LINK_LIBRARIES(tutil m rt) @@ -32,3 +33,7 @@ ELSEIF(TD_DARWIN) TARGET_LINK_LIBRARIES(tutil m) TARGET_LINK_LIBRARIES(tutil iconv) ENDIF() + +IF (TD_STORAGE) + TARGET_LINK_LIBRARIES(tutil storage) +ENDIF () \ No newline at end of file diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index 35053c278e..63e62a54c2 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -21,9 +21,11 @@ extern "C" { #endif #include "os.h" +#include "talgo.h" #define TARRAY_MIN_SIZE 8 #define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize)) +#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize) typedef struct SArray { size_t size; @@ -44,9 +46,20 @@ void* taosArrayInit(size_t size, size_t elemSize); * * @param pArray * @param pData + * @param nEles * @return */ -void* taosArrayPush(SArray* pArray, void* pData); +void *taosArrayPushBatch(SArray *pArray, const void *pData, int nEles); + +/** + * + * @param pArray + * @param pData + * @return + */ +static FORCE_INLINE void* taosArrayPush(SArray* pArray, const void* pData) { + return taosArrayPushBatch(pArray, pData, 1); +} /** * @@ -92,6 +105,14 @@ size_t taosArrayGetSize(const SArray* pArray); */ void* taosArrayInsert(SArray* pArray, size_t index, void* pData); +/** + * set data in array + * @param pArray + * @param index + * @param pData + */ +void taosArraySet(SArray* pArray, size_t index, void* pData); + /** * remove data entry of the given index * @param pArray @@ -104,7 +125,7 @@ 
void taosArrayRemove(SArray* pArray, size_t index); * @param pDst * @param pSrc */ -void taosArrayCopy(SArray* pDst, const SArray* pSrc); +SArray* taosArrayFromList(const void* src, size_t size, size_t elemSize); /** * clone a new array @@ -122,7 +143,7 @@ void taosArrayClear(SArray* pArray); * destroy array list * @param pArray */ -void taosArrayDestroy(SArray* pArray); +void* taosArrayDestroy(SArray* pArray); /** * @@ -136,7 +157,7 @@ void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)); * @param pArray * @param compar */ -void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*)); +void taosArraySort(SArray* pArray, __compar_fn_t comparFn); /** * sort string array @@ -150,14 +171,14 @@ void taosArraySortString(SArray* pArray, __compar_fn_t comparFn); * @param compar * @param key */ -void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn); +void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn, int flags); /** * search the array * @param pArray * @param key */ -char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn); +char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int flags); #ifdef __cplusplus } diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index bc1da9858a..9923409885 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -48,6 +48,7 @@ enum { TAOS_CFG_VTYPE_STRING, TAOS_CFG_VTYPE_IPSTR, TAOS_CFG_VTYPE_DIRECTORY, + TAOS_CFG_VTYPE_DATA_DIRCTORY, }; enum { diff --git a/src/util/inc/tkvstore.h b/src/util/inc/tkvstore.h deleted file mode 100644 index b2b0ff05f5..0000000000 --- a/src/util/inc/tkvstore.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ -#ifndef _TD_KVSTORE_H_ -#define _TD_KVSTORE_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#define KVSTORE_FILE_VERSION ((uint32_t)0) - -typedef int (*iterFunc)(void *, void *cont, int contLen); -typedef void (*afterFunc)(void *); - -typedef struct { - int64_t size; // including 512 bytes of header size - int64_t tombSize; - int64_t nRecords; - int64_t nDels; - uint32_t magic; -} SStoreInfo; - -typedef struct { - char * fname; - int fd; - char * fsnap; - int sfd; - char * fnew; - int nfd; - SHashObj * map; - iterFunc iFunc; - afterFunc aFunc; - void * appH; - SStoreInfo info; -} SKVStore; - -#define KVSTORE_MAGIC(s) (s)->info.magic - -int tdCreateKVStore(char *fname); -int tdDestroyKVStore(char *fname); -SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH); -void tdCloseKVStore(SKVStore *pStore); -int tdKVStoreStartCommit(SKVStore *pStore); -int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLen); -int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid); -int tdKVStoreEndCommit(SKVStore *pStore); -void tsdbGetStoreInfo(char *fname, uint32_t *magic, int64_t *size); - -#ifdef __cplusplus -} -#endif - -#endif \ No newline at end of file diff --git a/src/util/inc/tlist.h b/src/util/inc/tlist.h index e8380294da..6c96ec0b13 100644 --- a/src/util/inc/tlist.h +++ b/src/util/inc/tlist.h @@ -47,7 +47,7 @@ typedef struct { #define listNodeFree(n) free(n); SList * tdListNew(int eleSize); -void tdListFree(SList *list); +void * tdListFree(SList *list); void tdListEmpty(SList *list); void tdListPrependNode(SList *list, SListNode *node); void tdListAppendNode(SList *list, SListNode *node); diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 45cb6eee0f..4dde5dbba2 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -55,24 +55,29 @@ static int32_t taosArrayResize(SArray* pArray) { return 0; } -void* taosArrayPush(SArray* pArray, void* pData) { +void* taosArrayPushBatch(SArray* pArray, const void* pData, int nEles) { if (pArray == NULL || pData == NULL) { return NULL; } - if (pArray->size >= pArray->capacity) { - int32_t ret = taosArrayResize(pArray); - - // failed to push data into buffer due to the failure of memory allocation - if (ret != 0) { + if (pArray->size + nEles > pArray->capacity) { + size_t tsize = (pArray->capacity << 1u); + while (pArray->size + nEles > tsize) { + tsize = (tsize << 1u); + } + + pArray->pData = realloc(pArray->pData, tsize * pArray->elemSize); + if (pArray->pData == NULL) { return NULL; } + + pArray->capacity = tsize; } void* dst = TARRAY_GET_ELEM(pArray, pArray->size); - memcpy(dst, pData, pArray->elemSize); + memcpy(dst, pData, pArray->elemSize * nEles); - pArray->size += 1; + pArray->size += nEles; return dst; } @@ -133,6 +138,11 @@ void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { return dst; } +void taosArraySet(SArray* pArray, size_t index, void* pData) { + assert(index < pArray->size); + memcpy(TARRAY_GET_ELEM(pArray, index), pData, pArray->elemSize); +} + void taosArrayRemove(SArray* pArray, size_t index) { assert(index < pArray->size); @@ -146,23 +156,14 @@ void taosArrayRemove(SArray* pArray, size_t index) { pArray->size -= 1; } -void taosArrayCopy(SArray* pDst, const SArray* pSrc) { - assert(pSrc != NULL && pDst != NULL); - - if (pDst->capacity < pSrc->size) { - void* pData = realloc(pDst->pData, pSrc->size * pSrc->elemSize); - if (pData == NULL) { // todo handle oom - - } else { - pDst->pData = pData; - pDst->capacity = pSrc->size; - } - } - - 
memcpy(pDst->pData, pSrc->pData, pSrc->elemSize * pSrc->size); - pDst->elemSize = pSrc->elemSize; - pDst->capacity = pSrc->size; - pDst->size = pSrc->size; +SArray* taosArrayFromList(const void* src, size_t size, size_t elemSize) { + assert(src != NULL && elemSize > 0); + SArray* pDst = taosArrayInit(size, elemSize); + + memcpy(pDst->pData, src, elemSize * size); + pDst->size = size; + + return pDst; } SArray* taosArrayDup(const SArray* pSrc) { @@ -184,13 +185,13 @@ void taosArrayClear(SArray* pArray) { pArray->size = 0; } -void taosArrayDestroy(SArray* pArray) { - if (pArray == NULL) { - return; +void* taosArrayDestroy(SArray* pArray) { + if (pArray) { + free(pArray->pData); + free(pArray); } - free(pArray->pData); - free(pArray); + return NULL; } void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) { @@ -210,18 +211,18 @@ void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) { taosArrayDestroy(pArray); } -void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*)) { +void taosArraySort(SArray* pArray, __compar_fn_t compar) { assert(pArray != NULL); assert(compar != NULL); qsort(pArray->pData, pArray->size, pArray->elemSize, compar); } -void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn) { +void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn, int flags) { assert(pArray != NULL && comparFn != NULL); assert(key != NULL); - return bsearch(key, pArray->pData, pArray->size, pArray->elemSize, comparFn); + return taosbsearch(key, pArray->pData, pArray->size, pArray->elemSize, comparFn, flags); } void taosArraySortString(SArray* pArray, __compar_fn_t comparFn) { @@ -229,11 +230,11 @@ void taosArraySortString(SArray* pArray, __compar_fn_t comparFn) { qsort(pArray->pData, pArray->size, pArray->elemSize, comparFn); } -char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn) { +char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int flags) { assert(pArray != NULL); assert(key != NULL); - void* p = bsearch(&key, pArray->pData, pArray->size, pArray->elemSize, comparFn); + void* p = taosbsearch(&key, pArray->pData, pArray->size, pArray->elemSize, comparFn, flags); if (p == NULL) { return NULL; } diff --git a/src/util/src/tbuffer.c b/src/util/src/tbuffer.c index 240f744ea3..a2cb32c1f4 100644 --- a/src/util/src/tbuffer.c +++ b/src/util/src/tbuffer.c @@ -191,7 +191,8 @@ double tbufReadDouble(SBufferReader* buf) { // writer functions void tbufCloseWriter( SBufferWriter* buf ) { - (*buf->allocator)( buf->data, 0 ); + tfree(buf->data); +// (*buf->allocator)( buf->data, 0 ); // potential memory leak. buf->data = NULL; buf->pos = 0; buf->size = 0; diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 01e61987c6..b0d4ecd075 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -277,7 +277,7 @@ int32_t taosArrayCompareString(const void* a, const void* b) { static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) { const SArray* arr = (const SArray*) pRight; - return taosArraySearchString(arr, pLeft, taosArrayCompareString) == NULL ? 0 : 1; + return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 
0 : 1; } static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 0a9f5a98c0..7a92750f8f 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -20,7 +20,7 @@ #include "tconfig.h" #include "tglobal.h" #include "tulog.h" -#include "tsystem.h" +#include "tsocket.h" #include "tutil.h" SGlobalCfg tsGlobalConfig[TSDB_CFG_MAX_NUM] = {{0}}; @@ -112,32 +112,39 @@ static void taosReadInt8Config(SGlobalCfg *cfg, char *input_value) { } } -static void taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { +static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { int length = (int)strlen(input_value); char *option = (char *)cfg->ptr; if (length <= 0 || length > cfg->ptrLength) { - uError("config option:%s, input value:%s, length out of range[0, %d], use default value:%s", - cfg->option, input_value, cfg->ptrLength, option); + uError("config option:%s, input value:%s, length out of range[0, %d], use default value:%s", cfg->option, + input_value, cfg->ptrLength, option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { wordexp_t full_path; if (0 != wordexp(input_value, &full_path, 0)) { printf("\nconfig dir: %s wordexp fail! reason:%s\n", input_value, strerror(errno)); wordfree(&full_path); - return; + return false; } - + if (full_path.we_wordv != NULL && full_path.we_wordv[0] != NULL) { strcpy(option, full_path.we_wordv[0]); } - + wordfree(&full_path); + char tmp[1025] = {0}; + if (realpath(option, tmp) != NULL) { + strcpy(option, tmp); + } + int code = taosMkDir(option, 0755); if (code != 0) { terrno = TAOS_SYSTEM_ERROR(errno); - uError("config option:%s, input value:%s, directory not exist, create fail:%s", - cfg->option, input_value, strerror(errno)); + uError("config option:%s, input value:%s, directory not exist, create fail:%s", cfg->option, input_value, + strerror(errno)); + return false; } cfg->cfgStatus = TAOS_CFG_CSTATUS_FILE; } else { @@ -145,6 +152,8 @@ static void taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { tsCfgStatusStr[cfg->cfgStatus], option); } } + + return true; } static void taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) { @@ -214,7 +223,7 @@ SGlobalCfg *taosGetConfigOption(const char *option) { return NULL; } -static void taosReadConfigOption(const char *option, char *value) { +static void taosReadConfigOption(const char *option, char *value, char *value2, char *value3) { for (int i = 0; i < tsGlobalConfigNum; ++i) { SGlobalCfg *cfg = tsGlobalConfig + i; if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_CONFIG)) continue; @@ -242,6 +251,11 @@ static void taosReadConfigOption(const char *option, char *value) { case TAOS_CFG_VTYPE_DIRECTORY: taosReadDirectoryConfig(cfg, value); break; + case TAOS_CFG_VTYPE_DATA_DIRCTORY: + if (taosReadDirectoryConfig(cfg, value)) { + taosReadDataDirCfg(value, value2, value3); + } + break; default: uError("config option:%s, input value:%s, can't be recognized", option, value); break; @@ -322,8 +336,8 @@ void taosReadGlobalLogCfg() { } bool taosReadGlobalCfg() { - char * line, *option, *value, *value1; - int olen, vlen, vlen1; + char * line, *option, *value, *value2, *value3; + int olen, vlen, vlen2, vlen3; char fileName[PATH_MAX] = {0}; sprintf(fileName, "%s/taos.cfg", configDir); @@ -346,8 +360,8 @@ bool taosReadGlobalCfg() { while (!feof(fp)) { memset(line, 0, len); - option = value = NULL; - olen = vlen = 0; + option = value = value2 = value3 = NULL; + olen = vlen = vlen2 = vlen3 = 0; 
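+      // A config line may now carry up to three value tokens (dataDir entries, for example,
+      // take extra fields); read the line and collect every token before dispatching the option.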
tgetline(&line, &len, fp); line[len - 1] = 0; @@ -360,11 +374,14 @@ bool taosReadGlobalCfg() { if (vlen == 0) continue; value[vlen] = 0; - // For dataDir, the format is: - // dataDir /mnt/disk1 0 - paGetToken(value + vlen + 1, &value1, &vlen1); - - taosReadConfigOption(option, value); + paGetToken(value + vlen + 1, &value2, &vlen2); + if (vlen2 != 0) { + value2[vlen2] = 0; + paGetToken(value2 + vlen2 + 1, &value3, &vlen3); + if (vlen3 != 0) value3[vlen3] = 0; + } + + taosReadConfigOption(option, value, value2, value3); } fclose(fp); @@ -419,6 +436,8 @@ void taosPrintGlobalCfg() { } taosPrintOsInfo(); + taosPrintDataDirCfg(); + uInfo("=================================="); } static void taosDumpCfg(SGlobalCfg *cfg) { diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c deleted file mode 100644 index 0abba410b0..0000000000 --- a/src/util/src/tkvstore.c +++ /dev/null @@ -1,621 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#define _DEFAULT_SOURCE -#define TAOS_RANDOM_FILE_FAIL_TEST -#include "os.h" -#include "hash.h" -#include "taoserror.h" -#include "tchecksum.h" -#include "tcoding.h" -#include "tkvstore.h" -#include "tulog.h" - -#define TD_KVSTORE_HEADER_SIZE 512 -#define TD_KVSTORE_MAJOR_VERSION 1 -#define TD_KVSTORE_MAINOR_VERSION 0 -#define TD_KVSTORE_SNAP_SUFFIX ".snap" -#define TD_KVSTORE_NEW_SUFFIX ".new" -#define TD_KVSTORE_INIT_MAGIC 0xFFFFFFFF - -typedef struct { - uint64_t uid; - int64_t offset; - int64_t size; -} SKVRecord; - -static int tdInitKVStoreHeader(int fd, char *fname); -static int tdEncodeStoreInfo(void **buf, SStoreInfo *pInfo); -static void * tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo); -static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH); -static char * tdGetKVStoreSnapshotFname(char *fdata); -static char * tdGetKVStoreNewFname(char *fdata); -static void tdFreeKVStore(SKVStore *pStore); -static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo); -static int tdLoadKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo, uint32_t *version); -static int tdEncodeKVRecord(void **buf, SKVRecord *pRecord); -static void * tdDecodeKVRecord(void *buf, SKVRecord *pRecord); -static int tdRestoreKVStore(SKVStore *pStore); - -int tdCreateKVStore(char *fname) { - int fd = open(fname, O_RDWR | O_CREAT | O_BINARY, 0755); - if (fd < 0) { - uError("failed to open file %s since %s", fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (tdInitKVStoreHeader(fd, fname) < 0) goto _err; - - if (fsync(fd) < 0) { - uError("failed to fsync file %s since %s", fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (close(fd) < 0) { - uError("failed to close file %s since %s", fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - return 0; - -_err: - if (fd >= 0) close(fd); - (void)remove(fname); - return -1; -} - -int tdDestroyKVStore(char *fname) { - if (remove(fname) < 0) { - uError("failed to 
remove file %s since %s", fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - return 0; -} - -SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH) { - SStoreInfo info = {0}; - uint32_t version = 0; - - SKVStore *pStore = tdNewKVStore(fname, iFunc, aFunc, appH); - if (pStore == NULL) return NULL; - - pStore->fd = open(pStore->fname, O_RDWR | O_BINARY); - if (pStore->fd < 0) { - uError("failed to open file %s since %s", pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - pStore->sfd = open(pStore->fsnap, O_RDONLY | O_BINARY); - if (pStore->sfd < 0) { - if (errno != ENOENT) { - uError("failed to open file %s since %s", pStore->fsnap, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - } else { - uDebug("file %s exists, try to recover the KV store", pStore->fsnap); - if (tdLoadKVStoreHeader(pStore->sfd, pStore->fsnap, &info, &version) < 0) { - if (terrno != TSDB_CODE_COM_FILE_CORRUPTED) goto _err; - } else { - if (version != KVSTORE_FILE_VERSION) { - uError("file %s version %u is not the same as program version %u, this may cause problem", pStore->fsnap, - version, KVSTORE_FILE_VERSION); - } - - if (taosFtruncate(pStore->fd, info.size) < 0) { - uError("failed to truncate %s to %" PRId64 " size since %s", pStore->fname, info.size, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (tdUpdateKVStoreHeader(pStore->fd, pStore->fname, &info) < 0) goto _err; - if (fsync(pStore->fd) < 0) { - uError("failed to fsync file %s since %s", pStore->fname, strerror(errno)); - goto _err; - } - } - - close(pStore->sfd); - pStore->sfd = -1; - (void)remove(pStore->fsnap); - } - - if (tdLoadKVStoreHeader(pStore->fd, pStore->fname, &info, &version) < 0) goto _err; - if (version != KVSTORE_FILE_VERSION) { - uError("file %s version %u is not the same as program version %u, this may cause problem", pStore->fname, version, - KVSTORE_FILE_VERSION); - } - - pStore->info.size = TD_KVSTORE_HEADER_SIZE; - pStore->info.magic = info.magic; - - if (tdRestoreKVStore(pStore) < 0) goto _err; - - close(pStore->fd); - pStore->fd = -1; - - return pStore; - -_err: - if (pStore->fd > 0) { - close(pStore->fd); - pStore->fd = -1; - } - if (pStore->sfd > 0) { - close(pStore->sfd); - pStore->sfd = -1; - } - tdFreeKVStore(pStore); - return NULL; -} - -void tdCloseKVStore(SKVStore *pStore) { tdFreeKVStore(pStore); } - -int tdKVStoreStartCommit(SKVStore *pStore) { - ASSERT(pStore->fd < 0); - - pStore->fd = open(pStore->fname, O_RDWR | O_BINARY); - if (pStore->fd < 0) { - uError("failed to open file %s since %s", pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - pStore->sfd = open(pStore->fsnap, O_WRONLY | O_CREAT | O_BINARY, 0755); - if (pStore->sfd < 0) { - uError("failed to open file %s since %s", pStore->fsnap, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (taosSendFile(pStore->sfd, pStore->fd, NULL, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { - uError("failed to send file %d bytes since %s", TD_KVSTORE_HEADER_SIZE, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (fsync(pStore->sfd) < 0) { - uError("failed to fsync file %s since %s", pStore->fsnap, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (close(pStore->sfd) < 0) { - uError("failed to close file %s since %s", pStore->fsnap, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } 
- pStore->sfd = -1; - - if (lseek(pStore->fd, 0, SEEK_END) < 0) { - uError("failed to lseek file %s since %s", pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - ASSERT(pStore->info.size == lseek(pStore->fd, 0, SEEK_CUR)); - - return 0; - -_err: - if (pStore->sfd > 0) { - close(pStore->sfd); - pStore->sfd = -1; - (void)remove(pStore->fsnap); - } - if (pStore->fd > 0) { - close(pStore->fd); - pStore->fd = -1; - } - return -1; -} - -int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLen) { - SKVRecord rInfo = {0}; - char buf[64] = "\0"; - char * pBuf = buf; - - rInfo.offset = lseek(pStore->fd, 0, SEEK_CUR); - if (rInfo.offset < 0) { - uError("failed to lseek file %s since %s", pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - rInfo.uid = uid; - rInfo.size = contLen; - - int tlen = tdEncodeKVRecord((void *)(&pBuf), &rInfo); - ASSERT(tlen == POINTER_DISTANCE(pBuf, buf)); - ASSERT(tlen == sizeof(SKVRecord)); - - if (taosWrite(pStore->fd, buf, tlen) < tlen) { - uError("failed to write %d bytes to file %s since %s", tlen, pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (taosWrite(pStore->fd, cont, contLen) < contLen) { - uError("failed to write %d bytes to file %s since %s", contLen, pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - pStore->info.magic = - taosCalcChecksum(pStore->info.magic, (uint8_t *)POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)), sizeof(TSCKSUM)); - pStore->info.size += (sizeof(SKVRecord) + contLen); - SKVRecord *pRecord = taosHashGet(pStore->map, (void *)&uid, sizeof(uid)); - if (pRecord != NULL) { // just to insert - pStore->info.tombSize += pRecord->size; - } else { - pStore->info.nRecords++; - } - - taosHashPut(pStore->map, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo)); - uTrace("put uid %" PRIu64 " into kvStore %s", uid, pStore->fname); - - return 0; -} - -int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) { - SKVRecord rInfo = {0}; - char buf[128] = "\0"; - - SKVRecord *pRecord = taosHashGet(pStore->map, (void *)(&uid), sizeof(uid)); - if (pRecord == NULL) { - uError("failed to drop KV store record with key %" PRIu64 " since not find", uid); - return -1; - } - - rInfo.offset = -pRecord->offset; - rInfo.uid = pRecord->uid; - rInfo.size = pRecord->size; - - void *pBuf = buf; - tdEncodeKVRecord(&pBuf, &rInfo); - - if (taosWrite(pStore->fd, buf, POINTER_DISTANCE(pBuf, buf)) < POINTER_DISTANCE(pBuf, buf)) { - uError("failed to write %" PRId64 " bytes to file %s since %s", (int64_t)(POINTER_DISTANCE(pBuf, buf)), pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - pStore->info.magic = taosCalcChecksum(pStore->info.magic, (uint8_t *)buf, (uint32_t)POINTER_DISTANCE(pBuf, buf)); - pStore->info.size += POINTER_DISTANCE(pBuf, buf); - pStore->info.nDels++; - pStore->info.nRecords--; - pStore->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2); - - taosHashRemove(pStore->map, (void *)(&uid), sizeof(uid)); - uDebug("drop uid %" PRIu64 " from KV store %s", uid, pStore->fname); - - return 0; -} - -int tdKVStoreEndCommit(SKVStore *pStore) { - ASSERT(pStore->fd > 0); - - if (tdUpdateKVStoreHeader(pStore->fd, pStore->fname, &(pStore->info)) < 0) return -1; - - if (fsync(pStore->fd) < 0) { - uError("failed to fsync file %s since %s", pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if 
(close(pStore->fd) < 0) { - uError("failed to close file %s since %s", pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - pStore->fd = -1; - - (void)remove(pStore->fsnap); - return 0; -} - -void tsdbGetStoreInfo(char *fname, uint32_t *magic, int64_t *size) { - char buf[TD_KVSTORE_HEADER_SIZE] = "\0"; - SStoreInfo info = {0}; - - int fd = open(fname, O_RDONLY | O_BINARY); - if (fd < 0) goto _err; - - if (taosRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) goto _err; - if (!taosCheckChecksumWhole((uint8_t *)buf, TD_KVSTORE_HEADER_SIZE)) goto _err; - - void *pBuf = (void *)buf; - pBuf = tdDecodeStoreInfo(pBuf, &info); - off_t offset = lseek(fd, 0, SEEK_END); - if (offset < 0) goto _err; - close(fd); - - *magic = info.magic; - *size = offset; - - return; - -_err: - if (fd >= 0) close(fd); - *magic = TD_KVSTORE_INIT_MAGIC; - *size = 0; -} - -static int tdLoadKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo, uint32_t *version) { - char buf[TD_KVSTORE_HEADER_SIZE] = "\0"; - - if (lseek(fd, 0, SEEK_SET) < 0) { - uError("failed to lseek file %s since %s", fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (taosRead(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { - uError("failed to read %d bytes from file %s since %s", TD_KVSTORE_HEADER_SIZE, fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - if (!taosCheckChecksumWhole((uint8_t *)buf, TD_KVSTORE_HEADER_SIZE)) { - uError("file %s is broken", fname); - terrno = TSDB_CODE_COM_FILE_CORRUPTED; - return -1; - } - - void *pBuf = (void *)buf; - pBuf = tdDecodeStoreInfo(pBuf, pInfo); - pBuf = taosDecodeFixedU32(pBuf, version); - - return 0; -} - -static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo) { - char buf[TD_KVSTORE_HEADER_SIZE] = "\0"; - - if (lseek(fd, 0, SEEK_SET) < 0) { - uError("failed to lseek file %s since %s", fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - void *pBuf = buf; - tdEncodeStoreInfo(&pBuf, pInfo); - taosEncodeFixedU32(&pBuf, KVSTORE_FILE_VERSION); - ASSERT(POINTER_DISTANCE(pBuf, buf) + sizeof(TSCKSUM) <= TD_KVSTORE_HEADER_SIZE); - - taosCalcChecksumAppend(0, (uint8_t *)buf, TD_KVSTORE_HEADER_SIZE); - if (taosWrite(fd, buf, TD_KVSTORE_HEADER_SIZE) < TD_KVSTORE_HEADER_SIZE) { - uError("failed to write %d bytes to file %s since %s", TD_KVSTORE_HEADER_SIZE, fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - - return 0; -} - -static int tdInitKVStoreHeader(int fd, char *fname) { - SStoreInfo info = {TD_KVSTORE_HEADER_SIZE, 0, 0, 0, TD_KVSTORE_INIT_MAGIC}; - - return tdUpdateKVStoreHeader(fd, fname, &info); -} - -static int tdEncodeStoreInfo(void **buf, SStoreInfo *pInfo) { - int tlen = 0; - tlen += taosEncodeVariantI64(buf, pInfo->size); - tlen += taosEncodeVariantI64(buf, pInfo->tombSize); - tlen += taosEncodeVariantI64(buf, pInfo->nRecords); - tlen += taosEncodeVariantI64(buf, pInfo->nDels); - tlen += taosEncodeFixedU32(buf, pInfo->magic); - - return tlen; -} - -static void *tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo) { - buf = taosDecodeVariantI64(buf, &(pInfo->size)); - buf = taosDecodeVariantI64(buf, &(pInfo->tombSize)); - buf = taosDecodeVariantI64(buf, &(pInfo->nRecords)); - buf = taosDecodeVariantI64(buf, &(pInfo->nDels)); - buf = taosDecodeFixedU32(buf, &(pInfo->magic)); - - return buf; -} - -static SKVStore *tdNewKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH) { - SKVStore *pStore = 
(SKVStore *)calloc(1, sizeof(SKVStore)); - if (pStore == NULL) goto _err; - - pStore->fname = strdup(fname); - if (pStore->fname == NULL) { - terrno = TSDB_CODE_COM_OUT_OF_MEMORY; - goto _err; - } - - pStore->fsnap = tdGetKVStoreSnapshotFname(fname); - if (pStore->fsnap == NULL) { - goto _err; - } - - pStore->fnew = tdGetKVStoreNewFname(fname); - if (pStore->fnew == NULL) goto _err; - - pStore->fd = -1; - pStore->sfd = -1; - pStore->nfd = -1; - pStore->iFunc = iFunc; - pStore->aFunc = aFunc; - pStore->appH = appH; - pStore->map = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); - if (pStore->map == NULL) { - terrno = TSDB_CODE_COM_OUT_OF_MEMORY; - goto _err; - } - - return pStore; - -_err: - tdFreeKVStore(pStore); - return NULL; -} - -static void tdFreeKVStore(SKVStore *pStore) { - if (pStore) { - tfree(pStore->fname); - tfree(pStore->fsnap); - tfree(pStore->fnew); - taosHashCleanup(pStore->map); - free(pStore); - } -} - -static char *tdGetKVStoreSnapshotFname(char *fdata) { - size_t size = strlen(fdata) + strlen(TD_KVSTORE_SNAP_SUFFIX) + 1; - char * fname = malloc(size); - if (fname == NULL) { - terrno = TSDB_CODE_COM_OUT_OF_MEMORY; - return NULL; - } - sprintf(fname, "%s%s", fdata, TD_KVSTORE_SNAP_SUFFIX); - return fname; -} - -static char *tdGetKVStoreNewFname(char *fdata) { - size_t size = strlen(fdata) + strlen(TD_KVSTORE_NEW_SUFFIX) + 1; - char * fname = malloc(size); - if (fname == NULL) { - terrno = TSDB_CODE_COM_OUT_OF_MEMORY; - return NULL; - } - sprintf(fname, "%s%s", fdata, TD_KVSTORE_NEW_SUFFIX); - return fname; -} - -static int tdEncodeKVRecord(void **buf, SKVRecord *pRecord) { - int tlen = 0; - tlen += taosEncodeFixedU64(buf, pRecord->uid); - tlen += taosEncodeFixedI64(buf, pRecord->offset); - tlen += taosEncodeFixedI64(buf, pRecord->size); - - return tlen; -} - -static void *tdDecodeKVRecord(void *buf, SKVRecord *pRecord) { - buf = taosDecodeFixedU64(buf, &(pRecord->uid)); - buf = taosDecodeFixedI64(buf, &(pRecord->offset)); - buf = taosDecodeFixedI64(buf, &(pRecord->size)); - - return buf; -} - -static int tdRestoreKVStore(SKVStore *pStore) { - char tbuf[128] = "\0"; - void * buf = NULL; - int64_t maxBufSize = 0; - SKVRecord rInfo = {0}; - SKVRecord *pRecord = NULL; - - ASSERT(TD_KVSTORE_HEADER_SIZE == lseek(pStore->fd, 0, SEEK_CUR)); - ASSERT(pStore->info.size == TD_KVSTORE_HEADER_SIZE); - - while (true) { - int64_t tsize = taosRead(pStore->fd, tbuf, sizeof(SKVRecord)); - if (tsize == 0) break; - if (tsize < sizeof(SKVRecord)) { - uError("failed to read %" PRIzu " bytes from file %s at offset %" PRId64 "since %s", sizeof(SKVRecord), pStore->fname, - pStore->info.size, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - char *pBuf = tdDecodeKVRecord(tbuf, &rInfo); - ASSERT(POINTER_DISTANCE(pBuf, tbuf) == sizeof(SKVRecord)); - ASSERT((rInfo.offset > 0) ? 
(pStore->info.size == rInfo.offset) : true); - - if (rInfo.offset < 0) { - taosHashRemove(pStore->map, (void *)(&rInfo.uid), sizeof(rInfo.uid)); - pStore->info.size += sizeof(SKVRecord); - pStore->info.nRecords--; - pStore->info.nDels++; - pStore->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2); - } else { - ASSERT(rInfo.offset > 0 && rInfo.size > 0); - if (taosHashPut(pStore->map, (void *)(&rInfo.uid), sizeof(rInfo.uid), &rInfo, sizeof(rInfo)) < 0) { - uError("failed to put record in KV store %s", pStore->fname); - terrno = TSDB_CODE_COM_OUT_OF_MEMORY; - goto _err; - } - - maxBufSize = MAX(maxBufSize, rInfo.size); - - if (lseek(pStore->fd, (off_t)rInfo.size, SEEK_CUR) < 0) { - uError("failed to lseek file %s since %s", pStore->fname, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - pStore->info.size += (sizeof(SKVRecord) + rInfo.size); - pStore->info.nRecords++; - } - } - - buf = malloc((size_t)maxBufSize); - if (buf == NULL) { - uError("failed to allocate %" PRId64 " bytes in KV store %s", maxBufSize, pStore->fname); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - pRecord = taosHashIterate(pStore->map, NULL); - while (pRecord) { - if (lseek(pStore->fd, (off_t)(pRecord->offset + sizeof(SKVRecord)), SEEK_SET) < 0) { - uError("failed to lseek file %s since %s, offset %" PRId64, pStore->fname, strerror(errno), pRecord->offset); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (taosRead(pStore->fd, buf, (size_t)pRecord->size) < pRecord->size) { - uError("failed to read %" PRId64 " bytes from file %s since %s, offset %" PRId64, pRecord->size, pStore->fname, - strerror(errno), pRecord->offset); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - if (pStore->iFunc) { - if ((*pStore->iFunc)(pStore->appH, buf, (int)pRecord->size) < 0) { - uError("failed to restore record uid %" PRIu64 " in kv store %s at offset %" PRId64 " size %" PRId64 - " since %s", - pRecord->uid, pStore->fname, pRecord->offset, pRecord->size, tstrerror(terrno)); - goto _err; - } - } - - pRecord = taosHashIterate(pStore->map, pRecord); - } - - if (pStore->aFunc) (*pStore->aFunc)(pStore->appH); - - tfree(buf); - return 0; - -_err: - taosHashCancelIterate(pStore->map, pRecord); - tfree(buf); - return -1; -} diff --git a/src/util/src/tlist.c b/src/util/src/tlist.c index 8c2ad83de1..2f52551e2a 100644 --- a/src/util/src/tlist.c +++ b/src/util/src/tlist.c @@ -38,11 +38,13 @@ void tdListEmpty(SList *list) { list->numOfEles = 0; } -void tdListFree(SList *list) { +void *tdListFree(SList *list) { if (list) { tdListEmpty(list); free(list); } + + return NULL; } void tdListPrependNode(SList *list, SListNode *node) { diff --git a/src/util/src/tref.c b/src/util/src/tref.c index ebae8ece14..7d64bd1f83 100644 --- a/src/util/src/tref.c +++ b/src/util/src/tref.c @@ -24,22 +24,22 @@ #define TSDB_REF_STATE_DELETED 2 typedef struct SRefNode { - struct SRefNode *prev; // previous node + struct SRefNode *prev; // previous node struct SRefNode *next; // next node - void *p; // pointer to resource protected, + void *p; // pointer to resource protected, int64_t rid; // reference ID int32_t count; // number of references - int removed; // 1: removed + int removed; // 1: removed } SRefNode; typedef struct { SRefNode **nodeList; // array of SRefNode linked list int state; // 0: empty, 1: active; 2: deleted - int rsetId; // refSet ID, global unique + int rsetId; // refSet ID, global unique int64_t rid; // increase by one for each new reference - int max; // mod + int max; // mod int32_t count; // 
total number of SRefNodes in this set - int64_t *lockedBy; + int64_t *lockedBy; void (*fp)(void *); } SRefSet; @@ -62,9 +62,9 @@ int taosOpenRef(int max, void (*fp)(void *)) SRefSet *pSet; int64_t *lockedBy; int i, rsetId; - + pthread_once(&tsRefModuleInit, taosInitRefModule); - + nodeList = calloc(sizeof(SRefNode *), (size_t)max); if (nodeList == NULL) { terrno = TSDB_CODE_REF_NO_MEMORY; @@ -79,12 +79,12 @@ int taosOpenRef(int max, void (*fp)(void *)) } pthread_mutex_lock(&tsRefMutex); - + for (i = 0; i < TSDB_REF_OBJECTS; ++i) { tsNextId = (tsNextId + 1) % TSDB_REF_OBJECTS; if (tsNextId == 0) tsNextId = 1; // dont use 0 as rsetId if (tsRefSetList[tsNextId].state == TSDB_REF_STATE_EMPTY) break; - } + } if (i < TSDB_REF_OBJECTS) { rsetId = tsNextId; @@ -105,7 +105,7 @@ int taosOpenRef(int max, void (*fp)(void *)) free (nodeList); free (lockedBy); uTrace("run out of Ref ID, maximum:%d refSetNum:%d", TSDB_REF_OBJECTS, tsRefSetNum); - } + } pthread_mutex_unlock(&tsRefMutex); @@ -127,7 +127,7 @@ int taosCloseRef(int rsetId) pthread_mutex_lock(&tsRefMutex); - if (pSet->state == TSDB_REF_STATE_ACTIVE) { + if (pSet->state == TSDB_REF_STATE_ACTIVE) { pSet->state = TSDB_REF_STATE_DELETED; deleted = 1; uTrace("rsetId:%d is closed, count:%d", rsetId, pSet->count); @@ -142,7 +142,7 @@ int taosCloseRef(int rsetId) return 0; } -int64_t taosAddRef(int rsetId, void *p) +int64_t taosAddRef(int rsetId, void *p) { int hash; SRefNode *pNode; @@ -163,7 +163,7 @@ int64_t taosAddRef(int rsetId, void *p) terrno = TSDB_CODE_REF_ID_REMOVED; return -1; } - + pNode = calloc(sizeof(SRefNode), 1); if (pNode == NULL) { terrno = TSDB_CODE_REF_NO_MEMORY; @@ -173,7 +173,7 @@ int64_t taosAddRef(int rsetId, void *p) rid = atomic_add_fetch_64(&pSet->rid, 1); hash = rid % pSet->max; taosLockList(pSet->lockedBy+hash); - + pNode->p = p; pNode->rid = rid; pNode->count = 1; @@ -187,16 +187,16 @@ int64_t taosAddRef(int rsetId, void *p) taosUnlockList(pSet->lockedBy+hash); - return rid; + return rid; } -int taosRemoveRef(int rsetId, int64_t rid) +int taosRemoveRef(int rsetId, int64_t rid) { return taosDecRefCount(rsetId, rid, 1); } // if rid is 0, return the first p in hash list, otherwise, return the next after current rid -void *taosAcquireRef(int rsetId, int64_t rid) +void *taosAcquireRef(int rsetId, int64_t rid) { int hash; SRefNode *pNode; @@ -204,7 +204,7 @@ void *taosAcquireRef(int rsetId, int64_t rid) void *p = NULL; if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) { - uTrace("rsetId:%d rid:%" PRId64 " failed to acquire, rsetId not valid", rsetId, rid); + //uTrace("rsetId:%d rid:%" PRId64 " failed to acquire, rsetId not valid", rsetId, rid); terrno = TSDB_CODE_REF_INVALID_ID; return NULL; } @@ -223,7 +223,7 @@ void *taosAcquireRef(int rsetId, int64_t rid) terrno = TSDB_CODE_REF_ID_REMOVED; return NULL; } - + hash = rid % pSet->max; taosLockList(pSet->lockedBy+hash); @@ -233,8 +233,8 @@ void *taosAcquireRef(int rsetId, int64_t rid) if (pNode->rid == rid) { break; } - - pNode = pNode->next; + + pNode = pNode->next; } if (pNode) { @@ -258,7 +258,7 @@ void *taosAcquireRef(int rsetId, int64_t rid) return p; } -int taosReleaseRef(int rsetId, int64_t rid) +int taosReleaseRef(int rsetId, int64_t rid) { return taosDecRefCount(rsetId, rid, 0); } @@ -280,6 +280,7 @@ void *taosIterateRef(int rsetId, int64_t rid) { return NULL; } + void *newP = NULL; pSet = tsRefSetList + rsetId; taosIncRsetCount(pSet); if (pSet->state != TSDB_REF_STATE_ACTIVE) { @@ -289,52 +290,68 @@ void *taosIterateRef(int rsetId, int64_t rid) { return NULL; } - int hash 
= 0; - if (rid > 0) { - hash = rid % pSet->max; - taosLockList(pSet->lockedBy+hash); + do { + newP = NULL; + int hash = 0; + if (rid > 0) { + hash = rid % pSet->max; + taosLockList(pSet->lockedBy+hash); - pNode = pSet->nodeList[hash]; - while (pNode) { - if (pNode->rid == rid) break; - pNode = pNode->next; + pNode = pSet->nodeList[hash]; + while (pNode) { + if (pNode->rid == rid) break; + pNode = pNode->next; + } + + if (pNode == NULL) { + uError("rsetId:%d rid:%" PRId64 " not there, quit", rsetId, rid); + terrno = TSDB_CODE_REF_NOT_EXIST; + taosUnlockList(pSet->lockedBy+hash); + taosDecRsetCount(pSet); + return NULL; + } + + // rid is there + pNode = pNode->next; + // check first place + while (pNode) { + if (!pNode->removed) break; + pNode = pNode->next; + } + if (pNode == NULL) { + taosUnlockList(pSet->lockedBy+hash); + hash++; + } } if (pNode == NULL) { - uError("rsetId:%d rid:%" PRId64 " not there, quit", rsetId, rid); - terrno = TSDB_CODE_REF_NOT_EXIST; - taosUnlockList(pSet->lockedBy+hash); - return NULL; + for (; hash < pSet->max; ++hash) { + taosLockList(pSet->lockedBy+hash); + pNode = pSet->nodeList[hash]; + if (pNode) { + // check first place + while (pNode) { + if (!pNode->removed) break; + pNode = pNode->next; + } + if (pNode) break; + } + taosUnlockList(pSet->lockedBy+hash); + } } - // rid is there - pNode = pNode->next; - if (pNode == NULL) { + if (pNode) { + pNode->count++; // acquire it + newP = pNode->p; taosUnlockList(pSet->lockedBy+hash); - hash++; + uTrace("rsetId:%d p:%p rid:%" PRId64 " is returned", rsetId, newP, rid); + } else { + uTrace("rsetId:%d the list is over", rsetId); } - } - if (pNode == NULL) { - for (; hash < pSet->max; ++hash) { - taosLockList(pSet->lockedBy+hash); - pNode = pSet->nodeList[hash]; - if (pNode) break; - taosUnlockList(pSet->lockedBy+hash); - } - } - - void *newP = NULL; - if (pNode) { - pNode->count++; // acquire it - newP = pNode->p; - taosUnlockList(pSet->lockedBy+hash); - uTrace("rsetId:%d p:%p rid:%" PRId64 " is returned", rsetId, newP, rid); - } else { - uTrace("rsetId:%d the list is over", rsetId); - } - - if (rid > 0) taosReleaseRef(rsetId, rid); // release the current one + if (rid > 0) taosReleaseRef(rsetId, rid); // release the current one + if (pNode) rid = pNode->rid; + } while (newP && pNode->removed); taosDecRsetCount(pSet); @@ -350,22 +367,22 @@ int taosListRef() { for (int i = 0; i < TSDB_REF_OBJECTS; ++i) { pSet = tsRefSetList + i; - - if (pSet->state == TSDB_REF_STATE_EMPTY) + + if (pSet->state == TSDB_REF_STATE_EMPTY) continue; uInfo("rsetId:%d state:%d count::%d", i, pSet->state, pSet->count); for (int j=0; j < pSet->max; ++j) { pNode = pSet->nodeList[j]; - + while (pNode) { uInfo("rsetId:%d p:%p rid:%" PRId64 "count:%d", i, pNode->p, pNode->rid, pNode->count); pNode = pNode->next; num++; } - } - } + } + } pthread_mutex_unlock(&tsRefMutex); @@ -397,16 +414,16 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) { terrno = TSDB_CODE_REF_ID_REMOVED; return -1; } - + hash = rid % pSet->max; taosLockList(pSet->lockedBy+hash); - + pNode = pSet->nodeList[hash]; while (pNode) { if (pNode->rid == rid) break; - pNode = pNode->next; + pNode = pNode->next; } if (pNode) { @@ -417,18 +434,18 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) { if (pNode->prev) { pNode->prev->next = pNode->next; } else { - pSet->nodeList[hash] = pNode->next; + pSet->nodeList[hash] = pNode->next; } if (pNode->next) { - pNode->next->prev = pNode->prev; - } + pNode->next->prev = pNode->prev; + } released = 1; } else { - 
uTrace("rsetId:%d p:%p rid:%" PRId64 " is released", rsetId, pNode->p, rid); + uTrace("rsetId:%d p:%p rid:%" PRId64 " is released", rsetId, pNode->p, rid); } } else { - uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid); + uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid); terrno = TSDB_CODE_REF_NOT_EXIST; code = -1; } @@ -437,11 +454,11 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) { if (released) { uTrace("rsetId:%d p:%p rid:%" PRId64 " is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode); - (*pSet->fp)(pNode->p); + (*pSet->fp)(pNode->p); free(pNode); taosDecRsetCount(pSet); - } + } return code; } @@ -493,5 +510,5 @@ static void taosDecRsetCount(SRefSet *pSet) { } pthread_mutex_unlock(&tsRefMutex); -} +} diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 57e262e5cf..e075876867 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -32,14 +32,27 @@ int32_t taosGetFqdn(char *fqdn) { struct addrinfo hints = {0}; struct addrinfo *result = NULL; +#ifdef __APPLE__ + // on macosx, hostname -f has the form of xxx.local + // which will block getaddrinfo for a few seconds if AI_CANONNAME is set + // thus, we choose AF_INET (ipv4 for the moment) to make getaddrinfo return + // immediately + hints.ai_family = AF_INET; +#else // __APPLE__ hints.ai_flags = AI_CANONNAME; +#endif // __APPLE__ int32_t ret = getaddrinfo(hostname, NULL, &hints, &result); if (!result) { uError("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret)); return -1; } +#ifdef __APPLE__ + // refer to comments above + strcpy(fqdn, hostname); +#else // __APPLE__ strcpy(fqdn, result->ai_canonname); +#endif // __APPLE__ freeaddrinfo(result); return 0; } @@ -286,7 +299,7 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie int32_t bufSize = 1024 * 1024; sockFd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); - + if (sockFd <= 2) { uError("failed to open the socket: %d (%s)", errno, strerror(errno)); taosCloseSocketNoCheck(sockFd); @@ -483,5 +496,5 @@ int32_t taosCopyFds(SOCKET sfd, int32_t dfd, int64_t len) { leftLen -= readLen; } - return 0; + return (int32_t)len; } diff --git a/src/util/src/ttimer.c b/src/util/src/ttimer.c index 015c687b3d..9833449777 100644 --- a/src/util/src/ttimer.c +++ b/src/util/src/ttimer.c @@ -18,6 +18,7 @@ #include "tsched.h" #include "ttimer.h" #include "tutil.h" +#include "monotonic.h" extern int8_t tscEmbedded; @@ -186,6 +187,10 @@ static void removeTimer(uintptr_t id) { unlockTimerList(list); } +static int64_t getMonotonicMs(void) { + return (int64_t) getMonotonicUs() / 1000; +} + static void addToWheel(tmr_obj_t* timer, uint32_t delay) { timerAddRef(timer); // select a wheel for the timer, we are not an accurate timer, @@ -201,7 +206,7 @@ static void addToWheel(tmr_obj_t* timer, uint32_t delay) { time_wheel_t* wheel = wheels + timer->wheel; timer->prev = NULL; - timer->expireAt = taosGetTimestampMs() + delay; + timer->expireAt = getMonotonicMs() + delay; pthread_mutex_lock(&wheel->mutex); @@ -334,7 +339,7 @@ tmr_h taosTmrStart(TAOS_TMR_CALLBACK fp, int mseconds, void* param, void* handle } static void taosTimerLoopFunc(int signo) { - int64_t now = taosGetTimestampMs(); + int64_t now = getMonotonicMs(); for (int i = 0; i < tListLen(wheels); i++) { // `expried` is a temporary expire list. 
@@ -501,7 +506,7 @@ static void taosTmrModuleInit(void) { pthread_mutex_init(&tmrCtrlMutex, NULL); - int64_t now = taosGetTimestampMs(); + int64_t now = getMonotonicMs(); for (int i = 0; i < tListLen(wheels); i++) { time_wheel_t* wheel = wheels + i; if (pthread_mutex_init(&wheel->mutex, NULL) != 0) { @@ -532,6 +537,9 @@ static void taosTmrModuleInit(void) { } void* taosTmrInit(int maxNumOfTmrs, int resolution, int longest, const char* label) { + const char* ret = monotonicInit(); + tmrInfo("ttimer monotonic clock source:%s", ret); + pthread_once(&tmrModuleInit, taosTmrModuleInit); pthread_mutex_lock(&tmrCtrlMutex); diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt index 6066d58416..0c96ed2a2f 100644 --- a/src/util/tests/CMakeLists.txt +++ b/src/util/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/vnode/CMakeLists.txt b/src/vnode/CMakeLists.txt index 5d77d48ebf..3fefbea05b 100644 --- a/src/vnode/CMakeLists.txt +++ b/src/vnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) @@ -11,4 +11,4 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(vnode ${SRC}) -TARGET_LINK_LIBRARIES(vnode tsdb tcq) +TARGET_LINK_LIBRARIES(vnode tsdb tcq common) diff --git a/src/vnode/inc/vnodeMain.h b/src/vnode/inc/vnodeMain.h index e1ddcdc36a..73591bc10d 100644 --- a/src/vnode/inc/vnodeMain.h +++ b/src/vnode/inc/vnodeMain.h @@ -26,8 +26,6 @@ int32_t vnodeDrop(int32_t vgId); int32_t vnodeOpen(int32_t vgId); int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg); int32_t vnodeClose(int32_t vgId); - -int32_t vnodeReset(SVnodeObj *pVnode); void vnodeCleanUp(SVnodeObj *pVnode); void vnodeDestroy(SVnodeObj *pVnode); diff --git a/src/vnode/inc/vnodeSync.h b/src/vnode/inc/vnodeSync.h index ae02ca17cb..c9ac25c227 100644 --- a/src/vnode/inc/vnodeSync.h +++ b/src/vnode/inc/vnodeSync.h @@ -25,7 +25,8 @@ uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t ei int32_t vnodeGetWalInfo(int32_t vgId, char *fileName, int64_t *fileId); void vnodeNotifyRole(int32_t vgId, int8_t role); void vnodeCtrlFlow(int32_t vgId, int32_t level); -int32_t vnodeNotifyFileSynced(int32_t vgId, uint64_t fversion); +void vnodeStartSyncFile(int32_t vgId); +void vnodeStopSyncFile(int32_t vgId, uint64_t fversion); void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code); int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam); int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver); diff --git a/src/vnode/src/vnodeCfg.c b/src/vnode/src/vnodeCfg.c index 9b7916c8bd..03f2b11eec 100644 --- a/src/vnode/src/vnodeCfg.c +++ b/src/vnode/src/vnodeCfg.c @@ -34,6 +34,7 @@ static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) { pVnode->tsdbCfg.maxRowsPerFileBlock = vnodeMsg->cfg.maxRowsPerFileBlock; pVnode->tsdbCfg.precision = vnodeMsg->cfg.precision; pVnode->tsdbCfg.compression = vnodeMsg->cfg.compression; + pVnode->tsdbCfg.update = vnodeMsg->cfg.update; pVnode->tsdbCfg.cacheLastRow = vnodeMsg->cfg.cacheLastRow; pVnode->walCfg.walLevel = vnodeMsg->cfg.walLevel; pVnode->walCfg.fsyncPeriod = vnodeMsg->cfg.fsyncPeriod; @@ -227,6 +228,15 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) { } vnodeMsg.cfg.quorum = 
(int8_t)quorum->valueint; + cJSON *update = cJSON_GetObjectItem(root, "update"); + if (!update || update->type != cJSON_Number) { + vError("vgId: %d, failed to read %s, update not found", pVnode->vgId, file); + vnodeMsg.cfg.update = 0; + vnodeMsg.cfg.vgCfgVersion = 0; + } else { + vnodeMsg.cfg.update = (int8_t)update->valueint; + } + cJSON *cacheLastRow = cJSON_GetObjectItem(root, "cacheLastRow"); if (!cacheLastRow || cacheLastRow->type != cJSON_Number) { vError("vgId: %d, failed to read %s, cacheLastRow not found", pVnode->vgId, file); @@ -325,6 +335,7 @@ int32_t vnodeWriteCfg(SCreateVnodeMsg *pMsg) { len += snprintf(content + len, maxLen - len, " \"dbReplica\": %d,\n", pMsg->cfg.dbReplica); len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pMsg->cfg.wals); len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pMsg->cfg.quorum); + len += snprintf(content + len, maxLen - len, " \"update\": %d,\n", pMsg->cfg.update); len += snprintf(content + len, maxLen - len, " \"cacheLastRow\": %d,\n", pMsg->cfg.cacheLastRow); len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); for (int32_t i = 0; i < pMsg->cfg.vgReplica; i++) { diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 3e72562c55..ac9536d243 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -18,7 +18,7 @@ #include "taoserror.h" #include "taosmsg.h" #include "tglobal.h" -// #include "tfs.h" +#include "tfs.h" #include "query.h" #include "dnode.h" #include "vnodeCfg.h" @@ -41,32 +41,19 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { return TSDB_CODE_SUCCESS; } - if (mkdir(tsVnodeDir, 0755) != 0 && errno != EEXIST) { - vError("vgId:%d, failed to create vnode, reason:%s dir:%s", pVnodeCfg->cfg.vgId, strerror(errno), tsVnodeDir); - if (errno == EACCES) { - return TSDB_CODE_VND_NO_DISK_PERMISSIONS; - } else if (errno == ENOSPC) { - return TSDB_CODE_VND_NO_DISKSPACE; - } else if (errno == ENOENT) { - return TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR; - } else { - return TSDB_CODE_VND_INIT_FAILED; - } + if (tfsMkdir("vnode") < 0) { + vError("vgId:%d, failed to create vnode dir, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno)); + return terrno; } char rootDir[TSDB_FILENAME_LEN] = {0}; sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId); - if (mkdir(rootDir, 0755) != 0 && errno != EEXIST) { - vError("vgId:%d, failed to create vnode, reason:%s dir:%s", pVnodeCfg->cfg.vgId, strerror(errno), rootDir); - if (errno == EACCES) { - return TSDB_CODE_VND_NO_DISK_PERMISSIONS; - } else if (errno == ENOSPC) { - return TSDB_CODE_VND_NO_DISKSPACE; - } else if (errno == ENOENT) { - return TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR; - } else { - return TSDB_CODE_VND_INIT_FAILED; - } + + char vnodeDir[TSDB_FILENAME_LEN] = "\0"; + snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d", pVnodeCfg->cfg.vgId); + if (tfsMkdir(vnodeDir) < 0) { + vError("vgId:%d, failed to create vnode dir %s, reason:%s", pVnodeCfg->cfg.vgId, vnodeDir, strerror(errno)); + return terrno; } code = vnodeWriteCfg(pVnodeCfg); @@ -75,22 +62,24 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { return code; } - STsdbCfg tsdbCfg = {0}; - tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId; - tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize; - tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks; - tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile; - tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep; - tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock; - tsdbCfg.maxRowsPerFileBlock = 
pVnodeCfg->cfg.maxRowsPerFileBlock; - tsdbCfg.precision = pVnodeCfg->cfg.precision; - tsdbCfg.compression = pVnodeCfg->cfg.compression; - tsdbCfg.update = pVnodeCfg->cfg.update; - tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow; + // STsdbCfg tsdbCfg = {0}; + // tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId; + // tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize; + // tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks; + // tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile; + // tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep; + // tsdbCfg.keep1 = pVnodeCfg->cfg.daysToKeep1; + // tsdbCfg.keep2 = pVnodeCfg->cfg.daysToKeep2; + // tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock; + // tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock; + // tsdbCfg.precision = pVnodeCfg->cfg.precision; + // tsdbCfg.compression = pVnodeCfg->cfg.compression; + // tsdbCfg.update = pVnodeCfg->cfg.update; + // tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow; - char tsdbDir[TSDB_FILENAME_LEN] = {0}; - sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId); - if (tsdbCreateRepo(tsdbDir, &tsdbCfg) < 0) { + // char tsdbDir[TSDB_FILENAME_LEN] = {0}; + // sprintf(tsdbDir, "vnode/vnode%d/tsdb", pVnodeCfg->cfg.vgId); + if (tsdbCreateRepo(pVnodeCfg->cfg.vgId) < 0) { vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno)); return TSDB_CODE_VND_INIT_FAILED; } @@ -205,12 +194,14 @@ int32_t vnodeOpen(int32_t vgId) { int32_t code = vnodeReadCfg(pVnode); if (code != TSDB_CODE_SUCCESS) { + vError("vgId:%d, failed to read config file, set cfgVersion to 0", pVnode->vgId); vnodeCleanUp(pVnode); - return code; + return 0; } code = vnodeReadVersion(pVnode); if (code != TSDB_CODE_SUCCESS) { + pVnode->version = 0; vError("vgId:%d, failed to read file version, generate it from data file", pVnode->vgId); // Allow vnode start even when read file version fails, set file version as wal version or zero // vnodeCleanUp(pVnode); @@ -247,14 +238,13 @@ int32_t vnodeOpen(int32_t vgId) { appH.cqH = pVnode->cq; appH.cqCreateFunc = cqCreate; appH.cqDropFunc = cqDrop; - sprintf(temp, "%s/tsdb", rootDir); terrno = 0; - pVnode->tsdb = tsdbOpenRepo(temp, &appH); + pVnode->tsdb = tsdbOpenRepo(&(pVnode->tsdbCfg), &appH); if (pVnode->tsdb == NULL) { vnodeCleanUp(pVnode); return terrno; - } else if (terrno != TSDB_CODE_SUCCESS) { + } else if (tsdbGetState(pVnode->tsdb) != TSDB_STATE_OK) { vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, tstrerror(terrno)); if (pVnode->syncCfg.replica <= 1) { @@ -301,20 +291,23 @@ int32_t vnodeOpen(int32_t vgId) { vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); vnodeAddIntoHash(pVnode); - + SSyncInfo syncInfo; syncInfo.vgId = pVnode->vgId; syncInfo.version = pVnode->version; syncInfo.syncCfg = pVnode->syncCfg; tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN); - syncInfo.getWalInfo = vnodeGetWalInfo; - syncInfo.getFileInfo = vnodeGetFileInfo; - syncInfo.writeToCache = vnodeWriteToCache; + syncInfo.getWalInfoFp = vnodeGetWalInfo; + syncInfo.writeToCacheFp = vnodeWriteToCache; syncInfo.confirmForward = vnodeConfirmForard; - syncInfo.notifyRole = vnodeNotifyRole; - syncInfo.notifyFlowCtrl = vnodeCtrlFlow; - syncInfo.notifyFileSynced = vnodeNotifyFileSynced; - syncInfo.getVersion = vnodeGetVersion; + syncInfo.notifyRoleFp = vnodeNotifyRole; + syncInfo.notifyFlowCtrlFp = vnodeCtrlFlow; + syncInfo.startSyncFileFp = vnodeStartSyncFile; + syncInfo.stopSyncFileFp = 
vnodeStopSyncFile; + syncInfo.getVersionFp = vnodeGetVersion; + syncInfo.sendFileFp = tsdbSyncSend; + syncInfo.recvFileFp = tsdbSyncRecv; + syncInfo.pTsdb = pVnode->tsdb; pVnode->sync = syncStart(&syncInfo); if (pVnode->sync <= 0) { @@ -344,7 +337,7 @@ int32_t vnodeClose(int32_t vgId) { void vnodeDestroy(SVnodeObj *pVnode) { int32_t code = 0; int32_t vgId = pVnode->vgId; - + if (pVnode->qMgmt) { qCleanupQueryMgmt(pVnode->qMgmt); pVnode->qMgmt = NULL; @@ -396,17 +389,17 @@ void vnodeDestroy(SVnodeObj *pVnode) { if (pVnode->dropped) { char rootDir[TSDB_FILENAME_LEN] = {0}; char newDir[TSDB_FILENAME_LEN] = {0}; - sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId); - sprintf(newDir, "%s/vnode%d", tsVnodeBakDir, vgId); + sprintf(rootDir, "%s/vnode%d", "vnode", vgId); + sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId); if (0 == tsEnableVnodeBak) { vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId); } else { - taosRemoveDir(newDir); - taosRename(rootDir, newDir); + tfsRmdir(newDir); + tfsRename(rootDir, newDir); } - taosRemoveDir(rootDir); + tfsRmdir(rootDir); dnodeSendStatusMsgToMnode(); } @@ -466,37 +459,3 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { return 0; } - -int32_t vnodeReset(SVnodeObj *pVnode) { - char rootDir[128] = "\0"; - sprintf(rootDir, "%s/tsdb", pVnode->rootDir); - - if (!vnodeSetResetStatus(pVnode)) { - return -1; - } - - void *tsdb = pVnode->tsdb; - pVnode->tsdb = NULL; - - // acquire vnode - int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - - if (refCount > 3) { - tsem_wait(&pVnode->sem); - } - - // close tsdb, then open tsdb - tsdbCloseRepo(tsdb, 0); - STsdbAppH appH = {0}; - appH.appH = (void *)pVnode; - appH.notifyStatus = vnodeProcessTsdbStatus; - appH.cqH = pVnode->cq; - appH.cqCreateFunc = cqCreate; - appH.cqDropFunc = cqDrop; - pVnode->tsdb = tsdbOpenRepo(rootDir, &appH); - - vnodeSetReadyStatus(pVnode); - vnodeRelease(pVnode); - - return 0; -} diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index c67132c41f..627783c391 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -20,6 +20,7 @@ #include "dnode.h" #include "vnodeVersion.h" #include "vnodeMain.h" +#include "vnodeStatus.h" uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fver) { SVnodeObj *pVnode = vnodeAcquire(vgId); @@ -83,22 +84,34 @@ void vnodeCtrlFlow(int32_t vgId, int32_t level) { vnodeRelease(pVnode); } -int32_t vnodeNotifyFileSynced(int32_t vgId, uint64_t fversion) { +void vnodeStartSyncFile(int32_t vgId) { SVnodeObj *pVnode = vnodeAcquire(vgId); if (pVnode == NULL) { - vError("vgId:%d, vnode not found while notify file synced", vgId); - return 0; + vError("vgId:%d, vnode not found while start filesync", vgId); + return; + } + + vDebug("vgId:%d, datafile will be synced", vgId); + vnodeSetResetStatus(pVnode); + + vnodeRelease(pVnode); +} + +void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vError("vgId:%d, vnode not found while stop filesync", vgId); + return; } pVnode->fversion = fversion; pVnode->version = fversion; vnodeSaveVersion(pVnode); - vDebug("vgId:%d, data file is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); - int32_t code = vnodeReset(pVnode); + vDebug("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); + vnodeSetReadyStatus(pVnode); vnodeRelease(pVnode); - return code; } void vnodeConfirmForard(int32_t 
vgId, void *wparam, int32_t code) { diff --git a/src/wal/CMakeLists.txt b/src/wal/CMakeLists.txt index 6f35cb9ba7..42a764fce2 100644 --- a/src/wal/CMakeLists.txt +++ b/src/wal/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 0eda6ff786..ea1eaa4fee 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -39,7 +39,7 @@ int32_t walRenew(void *handle) { if (tfValid(pWal->tfd)) { tfClose(pWal->tfd); - wDebug("vgId:%d, file:%s, it is closed", pWal->vgId, pWal->name); + wDebug("vgId:%d, file:%s, it is closed while renew", pWal->vgId, pWal->name); } if (pWal->keep == TAOS_WAL_KEEP) { @@ -56,7 +56,7 @@ int32_t walRenew(void *handle) { code = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, file:%s, failed to open since %s", pWal->vgId, pWal->name, strerror(errno)); } else { - wDebug("vgId:%d, file:%s, it is created", pWal->vgId, pWal->name); + wDebug("vgId:%d, file:%s, it is created and open while renew", pWal->vgId, pWal->name); } pthread_mutex_unlock(&pWal->mutex); @@ -95,11 +95,15 @@ void walRemoveAllOldFiles(void *handle) { int64_t fileId = -1; pthread_mutex_lock(&pWal->mutex); + + tfClose(pWal->tfd); + wDebug("vgId:%d, file:%s, it is closed before remove all wals", pWal->vgId, pWal->name); + while (walGetNextFile(pWal, &fileId) >= 0) { snprintf(pWal->name, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId); if (remove(pWal->name) < 0) { - wError("vgId:%d, wal:%p file:%s, failed to remove", pWal->vgId, pWal, pWal->name); + wError("vgId:%d, wal:%p file:%s, failed to remove since %s", pWal->vgId, pWal, pWal->name, strerror(errno)); } else { wInfo("vgId:%d, wal:%p file:%s, it is removed", pWal->vgId, pWal, pWal->name); } @@ -192,7 +196,7 @@ int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) { wError("vgId:%d, file:%s, failed to open since %s", pWal->vgId, pWal->name, strerror(errno)); return TAOS_SYSTEM_ERROR(errno); } - wDebug("vgId:%d, file:%s open success", pWal->vgId, pWal->name); + wDebug("vgId:%d, file:%s, it is created and open while restore", pWal->vgId, pWal->name); } return TSDB_CODE_SUCCESS; @@ -265,6 +269,8 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch wError("vgId:%d, file:%s, failed to open for restore since %s", pWal->vgId, name, strerror(errno)); tfree(buffer); return TAOS_SYSTEM_ERROR(errno); + } else { + wDebug("vgId:%d, file:%s, open for restore", pWal->vgId, name); } int32_t code = TSDB_CODE_SUCCESS; @@ -332,6 +338,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch tfClose(tfd); tfree(buffer); + wDebug("vgId:%d, file:%s, it is closed after restore", pWal->vgId, name); return code; } diff --git a/src/wal/test/CMakeLists.txt b/src/wal/test/CMakeLists.txt index aec0602ac0..f20a57899e 100644 --- a/src/wal/test/CMakeLists.txt +++ b/src/wal/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 57fc8b1953..4e7e9a87ea 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -3,7 +3,7 @@ # generate release version: # mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release .. 
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) SET(CMAKE_C_STANDARD 11) diff --git a/tests/comparisonTest/tdengine/CMakeLists.txt b/tests/comparisonTest/tdengine/CMakeLists.txt index a12e36ab6b..36ed3efe19 100644 --- a/tests/comparisonTest/tdengine/CMakeLists.txt +++ b/tests/comparisonTest/tdengine/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/tests/examples/C#/taosdemo/taosdemo.cs b/tests/examples/C#/taosdemo/taosdemo.cs index df52acc99d..2d78418e0a 100644 --- a/tests/examples/C#/taosdemo/taosdemo.cs +++ b/tests/examples/C#/taosdemo/taosdemo.cs @@ -650,6 +650,7 @@ namespace TDengineDriver tester.CloseConnection(); Console.WriteLine("End."); + CleanAndExitProgram(0); } public class InsertDataThread diff --git a/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md index e14a5f7b67..17c5c8df00 100644 --- a/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md +++ b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md @@ -137,7 +137,7 @@ Welcome to the TDengine shell from Linux, Client Version:2.0.1.1 Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | + name | created_time | ntables | vgroups | replica | quorum | days | keep0,keep1,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | =================================================================================================================================================================================================================================================================== test | 2020-08-19 18:43:50.731 | 1 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | log | 2020-08-19 18:40:28.064 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | diff --git a/tests/examples/c/epoll.c b/tests/examples/c/epoll.c index 20ab395158..de7c5989d1 100644 --- a/tests/examples/c/epoll.c +++ b/tests/examples/c/epoll.c @@ -38,6 +38,8 @@ #include #include #include +#include +#include #define D(fmt, ...) fprintf(stderr, "%s[%d]%s(): " fmt "\n", basename(__FILE__), __LINE__, __func__, ##__VA_ARGS__) #define A(statement, fmt, ...) do { \ @@ -56,6 +58,8 @@ ##__VA_ARGS__); \ } while (0) +#include "os.h" + typedef struct ep_s ep_t; struct ep_s { int ep; @@ -214,7 +218,6 @@ static void* routine(void* arg) { A(0==pthread_mutex_lock(&ep->lock), ""); ep->wakenup = 0; A(0==pthread_mutex_unlock(&ep->lock), ""); - D("........"); continue; } A(ev->data.ptr, "internal logic error"); diff --git a/tests/examples/python/taosdemo/README.md b/tests/examples/python/taosdemo/README.md new file mode 100644 index 0000000000..d48fffe8ff --- /dev/null +++ b/tests/examples/python/taosdemo/README.md @@ -0,0 +1,38 @@ +install build environment +=== +/usr/bin/python3 -m pip install -r requirements.txt + +run python version taosdemo +=== +Usage: ./taosdemo.py [OPTION...] + +Author: Shuduo Sang + + -H, --help Show usage. + + -N, --native flag, Use native interface if set. Default is using RESTful interface. + -h, --host host, The host to connect to TDengine. Default is localhost. + -p, --port port, The TCP/IP port number to use for the connection. Default is 0. 
+ -u, --user user, The user name to use when connecting to the server. Default is 'root'. + -P, --password password, The password to use when connecting to the server. Default is 'taosdata'. + -l, --colsPerRec num_of_columns_per_record, The number of columns per record. Default is 3. + -d, --dbname database, Destination database. Default is 'test'. + -a, --replica replica, Set the replica parameters of the database, Default 1, min: 1, max: 5. + -m, --tbname table_prefix, Table prefix name. Default is 't'. + -M, --stable flag, Use super table. Default is no + -s, --stbname stable_prefix, STable prefix name. Default is 'st' + -Q, --query query, Execute query command. set 'DEFAULT' means select * from each table + -T, --threads num_of_threads, The number of threads. Default is 1. + -C, --processes num_of_processes, The number of processes. Default is 1. + -r, --batch num_of_records_per_req, The number of records per request. Default is 1000. + -t, --numOfTb num_of_tables, The number of tables. Default is 1. + -n, --numOfRec num_of_records_per_table, The number of records per table. Default is 1. + -c, --config config_directory, Configuration directory. Default is '/etc/taos/'. + -x, --insertOnly flag, Insert only flag. + -O, --outOfOrder out of order data insert, 0: In order, 1: Out of order. Default is in order. + -R, --rateOOOO rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50. + -D, --deleteMethod Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database. + -v, --verbose Print verbose output + -g, --debug Print debug output + -y, --skipPrompt Skip read key for continuous test, default is not skip + diff --git a/tests/examples/python/taosdemo/requirements.txt b/tests/examples/python/taosdemo/requirements.txt new file mode 100644 index 0000000000..977e8e3726 --- /dev/null +++ b/tests/examples/python/taosdemo/requirements.txt @@ -0,0 +1,28 @@ +## +######## example-requirements.txt ####### +## +####### Requirements without Version Specifiers ###### +requests +multipledispatch +#beautifulsoup4 +## +####### Requirements with Version Specifiers ###### +## See https://www.python.org/dev/peps/pep-0440/#version-specifiers +#docopt == 0.6.1 # Version Matching. Must be version 0.6.1 +#keyring >= 4.1.1 # Minimum version 4.1.1 +#coverage != 3.5 # Version Exclusion. Anything except version 3.5 +#Mopidy-Dirble ~= 1.1 # Compatible release. Same as >= 1.1, == 1.* +## +####### Refer to other requirements files ###### +#-r other-requirements.txt +## +## +####### A particular file ###### +#./downloads/numpy-1.9.2-cp34-none-win32.whl +#http://wxpython.org/Phoenix/snapshot-builds/wxPython_Phoenix-3.0.3.dev1820+49a8884-cp34-none-win_amd64.whl +## +####### Additional Requirements without Version Specifiers ###### +## Same as 1st section, just here to show that you can put things in any order. +#rejected +#green +## diff --git a/tests/examples/python/taosdemo/taosdemo.py b/tests/examples/python/taosdemo/taosdemo.py new file mode 100755 index 0000000000..d55023bdbf --- /dev/null +++ b/tests/examples/python/taosdemo/taosdemo.py @@ -0,0 +1,797 @@ +#!/usr/bin/python3 +# * Copyright (c) 2019 TAOS Data, Inc. +# * +# * This program is free software: you can use, redistribute, and/or modify +# * it under the terms of the GNU Affero General Public License, version 3 +# * or later ("AGPL"), as published by the Free Software Foundation. 
+# * +# * This program is distributed in the hope that it will be useful, but WITHOUT +# * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# * FITNESS FOR A PARTICULAR PURPOSE. +# * +# * You should have received a copy of the GNU Affero General Public License +# * along with this program. If not, see . + +# -*- coding: utf-8 -*- + +import sys +import getopt +import requests +import json +import random +import time +import datetime +from multiprocessing import Manager, Pool, Lock +from multipledispatch import dispatch +from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED + + +@dispatch(str, str) +def v_print(msg: str, arg: str): + if verbose: + print(msg % arg) + + +@dispatch(str, str, str) +def v_print(msg: str, arg1: str, arg2: str): + if verbose: + print(msg % (arg1, arg2)) + + +@dispatch(str, str, str, str) +def v_print(msg: str, arg1: str, arg2: str, arg3: str): + if verbose: + print(msg % (arg1, arg2, arg3)) + + +@dispatch(str, str, str, str, str) +def v_print(msg: str, arg1: str, arg2: str, arg3: str, arg4: str): + if verbose: + print(msg % (arg1, arg2, arg3, arg4)) + + +@dispatch(str, int) +def v_print(msg: str, arg: int): + if verbose: + print(msg % int(arg)) + + +@dispatch(str, int, str) +def v_print(msg: str, arg1: int, arg2: str): + if verbose: + print(msg % (int(arg1), str(arg2))) + + +@dispatch(str, str, int) +def v_print(msg: str, arg1: str, arg2: int): + if verbose: + print(msg % (arg1, int(arg2))) + + +@dispatch(str, int, int) +def v_print(msg: str, arg1: int, arg2: int): + if verbose: + print(msg % (int(arg1), int(arg2))) + + +@dispatch(str, int, int, str) +def v_print(msg: str, arg1: int, arg2: int, arg3: str): + if verbose: + print(msg % (int(arg1), int(arg2), str(arg3))) + + +@dispatch(str, int, int, int) +def v_print(msg: str, arg1: int, arg2: int, arg3: int): + if verbose: + print(msg % (int(arg1), int(arg2), int(arg3))) + + +@dispatch(str, int, int, int, int) +def v_print(msg: str, arg1: int, arg2: int, arg3: int, arg4: int): + if verbose: + print(msg % (int(arg1), int(arg2), int(arg3), int(arg4))) + + +def restful_execute(host: str, port: int, user: str, password: str, cmd: str): + url = "http://%s:%d/rest/sql" % (host, restPort) + + v_print("restful_execute - cmd: %s", cmd) + + resp = requests.post(url, cmd, auth=(user, password)) + + v_print("resp status: %d", resp.status_code) + + if debug: + v_print( + "resp text: %s", + json.dumps( + resp.json(), + sort_keys=True, + indent=2)) + else: + print("resp: %s" % json.dumps(resp.json())) + + +def query_func(process: int, thread: int, cmd: str): + v_print("%d process %d thread cmd: %s", process, thread, cmd) + + if oneMoreHost != "NotSupported" and random.randint( + 0, 1) == 1: + v_print("%s", "Send to second host") + if native: + cursor2.execute(cmd) + else: + restful_execute( + oneMoreHost, port, user, password, cmd) + else: + v_print("%s%s%s", "Send ", cmd, " to the host") + if native: + pass +# cursor.execute(cmd) + else: + restful_execute( + host, port, user, password, cmd) + + +def query_data_process(cmd: str): + # establish connection if native + if native: + v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir) + try: + conn = taos.connect( + host=host, + user=user, + password=password, + config=configDir) + v_print("conn: %s", str(conn.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + + try: + cursor = conn.cursor() + v_print("cursor:%d %s", id(cursor), str(cursor.__class__)) + except Exception as e: + 
print("Error: %s" % e.args[0]) + conn.close() + sys.exit(1) + + if native: + try: + cursor.execute(cmd) + cols = cursor.description + data = cursor.fetchall() + + for col in data: + print(col) + except Exception as e: + conn.close() + print("Error: %s" % e.args[0]) + sys.exit(1) + + else: + restful_execute( + host, + port, + user, + password, + cmd) + + if native: + cursor.close() + conn.close() + + +def create_stb(): + for i in range(0, numOfStb): + if native: + cursor.execute( + "CREATE TABLE IF NOT EXISTS %s%d (ts timestamp, value float) TAGS (uuid binary(50))" % + (stbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE TABLE IF NOT EXISTS %s%d (ts timestamp, value float) TAGS (uuid binary(50))" % + (stbName, i) + ) + + +def use_database(): + + if native: + cursor.execute("USE %s" % current_db) + else: + restful_execute(host, port, user, password, "USE %s" % current_db) + + +def create_databases(): + for i in range(0, numOfDb): + v_print("will create database db%d", int(i)) + + if native: + cursor.execute( + "CREATE DATABASE IF NOT EXISTS %s%d" % (dbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE DATABASE IF NOT EXISTS %s%d" % (dbName, i)) + + +def drop_tables(): + # TODO + v_print("TODO: drop tables total %d", numOfTb) + pass + + +def drop_stable(): + # TODO + v_print("TODO: drop stables total %d", numOfStb) + pass + + +def drop_databases(): + v_print("drop databases total %d", numOfDb) + + # drop exist databases first + for i in range(0, numOfDb): + v_print("will drop database db%d", int(i)) + + if native: + cursor.execute( + "DROP DATABASE IF EXISTS %s%d" % + (dbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "DROP DATABASE IF EXISTS %s%d" % + (dbName, i)) + + +def insert_func(process: int, thread: int): + v_print("%d process %d thread, insert_func ", process, thread) + + # generate uuid + uuid_int = random.randint(0, numOfTb + 1) + uuid = "%s" % uuid_int + v_print("uuid is: %s", uuid) + + # establish connection if native + if native: + v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir) + try: + conn = taos.connect( + host=host, + user=user, + password=password, + config=configDir) + v_print("conn: %s", str(conn.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + + try: + cursor = conn.cursor() + v_print("cursor:%d %s", id(cursor), str(cursor.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + conn.close() + sys.exit(1) + + v_print("numOfRec %d:", numOfRec) + + row = 0 + while row < numOfRec: + v_print("row: %d", row) + sqlCmd = ['INSERT INTO '] + try: + sqlCmd.append( + "%s.%s%d " % (current_db, tbName, thread)) + + if (numOfStb > 0 and autosubtable): + sqlCmd.append("USING %s.%s%d TAGS('%s') " % + (current_db, stbName, numOfStb - 1, uuid)) + + start_time = datetime.datetime( + 2021, 1, 25) + datetime.timedelta(seconds=row) + + sqlCmd.append("VALUES ") + for batchIter in range(0, batch): + sqlCmd.append("('%s', %f) " % + ( + start_time + + datetime.timedelta( + milliseconds=batchIter), + random.random())) + row = row + 1 + if row >= numOfRec: + v_print("BREAK, row: %d numOfRec:%d", row, numOfRec) + break + + except Exception as e: + print("Error: %s" % e.args[0]) + + cmd = ' '.join(sqlCmd) + + if measure: + exec_start_time = datetime.datetime.now() + + if native: + affectedRows = cursor.execute(cmd) + else: + restful_execute( + host, port, user, password, cmd) + + if measure: + exec_end_time = 
datetime.datetime.now() + exec_delta = exec_end_time - exec_start_time + v_print( + "consume %d microseconds", + exec_delta.microseconds) + + v_print("cmd: %s, length:%d", cmd, len(cmd)) + + if native: + cursor.close() + conn.close() + + +def create_tb_using_stb(): + # TODO: + pass + + +def create_tb(): + v_print("create_tb() numOfTb: %d", numOfTb) + for i in range(0, numOfDb): + if native: + cursor.execute("USE %s%d" % (dbName, i)) + else: + restful_execute( + host, port, user, password, "USE %s%d" % + (dbName, i)) + + for j in range(0, numOfTb): + if native: + cursor.execute( + "CREATE TABLE %s%d (ts timestamp, value float)" % + (tbName, j)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE TABLE %s%d (ts timestamp, value float)" % + (tbName, j)) + + +def insert_data_process(lock, i: int, begin: int, end: int): + lock.acquire() + tasks = end - begin + v_print("insert_data_process:%d table from %d to %d, tasks %d", i, begin, end, tasks) + + if (threads < (end - begin)): + for j in range(begin, end, threads): + with ThreadPoolExecutor(max_workers=threads) as executor: + k = end if ((j + threads) > end) else (j + threads) + workers = [ + executor.submit( + insert_func, + i, + n) for n in range( + j, + k)] + wait(workers, return_when=ALL_COMPLETED) + else: + with ThreadPoolExecutor(max_workers=threads) as executor: + workers = [ + executor.submit( + insert_func, + i, + j) for j in range( + begin, + end)] + wait(workers, return_when=ALL_COMPLETED) + + lock.release() + + +def query_db(i): + if native: + cursor.execute("USE %s%d" % (dbName, i)) + else: + restful_execute( + host, port, user, password, "USE %s%d" % + (dbName, i)) + + for j in range(0, numOfTb): + if native: + cursor.execute( + "SELECT COUNT(*) FROM %s%d" % (tbName, j)) + else: + restful_execute( + host, port, user, password, "SELECT COUNT(*) FROM %s%d" % + (tbName, j)) + + +def printConfig(): + + print("###################################################################") + print("# Use native interface: %s" % native) + print("# Server IP: %s" % host) + if native: + print("# Server port: %s" % port) + else: + print("# Server port: %s" % restPort) + + print("# Configuration Dir: %s" % configDir) + print("# User: %s" % user) + print("# Password: %s" % password) + print("# Number of Columns per record: %s" % colsPerRecord) + print("# Number of Threads: %s" % threads) + print("# Number of Processes: %s" % processes) + print("# Number of Tables: %s" % numOfTb) + print("# Number of records per Table: %s" % numOfRec) + print("# Records/Request: %s" % batch) + print("# Database name: %s" % dbName) + print("# Replica: %s" % replica) + print("# Use STable: %s" % useStable) + print("# Table prefix: %s" % tbName) + if useStable: + print("# STable prefix: %s" % stbName) + + print("# Data order: %s" % outOfOrder) + print("# Data out of order rate: %s" % rateOOOO) + print("# Delete method: %s" % deleteMethod) + print("# Query command: %s" % queryCmd) + print("# Insert Only: %s" % insertOnly) + print("# Verbose output %s" % verbose) + print("# Test time: %s" % + datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")) + print("###################################################################") + + +if __name__ == "__main__": + + native = False + verbose = False + debug = False + measure = True + dropDbOnly = False + colsPerRecord = 3 + numOfDb = 1 + dbName = "test" + replica = 1 + batch = 1 + numOfTb = 1 + tbName = "tb" + useStable = False + numOfStb = 0 + stbName = "stb" + numOfRec = 10 + ieration = 1 + host = "127.0.0.1" 
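# [Editor's note] Illustrative only: with the defaults around this block (dbName='test',
# tbName='tb'), a thread id of 3 and batch=2 chosen for the example, insert_func() above
# assembles a statement roughly like
#   INSERT INTO test0.tb3 VALUES ('2021-01-25 00:00:00', 0.734581) ('2021-01-25 00:00:00.001000', 0.129377)
# i.e. one value pair per batch iteration, timestamps advancing by 1 ms, values from
# random.random(); a USING ... TAGS(...) clause is added only when super tables and
# autosubtable are enabled.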
+ configDir = "/etc/taos" + oneMoreHost = "NotSupported" + port = 6030 + restPort = 6041 + user = "root" + defaultPass = "taosdata" + processes = 1 + threads = 1 + insertOnly = False + autosubtable = False + queryCmd = "NO" + outOfOrder = 0 + rateOOOO = 0 + deleteMethod = 0 + skipPrompt = False + + try: + opts, args = getopt.gnu_getopt(sys.argv[1:], + 'Nh:p:u:P:d:a:m:Ms:Q:T:C:r:l:t:n:c:xOR:D:vgyH', + [ + 'native', 'host', 'port', 'user', 'password', 'dbname', 'replica', 'tbname', + 'stable', 'stbname', 'query', 'threads', 'processes', + 'recPerReq', 'colsPerRecord', 'numOfTb', 'numOfRec', 'config', + 'insertOnly', 'outOfOrder', 'rateOOOO', 'deleteMethod', + 'verbose', 'debug', 'skipPrompt', 'help' + ]) + except getopt.GetoptError as err: + print('ERROR:', err) + print('Try `taosdemo.py --help` for more options.') + sys.exit(1) + + if bool(opts) is False: + print('Try `taosdemo.py --help` for more options.') + sys.exit(1) + + for key, value in opts: + if key in ['-H', '--help']: + print('') + print( + 'taosdemo.py for TDengine') + print('') + print('Author: Shuduo Sang ') + print('') + + print('\t-H, --help Show usage.') + print('') + + print('\t-N, --native flag, Use native interface if set. Default is using RESTful interface.') + print('\t-h, --host host, The host to connect to TDengine. Default is localhost.') + print('\t-p, --port port, The TCP/IP port number to use for the connection. Default is 0.') + print('\t-u, --user user, The user name to use when connecting to the server. Default is \'root\'.') + print('\t-P, --password password, The password to use when connecting to the server. Default is \'taosdata\'.') + print('\t-l, --colsPerRec num_of_columns_per_record, The number of columns per record. Default is 3.') + print( + '\t-d, --dbname database, Destination database. Default is \'test\'.') + print('\t-a, --replica replica, Set the replica parameters of the database, Default 1, min: 1, max: 5.') + print( + '\t-m, --tbname
table_prefix, Table prefix name. Default is \'t\'.') + print( + '\t-M, --stable flag, Use super table. Default is no') + print( + '\t-s, --stbname stable_prefix, STable prefix name. Default is \'st\'') + print('\t-Q, --query [NO|EACHTB|command] query, Execute query command. set \'EACHTB\' means select * from each table') + print( + '\t-T, --threads num_of_threads, The number of threads. Default is 1.') + print( + '\t-C, --processes num_of_processes, The number of processes. Default is 1.') + print('\t-r, --batch num_of_records_per_req, The number of records per request. Default is 1000.') + print( + '\t-t, --numOfTb num_of_tables, The number of tables. Default is 1.') + print('\t-n, --numOfRec num_of_records_per_table, The number of records per table. Default is 1.') + print('\t-c, --config config_directory, Configuration directory. Default is \'/etc/taos/\'.') + print('\t-x, --insertOnly flag, Insert only flag.') + print('\t-O, --outOfOrder out of order data insert, 0: In order, 1: Out of order. Default is in order.') + print('\t-R, --rateOOOO rate, Out of order data\'s rate--if order=1 Default 10, min: 0, max: 50.') + print('\t-D, --deleteMethod Delete data methods 0: don\'t delete, 1: delete by table, 2: delete by stable, 3: delete by database.') + print('\t-v, --verbose Print verbose output') + print('\t-g, --debug Print debug output') + print( + '\t-y, --skipPrompt Skip read key for continuous test, default is not skip') + print('') + sys.exit(0) + + if key in ['-N', '--native']: + try: + import taos + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + native = True + + if key in ['-h', '--host']: + host = value + + if key in ['-p', '--port']: + port = int(value) + + if key in ['-u', '--user']: + user = value + + if key in ['-P', '--password']: + password = value + else: + password = defaultPass + + if key in ['-d', '--dbname']: + dbName = value + + if key in ['-a', '--replica']: + replica = int(value) + if replica < 1: + print("FATAL: number of replicas must be larger than 0") + sys.exit(1) + + if key in ['-m', '--tbname']: + tbName = value + + if key in ['-M', '--stable']: + useStable = True + numOfStb = 1 + + if key in ['-s', '--stbname']: + stbName = value + + if key in ['-Q', '--query']: + queryCmd = str(value) + + if key in ['-T', '--threads']: + threads = int(value) + if threads < 1: + print("FATAL: number of threads must be larger than 0") + sys.exit(1) + + if key in ['-C', '--processes']: + processes = int(value) + if processes < 1: + print("FATAL: number of processes must be larger than 0") + sys.exit(1) + + if key in ['-r', '--batch']: + batch = int(value) + + if key in ['-l', '--colsPerRec']: + colsPerRec = int(value) + + if key in ['-t', '--numOfTb']: + numOfTb = int(value) + v_print("numOfTb is %d", numOfTb) + + if key in ['-n', '--numOfRec']: + numOfRec = int(value) + v_print("numOfRec is %d", numOfRec) + if numOfRec < 1: + print("FATAL: number of records must be larger than 0") + sys.exit(1) + + + if key in ['-c', '--config']: + configDir = value + v_print("config dir: %s", configDir) + + if key in ['-x', '--insertOnly']: + insertOnly = True + v_print("insert only: %d", insertOnly) + + if key in ['-O', '--outOfOrder']: + outOfOrder = int(value) + v_print("out of order is %d", outOfOrder) + + if key in ['-R', '--rateOOOO']: + rateOOOO = int(value) + v_print("the rate of out of order is %d", rateOOOO) + + if key in ['-D', '--deleteMethod']: + deleteMethod = int(value) + if (deleteMethod < 0) or (deleteMethod > 3): + print( + "specified delete method is %d, valid value 
is 0~3, set to default 0" % + deleteMethod) + deleteMethod = 0 + v_print("the delete method is %d", deleteMethod) + + if key in ['-v', '--verbose']: + verbose = True + + if key in ['-g', '--debug']: + debug = True + + if key in ['-y', '--skipPrompt']: + skipPrompt = True + + if verbose: + printConfig() + + if not skipPrompt: + input("Press any key to continue..") + + # establish connection first if native + if native: + v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir) + try: + conn = taos.connect( + host=host, + user=user, + password=password, + config=configDir) + v_print("conn: %s", str(conn.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + + try: + cursor = conn.cursor() + v_print("cursor:%d %s", id(cursor), str(cursor.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + conn.close() + sys.exit(1) + + # drop data only if delete method be set + if deleteMethod > 0: + if deleteMethod == 1: + drop_tables() + print("Drop tables done.") + elif deleteMethod == 2: + drop_stables() + print("Drop super tables done.") + elif deleteMethod == 3: + drop_databases() + print("Drop Database done.") + sys.exit(0) + + # create databases + drop_databases() + create_databases() + + # use last database + current_db = "%s%d" % (dbName, (numOfDb - 1)) + use_database() + + if measure: + start_time_begin = time.time() + + if numOfStb > 0: + create_stb() + if (autosubtable == False): + create_tb_using_stb() + else: + create_tb() + + if measure: + end_time = time.time() + print( + "Total time consumed {} seconds for create table.".format( + (end_time - start_time_begin))) + + if native: + cursor.close() + conn.close() + + # start insert data + if measure: + start_time = time.time() + + manager = Manager() + lock = manager.Lock() + pool = Pool(processes) + + begin = 0 + end = 0 + + quotient = numOfTb // processes + if quotient < 1: + processes = numOfTb + quotient = 1 + + remainder = numOfTb % processes + v_print( + "num of tables: %d, quotient: %d, remainder: %d", + numOfTb, + quotient, + remainder) + + for i in range(processes): + begin = end + + if i < remainder: + end = begin + quotient + 1 + else: + end = begin + quotient + pool.apply_async(insert_data_process, args=(lock, i, begin, end,)) + + pool.close() + pool.join() + time.sleep(1) + + if measure: + end_time = time.time() + print( + "Total time consumed {} seconds for insert data.".format( + (end_time - start_time))) + + + # query data + if queryCmd != "NO": + print("queryCmd: %s" % queryCmd) + query_data_process(queryCmd) + + if measure: + end_time = time.time() + print( + "Total time consumed {} seconds.".format( + (end_time - start_time_begin))) + + print("done") diff --git a/tests/pytest/alter/alter_debugFlag.py b/tests/pytest/alter/alter_debugFlag.py new file mode 100644 index 0000000000..38d972b582 --- /dev/null +++ b/tests/pytest/alter/alter_debugFlag.py @@ -0,0 +1,54 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import random +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + flagList=["debugflag", "cdebugflag", "tmrDebugFlag", "uDebugFlag", "rpcDebugFlag"] + + for flag in flagList: + tdSql.execute("alter local %s 131" % flag) + tdSql.execute("alter local %s 135" % flag) + tdSql.execute("alter local %s 143" % flag) + randomFlag = random.randint(100, 250) + if randomFlag != 131 and randomFlag != 135 and randomFlag != 143: + tdSql.error("alter local %s %d" % (flag, randomFlag)) + + tdSql.query("show dnodes") + dnodeId = tdSql.getData(0, 0) + + for flag in flagList: + tdSql.execute("alter dnode %d %s 131" % (dnodeId, flag)) + tdSql.execute("alter dnode %d %s 135" % (dnodeId, flag)) + tdSql.execute("alter dnode %d %s 143" % (dnodeId, flag)) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py index 48b0154361..828c348b14 100644 --- a/tests/pytest/alter/alter_table.py +++ b/tests/pytest/alter/alter_table.py @@ -126,7 +126,6 @@ class TDTestCase: for i in range(2, size): tdSql.checkData(0, i, self.rowNum * (size - i)) - tdSql.error("alter local debugflag 143") tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)") tdSql.execute("create table t0 using st tags(null)") diff --git a/tests/pytest/concurrent_inquiry.sh b/tests/pytest/concurrent_inquiry.sh new file mode 100755 index 0000000000..f426fbbcec --- /dev/null +++ b/tests/pytest/concurrent_inquiry.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# This is the script for us to try to cause the TDengine server or client to crash +# +# PREPARATION +# +# 1. Build an compile the TDengine source code that comes with this script, in the same directory tree +# 2. Please follow the direction in our README.md, and build TDengine in the build/ directory +# 3. Adjust the configuration file if needed under build/test/cfg/taos.cfg +# 4. Run the TDengine server instance: cd build; ./build/bin/taosd -c test/cfg +# 5. Make sure you have a working Python3 environment: run /usr/bin/python3 --version, and you should get 3.6 or above +# 6. Make sure you have the proper Python packages: # sudo apt install python3-setuptools python3-pip python3-distutils +# +# RUNNING THIS SCRIPT +# +# This script assumes the source code directory is intact, and that the binaries has been built in the +# build/ directory, as such, will will load the Python libraries in the directory tree, and also load +# the TDengine client shared library (so) file, in the build/directory, as evidenced in the env +# variables below. +# +# Running the script is simple, no parameter is needed (for now, but will change in the future). +# +# Happy Crashing... + + +# Due to the heavy path name assumptions/usage, let us require that the user be in the current directory +EXEC_DIR=`dirname "$0"` +if [[ $EXEC_DIR != "." 
]] +then + echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)" + exit -1 +fi + +CURR_DIR=`pwd` +IN_TDINTERNAL="community" +if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then + TAOS_DIR=$CURR_DIR/../../.. + TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1` + LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6,7|rev`/lib +else + TAOS_DIR=$CURR_DIR/../.. + TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1` + LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib +fi + +# Now getting ready to execute Python +# The following is the default of our standard dev env (Ubuntu 20.04), modify/adjust at your own risk +PYTHON_EXEC=python3.8 + +# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. +export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd) + +# Then let us set up the library path so that our compiled SO file can be loaded by Python +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR + +# Now we are all let, and let's see if we can find a crash. Note we pass all params +CONCURRENT_INQUIRY=concurrent_inquiry.py +if [[ $1 == '--valgrind' ]]; then + shift + export PYTHONMALLOC=malloc + VALGRIND_OUT=valgrind.out + VALGRIND_ERR=valgrind.err + # How to generate valgrind suppression file: https://stackoverflow.com/questions/17159578/generating-suppressions-for-memory-leaks + # valgrind --leak-check=full --gen-suppressions=all --log-fd=9 python3.8 ./concurrent_inquiry.py $@ 9>>memcheck.log + echo Executing under VALGRIND, with STDOUT/ERR going to $VALGRIND_OUT and $VALGRIND_ERR, please watch them from a different terminal. + valgrind \ + --leak-check=yes \ + --suppressions=crash_gen/valgrind_taos.supp \ + $PYTHON_EXEC \ + $CONCURRENT_INQUIRY $@ > $VALGRIND_OUT 2> $VALGRIND_ERR +elif [[ $1 == '--helgrind' ]]; then + shift + HELGRIND_OUT=helgrind.out + HELGRIND_ERR=helgrind.err + valgrind \ + --tool=helgrind \ + $PYTHON_EXEC \ + $CONCURRENT_INQUIRY $@ > $HELGRIND_OUT 2> $HELGRIND_ERR +else + $PYTHON_EXEC $CONCURRENT_INQUIRY $@ +fi + diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index fa45b2164c..fe1b54c579 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1280,6 +1280,7 @@ class Task(): 0x510, # vnode not in ready state 0x14, # db not ready, errno changed 0x600, # Invalid table ID, why? 
+ 0x218, # Table does not exist 1000 # REST catch-all error ]: return True # These are the ALWAYS-ACCEPTABLE ones diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index aee8f7502c..9c5ee6aae0 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -193,6 +193,7 @@ python3 ./test.py -f stream/table_n.py #alter table python3 ./test.py -f alter/alter_table_crash.py python3 ./test.py -f alter/alter_table.py +python3 ./test.py -f alter/alter_debugFlag.py # client python3 ./test.py -f client/client.py diff --git a/tests/pytest/functions/function_stddev.py b/tests/pytest/functions/function_stddev.py index 23df415aa3..a5b2d31dd1 100644 --- a/tests/pytest/functions/function_stddev.py +++ b/tests/pytest/functions/function_stddev.py @@ -44,12 +44,14 @@ class TDTestCase: # stddev verifacation tdSql.error("select stddev(ts) from test1") - tdSql.error("select stddev(col1) from test") - tdSql.error("select stddev(col2) from test") - tdSql.error("select stddev(col3) from test") - tdSql.error("select stddev(col4) from test") - tdSql.error("select stddev(col5) from test") - tdSql.error("select stddev(col6) from test") + + # stddev support super table now + # tdSql.error("select stddev(col1) from test") + # tdSql.error("select stddev(col2) from test") + # tdSql.error("select stddev(col3) from test") + # tdSql.error("select stddev(col4) from test") + # tdSql.error("select stddev(col5) from test") + # tdSql.error("select stddev(col6) from test") tdSql.error("select stddev(col7) from test1") tdSql.error("select stddev(col8) from test1") tdSql.error("select stddev(col9) from test1") diff --git a/tests/pytest/functions/function_stddev_restart.py b/tests/pytest/functions/function_stddev_restart.py index ec413b94b2..837f8bb406 100644 --- a/tests/pytest/functions/function_stddev_restart.py +++ b/tests/pytest/functions/function_stddev_restart.py @@ -39,12 +39,6 @@ class TDTestCase: # stddev verifacation tdSql.error("select stddev(ts) from test1") - tdSql.error("select stddev(col1) from test") - tdSql.error("select stddev(col2) from test") - tdSql.error("select stddev(col3) from test") - tdSql.error("select stddev(col4) from test") - tdSql.error("select stddev(col5) from test") - tdSql.error("select stddev(col6) from test") tdSql.error("select stddev(col7) from test1") tdSql.error("select stddev(col8) from test1") tdSql.error("select stddev(col9) from test1") diff --git a/tests/pytest/functions/function_stddev_td2555.py b/tests/pytest/functions/function_stddev_td2555.py new file mode 100644 index 0000000000..a4da7d5e29 --- /dev/null +++ b/tests/pytest/functions/function_stddev_td2555.py @@ -0,0 +1,91 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 100 + self.ts = 1537146000000 + self.clist1 = [] + self.clist2 = [] + self.clist3 = [] + self.clist4 = [] + self.clist5 = [] + self.clist6 = [] + + def getData(self): + for i in range(tdSql.queryRows): + for j in range(6): + exec('self.clist{}.append(tdSql.queryResult[i][j+1])'.format(j+1)) + + def run(self): + tdSql.prepare() + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20)) tags(cid int,gbid binary(20),loc nchar(20))''') + tdSql.execute("create table test1 using test tags(1,'beijing','北京')") + tdSql.execute("create table test2 using test tags(2,'shanghai','深圳')") + tdSql.execute("create table test3 using test tags(2,'shenzhen','深圳')") + tdSql.execute("create table test4 using test tags(1,'shanghai','上海')") + for j in range(4): + for i in range(self.rowNum): + tdSql.execute("insert into test%d values(now-%dh, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" + % (j+1,i, i + 1, i + 1, i + 1, i + 1, i + i * 0.1, i * 1.5, i % 2, i + 1, i + 1)) + + # stddev verifacation + tdSql.error("select stddev(ts) from test") + tdSql.error("select stddev(col7) from test") + tdSql.error("select stddev(col8) from test") + tdSql.error("select stddev(col9) from test") + + con_list = [ + ' where cid = 1 and ts >=now - 1d and ts =now - 1d and ts =now - 1d and ts =now - 1d and ts =now - 1d and ts now-1d') - self.checkRows(1,cmd) + self.checkRows(2,cmd) def stop(self): os.system("sudo timedatectl set-ntp true") - time.sleep(40) + os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=1))) + time.sleep(5) tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/pytest_1.sh b/tests/pytest/pytest_1.sh index ad26c48004..6905f0c61e 100755 --- a/tests/pytest/pytest_1.sh +++ b/tests/pytest/pytest_1.sh @@ -16,7 +16,7 @@ python3 ./test.py -f insert/nchar.py python3 ./test.py -f insert/nchar-unicode.py python3 ./test.py -f insert/multi.py python3 ./test.py -f insert/randomNullCommit.py -python3 insert/retentionpolicy.py +#python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py #python3 ./test.py -f insert/before_1970.py @@ -135,106 +135,4 @@ python3 ./test.py -f import_merge/importTORestart.py python3 ./test.py -f import_merge/importTPORestart.py python3 ./test.py -f import_merge/importTRestart.py python3 ./test.py -f import_merge/importInsertThenImport.py -python3 ./test.py -f import_merge/importCSV.py -# user -python3 ./test.py -f user/user_create.py -python3 ./test.py -f user/pass_len.py - -# stable -python3 ./test.py -f stable/query_after_reset.py - -#query -python3 ./test.py -f query/filter.py -python3 ./test.py -f query/filterCombo.py -python3 ./test.py -f query/queryNormal.py -python3 ./test.py -f query/queryError.py -python3 ./test.py -f query/filterAllIntTypes.py -python3 ./test.py -f 
query/filterFloatAndDouble.py -python3 ./test.py -f query/filterOtherTypes.py -python3 ./test.py -f query/querySort.py -python3 ./test.py -f query/queryJoin.py -python3 ./test.py -f query/select_last_crash.py -python3 ./test.py -f query/queryNullValueTest.py -python3 ./test.py -f query/queryInsertValue.py -python3 ./test.py -f query/queryConnection.py -python3 ./test.py -f query/queryCountCSVData.py -python3 ./test.py -f query/natualInterval.py -python3 ./test.py -f query/bug1471.py -#python3 ./test.py -f query/dataLossTest.py -python3 ./test.py -f query/bug1874.py -python3 ./test.py -f query/bug1875.py -python3 ./test.py -f query/bug1876.py -python3 ./test.py -f query/bug2218.py -python3 ./test.py -f query/bug2117.py -python3 ./test.py -f query/bug2118.py -python3 ./test.py -f query/bug2143.py -python3 ./test.py -f query/sliding.py -python3 ./test.py -f query/unionAllTest.py -python3 ./test.py -f query/bug2281.py -python3 ./test.py -f query/bug2119.py -python3 ./test.py -f query/isNullTest.py -python3 ./test.py -f query/queryWithTaosdKilled.py -python3 ./test.py -f query/floatCompare.py - -#stream -python3 ./test.py -f stream/metric_1.py -python3 ./test.py -f stream/metric_n.py -python3 ./test.py -f stream/new.py -python3 ./test.py -f stream/stream1.py -python3 ./test.py -f stream/stream2.py -#python3 ./test.py -f stream/parser.py -python3 ./test.py -f stream/history.py -python3 ./test.py -f stream/sys.py -python3 ./test.py -f stream/table_1.py -python3 ./test.py -f stream/table_n.py - -#alter table -python3 ./test.py -f alter/alter_table_crash.py - -# client -python3 ./test.py -f client/client.py -python3 ./test.py -f client/version.py -python3 ./test.py -f client/alterDatabase.py -python3 ./test.py -f client/noConnectionErrorTest.py - -# Misc -python3 testCompress.py -python3 testNoCompress.py -python3 testMinTablesPerVnode.py - -# functions -python3 ./test.py -f functions/function_avg.py -r 1 -python3 ./test.py -f functions/function_bottom.py -r 1 -python3 ./test.py -f functions/function_count.py -r 1 -python3 ./test.py -f functions/function_diff.py -r 1 -python3 ./test.py -f functions/function_first.py -r 1 -python3 ./test.py -f functions/function_last.py -r 1 -python3 ./test.py -f functions/function_last_row.py -r 1 -python3 ./test.py -f functions/function_leastsquares.py -r 1 -python3 ./test.py -f functions/function_max.py -r 1 -python3 ./test.py -f functions/function_min.py -r 1 -python3 ./test.py -f functions/function_operations.py -r 1 -python3 ./test.py -f functions/function_percentile.py -r 1 -python3 ./test.py -f functions/function_spread.py -r 1 -python3 ./test.py -f functions/function_stddev.py -r 1 -python3 ./test.py -f functions/function_sum.py -r 1 -python3 ./test.py -f functions/function_top.py -r 1 -python3 ./test.py -f functions/function_twa.py -r 1 -python3 ./test.py -f functions/function_twa_test2.py -python3 queryCount.py -python3 ./test.py -f query/queryGroupbyWithInterval.py -python3 client/twoClients.py -python3 test.py -f query/queryInterval.py -python3 test.py -f query/queryFillTest.py - -# tools -python3 test.py -f tools/taosdemoTest.py -python3 test.py -f tools/taosdumpTest.py -python3 test.py -f tools/lowaTest.py -python3 test.py -f tools/taosdemoTest2.py - -# subscribe -python3 test.py -f subscribe/singlemeter.py -#python3 test.py -f subscribe/stability.py -python3 test.py -f subscribe/supertable.py - +python3 ./test.py -f import_merge/importCSV.py \ No newline at end of file diff --git a/tests/pytest/pytest_2.sh b/tests/pytest/pytest_2.sh index 
4ec517a0bf..d152ed85fb 100755 --- a/tests/pytest/pytest_2.sh +++ b/tests/pytest/pytest_2.sh @@ -7,15 +7,10 @@ python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last.py python3 ./test.py -f update/merge_commit_data.py -python3 ./test.py -f update/merge_commit_data-0.py + python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2_update0.py python3 ./test.py -f update/merge_commit_last-0.py python3 ./test.py -f update/merge_commit_last.py python3 ./test.py -f update/bug_td2279.py -# wal -python3 ./test.py -f wal/addOldWalTest.py - -# function -python3 ./test.py -f functions/all_null_value.py \ No newline at end of file diff --git a/tests/pytest/pytest_3.sh b/tests/pytest/pytest_3.sh new file mode 100755 index 0000000000..b1e2539fa9 --- /dev/null +++ b/tests/pytest/pytest_3.sh @@ -0,0 +1,90 @@ +#!/bin/bash +ulimit -c unlimited + + +python3 ./test.py -f insert/randomNullCommit.py + +# user +python3 ./test.py -f user/user_create.py +python3 ./test.py -f user/pass_len.py + +# stable +python3 ./test.py -f stable/query_after_reset.py + +#query +python3 ./test.py -f query/filter.py +python3 ./test.py -f query/filterCombo.py +python3 ./test.py -f query/queryNormal.py +python3 ./test.py -f query/queryError.py +python3 ./test.py -f query/filterAllIntTypes.py +python3 ./test.py -f query/filterFloatAndDouble.py +python3 ./test.py -f query/filterOtherTypes.py +python3 ./test.py -f query/querySort.py +python3 ./test.py -f query/queryJoin.py +python3 ./test.py -f query/select_last_crash.py +python3 ./test.py -f query/queryNullValueTest.py +python3 ./test.py -f query/queryInsertValue.py +python3 ./test.py -f query/queryConnection.py +python3 ./test.py -f query/queryCountCSVData.py +python3 ./test.py -f query/natualInterval.py +python3 ./test.py -f query/bug1471.py +#python3 ./test.py -f query/dataLossTest.py +python3 ./test.py -f query/bug1874.py +python3 ./test.py -f query/bug1875.py +python3 ./test.py -f query/bug1876.py +python3 ./test.py -f query/bug2218.py +python3 ./test.py -f query/bug2117.py +python3 ./test.py -f query/bug2118.py +python3 ./test.py -f query/bug2143.py +python3 ./test.py -f query/sliding.py +python3 ./test.py -f query/unionAllTest.py +python3 ./test.py -f query/bug2281.py +python3 ./test.py -f query/bug2119.py +python3 ./test.py -f query/isNullTest.py +python3 ./test.py -f query/queryWithTaosdKilled.py +python3 ./test.py -f query/floatCompare.py + +#stream +python3 ./test.py -f stream/metric_1.py +python3 ./test.py -f stream/metric_n.py +python3 ./test.py -f stream/new.py +python3 ./test.py -f stream/stream1.py +python3 ./test.py -f stream/stream2.py +#python3 ./test.py -f stream/parser.py +python3 ./test.py -f stream/history.py +python3 ./test.py -f stream/sys.py +python3 ./test.py -f stream/table_1.py +python3 ./test.py -f stream/table_n.py + +#alter table +python3 ./test.py -f alter/alter_table_crash.py + +# client +python3 ./test.py -f client/client.py +python3 ./test.py -f client/version.py +python3 ./test.py -f client/alterDatabase.py +python3 ./test.py -f client/noConnectionErrorTest.py + +# Misc +python3 testCompress.py +python3 testNoCompress.py +python3 testMinTablesPerVnode.py + + +python3 queryCount.py +python3 ./test.py -f query/queryGroupbyWithInterval.py +python3 client/twoClients.py +python3 test.py -f query/queryInterval.py +python3 test.py -f query/queryFillTest.py + +# tools +python3 test.py -f tools/taosdemoTest.py +python3 test.py -f 
tools/taosdumpTest.py +python3 test.py -f tools/lowaTest.py +#python3 test.py -f tools/taosdemoTest2.py + +# subscribe +python3 test.py -f subscribe/singlemeter.py +#python3 test.py -f subscribe/stability.py +python3 test.py -f subscribe/supertable.py + diff --git a/tests/pytest/pytest_4.sh b/tests/pytest/pytest_4.sh new file mode 100755 index 0000000000..6e201d6885 --- /dev/null +++ b/tests/pytest/pytest_4.sh @@ -0,0 +1,26 @@ +python3 ./test.py -f update/merge_commit_data-0.py +# wal +python3 ./test.py -f wal/addOldWalTest.py + +# function +python3 ./test.py -f functions/all_null_value.py +# functions +python3 ./test.py -f functions/function_avg.py -r 1 +python3 ./test.py -f functions/function_bottom.py -r 1 +python3 ./test.py -f functions/function_count.py -r 1 +python3 ./test.py -f functions/function_diff.py -r 1 +python3 ./test.py -f functions/function_first.py -r 1 +python3 ./test.py -f functions/function_last.py -r 1 +python3 ./test.py -f functions/function_last_row.py -r 1 +python3 ./test.py -f functions/function_leastsquares.py -r 1 +python3 ./test.py -f functions/function_max.py -r 1 +python3 ./test.py -f functions/function_min.py -r 1 +python3 ./test.py -f functions/function_operations.py -r 1 +python3 ./test.py -f functions/function_percentile.py -r 1 +python3 ./test.py -f functions/function_spread.py -r 1 +python3 ./test.py -f functions/function_stddev.py -r 1 +python3 ./test.py -f functions/function_sum.py -r 1 +python3 ./test.py -f functions/function_top.py -r 1 +python3 ./test.py -f functions/function_twa.py -r 1 +python3 ./test.py -f functions/function_twa_test2.py +python3 ./test.py -f functions/function_stddev_td2555.py \ No newline at end of file diff --git a/tests/pytest/query/query.py b/tests/pytest/query/query.py index 756aa0eda9..8cec38780e 100644 --- a/tests/pytest/query/query.py +++ b/tests/pytest/query/query.py @@ -111,6 +111,18 @@ class TDTestCase: tdSql.query("select * from tb where c5 = 'true' ") tdSql.checkRows(5) + # For jira: https://jira.taosdata.com:18080/browse/TD-2850 + tdSql.execute("create database 'Test' ") + tdSql.execute("use 'Test' ") + tdSql.execute("create table 'TB'(ts timestamp, 'Col1' int) tags('Tag1' int)") + tdSql.execute("insert into 'Tb0' using tb tags(1) values(now, 1)") + tdSql.query("select * from tb") + tdSql.checkRows(1) + + tdSql.query("select * from tb0") + tdSql.checkRows(1) + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/query/queryGroupbySort.py b/tests/pytest/query/queryGroupbySort.py index 80f6d55aae..c2649a86db 100644 --- a/tests/pytest/query/queryGroupbySort.py +++ b/tests/pytest/query/queryGroupbySort.py @@ -45,6 +45,10 @@ class TDTestCase: tdSql.checkData(0, 0, "2018-10-03 14:38:05") tdSql.checkData(1, 0, "2018-10-03 14:38:15") tdSql.checkData(2, 0, "2018-10-03 14:38:16") + + tdSql.error("SELECT SUM(current) as s, AVG(voltage) FROM meters WHERE groupId > 1 INTERVAL(1s) GROUP BY location order by s ASC") + + tdSql.error("SELECT SUM(current) as s, AVG(voltage) FROM meters WHERE groupId > 1 INTERVAL(1s) GROUP BY location order by s DESC") def stop(self): tdSql.close() diff --git a/tests/pytest/stable/insert.py b/tests/pytest/stable/insert.py index 3d37e6726c..0ef816da8d 100644 --- a/tests/pytest/stable/insert.py +++ b/tests/pytest/stable/insert.py @@ -47,6 +47,40 @@ class TDTestCase: tdSql.query("select * from db.st where dev='dev_02'") tdSql.checkRows(1) + #For: https://jira.taosdata.com:18080/browse/TD-2671 + print("==============step3") + tdSql.execute( + 
"create stable if not exists stb (ts timestamp, tagtype int) tags(dev nchar(50))") + tdSql.execute( + 'CREATE TABLE if not exists dev_01 using stb tags("dev_01")') + tdSql.execute( + 'CREATE TABLE if not exists dev_02 using stb tags("dev_02")') + + print("==============step4") + + tdSql.execute( + """INSERT INTO dev_01(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1), + ('2020-05-13 10:00:00.001', 1) + dev_02 VALUES('2020-05-13 10:00:00.001', 1)""") + + tdSql.query("select * from db.stb where dev='dev_01'") + tdSql.checkRows(2) + + tdSql.query("select * from db.stb where dev='dev_02'") + tdSql.checkRows(1) + + tdSql.query("describe db.stb") + tdSql.checkRows(3) + + tdSql.execute("alter stable db.stb add tag t1 int") + tdSql.query("describe db.stb") + tdSql.checkRows(4) + + tdSql.execute("drop stable db.stb") + tdSql.query("show stables") + tdSql.checkRows(1) + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTest2.py index 7d5627be43..1e492aa8fc 100644 --- a/tests/pytest/tools/taosdemoTest2.py +++ b/tests/pytest/tools/taosdemoTest2.py @@ -31,10 +31,28 @@ class TDTestCase: def insertDataAndAlterTable(self, threadID): if(threadID == 0): - os.system("yes | taosdemo -t %d -n %d" % (self.numberOfTables, self.numberOfRecords)) + os.system("yes | taosdemo -t %d -n %d -x" % (self.numberOfTables, self.numberOfRecords)) if(threadID == 1): + time.sleep(2) print("use test") - tdSql.execute("use test") + tdSql.execute("use test") + # check if all the tables have heen created + while True: + tdSql.query("show tables") + rows = tdSql.queryRows + print("number of tables: %d" % rows) + if(rows == self.numberOfTables): + break + time.sleep(1) + # check if there are any records in the last created table + while True: + print("query started") + tdSql.query("select * from test.t9") + rows = tdSql.queryRows + print("number of records: %d" % rows) + if(rows > 0): + break + time.sleep(1) print("alter table test.meters add column f4 int") tdSql.execute("alter table test.meters add column f4 int") print("insert into test.t0 values (now, 1, 2, 3, 4)") @@ -46,8 +64,7 @@ class TDTestCase: t1 = threading.Thread(target=self.insertDataAndAlterTable, args=(0, )) t2 = threading.Thread(target=self.insertDataAndAlterTable, args=(1, )) - t1.start() - time.sleep(2) + t1.start() t2.start() t1.join() t2.join() diff --git a/tests/script/general/http/restful_full.sim b/tests/script/general/http/restful_full.sim index 12f8cc5c38..05f6a9ac9c 100644 --- a/tests/script/general/http/restful_full.sim +++ b/tests/script/general/http/restful_full.sim @@ -88,13 +88,13 @@ print =============== step2 - no db #11 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql print 11-> $system_content -if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"data":[],"rows":0}@ then +if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"data":[],"rows":0}@ then return -1 endi system_content curl -H 'Authorization: Taosd 
/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql print 12-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then return -1 endi @@ -160,7 +160,7 @@ endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d1.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 22-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then return -1 endi @@ -214,13 +214,13 @@ endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d2' 127.0.0.1:7111/rest/sql print 28-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then return -1 endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d2.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql print 29-> $system_content -if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":0}@ then +if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[0]],"rows":1}@ then return -1 endi diff --git a/tests/script/general/parser/alter.sim b/tests/script/general/parser/alter.sim index 56a677cc73..e604d2122e 100644 --- a/tests/script/general/parser/alter.sim +++ b/tests/script/general/parser/alter.sim @@ -133,7 +133,7 @@ sleep 100 # return -1 #endi #sql alter table tb1 drop column c3 -#sleep 2000 +#sleep 500 #sql insert into tb1 values (now, 2, 'taos') #sleep 30000 #sql select * from strm @@ -144,7 +144,7 @@ sleep 100 # return -1 #endi #sql alter table tb1 add column c3 int -#sleep 2000 +#sleep 500 #sql insert into tb1 values (now, 3, 'taos', 3); #sleep 100 #sql select * from strm diff --git a/tests/script/general/parser/auto_create_tb.sim b/tests/script/general/parser/auto_create_tb.sim index e19eb0c667..926eb75476 100644 --- a/tests/script/general/parser/auto_create_tb.sim +++ b/tests/script/general/parser/auto_create_tb.sim @@ -208,7 +208,7 @@ endi print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/col_arithmetic_operation.sim b/tests/script/general/parser/col_arithmetic_operation.sim index 9fd690a444..3911c2dca6 100644 --- a/tests/script/general/parser/col_arithmetic_operation.sim +++ b/tests/script/general/parser/col_arithmetic_operation.sim @@ -105,12 +105,12 @@ run general/parser/col_arithmetic_query.sim #======================================= all in files query ======================================= print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect -sleep 500c +sleep 5000 run general/parser/col_arithmetic_query.sim diff --git 
a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim index 53e2c98b56..3f1a430e2b 100644 --- a/tests/script/general/parser/col_arithmetic_query.sim +++ b/tests/script/general/parser/col_arithmetic_query.sim @@ -41,10 +41,10 @@ if $data00 != 0.000000000 then return -1 endi -if $data01 != -nan then - print expect -nan, actual: $data01 - return -1 -endi +#if $data01 != -nan then +# print expect -nan, actual: $data01 +# return -1 +#endi if $data10 != 0.666666667 then return -1 @@ -185,9 +185,9 @@ if $data00 != 0.000000000 then return -1 endi -if $data01 != -nan then - return -1 -endi +#if $data01 != -nan then +# return -1 +#endi if $data02 != 0.000000000 then return -1 diff --git a/tests/script/general/parser/commit.sim b/tests/script/general/parser/commit.sim index 533fbf48f0..4085ef620d 100644 --- a/tests/script/general/parser/commit.sim +++ b/tests/script/general/parser/commit.sim @@ -82,7 +82,7 @@ endw print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start sleep 100 print ================== server restart completed diff --git a/tests/script/general/parser/dbtbnameValidate.sim b/tests/script/general/parser/dbtbnameValidate.sim index 48c5d4a1f9..5fc67334c4 100644 --- a/tests/script/general/parser/dbtbnameValidate.sim +++ b/tests/script/general/parser/dbtbnameValidate.sim @@ -12,24 +12,24 @@ sql create database 'abc123' sql create database '_ab1234' sql create database 'ABC123' sql create database '_ABC123' -sql create database 'aABb123 ' -sql create database ' xyz ' -sql create database ' XYZ ' +sql_error create database 'aABb123 ' +sql_error create database ' xyz ' +sql_error create database ' XYZ ' sql use 'abc123' sql use '_ab1234' sql use 'ABC123' sql use '_ABC123' -sql use 'aABb123' -sql use ' xyz ' -sql use ' XYZ ' +sql_error use 'aABb123' +sql_error use ' xyz ' +sql_error use ' XYZ ' sql drop database 'abc123' sql drop database '_ab1234' sql_error drop database 'ABC123' sql drop database '_ABC123' -sql drop database 'aABb123' -sql drop database ' xyz ' +sql_error drop database 'aABb123' +sql_error drop database ' xyz ' sql_error drop database ' XYZ ' diff --git a/tests/script/general/parser/first_last.sim b/tests/script/general/parser/first_last.sim index 9c1f0774ba..aeff740a5f 100644 --- a/tests/script/general/parser/first_last.sim +++ b/tests/script/general/parser/first_last.sim @@ -77,7 +77,7 @@ run general/parser/first_last_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index c4d9d910e4..ca020c4063 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -407,4 +407,359 @@ sql insert into tm13 values('2020-1-1 1:1:1', 0); sql select count(*) from m1 where ts='2020-1-1 1:1:1' interval(1h) group by tbname; if $rows != 2 then return -1 -endi \ No newline at end of file +endi + +sql drop table m1; +sql drop table if exists tm1; +sql drop table if exists tm2; +sql create table m1(ts timestamp, k double, b double, c int, d smallint, e int unsigned) tags(a int); +sql create table tm1 using m1 tags(1); +sql create table tm2 using m1 tags(2); +sql insert into tm1 values('2021-01-27 
22:22:39.294', 1, 10, NULL, 110, 123) ('2021-01-27 22:22:40.294', 2, 20, NULL, 120, 124) ('2021-01-27 22:22:41.294', 3, 30, NULL, 130, 125)('2021-01-27 22:22:43.294', 4, 40, NULL, 140, 126)('2021-01-27 22:22:44.294', 5, 50, NULL, 150, 127); +sql insert into tm2 values('2021-01-27 22:22:40.688', 5, 101, NULL, 210, 321) ('2021-01-27 22:22:41.688', 5, 102, NULL, 220, 322) ('2021-01-27 22:22:42.688', 5, 103, NULL, 230, 323)('2021-01-27 22:22:43.688', 5, 104, NULL, 240, 324)('2021-01-27 22:22:44.688', 5, 105, NULL, 250, 325)('2021-01-27 22:22:45.688', 5, 106, NULL, 260, 326); +sql select stddev(k) from m1 +if $rows != 1 then + return -1 +endi + +if $data00 != 1.378704626 then + return -1 +endi + +sql select stddev(c) from m1 +if $rows != 0 then + return -1 +endi + +sql select stddev(k), stddev(c) from m1 +if $rows != 1 then + return -1 +endi + +if $data00 != 1.378704626 then + return -1 +endi + +if $data01 != NULL then + return -1; +endi + +sql select stddev(b),stddev(b),stddev(k) from m1; +if $rows != 1 then + return -1 +endi + +if $data00 != 37.840465463 then + return -1 +endi + +if $data01 != 37.840465463 then + return -1 +endi + +if $data02 != 1.378704626 then + return -1 +endi + +sql select stddev(k), stddev(b) from m1 group by a +if $rows != 2 then + return -1 +endi + +if $data00 != 1.414213562 then + return -1 +endi + +if $data01 != 14.142135624 then + return -1 +endi + +if $data02 != 1 then + return -1 +endi + +if $data10 != 0.000000000 then + return -1 +endi + +if $data11 != 1.707825128 then + return -1 +endi + +if $data12 != 2 then + return -1 +endi + +sql select stddev(k), stddev(b) from m1 where a= 1 group by a +if $rows != 1 then + return -1 +endi + +if $data00 != 1.414213562 then + return -1 +endi + +if $data01 != 14.142135624 then + return -1 +endi + +if $data02 != 1 then + return -1 +endi + +sql select stddev(k), stddev(b) from m1 group by tbname +if $rows != 2 then + return -1 +endi + +if $data00 != 1.414213562 then + return -1 +endi + +if $data01 != 14.142135624 then + return -1 +endi + +if $data02 != @tm1@ then + return -1 +endi + +if $data10 != 0.000000000 then + return -1 +endi + +if $data11 != 1.707825128 then + return -1 +endi + +if $data12 != @tm2@ then + return -1 +endi + +sql select stddev(k), stddev(b) from m1 group by tbname,a +if $rows != 2 then + return -1 +endi + +sql select stddev(k), stddev(b), stddev(c) from m1 group by tbname,a +if $rows != 2 then + return -1 +endi + +if $data00 != 1.414213562 then + return -1 +endi + +if $data01 != 14.142135624 then + return -1 +endi + +if $data02 != NULL then + return -1 +endi + +if $data03 != @tm1@ then + return -1 +endi + +if $data04 != 1 then + return -1 +endi + +if $data10 != 0.000000000 then + return -1 +endi + +if $data11 != 1.707825128 then + return -1 +endi + +if $data12 != NULL then + return -1 +endi + +if $data13 != @tm2@ then + return -1 +endi + +if $data14 != 2 then + return -1 +endi + +sql select stddev(k), stddev(b), stddev(c) from m1 interval(10s) group by tbname,a +if $rows != 3 then + return -1 +endi + +if $data01 != 0.000000000 then + return -1 +endi + +if $data02 != 0.000000000 then + return -1 +endi + +if $data03 != NULL then + return -1 +endi + +if $data04 != @tm1@ then + return -1 +endi + +if $data05 != 1 then + return -1 +endi + +if $data11 != 1.118033989 then + return -1 +endi + +if $data12 != 11.180339887 then + return -1 +endi + +if $data13 != NULL then + return -1 +endi + +if $data14 != @tm1@ then + return -1 +endi + +if $data22 != 1.707825128 then + return -1 +endi + +if $data23 != NULL then + 
return -1 +endi + +if $data24 != @tm2@ then + return -1 +endi + +if $data25 != 2 then + return -1 +endi + +sql select count(*), first(b), stddev(b), stddev(c) from m1 interval(10s) group by a +if $rows != 3 then + return -1 +endi + +if $data00 != @21-01-27 22:22:30.000@ then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data02 != 10.000000000 then + return -1 +endi + +if $data03 != 0.000000000 then + return -1 +endi + +if $data04 != NULL then + return -1 +endi + +if $data05 != 1 then + return -1 +endi + +if $data12 != 20.000000000 then + return -1 +endi + +if $data13 != 11.180339887 then + return -1 +endi + +if $data14 != NULL then + return -1 +endi + +if $data23 != 1.707825128 then + return -1 +endi + +sql select count(*), first(b), stddev(b), stddev(c) from m1 interval(10s) group by tbname,a +if $rows != 3 then + return -1 +endi + +if $data23 != 1.707825128 then + return -1 +endi + +if $data25 != @tm2@ then + return -1 +endi + +sql select count(*), stddev(b), stddev(b)+20, stddev(c) from m1 interval(10s) group by tbname,a +if $rows != 3 then + return -1 +endi + +if $data02 != 0.000000000 then + return -1 +endi + +if $data03 != 20.000000000 then + return -1 +endi + +if $data13 != 31.180339887 then + return -1 +endi + +if $data14 != NULL then + return -1 +endi + +sql select count(*), first(b), stddev(b)+first(b), stddev(c) from m1 interval(10s) group by tbname,a +if $rows != 3 then + return -1 +endi + +if $data02 != 10.000000000 then + return -1 +endi + +if $data03 != 10.000000000 then + return -1 +endi + +if $data12 != 20.000000000 then + return -1 +endi + +if $data13 != 31.180339887 then + return -1 +endi + +if $data22 != 101.000000000 then + return -1 +endi + +if $data23 != 102.707825128 then + return -1 +endi + +sql select stddev(e),stddev(k) from m1 where a=1 +if $rows != 1 then + return -1 +endi + +if $data00 != 1.414213562 then + return -1 +endi + +if $data01 != 1.414213562 then + return -1 +endi diff --git a/tests/script/general/parser/import.sim b/tests/script/general/parser/import.sim index 83751dc616..d626f4fa74 100644 --- a/tests/script/general/parser/import.sim +++ b/tests/script/general/parser/import.sim @@ -25,15 +25,15 @@ sql use $db sql create table tb (ts timestamp, c1 int, c2 timestamp) sql insert into tb values ('2019-05-05 11:30:00.000', 1, now) sql insert into tb values ('2019-05-05 12:00:00.000', 1, now) -sleep 2000 +sleep 500 sql import into tb values ('2019-05-05 11:00:00.000', -1, now) -sleep 2000 +sleep 500 sql import into tb values ('2019-05-05 11:59:00.000', -1, now) -sleep 2000 +sleep 500 sql import into tb values ('2019-05-04 08:00:00.000', -1, now) -sleep 2000 +sleep 500 sql import into tb values ('2019-05-04 07:59:00.000', -1, now) -sleep 2000 +sleep 500 sql select * from tb if $rows != 6 then @@ -60,7 +60,7 @@ endi print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/import_commit1.sim b/tests/script/general/parser/import_commit1.sim index 27be5560c5..f330fe4cd9 100644 --- a/tests/script/general/parser/import_commit1.sim +++ b/tests/script/general/parser/import_commit1.sim @@ -40,7 +40,7 @@ while $x < $rowNum endw print ====== tables created -sleep 2000 +sleep 500 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit2.sim b/tests/script/general/parser/import_commit2.sim index 
72ee2b3844..47b30acb49 100644 --- a/tests/script/general/parser/import_commit2.sim +++ b/tests/script/general/parser/import_commit2.sim @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 2000 +sleep 500 $ts = $ts0 + $delta $ts = $ts + 1 diff --git a/tests/script/general/parser/import_commit3.sim b/tests/script/general/parser/import_commit3.sim index a9f021b20c..1e041375de 100644 --- a/tests/script/general/parser/import_commit3.sim +++ b/tests/script/general/parser/import_commit3.sim @@ -39,7 +39,7 @@ while $x < $rowNum endw print ====== tables created -sleep 2000 +sleep 500 $ts = $ts + 1 sql insert into $tb values ( $ts , -1, -1, -1, -1, -1) @@ -47,7 +47,7 @@ $ts = $ts0 + $delta $ts = $ts + 1 sql import into $tb values ( $ts , -2, -2, -2, -2, -2) -sleep 2000 +sleep 500 sql show databases diff --git a/tests/script/general/parser/import_file.sim b/tests/script/general/parser/import_file.sim index 6b4dd07c79..e50fc92e28 100644 --- a/tests/script/general/parser/import_file.sim +++ b/tests/script/general/parser/import_file.sim @@ -3,9 +3,9 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 2000 +sleep 500 sql connect -sleep 2000 +sleep 500 sql drop database if exists indb diff --git a/tests/script/general/parser/insert_multiTbl.sim b/tests/script/general/parser/insert_multiTbl.sim index 39223d84e3..e9ee4fcf98 100644 --- a/tests/script/general/parser/insert_multiTbl.sim +++ b/tests/script/general/parser/insert_multiTbl.sim @@ -4,7 +4,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/exec.sh -n dnode1 -s start -sleep 2000 +sleep 500 sql connect sleep 100 print ======================== dnode1 start diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 36a643c424..13b6a08024 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -59,7 +59,7 @@ run general/parser/interp_test.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim index 682a6cd5df..d1eadfb67a 100644 --- a/tests/script/general/parser/lastrow.sim +++ b/tests/script/general/parser/lastrow.sim @@ -62,7 +62,7 @@ run general/parser/lastrow_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim index 22d52c4257..17636dfb74 100644 --- a/tests/script/general/parser/limit.sim +++ b/tests/script/general/parser/limit.sim @@ -66,7 +66,7 @@ run general/parser/limit_stb.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/limit1.sim b/tests/script/general/parser/limit1.sim index 0597723490..2c40f0af2b 100644 --- a/tests/script/general/parser/limit1.sim +++ b/tests/script/general/parser/limit1.sim @@ -61,7 +61,7 @@ 
run general/parser/limit1_stb.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/limit1_tblocks100.sim b/tests/script/general/parser/limit1_tblocks100.sim index 43519d2df4..45ead58ba0 100644 --- a/tests/script/general/parser/limit1_tblocks100.sim +++ b/tests/script/general/parser/limit1_tblocks100.sim @@ -61,7 +61,7 @@ run general/parser/limit1_stb.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/limit2.sim b/tests/script/general/parser/limit2.sim index ddc5c10362..0e7e13b6de 100644 --- a/tests/script/general/parser/limit2.sim +++ b/tests/script/general/parser/limit2.sim @@ -69,7 +69,7 @@ print ====== tables created print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/limit2_query.sim b/tests/script/general/parser/limit2_query.sim index 1e8077d26e..9fe287960d 100644 --- a/tests/script/general/parser/limit2_query.sim +++ b/tests/script/general/parser/limit2_query.sim @@ -143,6 +143,97 @@ if $data11 != -1 then return -1 endi +sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 8200 +if $rows != 8200 then + return -1 +endi + +sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10 offset 8190; +if $rows != 10 then + return -1 +endi + +if $data00 != @18-10-15 19:30:00.000@ then + return -1 +endi + +if $data01 != 5 then + return -1 +endi + +if $data10 != @18-10-15 19:35:00.000@ then + return -1 +endi + +if $data11 != -1000 then + return -1 +endi + +if $data20 != @18-10-15 19:40:00.000@ then + return -1 +endi + +if $data21 != 6 then + return -1 +endi + +if $data30 != @18-10-15 19:45:00.000@ then + return -1 +endi + +if $data31 != -1000 then + return -1 +endi + +sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10 offset 10001; +if $rows != 10 then + return -1 +endi + +if $data00 != @18-10-22 02:25:00.000@ then + return -1 +endi + +if $data01 != -1000 then + return -1 +endi + +if $data10 != @18-10-22 02:30:00.000@ then + return -1 +endi + +if $data11 != 1 then + return -1 +endi + +if $data20 != @18-10-22 02:35:00.000@ then + return -1 +endi + +if $data21 != -1000 then + return -1 +endi + +if $data30 != @18-10-22 02:40:00.000@ then + return -1 +endi + +if $data31 != 2 then + return -1 +endi + +sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10000 offset 10001; +print ====> needs to validate the last row result +if $rows != 9998 then + return -1 +endi + + +sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 100 offset 20001; +if $rows != 0 then + return -1 +endi + # tb + interval + fill(linear) + limit offset $limit = $rowNum $offset = $limit / 2 diff --git a/tests/script/general/parser/mixed_blocks.sim 
b/tests/script/general/parser/mixed_blocks.sim index 79bf65d147..8208963858 100644 --- a/tests/script/general/parser/mixed_blocks.sim +++ b/tests/script/general/parser/mixed_blocks.sim @@ -59,7 +59,7 @@ sql show databases print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed @@ -154,7 +154,7 @@ sql insert into t2 values('2020-1-1 1:5:1', 99); print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql select ts from m1 where ts='2020-1-1 1:5:1' diff --git a/tests/script/general/parser/projection_limit_offset.sim b/tests/script/general/parser/projection_limit_offset.sim index e8a4c75a12..a92493b7f4 100644 --- a/tests/script/general/parser/projection_limit_offset.sim +++ b/tests/script/general/parser/projection_limit_offset.sim @@ -334,6 +334,9 @@ sql insert into tm0 values(10000, 1) (20000, 2)(30000, 3) (40000, NULL) (50000, #=============================tbase-1205 sql select count(*) from tm1 where ts= now -1d interval(1h) fill(NULL); +if $rows != 0 then + return -1 +endi print ===================>TD-1834 sql select * from tm0 where ts>11000 and ts< 20000 order by ts asc @@ -409,7 +412,7 @@ sql_error select k, sum(k)+1 from tm0; print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/selectResNum.sim b/tests/script/general/parser/selectResNum.sim index 8f18a41d41..20b5024478 100644 --- a/tests/script/general/parser/selectResNum.sim +++ b/tests/script/general/parser/selectResNum.sim @@ -118,7 +118,7 @@ endw print ====== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ====== server restart completed sleep 100 diff --git a/tests/script/general/parser/select_from_cache_disk.sim b/tests/script/general/parser/select_from_cache_disk.sim index 36a749cc3c..3d2cc0b700 100644 --- a/tests/script/general/parser/select_from_cache_disk.sim +++ b/tests/script/general/parser/select_from_cache_disk.sim @@ -35,7 +35,7 @@ sql insert into $tb values ('2018-09-17 09:00:00.030', 3) print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/set_tag_vals.sim b/tests/script/general/parser/set_tag_vals.sim index 0b8ffd946f..92380ace84 100644 --- a/tests/script/general/parser/set_tag_vals.sim +++ b/tests/script/general/parser/set_tag_vals.sim @@ -61,7 +61,7 @@ while $i < $tbNum endw print ====== tables created -sleep 2000 +sleep 500 sql show tables if $rows != $tbNum then diff --git a/tests/script/general/parser/single_row_in_tb.sim b/tests/script/general/parser/single_row_in_tb.sim index 651f44a3a4..bc93629041 100644 --- a/tests/script/general/parser/single_row_in_tb.sim +++ b/tests/script/general/parser/single_row_in_tb.sim @@ -32,7 +32,7 @@ run general/parser/single_row_in_tb_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n 
dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/slimit.sim b/tests/script/general/parser/slimit.sim index 426104c168..bfb97b5261 100644 --- a/tests/script/general/parser/slimit.sim +++ b/tests/script/general/parser/slimit.sim @@ -97,7 +97,7 @@ run general/parser/slimit_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/slimit1.sim b/tests/script/general/parser/slimit1.sim index 85cbe51aad..901da4cab2 100644 --- a/tests/script/general/parser/slimit1.sim +++ b/tests/script/general/parser/slimit1.sim @@ -56,7 +56,7 @@ run general/parser/slimit1_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/slimit_alter_tags.sim b/tests/script/general/parser/slimit_alter_tags.sim index 1073e0a3cc..ad557e891b 100644 --- a/tests/script/general/parser/slimit_alter_tags.sim +++ b/tests/script/general/parser/slimit_alter_tags.sim @@ -93,7 +93,7 @@ if $data02 != tb0 then return -1 endi -sleep 2000 +sleep 500 sql reset query cache sql select count(*), first(ts) from stb group by tg_added order by tg_added asc slimit 5 soffset 3 if $rows != 5 then @@ -171,7 +171,7 @@ endi print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/tbnameIn.sim b/tests/script/general/parser/tbnameIn.sim index 65ed1ed65d..2ee5f38ab1 100644 --- a/tests/script/general/parser/tbnameIn.sim +++ b/tests/script/general/parser/tbnameIn.sim @@ -67,7 +67,7 @@ run general/parser/tbnameIn_query.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 1868ff9683..255389a2df 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -106,4 +106,4 @@ run general/parser/sliding.sim sleep 100 run general/parser/function.sim sleep 100 -run general/parse/stableOp.sim +run general/parser/stableOp.sim diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim index 57378331e8..f5c78d07a1 100644 --- a/tests/script/general/parser/topbot.sim +++ b/tests/script/general/parser/topbot.sim @@ -128,7 +128,7 @@ sql insert into test values(29999, 1)(70000, 2)(80000, 3) print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start print ================== server restart completed sql connect diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 1f148cbb2c..5f9e0ec208 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -324,7 
+324,7 @@ while $i < 1 endw system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 2000 +sleep 500 system sh/exec.sh -n dnode1 -s start diff --git a/tests/script/jenkins/basic_1.txt b/tests/script/jenkins/basic_1.txt index 7f7ef18368..1724576734 100644 --- a/tests/script/jenkins/basic_1.txt +++ b/tests/script/jenkins/basic_1.txt @@ -1,44 +1,3 @@ - -./test.sh -f general/compute/avg.sim -./test.sh -f general/compute/bottom.sim -./test.sh -f general/compute/count.sim -./test.sh -f general/compute/diff.sim -./test.sh -f general/compute/diff2.sim -./test.sh -f general/compute/first.sim -./test.sh -f general/compute/interval.sim -./test.sh -f general/compute/last.sim -./test.sh -f general/compute/leastsquare.sim -./test.sh -f general/compute/max.sim -./test.sh -f general/compute/min.sim -./test.sh -f general/compute/null.sim -./test.sh -f general/compute/percentile.sim -./test.sh -f general/compute/stddev.sim -./test.sh -f general/compute/sum.sim -./test.sh -f general/compute/top.sim - -./test.sh -f general/db/alter_option.sim -./test.sh -f general/db/alter_tables_d2.sim -./test.sh -f general/db/alter_tables_v1.sim -./test.sh -f general/db/alter_tables_v4.sim -./test.sh -f general/db/alter_vgroups.sim -./test.sh -f general/db/basic.sim -./test.sh -f general/db/basic1.sim -./test.sh -f general/db/basic2.sim -./test.sh -f general/db/basic3.sim -./test.sh -f general/db/basic4.sim -./test.sh -f general/db/basic5.sim -./test.sh -f general/db/delete_reuse1.sim -./test.sh -f general/db/delete_reuse2.sim -./test.sh -f general/db/delete_reusevnode.sim -./test.sh -f general/db/delete_reusevnode2.sim -./test.sh -f general/db/delete_writing1.sim -./test.sh -f general/db/delete_writing2.sim -./test.sh -f general/db/delete.sim -./test.sh -f general/db/len.sim -./test.sh -f general/db/repeat.sim -./test.sh -f general/db/tables.sim -./test.sh -f general/db/vnodes.sim - ./test.sh -f general/field/2.sim ./test.sh -f general/field/3.sim ./test.sh -f general/field/4.sim @@ -63,10 +22,7 @@ ./test.sh -f general/http/grafana_bug.sim ./test.sh -f general/http/grafana.sim -./test.sh -f general/import/basic.sim -./test.sh -f general/import/commit.sim -./test.sh -f general/import/large.sim -./test.sh -f general/import/replica1.sim + ./test.sh -f general/insert/basic.sim ./test.sh -f general/insert/insert_drop.sim @@ -128,33 +84,4 @@ ./test.sh -f general/parser/union.sim ./test.sh -f general/parser/topbot.sim ./test.sh -f general/db/nosuchfile.sim -./test.sh -f general/parser/function.sim - -./test.sh -f general/table/autocreate.sim -./test.sh -f general/table/basic1.sim -./test.sh -f general/table/basic2.sim -./test.sh -f general/table/basic3.sim -./test.sh -f general/table/bigint.sim -./test.sh -f general/table/binary.sim -./test.sh -f general/table/bool.sim -./test.sh -f general/table/column_name.sim -./test.sh -f general/table/column_num.sim -./test.sh -f general/table/column_value.sim -./test.sh -f general/table/column2.sim -./test.sh -f general/table/date.sim -./test.sh -f general/table/db.table.sim -./test.sh -f general/table/delete_reuse1.sim -./test.sh -f general/table/delete_reuse2.sim -./test.sh -f general/table/delete_writing.sim -./test.sh -f general/table/describe.sim -./test.sh -f general/table/double.sim -./test.sh -f general/table/fill.sim -./test.sh -f general/table/float.sim -./test.sh -f general/table/int.sim -./test.sh -f general/table/limit.sim -./test.sh -f general/table/smallint.sim -./test.sh -f general/table/table_len.sim -./test.sh -f general/table/table.sim -./test.sh -f general/table/tinyint.sim 
-./test.sh -f general/table/vgroup.sim -./test.sh -f general/table/createmulti.sim \ No newline at end of file +./test.sh -f general/parser/function.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_2.txt b/tests/script/jenkins/basic_2.txt index 9b3b3f9b18..20e574711a 100644 --- a/tests/script/jenkins/basic_2.txt +++ b/tests/script/jenkins/basic_2.txt @@ -72,32 +72,4 @@ cd ../../../debug; make ./test.sh -f unique/cluster/cache.sim ./test.sh -f unique/cluster/vgroup100.sim -./test.sh -f unique/column/replica3.sim - -./test.sh -f unique/db/commit.sim -./test.sh -f unique/db/delete.sim -./test.sh -f unique/db/delete_part.sim -./test.sh -f unique/db/replica_add12.sim -./test.sh -f unique/db/replica_add13.sim -./test.sh -f unique/db/replica_add23.sim -./test.sh -f unique/db/replica_reduce21.sim -./test.sh -f unique/db/replica_reduce32.sim -./test.sh -f unique/db/replica_reduce31.sim -./test.sh -f unique/db/replica_part.sim - -./test.sh -f unique/vnode/many.sim -./test.sh -f unique/vnode/replica2_basic2.sim -./test.sh -f unique/vnode/replica2_repeat.sim -./test.sh -f unique/vnode/replica3_basic.sim -./test.sh -f unique/vnode/replica3_repeat.sim -./test.sh -f unique/vnode/replica3_vgroup.sim - -./test.sh -f unique/dnode/monitor.sim -./test.sh -f unique/dnode/monitor_bug.sim -./test.sh -f unique/dnode/simple.sim -./test.sh -f unique/dnode/data1.sim -./test.sh -f unique/dnode/m2.sim -./test.sh -f unique/dnode/m3.sim -./test.sh -f unique/dnode/offline3.sim -./test.sh -f general/wal/kill.sim -./test.sh -f general/wal/maxtables.sim \ No newline at end of file +./test.sh -f unique/column/replica3.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_3.txt b/tests/script/jenkins/basic_3.txt index 25bfde28f0..f53b1b763a 100644 --- a/tests/script/jenkins/basic_3.txt +++ b/tests/script/jenkins/basic_3.txt @@ -19,7 +19,7 @@ ./test.sh -f unique/arbitrator/dn3_mn1_r3_vnode_delDir.sim ./test.sh -f unique/arbitrator/dn3_mn1_vnode_nomaster.sim ./test.sh -f unique/arbitrator/dn3_mn2_killDnode.sim -./test.sh -f unique/arbitrator/insert_duplicationTs.sim + ./test.sh -f unique/arbitrator/offline_replica2_alterTable_online.sim ./test.sh -f unique/arbitrator/offline_replica2_alterTag_online.sim ./test.sh -f unique/arbitrator/offline_replica2_createTable_online.sim @@ -55,25 +55,4 @@ ./test.sh -f unique/stable/replica3_dnode6.sim ./test.sh -f unique/stable/replica3_vnode3.sim -./test.sh -f unique/mnode/mgmt20.sim -./test.sh -f unique/mnode/mgmt21.sim -./test.sh -f unique/mnode/mgmt22.sim -./test.sh -f unique/mnode/mgmt23.sim -./test.sh -f unique/mnode/mgmt24.sim -./test.sh -f unique/mnode/mgmt25.sim -./test.sh -f unique/mnode/mgmt26.sim -./test.sh -f unique/mnode/mgmt33.sim -./test.sh -f unique/mnode/mgmt34.sim -./test.sh -f unique/mnode/mgmtr2.sim -./test.sh -f general/stream/metrics_del.sim -./test.sh -f general/stream/metrics_replica1_vnoden.sim -./test.sh -f general/stream/restart_stream.sim -./test.sh -f general/stream/stream_3.sim -./test.sh -f general/stream/stream_restart.sim -./test.sh -f general/stream/table_del.sim -./test.sh -f general/stream/table_replica1_vnoden.sim - -./test.sh -f general/connection/test_old_data.sim -./test.sh -f unique/dnode/datatrans_3node.sim -./test.sh -f unique/dnode/datatrans_3node_2.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_4.txt b/tests/script/jenkins/basic_4.txt index 895281f218..2f2ed147a2 100644 --- a/tests/script/jenkins/basic_4.txt +++ b/tests/script/jenkins/basic_4.txt @@ -1,14 +1,3 @@ -./test.sh -f 
unique/dnode/alternativeRole.sim -./test.sh -f unique/dnode/balance1.sim -./test.sh -f unique/dnode/balance2.sim -./test.sh -f unique/dnode/balance3.sim -./test.sh -f unique/dnode/balancex.sim -./test.sh -f unique/dnode/offline1.sim -./test.sh -f unique/dnode/offline2.sim -./test.sh -f unique/dnode/reason.sim -./test.sh -f unique/dnode/remove1.sim -./test.sh -f unique/dnode/remove2.sim -./test.sh -f unique/dnode/vnode_clean.sim ./test.sh -f unique/http/admin.sim ./test.sh -f unique/http/opentsdb.sim diff --git a/tests/script/jenkins/basic_5.txt b/tests/script/jenkins/basic_5.txt new file mode 100644 index 0000000000..f89be9499e --- /dev/null +++ b/tests/script/jenkins/basic_5.txt @@ -0,0 +1,19 @@ +./test.sh -f unique/dnode/alternativeRole.sim +./test.sh -f unique/dnode/balance1.sim +./test.sh -f unique/dnode/balance2.sim +./test.sh -f unique/dnode/balance3.sim +./test.sh -f unique/dnode/balancex.sim +./test.sh -f unique/dnode/offline1.sim +./test.sh -f unique/dnode/offline2.sim + +./test.sh -f general/stream/metrics_del.sim +./test.sh -f general/stream/metrics_replica1_vnoden.sim +./test.sh -f general/stream/restart_stream.sim +./test.sh -f general/stream/stream_3.sim +./test.sh -f general/stream/stream_restart.sim +./test.sh -f general/stream/table_del.sim +./test.sh -f general/stream/table_replica1_vnoden.sim + +./test.sh -f general/connection/test_old_data.sim +./test.sh -f unique/dnode/datatrans_3node.sim +./test.sh -f unique/dnode/datatrans_3node_2.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_6.txt b/tests/script/jenkins/basic_6.txt new file mode 100644 index 0000000000..9156360a9f --- /dev/null +++ b/tests/script/jenkins/basic_6.txt @@ -0,0 +1,37 @@ +./test.sh -f unique/dnode/reason.sim +./test.sh -f unique/dnode/remove1.sim +./test.sh -f unique/dnode/remove2.sim +./test.sh -f unique/dnode/vnode_clean.sim + +./test.sh -f unique/db/commit.sim +./test.sh -f unique/db/delete.sim +./test.sh -f unique/db/delete_part.sim +./test.sh -f unique/db/replica_add12.sim +./test.sh -f unique/db/replica_add13.sim +./test.sh -f unique/db/replica_add23.sim +./test.sh -f unique/db/replica_reduce21.sim +./test.sh -f unique/db/replica_reduce32.sim +./test.sh -f unique/db/replica_reduce31.sim +./test.sh -f unique/db/replica_part.sim + +./test.sh -f unique/vnode/many.sim +./test.sh -f unique/vnode/replica2_basic2.sim +./test.sh -f unique/vnode/replica2_repeat.sim +./test.sh -f unique/vnode/replica3_basic.sim +./test.sh -f unique/vnode/replica3_repeat.sim +./test.sh -f unique/vnode/replica3_vgroup.sim + +./test.sh -f unique/dnode/monitor.sim +./test.sh -f unique/dnode/monitor_bug.sim +./test.sh -f unique/dnode/simple.sim +./test.sh -f unique/dnode/data1.sim +./test.sh -f unique/dnode/m2.sim +./test.sh -f unique/dnode/m3.sim +./test.sh -f unique/dnode/offline3.sim +./test.sh -f general/wal/kill.sim +./test.sh -f general/wal/maxtables.sim + +./test.sh -f general/import/basic.sim +./test.sh -f general/import/commit.sim +./test.sh -f general/import/large.sim +./test.sh -f general/import/replica1.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_7.txt b/tests/script/jenkins/basic_7.txt new file mode 100644 index 0000000000..27d7d4ff97 --- /dev/null +++ b/tests/script/jenkins/basic_7.txt @@ -0,0 +1,81 @@ + +./test.sh -f general/compute/avg.sim +./test.sh -f general/compute/bottom.sim +./test.sh -f general/compute/count.sim +./test.sh -f general/compute/diff.sim +./test.sh -f general/compute/diff2.sim +./test.sh -f general/compute/first.sim +./test.sh -f 
general/compute/interval.sim +./test.sh -f general/compute/last.sim +./test.sh -f general/compute/leastsquare.sim +./test.sh -f general/compute/max.sim +./test.sh -f general/compute/min.sim +./test.sh -f general/compute/null.sim +./test.sh -f general/compute/percentile.sim +./test.sh -f general/compute/stddev.sim +./test.sh -f general/compute/sum.sim +./test.sh -f general/compute/top.sim + +./test.sh -f general/db/alter_option.sim +./test.sh -f general/db/alter_tables_d2.sim +./test.sh -f general/db/alter_tables_v1.sim +./test.sh -f general/db/alter_tables_v4.sim +./test.sh -f general/db/alter_vgroups.sim +./test.sh -f general/db/basic.sim +./test.sh -f general/db/basic1.sim +./test.sh -f general/db/basic2.sim +./test.sh -f general/db/basic3.sim +./test.sh -f general/db/basic4.sim +./test.sh -f general/db/basic5.sim +./test.sh -f general/db/delete_reuse1.sim +./test.sh -f general/db/delete_reuse2.sim +./test.sh -f general/db/delete_reusevnode.sim +./test.sh -f general/db/delete_reusevnode2.sim +./test.sh -f general/db/delete_writing1.sim +./test.sh -f general/db/delete_writing2.sim +./test.sh -f general/db/delete.sim +./test.sh -f general/db/len.sim +./test.sh -f general/db/repeat.sim +./test.sh -f general/db/tables.sim +./test.sh -f general/db/vnodes.sim +./test.sh -f general/table/autocreate.sim +./test.sh -f general/table/basic1.sim +./test.sh -f general/table/basic2.sim +./test.sh -f general/table/basic3.sim +./test.sh -f general/table/bigint.sim +./test.sh -f general/table/binary.sim +./test.sh -f general/table/bool.sim +./test.sh -f general/table/column_name.sim +./test.sh -f general/table/column_num.sim +./test.sh -f general/table/column_value.sim +./test.sh -f general/table/column2.sim +./test.sh -f general/table/date.sim +./test.sh -f general/table/db.table.sim +./test.sh -f general/table/delete_reuse1.sim +./test.sh -f general/table/delete_reuse2.sim +./test.sh -f general/table/delete_writing.sim +./test.sh -f general/table/describe.sim +./test.sh -f general/table/double.sim +./test.sh -f general/table/fill.sim +./test.sh -f general/table/float.sim +./test.sh -f general/table/int.sim +./test.sh -f general/table/limit.sim +./test.sh -f general/table/smallint.sim +./test.sh -f general/table/table_len.sim +./test.sh -f general/table/table.sim +./test.sh -f general/table/tinyint.sim +./test.sh -f general/table/vgroup.sim +./test.sh -f general/table/createmulti.sim + +./test.sh -f unique/mnode/mgmt20.sim +./test.sh -f unique/mnode/mgmt21.sim +./test.sh -f unique/mnode/mgmt22.sim +./test.sh -f unique/mnode/mgmt23.sim +./test.sh -f unique/mnode/mgmt24.sim +./test.sh -f unique/mnode/mgmt25.sim +./test.sh -f unique/mnode/mgmt26.sim +./test.sh -f unique/mnode/mgmt33.sim +./test.sh -f unique/mnode/mgmt34.sim +./test.sh -f unique/mnode/mgmtr2.sim + +./test.sh -f unique/arbitrator/insert_duplicationTs.sim \ No newline at end of file diff --git a/tests/script/sh/deploy.bat b/tests/script/sh/deploy.bat index 04c7b8a660..921f1611fb 100644 --- a/tests/script/sh/deploy.bat +++ b/tests/script/sh/deploy.bat @@ -65,24 +65,24 @@ echo serverPort %NODE% >> %TAOS_CFG% echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo debugFlag 0 >> %TAOS_CFG% -echo mDebugFlag 143 >> %TAOS_CFG% -echo sdbDebugFlag 143 >> %TAOS_CFG% -echo dDebugFlag 143 >> %TAOS_CFG% -echo vDebugFlag 143 >> %TAOS_CFG% -echo tsdbDebugFlag 143 >> %TAOS_CFG% -echo cDebugFlag 143 >> %TAOS_CFG% -echo jnidebugFlag 143 >> %TAOS_CFG% -echo odbcdebugFlag 143 >> %TAOS_CFG% -echo httpDebugFlag 143 >> %TAOS_CFG% -echo 
monDebugFlag 143 >> %TAOS_CFG% -echo mqttDebugFlag 143 >> %TAOS_CFG% -echo qdebugFlag 143 >> %TAOS_CFG% -echo rpcDebugFlag 143 >> %TAOS_CFG% +echo mDebugFlag 135 >> %TAOS_CFG% +echo sdbDebugFlag 135 >> %TAOS_CFG% +echo dDebugFlag 135 >> %TAOS_CFG% +echo vDebugFlag 135 >> %TAOS_CFG% +echo tsdbDebugFlag 135 >> %TAOS_CFG% +echo cDebugFlag 135 >> %TAOS_CFG% +echo jnidebugFlag 135 >> %TAOS_CFG% +echo odbcdebugFlag 135 >> %TAOS_CFG% +echo httpDebugFlag 135 >> %TAOS_CFG% +echo monDebugFlag 135 >> %TAOS_CFG% +echo mqttDebugFlag 135 >> %TAOS_CFG% +echo qdebugFlag 135 >> %TAOS_CFG% +echo rpcDebugFlag 135 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo udebugFlag 143 >> %TAOS_CFG% -echo sdebugFlag 143 >> %TAOS_CFG% -echo wdebugFlag 143 >> %TAOS_CFG% -echo cqdebugFlag 143 >> %TAOS_CFG% +echo udebugFlag 135 >> %TAOS_CFG% +echo sdebugFlag 135 >> %TAOS_CFG% +echo wdebugFlag 135 >> %TAOS_CFG% +echo cqdebugFlag 135 >> %TAOS_CFG% echo monitor 0 >> %TAOS_CFG% echo monitorInterval 1 >> %TAOS_CFG% echo http 0 >> %TAOS_CFG% diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat index 0b5cdda527..9ed58a90d1 100644 --- a/tests/script/wtest.bat +++ b/tests/script/wtest.bat @@ -44,10 +44,10 @@ echo serverPort 7100 >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% echo numOfLogLines 100000000 >> %TAOS_CFG% -echo rpcDebugFlag 143 >> %TAOS_CFG% +echo rpcDebugFlag 135 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo cDebugFlag 143 >> %TAOS_CFG% -echo udebugFlag 143 >> %TAOS_CFG% +echo cDebugFlag 135 >> %TAOS_CFG% +echo udebugFlag 135 >> %TAOS_CFG% echo wal 0 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% diff --git a/tests/test-all.sh b/tests/test-all.sh index 1c1657cf12..bee2f1e7ec 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -6,15 +6,15 @@ GREEN='\033[1;32m' GREEN_DARK='\033[0;32m' GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' -function git_branch { - branch="`git branch 2>/dev/null | grep "^\*" | sed -e "s/^\*\ //"`" - if [ "${branch}" != "" ];then - if [ "${branch}" = "(no branch)" ];then - branch="(`git rev-parse --short HEAD`...)" - fi - branch=(${branch////_}) - echo "$branch" - fi + +function dohavecore(){ + corefile=`find $corepath -mmin 1` + if [ -n "$corefile" ];then + echo 'taosd or taos has generated core' + if [[ $1 == 1 ]];then + exit 8 + fi + fi } function runSimCaseOneByOne { while read -r line; do @@ -42,6 +42,7 @@ function runSimCaseOneByOne { # fi end_time=`date +%s` echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log + dohavecore 0 fi done < $1 } @@ -69,14 +70,15 @@ function runSimCaseOneByOnefq { out_log=`tail -1 out.log ` if [[ $out_log =~ 'failed' ]];then if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then - cp -r ../../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S"` + cp -r ../../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"` else - cp -r ../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S" ` + cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` fi exit 8 fi end_time=`date +%s` echo execution time of $case was `expr $end_time - $start_time`s. 
| tee -a out.log + dohavecore 1 fi done < $1 } @@ -105,6 +107,7 @@ function runPyCaseOneByOne { else $line > /dev/null 2>&1 fi + dohavecore 0 fi done < $1 } @@ -126,13 +129,14 @@ function runPyCaseOneByOnefq { end_time=`date +%s` out_log=`tail -1 pytest-out.log ` if [[ $out_log =~ 'failed' ]];then - cp -r ../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S" ` + cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` exit 8 fi echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log else $line > /dev/null 2>&1 fi + dohavecore 1 fi done < $1 } @@ -140,7 +144,7 @@ totalFailed=0 totalPyFailed=0 tests_dir=`pwd` - +corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern` if [ "$2" != "python" ]; then echo "### run TSIM test case ###" cd $tests_dir/script @@ -156,6 +160,9 @@ if [ "$2" != "python" ]; then echo "### run TSIM b1 test ###" runSimCaseOneByOne jenkins/basic_1.txt runSimCaseOneByOne jenkins/basic_4.txt + runSimCaseOneByOne jenkins/basic_5.txt + runSimCaseOneByOne jenkins/basic_6.txt + runSimCaseOneByOne jenkins/basic_7.txt elif [ "$1" == "b2" ]; then echo "### run TSIM b2 test ###" runSimCaseOneByOne jenkins/basic_2.txt @@ -174,6 +181,15 @@ if [ "$2" != "python" ]; then elif [ "$1" == "b4fq" ]; then echo "### run TSIM b4 test ###" runSimCaseOneByOnefq jenkins/basic_4.txt + elif [ "$1" == "b5fq" ]; then + echo "### run TSIM b5 test ###" + runSimCaseOneByOnefq jenkins/basic_5.txt + elif [ "$1" == "b6fq" ]; then + echo "### run TSIM b6 test ###" + runSimCaseOneByOnefq jenkins/basic_6.txt + elif [ "$1" == "b7fq" ]; then + echo "### run TSIM b7 test ###" + runSimCaseOneByOnefq jenkins/basic_7.txt elif [ "$1" == "smoke" ] || [ -z "$1" ]; then echo "### run TSIM smoke test ###" runSimCaseOneByOne basicSuite.sim @@ -242,6 +258,12 @@ if [ "$2" != "sim" ]; then elif [ "$1" == "p2" ]; then echo "### run Python_2 test ###" runPyCaseOneByOnefq pytest_2.sh + elif [ "$1" == "p3" ]; then + echo "### run Python_3 test ###" + runPyCaseOneByOnefq pytest_3.sh + elif [ "$1" == "p4" ]; then + echo "### run Python_4 test ###" + runPyCaseOneByOnefq pytest_4.sh elif [ "$1" == "b2" ] || [ "$1" == "b3" ]; then exit $(($totalFailed + $totalPyFailed)) elif [ "$1" == "smoke" ] || [ -z "$1" ]; then diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt index 33e1528b70..2eb8ee1614 100644 --- a/tests/test/c/CMakeLists.txt +++ b/tests/test/c/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)