diff --git a/.gitignore b/.gitignore index 07003bda4c..ff27b53139 100644 --- a/.gitignore +++ b/.gitignore @@ -156,6 +156,9 @@ pcre2_grep_test.sh pcre2_chartables.c geos-config config.h +!contrib/xml2-cmake +!contrib/xml2-cmake/linux_x86_64/include/config.h +!contrib/xml2-cmake/CMakeLists.txt pcre2.h zconf.h version.h diff --git a/cmake/addr2line_CMakeLists.txt.in b/cmake/addr2line_CMakeLists.txt.in index 93fb9bb96c..7cfcb46718 100644 --- a/cmake/addr2line_CMakeLists.txt.in +++ b/cmake/addr2line_CMakeLists.txt.in @@ -2,7 +2,7 @@ # addr2line ExternalProject_Add(addr2line GIT_REPOSITORY https://github.com/davea42/libdwarf-addr2line.git - GIT_TAG master + GIT_TAG main SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line" BINARY_DIR "${TD_CONTRIB_DIR}/addr2line" CONFIGURE_COMMAND "" diff --git a/cmake/azure_CMakeLists.txt.in b/cmake/azure_CMakeLists.txt.in index 5aa32b70e5..d9e47ce6b1 100644 --- a/cmake/azure_CMakeLists.txt.in +++ b/cmake/azure_CMakeLists.txt.in @@ -2,6 +2,7 @@ ExternalProject_Add(azure URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9 + DEPENDS xml2 DOWNLOAD_NO_PROGRESS 1 DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1" diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index 13826a1a74..ef6ed4af1d 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG main + GIT_TAG 3.0 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 9bbda8309f..9a6a5329ae 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG main + GIT_TAG 3.0 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in index b013d45911..17446d184d 100644 --- a/cmake/taosws_CMakeLists.txt.in +++ b/cmake/taosws_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosws-rs ExternalProject_Add(taosws-rs GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git - GIT_TAG main + GIT_TAG 3.0 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/xml2_CMakeLists.txt.in b/cmake/xml2_CMakeLists.txt.in index 0e7492aea7..8dcd89efc0 100644 --- a/cmake/xml2_CMakeLists.txt.in +++ b/cmake/xml2_CMakeLists.txt.in @@ -1,19 +1,16 @@ # xml2 ExternalProject_Add(xml2 - URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz - URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6 - #https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz - #GIT_REPOSITORY https://github.com/GNOME/libxml2 - #GIT_TAG v2.11.5 + URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz + URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780 DOWNLOAD_NO_PROGRESS 1 DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" - SOURCE_DIR "${TD_CONTRIB_DIR}/xml2" + SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure 
--prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma - BUILD_COMMAND make -j - INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" TEST_COMMAND "" GIT_SHALLOW true ) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 59ba8ccfc5..71b1f339e8 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -145,6 +145,13 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) +# libcurl +if(NOT ${TD_WINDOWS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/) + cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(NOT ${TD_WINDOWS}) + # s3 if(${BUILD_WITH_S3}) cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -157,7 +164,6 @@ elseif(${BUILD_WITH_COS}) # cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) # cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) # cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - # cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif() @@ -661,7 +667,12 @@ if(${BUILD_PCRE2}) endif(${BUILD_PCRE2}) if(${TD_LINUX} AND ${BUILD_WITH_S3}) - add_subdirectory(azure-cmake EXCLUDE_FROM_ALL) + set(ORIG_CMAKE_C_FLAGS ${CMAKE_C_FLAGS}) + string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") + add_subdirectory(xml2-cmake) + set(CMAKE_C_FLAGS ${ORIG_CMAKE_C_FLAGS}) + + add_subdirectory(azure-cmake) endif() IF(TD_LINUX) diff --git a/contrib/azure-cmake/CMakeLists.txt b/contrib/azure-cmake/CMakeLists.txt index aaa5617860..eaf4c569e7 100644 --- a/contrib/azure-cmake/CMakeLists.txt +++ b/contrib/azure-cmake/CMakeLists.txt @@ -36,10 +36,6 @@ target_include_directories( ) find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) -find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - -# find_library(CURL_LIBRARY curl) -# find_library(XML2_LIBRARY xml2) find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) @@ -50,9 +46,8 @@ target_link_libraries( PRIVATE ${CURL_LIBRARY} PRIVATE ${SSL_LIBRARY} PRIVATE ${CRYPTO_LIBRARY} - PRIVATE ${XML2_LIBRARY} - # PRIVATE xml2 + PRIVATE _libxml2 PRIVATE zlib # PRIVATE ${CoreFoundation_Library} diff --git a/contrib/xml2-cmake/CMakeLists.txt b/contrib/xml2-cmake/CMakeLists.txt new file mode 100644 index 0000000000..9067c0e6e7 --- /dev/null +++ b/contrib/xml2-cmake/CMakeLists.txt @@ -0,0 +1,58 @@ +set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2") + +set(SRCS + "${LIBXML2_SOURCE_DIR}/SAX.c" + "${LIBXML2_SOURCE_DIR}/entities.c" + "${LIBXML2_SOURCE_DIR}/encoding.c" + "${LIBXML2_SOURCE_DIR}/error.c" + "${LIBXML2_SOURCE_DIR}/parserInternals.c" + "${LIBXML2_SOURCE_DIR}/parser.c" + "${LIBXML2_SOURCE_DIR}/tree.c" + "${LIBXML2_SOURCE_DIR}/hash.c" + "${LIBXML2_SOURCE_DIR}/list.c" + "${LIBXML2_SOURCE_DIR}/xmlIO.c" + "${LIBXML2_SOURCE_DIR}/xmlmemory.c" + "${LIBXML2_SOURCE_DIR}/uri.c" + "${LIBXML2_SOURCE_DIR}/valid.c" + "${LIBXML2_SOURCE_DIR}/xlink.c" + "${LIBXML2_SOURCE_DIR}/HTMLparser.c" + 
"${LIBXML2_SOURCE_DIR}/HTMLtree.c" + "${LIBXML2_SOURCE_DIR}/debugXML.c" + "${LIBXML2_SOURCE_DIR}/xpath.c" + "${LIBXML2_SOURCE_DIR}/xpointer.c" + "${LIBXML2_SOURCE_DIR}/xinclude.c" + "${LIBXML2_SOURCE_DIR}/nanohttp.c" + "${LIBXML2_SOURCE_DIR}/nanoftp.c" + "${LIBXML2_SOURCE_DIR}/catalog.c" + "${LIBXML2_SOURCE_DIR}/globals.c" + "${LIBXML2_SOURCE_DIR}/threads.c" + "${LIBXML2_SOURCE_DIR}/c14n.c" + "${LIBXML2_SOURCE_DIR}/xmlstring.c" + "${LIBXML2_SOURCE_DIR}/buf.c" + "${LIBXML2_SOURCE_DIR}/xmlregexp.c" + "${LIBXML2_SOURCE_DIR}/xmlschemas.c" + "${LIBXML2_SOURCE_DIR}/xmlschemastypes.c" + "${LIBXML2_SOURCE_DIR}/xmlunicode.c" + "${LIBXML2_SOURCE_DIR}/triostr.c" + "${LIBXML2_SOURCE_DIR}/xmlreader.c" + "${LIBXML2_SOURCE_DIR}/relaxng.c" + "${LIBXML2_SOURCE_DIR}/dict.c" + "${LIBXML2_SOURCE_DIR}/SAX2.c" + "${LIBXML2_SOURCE_DIR}/xmlwriter.c" + "${LIBXML2_SOURCE_DIR}/legacy.c" + "${LIBXML2_SOURCE_DIR}/chvalid.c" + "${LIBXML2_SOURCE_DIR}/pattern.c" + "${LIBXML2_SOURCE_DIR}/xmlsave.c" + "${LIBXML2_SOURCE_DIR}/xmlmodule.c" + "${LIBXML2_SOURCE_DIR}/schematron.c" + "${LIBXML2_SOURCE_DIR}/xzlib.c" +) +add_library(_libxml2 ${SRCS}) + +#target_link_libraries(_libxml2 PRIVATE td_contrib::zlib) +target_link_libraries(_libxml2 PRIVATE zlib) + +target_include_directories(_libxml2 BEFORE PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include") +target_include_directories(_libxml2 BEFORE PUBLIC "${LIBXML2_SOURCE_DIR}/include") + +add_library(td_contrib::libxml2 ALIAS _libxml2) diff --git a/contrib/xml2-cmake/linux_x86_64/include/config.h b/contrib/xml2-cmake/linux_x86_64/include/config.h new file mode 100644 index 0000000000..7969b377dc --- /dev/null +++ b/contrib/xml2-cmake/linux_x86_64/include/config.h @@ -0,0 +1,285 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Type cast for the gethostbyname() argument */ +#define GETHOSTBYNAME_ARG_CAST /**/ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Whether struct sockaddr::__ss_family exists */ +/* #undef HAVE_BROKEN_SS_FAMILY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_CTYPE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Have dlopen based dso */ +#define HAVE_DLOPEN /**/ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FLOAT_H 1 + +/* Define to 1 if you have the `fprintf' function. */ +#define HAVE_FPRINTF 1 + +/* Define to 1 if you have the `ftime' function. */ +#define HAVE_FTIME 1 + +/* Define if getaddrinfo is there */ +#define HAVE_GETADDRINFO /**/ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `isascii' function. 
*/ +#define HAVE_ISASCII 1 + +/* Define if isinf is there */ +#define HAVE_ISINF /**/ + +/* Define if isnan is there */ +#define HAVE_ISNAN /**/ + +/* Define if history library is there (-lhistory) */ +/* #undef HAVE_LIBHISTORY */ + +/* Define if pthread library is there (-lpthread) */ +#define HAVE_LIBPTHREAD /**/ + +/* Define if readline library is there (-lreadline) */ +/* #undef HAVE_LIBREADLINE */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if you have the `localtime' function. */ +#define HAVE_LOCALTIME 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LZMA_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MATH_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define to 1 if you have the `munmap' function. */ +#define HAVE_MUNMAP 1 + +/* mmap() is no good without munmap() */ +#if defined(HAVE_MMAP) && !defined(HAVE_MUNMAP) +# undef /**/ HAVE_MMAP +#endif + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETDB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_IN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the `printf' function. */ +#define HAVE_PRINTF 1 + +/* Define if is there */ +#define HAVE_PTHREAD_H /**/ + +/* Define to 1 if you have the `putenv' function. */ +#define HAVE_PUTENV 1 + +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + +/* Define to 1 if you have the `rand_r' function. */ +#define HAVE_RAND_R 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_RESOLV_H 1 + +/* Have shl_load based dso */ +/* #undef HAVE_SHLLOAD */ + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SIGNAL_H 1 + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* Define to 1 if you have the `sprintf' function. */ +#define HAVE_SPRINTF 1 + +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if you have the `sscanf' function. */ +#define HAVE_SSCANF 1 + +/* Define to 1 if you have the `stat' function. */ +#define HAVE_STAT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_SYS_TIMEB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the `time' function. */ +#define HAVE_TIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Whether va_copy() is available */ +#define HAVE_VA_COPY 1 + +/* Define to 1 if you have the `vfprintf' function. */ +#define HAVE_VFPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `vsprintf' function. */ +#define HAVE_VSPRINTF 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_ZLIB_H */ + +/* Whether __va_copy() is available */ +/* #undef HAVE___VA_COPY */ + +/* Define as const if the declaration of iconv() needs const. */ +#define ICONV_CONST + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libxml2" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* Type cast for the send() function 2nd arg */ +#define SEND_ARG2_CAST /**/ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Support for IPv6 */ +#define SUPPORT_IP6 /**/ + +/* Define if va_list is an array type */ +#define VA_LIST_IS_ARRAY 1 + +/* Version number of package */ +#define VERSION "2.9.8" + +/* Determine what socket length (socklen_t) data type is */ +#define XML_SOCKLEN_T socklen_t + +/* Define for Solaris 2.5.1 so the uint32_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +/* #undef _UINT32_T */ + +/* ss_family is not defined here, use __ss_family instead */ +/* #undef ss_family */ + +/* Define to the type of an unsigned integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +/* #undef uint32_t */ diff --git a/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h b/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h new file mode 100644 index 0000000000..c2faeb47cb --- /dev/null +++ b/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h @@ -0,0 +1,501 @@ +/* + * Summary: compile-time version information + * Description: compile-time version information for the XML library + * + * Copy: See Copyright for the status of this software. 
+ * + * Author: Daniel Veillard + */ + +#ifndef __XML_VERSION_H__ +#define __XML_VERSION_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * use those to be sure nothing nasty will happen if + * your library and includes mismatch + */ +#ifndef LIBXML2_COMPILING_MSCCDEF +XMLPUBFUN void XMLCALL xmlCheckVersion(int version); +#endif /* LIBXML2_COMPILING_MSCCDEF */ + +/** + * LIBXML_DOTTED_VERSION: + * + * the version string like "1.2.3" + */ +#define LIBXML_DOTTED_VERSION "2.10.3" + +/** + * LIBXML_VERSION: + * + * the version number: 1.2.3 value is 10203 + */ +#define LIBXML_VERSION 21003 + +/** + * LIBXML_VERSION_STRING: + * + * the version number string, 1.2.3 value is "10203" + */ +#define LIBXML_VERSION_STRING "21003" + +/** + * LIBXML_VERSION_EXTRA: + * + * extra version information, used to show a git commit description + */ +#define LIBXML_VERSION_EXTRA "" + +/** + * LIBXML_TEST_VERSION: + * + * Macro to check that the libxml version in use is compatible with + * the version the software has been compiled against + */ +#define LIBXML_TEST_VERSION xmlCheckVersion(21003); + +#ifndef VMS +#if 0 +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO +#else +/** + * WITHOUT_TRIO: + * + * defined if the trio support should not be configured in + */ +#define WITHOUT_TRIO +#endif +#else /* VMS */ +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO 1 +#endif /* VMS */ + +/** + * LIBXML_THREAD_ENABLED: + * + * Whether the thread support is configured in + */ +#if 1 +#define LIBXML_THREAD_ENABLED +#endif + +/** + * LIBXML_THREAD_ALLOC_ENABLED: + * + * Whether the allocation hooks are per-thread + */ +#if 0 +#define LIBXML_THREAD_ALLOC_ENABLED +#endif + +/** + * LIBXML_TREE_ENABLED: + * + * Whether the DOM like tree manipulation API support is configured in + */ +#if 1 +#define LIBXML_TREE_ENABLED +#endif + +/** + * LIBXML_OUTPUT_ENABLED: + * + * Whether the serialization/saving support is configured in + */ +#if 1 +#define LIBXML_OUTPUT_ENABLED +#endif + +/** + * LIBXML_PUSH_ENABLED: + * + * Whether the push parsing interfaces are configured in + */ +#if 1 +#define LIBXML_PUSH_ENABLED +#endif + +/** + * LIBXML_READER_ENABLED: + * + * Whether the xmlReader parsing interface is configured in + */ +#if 1 +#define LIBXML_READER_ENABLED +#endif + +/** + * LIBXML_PATTERN_ENABLED: + * + * Whether the xmlPattern node selection interface is configured in + */ +#if 1 +#define LIBXML_PATTERN_ENABLED +#endif + +/** + * LIBXML_WRITER_ENABLED: + * + * Whether the xmlWriter saving interface is configured in + */ +#if 1 +#define LIBXML_WRITER_ENABLED +#endif + +/** + * LIBXML_SAX1_ENABLED: + * + * Whether the older SAX1 interface is configured in + */ +#if 1 +#define LIBXML_SAX1_ENABLED +#endif + +/** + * LIBXML_FTP_ENABLED: + * + * Whether the FTP support is configured in + */ +#if 0 +#define LIBXML_FTP_ENABLED +#endif + +/** + * LIBXML_HTTP_ENABLED: + * + * Whether the HTTP support is configured in + */ +#if 1 +#define LIBXML_HTTP_ENABLED +#endif + +/** + * LIBXML_VALID_ENABLED: + * + * Whether the DTD validation support is configured in + */ +#if 1 +#define LIBXML_VALID_ENABLED +#endif + +/** + * LIBXML_HTML_ENABLED: + * + * Whether the HTML support is configured in + */ +#if 1 +#define LIBXML_HTML_ENABLED +#endif + +/** + * LIBXML_LEGACY_ENABLED: + * + * Whether the deprecated APIs are compiled in for compatibility + */ +#if 0 +#define LIBXML_LEGACY_ENABLED +#endif + +/** + * 
LIBXML_C14N_ENABLED: + * + * Whether the Canonicalization support is configured in + */ +#if 1 +#define LIBXML_C14N_ENABLED +#endif + +/** + * LIBXML_CATALOG_ENABLED: + * + * Whether the Catalog support is configured in + */ +#if 1 +#define LIBXML_CATALOG_ENABLED +#endif + +/** + * LIBXML_XPATH_ENABLED: + * + * Whether XPath is configured in + */ +#if 1 +#define LIBXML_XPATH_ENABLED +#endif + +/** + * LIBXML_XPTR_ENABLED: + * + * Whether XPointer is configured in + */ +#if 1 +#define LIBXML_XPTR_ENABLED +#endif + +/** + * LIBXML_XPTR_LOCS_ENABLED: + * + * Whether support for XPointer locations is configured in + */ +#if 0 +#define LIBXML_XPTR_LOCS_ENABLED +#endif + +/** + * LIBXML_XINCLUDE_ENABLED: + * + * Whether XInclude is configured in + */ +#if 1 +#define LIBXML_XINCLUDE_ENABLED +#endif + +/** + * LIBXML_ICONV_ENABLED: + * + * Whether iconv support is available + */ +#if 0 +#define LIBXML_ICONV_ENABLED +#endif + +/** + * LIBXML_ICU_ENABLED: + * + * Whether icu support is available + */ +#if 0 +#define LIBXML_ICU_ENABLED +#endif + +/** + * LIBXML_ISO8859X_ENABLED: + * + * Whether ISO-8859-* support is made available in case iconv is not + */ +#if 1 +#define LIBXML_ISO8859X_ENABLED +#endif + +/** + * LIBXML_DEBUG_ENABLED: + * + * Whether Debugging module is configured in + */ +#if 1 +#define LIBXML_DEBUG_ENABLED +#endif + +/** + * DEBUG_MEMORY_LOCATION: + * + * Whether the memory debugging is configured in + */ +#if 0 +#define DEBUG_MEMORY_LOCATION +#endif + +/** + * LIBXML_DEBUG_RUNTIME: + * + * Whether the runtime debugging is configured in + */ +#if 0 +#define LIBXML_DEBUG_RUNTIME +#endif + +/** + * LIBXML_UNICODE_ENABLED: + * + * Whether the Unicode related interfaces are compiled in + */ +#if 1 +#define LIBXML_UNICODE_ENABLED +#endif + +/** + * LIBXML_REGEXP_ENABLED: + * + * Whether the regular expressions interfaces are compiled in + */ +#if 1 +#define LIBXML_REGEXP_ENABLED +#endif + +/** + * LIBXML_AUTOMATA_ENABLED: + * + * Whether the automata interfaces are compiled in + */ +#if 1 +#define LIBXML_AUTOMATA_ENABLED +#endif + +/** + * LIBXML_EXPR_ENABLED: + * + * Whether the formal expressions interfaces are compiled in + * + * This code is unused and disabled unconditionally for now. 
+ */ +#if 0 +#define LIBXML_EXPR_ENABLED +#endif + +/** + * LIBXML_SCHEMAS_ENABLED: + * + * Whether the Schemas validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMAS_ENABLED +#endif + +/** + * LIBXML_SCHEMATRON_ENABLED: + * + * Whether the Schematron validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMATRON_ENABLED +#endif + +/** + * LIBXML_MODULES_ENABLED: + * + * Whether the module interfaces are compiled in + */ +#if 1 +#define LIBXML_MODULES_ENABLED +/** + * LIBXML_MODULE_EXTENSION: + * + * the string suffix used by dynamic modules (usually shared libraries) + */ +#define LIBXML_MODULE_EXTENSION ".so" +#endif + +/** + * LIBXML_ZLIB_ENABLED: + * + * Whether the Zlib support is compiled in + */ +#if 1 +#define LIBXML_ZLIB_ENABLED +#endif + +/** + * LIBXML_LZMA_ENABLED: + * + * Whether the Lzma support is compiled in + */ +#if 0 +#define LIBXML_LZMA_ENABLED +#endif + +#ifdef __GNUC__ + +/** + * ATTRIBUTE_UNUSED: + * + * Macro used to signal to GCC unused function parameters + */ + +#ifndef ATTRIBUTE_UNUSED +# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7))) +# define ATTRIBUTE_UNUSED __attribute__((unused)) +# else +# define ATTRIBUTE_UNUSED +# endif +#endif + +/** + * LIBXML_ATTR_ALLOC_SIZE: + * + * Macro used to indicate to GCC this is an allocator function + */ + +#ifndef LIBXML_ATTR_ALLOC_SIZE +# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)))) +# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x))) +# else +# define LIBXML_ATTR_ALLOC_SIZE(x) +# endif +#else +# define LIBXML_ATTR_ALLOC_SIZE(x) +#endif + +/** + * LIBXML_ATTR_FORMAT: + * + * Macro used to indicate to GCC the parameter are printf like + */ + +#ifndef LIBXML_ATTR_FORMAT +# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3))) +# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args))) +# else +# define LIBXML_ATTR_FORMAT(fmt,args) +# endif +#else +# define LIBXML_ATTR_FORMAT(fmt,args) +#endif + +#ifndef XML_DEPRECATED +# ifdef IN_LIBXML +# define XML_DEPRECATED +# else +/* Available since at least GCC 3.1 */ +# define XML_DEPRECATED __attribute__((deprecated)) +# endif +#endif + +#else /* ! __GNUC__ */ +/** + * ATTRIBUTE_UNUSED: + * + * Macro used to signal to GCC unused function parameters + */ +#define ATTRIBUTE_UNUSED +/** + * LIBXML_ATTR_ALLOC_SIZE: + * + * Macro used to indicate to GCC this is an allocator function + */ +#define LIBXML_ATTR_ALLOC_SIZE(x) +/** + * LIBXML_ATTR_FORMAT: + * + * Macro used to indicate to GCC the parameter are printf like + */ +#define LIBXML_ATTR_FORMAT(fmt,args) +/** + * XML_DEPRECATED: + * + * Macro used to indicate that a function, variable, type or struct member + * is deprecated. + */ +#ifndef XML_DEPRECATED +#define XML_DEPRECATED +#endif +#endif /* __GNUC__ */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif diff --git a/docs/en/08-operation/04-maintenance.md b/docs/en/08-operation/04-maintenance.md index 970ee40d18..5712a710a1 100644 --- a/docs/en/08-operation/04-maintenance.md +++ b/docs/en/08-operation/04-maintenance.md @@ -17,7 +17,9 @@ TDengine is designed for various writing scenarios, and many of these scenarios ```sql COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY']; -SHOW COMPACTS [compact_id]; +COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) 
[start with 'XXXX'] [end with 'YYYY']; +SHOW COMPACTS; +SHOW COMPACT compact_id; KILL COMPACT compact_id; ``` diff --git a/docs/en/14-reference/02-tools/10-taosbenchmark.md b/docs/en/14-reference/02-tools/10-taosbenchmark.md index d1a18b5d1c..4925f4cb3b 100644 --- a/docs/en/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/en/14-reference/02-tools/10-taosbenchmark.md @@ -246,13 +246,14 @@ The query performance test mainly outputs the QPS indicator of query request spe ``` bash complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ... -INFO: Total specified queries: 30000 INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049 ``` - The first line represents the percentile distribution of query execution and query request delay for each of the three threads executing 10000 queries. The SQL command is the test query statement -- The second line indicates that a total of 10000 * 3 = 30000 queries have been completed -- The third line indicates that the total query time is 26.9653 seconds, and the query rate per second (QPS) is 1113.049 times/second +- The second line indicates that the total query time is 26.9653 seconds, the total queries is 10000 * 3 = 30000, and the query rate per second (QPS) is 1113.049 times/second +- If the `continue_if_fail` option is set to `yes` in the query, the last line will output the number of failed requests and error rate, the format like "error + number of failed requests (error rate)" +- QPS = number of successful requests / time spent (in seconds) +- Error rate = number of failed requests / (number of successful requests + number of failed requests) #### Subscription metrics @@ -334,9 +335,9 @@ Parameters related to supertable creation are configured in the `super_tables` s - **child_table_exists**: Whether the child table already exists, default is "no", options are "yes" or "no". -- **child_table_count**: Number of child tables, default is 10. +- **childtable_count**: Number of child tables, default is 10. -- **child_table_prefix**: Prefix for child table names, mandatory, no default value. +- **childtable_prefix**: Prefix for child table names, mandatory, no default value. - **escape_character**: Whether the supertable and child table names contain escape characters, default is "no", options are "yes" or "no". @@ -403,7 +404,7 @@ Specify the configuration parameters for tag and data columns in `super_tables` - **min**: The minimum value for the data type of the column/tag. Generated values will be greater than or equal to the minimum value. -- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the minimum value. +- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the maximum value. - **scalingFactor**: Floating-point precision enhancement factor, only effective when the data type is float/double, valid values range from 1 to 1000000 positive integers. Used to enhance the precision of generated floating points, especially when min or max values are small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 means enhancing the precision by 1 decimal place, 100 means 2 places, and so on. 
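Putting the parameters above together, a minimal `super_tables` fragment might look like the following sketch. The database-independent keys (`childtable_count`, `childtable_prefix`, `child_table_exists`, `columns`, `tags`, `min`, `max`, `scalingFactor`) are the ones documented in this section; the supertable name, row count, and concrete column definitions are illustrative assumptions only.

```json
{
  "super_tables": [
    {
      "name": "meters",
      "child_table_exists": "no",
      "childtable_count": 10,
      "childtable_prefix": "d",
      "insert_rows": 1000,
      "columns": [
        { "type": "FLOAT", "name": "current", "min": 0, "max": 10, "scalingFactor": 100 },
        { "type": "INT", "name": "voltage", "min": 200, "max": 240 }
      ],
      "tags": [
        { "type": "INT", "name": "groupid", "min": 1, "max": 10 }
      ]
    }
  ]
}
```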
@@ -431,11 +432,9 @@ Specify the configuration parameters for tag and data columns in `super_tables` - **create_table_thread_count** : The number of threads for creating tables, default is 8. -- **connection_pool_size** : The number of pre-established connections with the TDengine server. If not configured, it defaults to the specified number of threads. - - **result_file** : The path to the result output file, default is ./output.txt. -- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The default value is false. +- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The value can be "yes" or "no", by default "no". - **interlace_rows** : Enables interleaved insertion mode and specifies the number of rows to insert into each subtable at a time. Interleaved insertion mode refers to inserting the specified number of rows into each subtable in sequence and repeating this process until all subtable data has been inserted. The default value is 0, meaning data is inserted into one subtable completely before moving to the next. This parameter can also be configured in `super_tables`; if configured, the settings in `super_tables` take higher priority and override the global settings. @@ -464,12 +463,12 @@ For other common parameters, see Common Configuration Parameters. Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`. -- **mixed_query** "yes": `Mixed Query` "no": `Normal Query`, default is "no" -`Mixed Query`: All SQL statements in `sqls` are grouped by the number of threads, with each thread executing one group. Each SQL statement in a thread needs to perform `query_times` queries. -`Normal Query `: Each SQL in `sqls` starts `threads` and exits after executing `query_times` times. The next SQL can only be executed after all previous SQL threads have finished executing and exited. -Regardless of whether it is a `Normal Query` or `Mixed Query`, the total number of query executions is the same. The total number of queries = `sqls` * `threads` * `query_times`. The difference is that `Normal Query` starts `threads` for each SQL query, while ` Mixed Query` only starts `threads` once to complete all SQL queries. The number of thread startups for the two is different. +- `General Query`: Each SQL in `sqls` starts `threads` threads to query this SQL, Each thread exits after executing the `query_times` queries, and only after all threads executing this SQL have completed can the next SQL be executed. +The total number of queries(`General Query`) = the number of `sqls` * `query_times` * `threads` +- `Mixed Query` : All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries. +The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times` -- **query_interval** : Query interval, in seconds, default is 0. +- **query_interval** : Query interval, in millisecond, default is 0. - **threads** : Number of threads executing the SQL query, default is 1. @@ -491,6 +490,7 @@ The thread mode of the super table query is the same as the `Normal Query` mode - **sqls** : - **sql** : The SQL command to execute, required; for supertable queries, keep "xxxx" in the SQL command, the program will automatically replace it with all subtable names of the supertable. 
- **result** : File to save the query results, if not specified, results are not saved. + - **Note**: The maximum number of SQL arrays configured under SQL is 100. ### Configuration Parameters for Subscription Scenarios diff --git a/docs/en/14-reference/03-taos-sql/24-show.md b/docs/en/14-reference/03-taos-sql/24-show.md index 36c20df0b4..b46fb41fa0 100644 --- a/docs/en/14-reference/03-taos-sql/24-show.md +++ b/docs/en/14-reference/03-taos-sql/24-show.md @@ -304,9 +304,10 @@ Displays information about all topics in the current database. ```sql SHOW TRANSACTIONS; +SHOW TRANSACTION [tranaction_id]; ``` -Displays information about transactions currently being executed in the system (these transactions are only for metadata level, not for regular tables). +Displays information about one of or all transaction(s) currently being executed in the system (these transactions are only for metadata level, not for regular tables). ## SHOW USERS diff --git a/docs/en/14-reference/05-connector/10-cpp.md b/docs/en/14-reference/05-connector/10-cpp.md index 3b51b47461..edccd8ebd4 100644 --- a/docs/en/14-reference/05-connector/10-cpp.md +++ b/docs/en/14-reference/05-connector/10-cpp.md @@ -510,7 +510,6 @@ For the OpenTSDB text protocol, the parsing of timestamps follows its official p - tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object. - timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second. - **Return Value**: Non-`NULL`: Success, returns a pointer to a WS_RES structure, which contains the received message. `NULL`: indicates no data, the error code can be obtained through ws_errno (NULL), please refer to the reference manual for specific error message. WS_RES results are consistent with taos_query results, and information in WS_RES can be obtained through various query interfaces, such as schema, etc. - - `int32_t ws_tmq_consumer_close(ws_tmq_t *tmq)` - **Interface Description**: Used to close the ws_tmq_t structure. Must be used in conjunction with ws_tmq_consumer_new. - tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object. @@ -1196,7 +1195,7 @@ In addition to using SQL or parameter binding APIs to insert data, you can also - tmq: [Input] Points to a valid tmq_t structure pointer, representing a TMQ consumer object. - timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second. - **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_RES structure containing the received messages. `NULL`: indicates no data, the error code can be obtained through taos_errno (NULL), please refer to the reference manual for specific error message. TAOS_RES results are consistent with taos_query results, and information in TAOS_RES can be obtained through various query interfaces, such as schema, etc. - + - `int32_t tmq_consumer_close(tmq_t *tmq)` - **Interface Description**: Used to close a tmq_t structure. Must be used in conjunction with tmq_consumer_new. - tmq: [Input] Points to a valid tmq_t structure pointer, which represents a TMQ consumer object. 
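As a concrete illustration of the polling interfaces documented above, a minimal consumer loop could be sketched as follows. This is only a sketch under stated assumptions: consumer creation (`tmq_consumer_new`), topic subscription, and per-row processing are assumed to have happened elsewhere and are omitted here.

```c
#include <stdio.h>
#include "taos.h"

// Minimal polling sketch: poll until an error occurs, then close the consumer.
static void poll_loop(tmq_t *tmq) {
  while (1) {
    // timeout is in milliseconds; a negative value falls back to the default 1 second
    TAOS_RES *res = tmq_consumer_poll(tmq, 1000);
    if (res == NULL) {
      // NULL means no data was received; taos_errno(NULL) tells whether it was an error
      if (taos_errno(NULL) != 0) {
        fprintf(stderr, "poll error: %s\n", taos_errstr(NULL));
        break;
      }
      continue;
    }
    // The result is consistent with taos_query results and can be read with the
    // regular query interfaces (schema, rows, ...) before being released.
    taos_free_result(res);
  }
  // Must be paired with tmq_consumer_new.
  (void)tmq_consumer_close(tmq);
}
```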
diff --git a/docs/en/14-reference/09-error-code.md b/docs/en/14-reference/09-error-code.md index 190c626196..233ac78a19 100644 --- a/docs/en/14-reference/09-error-code.md +++ b/docs/en/14-reference/09-error-code.md @@ -534,5 +534,6 @@ This document details the server error codes that may be encountered when using | 0x80004000 | Invalid message | The subscribed data is illegal, generally does not occur | Check the client-side error logs for details | | 0x80004001 | Consumer mismatch | The vnode requested for subscription and the reassigned vnode are inconsistent, usually occurs when new consumers join the same consumer group | Internal error, not exposed to users | | 0x80004002 | Consumer closed | The consumer no longer exists | Check if it has already been closed | -| 0x80004017 | Invalid status, please subscribe topic first | tmq status invalidate | Without calling subscribe, directly poll data | +| 0x80004017 | Invalid status, please subscribe topic first | tmq status invalidate | Without calling subscribe, directly poll data | | 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs | + diff --git a/docs/examples/c/insert_data_demo.c b/docs/examples/c/insert_data_demo.c index 7570af02ad..364872fd88 100644 --- a/docs/examples/c/insert_data_demo.c +++ b/docs/examples/c/insert_data_demo.c @@ -58,12 +58,13 @@ static int DemoInsertData() { taos_cleanup(); return -1; } - taos_free_result(result); // you can check affectedRows here int rows = taos_affected_rows(result); fprintf(stdout, "Successfully inserted %d rows into power.meters.\n", rows); + taos_free_result(result); + // close & clean taos_close(taos); taos_cleanup(); diff --git a/docs/zh/06-advanced/05-data-in/07-mqtt.mdx b/docs/zh/06-advanced/05-data-in/07-mqtt.mdx index 3ffab4dfbf..c3242a80c2 100644 --- a/docs/zh/06-advanced/05-data-in/07-mqtt.mdx +++ b/docs/zh/06-advanced/05-data-in/07-mqtt.mdx @@ -65,6 +65,8 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T 在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称和 QoS。使用如下格式设置: `{topic_name}::{qos}`(如:`my_topic::0`)。MQTT 协议 5.0 支持共享订阅,可以通过多个客户端订阅同一个 Topic 实现负载均衡,使用如下格式: `$share/{group_name}/{topic_name}::{qos}`,其中,`$share` 是固定前缀,表示启用共享订阅,`group_name` 是分组名称,类似 kafka 的消费者组。 +在 **主题解析** 中填写 MQTT 主题解析规则,格式与 MQTT Topic 相同,将 MQTT Topic 各层级内容解析为对应变量名,`_` 表示解析时忽略当前层级。例如:MQTT Topic `a/+/c` 对应解析规则如果设置为 `v1/v2/_`,代表将第一层级的 `a` 赋值给变量 `v1`,第二层级的值(这里通配符 `+` 代表任意值)复制给变量 `v2`,第三层级的值 `c` 忽略,不会赋值给任何变量。在下方的 `payload 解析` 中,Topic 解析得到的变量同样可以参与各种转换和计算。 + 在 **数据压缩** 中,配置消息体压缩算法,taosX 在接收到消息后,使用对应的压缩算法对消息体进行解压缩获取原始数据。可选项 none(不压缩), gzip, snappy, lz4 和 zstd,默认为 none。 在 **字符编码** 中,配置消息体编码格式,taosX 在接收到消息后,使用对应的编码格式对消息体进行解码获取原始数据。可选项 UTF_8, GBK, GB18030, BIG5,默认为 UTF_8 @@ -138,7 +140,11 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解 #### 6.4 表映射 -在 **目标超级表** 的下拉列表中选择一个目标超级表,也可以先点击右侧的 **创建超级表** 按钮 +在 **目标超级表** 的下拉列表中选择一个目标超级表,也可以先点击右侧的 **创建超级表** 按钮创建新的超级表。 + +当超级表需要根据消息动态生成时,可以选择 **创建模板**。其中,超级表名称,列名,列类型等均可以使用模板变量,当接收到数据后,程序会自动计算模板变量并生成对应的超级表模板,当数据库中超级表不存在时,会使用此模板创建超级表;对于已创建的超级表,如果缺少通过模板变量计算得到的列,也会自动创建对应列。 + +![mqtt-17.png](./mqtt-17.png) 在 **映射** 中,填写目标超级表中的子表名称,例如:`t_{id}`。根据需求填写映射规则,其中 mapping 支持设置缺省值。 @@ -148,6 +154,16 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解 ![mqtt-13.png](./mqtt-13.png) +如果超级表列为模板变量,在子表映射时会进行 pivot 操作,其中模板变量的值展开为列名,列的值为对应的映射列 + +例如: + +![mqtt-18.png](./mqtt-18.png) + +预览结果为: + +![mqtt-19.png](./mqtt-19.png) + ### 7. 
高级选项 在 **消息等待队列大小** 中填写接收 MQTT 消息的缓存队列大小,当队列满时,新到达的数据会直接丢弃。可设置为 0,即不缓存。 diff --git a/docs/zh/06-advanced/05-data-in/index.md b/docs/zh/06-advanced/05-data-in/index.md index 2293aec1d0..cf785eb128 100644 --- a/docs/zh/06-advanced/05-data-in/index.md +++ b/docs/zh/06-advanced/05-data-in/index.md @@ -152,7 +152,11 @@ let v3 = data["voltage"].split(","); 使用 json 规则解析出的电压是字符串表达的带单位形式,最终入库希望能使用 int 类型记录电压值和电流值,便于统计分析,此时就需要对电压进一步拆分;另外日期期望拆分为日期和时间入库。 -如下图所示可以对源字段`ts`使用 split 规则拆分成日期和时间,对字段`voltage`使用 regex 提取出电压值和电压单位。split 规则需要设置**分隔符**和**拆分数量**,拆分后的字段命名规则为`{原字段名}_{顺序号}`,Regex 规则同解析过程中的一样,使用**命名捕获组**命名提取字段。 +如下图所示 + +* 对字段`ts`使用 split 规则拆分成日期和时间。split 规则需要设置**分隔符**和**拆分数量**,拆分后的字段命名规则为`{原字段名}_{顺序号}`。 +* 对字段`voltage`使用正则表达式 `^(?[0-9]+)(?[a-zA-Z]+)$` 提取出电压值和电压单位,Regex 规则同解析过程中的一样,使用**命名捕获组**命名提取字段。 +* 对字段 `location` 使用 convert 转换,填写一个 JSON map 对象,其中 key 为字段 `current` 的值,`value` 为转换后的值。如图,`location` 字段的值 `"beijing.chaoyang.datun"` 被转换为 `"beijing.chaoyang.datunludong"`。 ![拆分和提取](./pic/transform-04.png) diff --git a/docs/zh/06-advanced/05-data-in/mqtt-05.png b/docs/zh/06-advanced/05-data-in/mqtt-05.png index c43b2022ae..8ede689c3a 100644 Binary files a/docs/zh/06-advanced/05-data-in/mqtt-05.png and b/docs/zh/06-advanced/05-data-in/mqtt-05.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-17.png b/docs/zh/06-advanced/05-data-in/mqtt-17.png new file mode 100644 index 0000000000..6109cd3caf Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/mqtt-17.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-18.png b/docs/zh/06-advanced/05-data-in/mqtt-18.png new file mode 100644 index 0000000000..fbbde0508c Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/mqtt-18.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-19.png b/docs/zh/06-advanced/05-data-in/mqtt-19.png new file mode 100644 index 0000000000..f3047ee853 Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/mqtt-19.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/transform-04.png b/docs/zh/06-advanced/05-data-in/pic/transform-04.png index 4669d8fed0..a8d7084a38 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/transform-04.png and b/docs/zh/06-advanced/05-data-in/pic/transform-04.png differ diff --git a/docs/zh/08-operation/04-maintenance.md b/docs/zh/08-operation/04-maintenance.md index a82d8c2c17..bb9ea20fbf 100644 --- a/docs/zh/08-operation/04-maintenance.md +++ b/docs/zh/08-operation/04-maintenance.md @@ -17,11 +17,11 @@ TDengine 面向多种写入场景,而很多写入场景下,TDengine 的存 ### 语法 ```SQL -COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY']; -COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY']; -SHOW COMPACTS; +COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY']; +COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) 
[start with 'XXXX'] [end with 'YYYY']; +SHOW COMPACTS; SHOW COMPACT compact_id; -KILL COMPACT compact_id; +KILL COMPACT compact_id; ``` ### 效果 diff --git a/docs/zh/14-reference/01-components/05-taosx-agent.md b/docs/zh/14-reference/01-components/05-taosx-agent.md index 1f1276e834..3245b74fc0 100644 --- a/docs/zh/14-reference/01-components/05-taosx-agent.md +++ b/docs/zh/14-reference/01-components/05-taosx-agent.md @@ -14,6 +14,8 @@ sidebar_label: taosX-Agent - `instanceId`:当前 taosx-agent 服务的实例 ID,如果同一台机器上启动了多个 taosx-agent 实例,必须保证各个实例的实例 ID 互不相同。 - `compression`: 非必填,可配置为 `true` 或 `false`, 默认为 `false`。配置为`true`, 则开启 `Agent` 和 `taosX` 通信数据压缩。 - `in_memory_cache_capacity`: 非必填,表示可在内存中缓存的最大消息批次数,可配置为大于 0 的整数。默认为 `64`。 +- `client_port_range.min`:非必填,取值范围 `[49152-65535]`,默认为 `49152`,当 agent 向 taosx 创建 socket 连接时,socket 客户端会随机监听一个端口,此配置限制了端口范围的最小值。 +- `client_port_range.max`:非必填,取值范围 `[49152-65535]`,默认为 `65535`,此配置限制了端口范围的最大值。 - `log_level`: 非必填,日志级别,默认为 `info`, 同 `taosX` 一样,支持 `error`,`warn`,`info`,`debug`,`trace` 五级。已弃用,请使用 `log.level` 代替。 - `log_keep_days`:非必填,日志保存天数,默认为 `30` 天。已弃用,请使用 `log.keepDays` 代替。 - `log.path`:日志文件存放的目录。 @@ -49,6 +51,15 @@ sidebar_label: taosX-Agent # #in_memory_cache_capacity = 64 +[client_port_range] +# Minimum boundary of listening port of agent, can not less than 49152 +# +# min = 49152 + +# Maximum boundary of listening port of agent, can not greater than 65535 +# +# max = 65535 + # log configuration [log] # All log files are stored in this directory diff --git a/docs/zh/14-reference/02-tools/10-taosbenchmark.md b/docs/zh/14-reference/02-tools/10-taosbenchmark.md index 44dab0ad5f..99ba8bdbff 100644 --- a/docs/zh/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/zh/14-reference/02-tools/10-taosbenchmark.md @@ -169,6 +169,9 @@ INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all thread - 第一行表示 3 个线程每个线程执行 10000 次查询及查询请求延时百分位分布情况,`SQL command` 为测试的查询语句 - 第二行表示总共完成了 10000 * 3 = 30000 次查询总数 - 第三行表示查询总耗时为 26.9653 秒,每秒查询率(QPS)为:1113.049 次/秒 +- 如果在查询中设置了 `continue_if_fail` 选项为 `yes`,在最后一行中会输出失败请求个数及错误率,格式 error + 失败请求个数 (错误率) +- QPS = 成功请求数量 / 花费时间(单位秒) +- 错误率 = 失败请求数量 /(成功请求数量 + 失败请求数量) #### 订阅指标 @@ -207,12 +210,12 @@ INFO: Consumed total msgs: 3000, total rows: 30000000 插入场景下 `filetype` 必须设置为 `insert`,该参数及其它通用参数详见[通用配置参数](#通用配置参数) -- ** keep_trying ** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。 +- **keep_trying** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。 -- ** trying_interval ** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。 -- ** childtable_from 和 childtable_to ** : 指定写入子表范围,开闭区间为 [childtable_from, childtable_to). +- **trying_interval** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。 +- **childtable_from 和 childtable_to** : 指定写入子表范围,开闭区间为 [childtable_from, childtable_to).   
-- ** continue_if_fail ** : 允许用户定义失败后行为 +- **continue_if_fail** : 允许用户定义失败后行为 “continue_if_fail”:  “no”, 失败 taosBenchmark 自动退出,默认行为 “continue_if_fail”: “yes”, 失败 taosBenchmark 警告用户,并继续写入 @@ -224,7 +227,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000 - **name** : 数据库名。 -- **drop** : 数据库已存在时是否删除重建,可选项为 "yes" 或 "no", 默认为 “yes” +- **drop** : 数据库已存在时是否删除,可选项为 "yes" 或 "no", 默认为 “yes” #### 流式计算相关配置参数 @@ -250,9 +253,9 @@ INFO: Consumed total msgs: 3000, total rows: 30000000 - **child_table_exists** : 子表是否已经存在,默认值为 "no",可选值为 "yes" 或 "no"。 -- **child_table_count** : 子表的数量,默认值为 10。 +- **childtable_count** : 子表的数量,默认值为 10。 -- **child_table_prefix** : 子表名称的前缀,必选配置项,没有默认值。 +- **childtable_prefix** : 子表名称的前缀,必选配置项,没有默认值。 - **escape_character** : 超级表和子表名称中是否包含转义字符,默认值为 "no",可选值为 "yes" 或 "no"。 @@ -319,7 +322,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000 - **min** : 数据类型的 列/标签 的最小值。生成的值将大于或等于最小值。 -- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 +- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最大值。 - **scalingFactor** : 浮点数精度增强因子,仅当数据类型是 float/double 时生效,有效值范围为 1 至 1000000 的正整数。用于增强生成浮点数的精度,特别是在 min 或 max 值较小的情况下。此属性按 10 的幂次增强小数点后的精度:scalingFactor 为 10 表示增强 1 位小数精度,100 表示增强 2 位,依此类推。 @@ -343,15 +346,13 @@ INFO: Consumed total msgs: 3000, total rows: 30000000 - **thread_count** : 插入数据的线程数量,默认为 8。 -- **thread_bind_vgroup** : 写入时 vgroup 是否和写入线程绑定,绑定后可提升写入速度, 取值为 "yes" 或 "no",默认值为 “no”, 设置为 “no” 后与原来行为一致。 当设为 “yes” 时,如果 thread_count 数量大小写入数据库的 vgroups 数量, thread_count 自动调整为 vgroups 数量;如果 thread_count 数量小于 vgroups 数量,写入线程数量不做调整,一个线程写完一个 vgroup 数据后再写下一个,同时保持一个 vgroup 同时只能由一个线程写入的规则。 +- **thread_bind_vgroup** : 写入时 vgroup 是否和写入线程绑定,绑定后可提升写入速度, 取值为 "yes" 或 "no",默认值为 “no”, 设置为 “no” 后与原来行为一致。 当设为 “yes” 时,如果 thread_count 大于写入数据库 vgroups 数量, thread_count 自动调整为 vgroups 数量;如果 thread_count 小于 vgroups 数量,写入线程数量不做调整,一个线程写完一个 vgroup 数据后再写下一个,同时保持一个 vgroup 同时只能由一个线程写入的规则。 - **create_table_thread_count** : 建表的线程数量,默认为 8。 -- **connection_pool_size** : 预先建立的与 TDengine 服务端之间的连接的数量。若不配置,则与所指定的线程数相同。 - - **result_file** : 结果输出文件的路径,默认值为 ./output.txt。 -- **confirm_parameter_prompt** : 开关参数,要求用户在提示后确认才能继续。默认值为 false 。 +- **confirm_parameter_prompt** : 开关参数,要求用户在提示后确认才能继续, 可取值 "yes" or "no"。默认值为 "no" 。 - **interlace_rows** : 启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。 在 `super_tables` 中也可以配置该参数,若配置则以 `super_tables` 中的配置为高优先级,覆盖全局设置。 @@ -381,12 +382,16 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为 查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。 -- **mixed_query** : 查询模式,取值 “yes” 为`混合查询`, "no" 为`正常查询` , 默认值为 “no” - `混合查询`:`sqls` 中所有 sql 按 `threads` 线程数分组,每个线程执行一组, 线程中每个 sql 都需执行 `query_times` 次查询 - `正常查询`:`sqls` 中每个 sql 启动 `threads` 个线程,每个线程执行完 `query_times` 次后退出,下个 sql 需等待上个 sql 线程全部执行完退出后方可执行 - 不管 `正常查询` 还是 `混合查询` ,执行查询总次数是相同的 ,查询总次数 = `sqls` 个数 * `threads` * `query_times`, 区别是 `正常查询` 每个 sql 都会启动 `threads` 个线程,而 `混合查询` 只启动一次 `threads` 个线程执行完所有 SQL, 两者启动线程次数不一样。 +- **mixed_query** : 查询模式 + “yes” :`混合查询` + "no"(默认值) :`普通查询` + `普通查询`:`sqls` 中每个 sql 启动 `threads` 个线程查询此 sql, 执行完 `query_times` 次查询后退出,执行此 sql 的所有线程都完成后进入下一个 sql + `查询总次数` = `sqls` 个数 * `query_times` * `threads` + + `混合查询`:`sqls` 中所有 sql 分成 `threads` 个组,每个线程执行一组, 每个 sql 都需执行 `query_times` 次查询 + `查询总次数` = `sqls` 个数 * `query_times` -- **query_interval** : 查询时间间隔,单位是秒,默认值为 0。 +- **query_interval** : 查询时间间隔,单位: millisecond,默认值为 0。 - **threads** : 执行查询 SQL 的线程数,默认值为 1。 @@ -406,9 +411,9 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU ,单位为 - **threads** : 执行查询 SQL 的线程数,默认值为 1。 - **sqls** : - - **sql** : 执行的 SQL 
命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表的所有子表名。 - 替换为超级表中所有的子表名。 + - **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中必须保留 "xxxx",会替换为超级下所有子表名后再执行。 - **result** : 保存查询结果的文件,未指定则不保存。 + - **限制项** : sqls 下配置 sql 数组最大为 100 个 ### 订阅场景配置参数 diff --git a/docs/zh/14-reference/03-taos-sql/24-show.md b/docs/zh/14-reference/03-taos-sql/24-show.md index 4596a03281..3898920e65 100644 --- a/docs/zh/14-reference/03-taos-sql/24-show.md +++ b/docs/zh/14-reference/03-taos-sql/24-show.md @@ -306,9 +306,10 @@ SHOW TOPICS; ```sql SHOW TRANSACTIONS; +SHOW TRANSACTION [tranaction_id]; ``` -显示当前系统中正在执行的事务的信息(该事务仅针对除普通表以外的元数据级别) +显示当前系统中正在执行的所有或者某一个事务的信息(该事务仅针对除普通表以外的元数据级别) ## SHOW USERS diff --git a/docs/zh/14-reference/05-connector/10-cpp.mdx b/docs/zh/14-reference/05-connector/10-cpp.mdx index d18ddb1b3e..a593516e76 100644 --- a/docs/zh/14-reference/05-connector/10-cpp.mdx +++ b/docs/zh/14-reference/05-connector/10-cpp.mdx @@ -1184,7 +1184,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - **接口说明**:用于轮询消费数据,每一个消费者,只能单线程调用该接口。 - tmq:[入参] 指向一个有效的 tmq_t 结构体指针,该结构体代表一个 TMQ 消费者对象。 - timeout:[入参] 轮询的超时时间,单位为毫秒,负数表示默认超时1秒。 - - **返回值**:非 `NULL`:成功,返回一个指向 TAOS_RES 结构体的指针,该结构体包含了接收到的消息。。`NULL`:失败,表示没有数据。TAOS_RES 结果和 taos_query 返回结果一致,可通过查询的各种接口获取 TAOS_RES 里的信息,比如 schema 等。 + - **返回值**:非 `NULL`:成功,返回一个指向 TAOS_RES 结构体的指针,该结构体包含了接收到的消息。。`NULL`:表示没有数据,可通过taos_errno(NULL) 获取错误码,具体错误码参见参考手册。TAOS_RES 结果和 taos_query 返回结果一致,可通过查询的各种接口获取 TAOS_RES 里的信息,比如 schema 等。 - `int32_t tmq_consumer_close(tmq_t *tmq)` - **接口说明**:用于关闭 tmq_t 结构体。需与 tmq_consumer_new 配合使用。 diff --git a/docs/zh/14-reference/09-error-code.md b/docs/zh/14-reference/09-error-code.md index 7f4d36b4b2..d29ff542eb 100644 --- a/docs/zh/14-reference/09-error-code.md +++ b/docs/zh/14-reference/09-error-code.md @@ -554,6 +554,6 @@ description: TDengine 服务端的错误码列表和详细说明 | 0x80004000 | Invalid message | 订阅到的数据非法,一般不会出现 | 具体查看client端的错误日志提示 | | 0x80004001 | Consumer mismatch | 订阅请求的vnode和重新分配的vnode不一致,一般存在于有新消费者加入相同消费者组里时 | 内部错误,不暴露给用户 | | 0x80004002 | Consumer closed | 消费者已经不存在了 | 查看是否已经close掉了 | -| 0x80004017 | Invalid status, please subscribe topic first | 数据订阅状态不对 | 没有调用 subscribe,直接poll数据 | +| 0x80004017 | Invalid status, please subscribe topic first | 数据订阅状态不对 | 没有调用 subscribe,直接 poll 数据 | | 0x80004100 | Stream task not exist | 流计算任务不存在 | 具体查看server端的错误日志 | diff --git a/include/common/systable.h b/include/common/systable.h index bd8ba76f4f..fe867c9ad0 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -63,6 +63,7 @@ extern "C" { #define TSDB_INS_TABLE_TSMAS "ins_tsmas" #define TSDB_INS_DISK_USAGE "ins_disk_usage" #define TSDB_INS_TABLE_FILESETS "ins_filesets" +#define TSDB_INS_TABLE_TRANSACTION_DETAILS "ins_transaction_details" #define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" #define TSDB_PERFS_TABLE_SMAS "perf_smas" diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 0450766535..c30f2ab4ec 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -160,6 +160,7 @@ typedef enum EStreamType { STREAM_PARTITION_DELETE_DATA, STREAM_GET_RESULT, STREAM_DROP_CHILD_TABLE, + STREAM_NOTIFY_EVENT, } EStreamType; #pragma pack(push, 1) @@ -408,6 +409,9 @@ typedef struct STUidTagInfo { #define UD_GROUPID_COLUMN_INDEX 1 #define UD_TAG_COLUMN_INDEX 2 +// stream notify event block column +#define NOTIFY_EVENT_STR_COLUMN_INDEX 0 + int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime); int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol); diff --git a/include/common/tdatablock.h 
b/include/common/tdatablock.h index 1103b89ccb..96478047ca 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -285,6 +285,8 @@ bool isAutoTableName(char* ctbName); int32_t buildCtbNameAddGroupId(const char* stbName, char* ctbName, uint64_t groupId, size_t cap); int32_t buildCtbNameByGroupId(const char* stbName, uint64_t groupId, char** pName); int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf); +int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule, + char** dstTableName); int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList); diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 0b6a8b3f1b..82eaa2359e 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -163,6 +163,7 @@ typedef enum _mgmt_table { TSDB_MGMT_TABLE_ANODE_FULL, TSDB_MGMT_TABLE_USAGE, TSDB_MGMT_TABLE_FILESETS, + TSDB_MGMT_TABLE_TRANSACTION_DETAIL, TSDB_MGMT_TABLE_MAX, } EShowType; @@ -268,6 +269,7 @@ typedef enum ENodeType { QUERY_NODE_TSMA_OPTIONS, QUERY_NODE_ANOMALY_WINDOW, QUERY_NODE_RANGE_AROUND, + QUERY_NODE_STREAM_NOTIFY_OPTIONS, // Statement nodes are used in parser and planner module. QUERY_NODE_SET_OPERATOR = 100, @@ -405,6 +407,7 @@ typedef enum ENodeType { QUERY_NODE_SHOW_CREATE_TSMA_STMT, QUERY_NODE_DROP_TSMA_STMT, QUERY_NODE_SHOW_FILESETS_STMT, + QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT, // logic plan node QUERY_NODE_LOGIC_PLAN_SCAN = 1000, @@ -480,6 +483,7 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_STREAM_ANOMALY, QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC, + QUERY_NODE_RESET_STREAM_STMT, } ENodeType; typedef struct { @@ -2953,6 +2957,11 @@ typedef struct { // 3.3.0.0 SArray* pCols; // array of SField int64_t smaId; + // 3.3.6.0 + SArray* pNotifyAddrUrls; + int32_t notifyEventTypes; + int32_t notifyErrorHandle; + int8_t notifyHistory; } SCMCreateStreamReq; typedef struct { @@ -3913,6 +3922,15 @@ typedef struct { int32_t tSerializeSMResumeStreamReq(void* buf, int32_t bufLen, const SMResumeStreamReq* pReq); int32_t tDeserializeSMResumeStreamReq(void* buf, int32_t bufLen, SMResumeStreamReq* pReq); +typedef struct { + char name[TSDB_STREAM_FNAME_LEN]; + int8_t igNotExists; + int8_t igUntreated; +} SMResetStreamReq; + +int32_t tSerializeSMResetStreamReq(void* buf, int32_t bufLen, const SMResetStreamReq* pReq); +int32_t tDeserializeSMResetStreamReq(void* buf, int32_t bufLen, SMResetStreamReq* pReq); + typedef struct { char name[TSDB_TABLE_FNAME_LEN]; char stb[TSDB_TABLE_FNAME_LEN]; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 8bdc9a9346..9ea27485c2 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -262,6 +262,7 @@ TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_CONFIG, "init-config", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_SDB, "config-sdb", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_RESET_STREAM, "reset-stream", NULL, NULL) TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8 diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 883c5f7b99..9a7c3912b0 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -98,6 +98,9 @@ int32_t qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId); int32_t qSetStreamOpOpen(qTaskInfo_t tinfo); +int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper, 
+ const char* stbFullName, bool newSubTableRule); + /** * Set multiple input data blocks for the stream scan. * @param tinfo diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 2d329ec879..26482a87d4 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -439,6 +439,11 @@ typedef struct SShowCompactDetailsStmt { SNode* pCompactId; } SShowCompactDetailsStmt; +typedef struct SShowTransactionDetailsStmt { + ENodeType type; + SNode* pTransactionId; +} SShowTransactionDetailsStmt; + typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT, INDEX_TYPE_NORMAL } EIndexType; typedef struct SIndexOptions { @@ -561,19 +566,44 @@ typedef struct SStreamOptions { int64_t setFlag; } SStreamOptions; +typedef enum EStreamNotifyOptionSetFlag { + SNOTIFY_OPT_ERROR_HANDLE_SET = BIT_FLAG_MASK(0), + SNOTIFY_OPT_NOTIFY_HISTORY_SET = BIT_FLAG_MASK(1), +} EStreamNotifyOptionSetFlag; + +typedef enum EStreamNotifyEventType { + SNOTIFY_EVENT_WINDOW_OPEN = BIT_FLAG_MASK(0), + SNOTIFY_EVENT_WINDOW_CLOSE = BIT_FLAG_MASK(1), +} EStreamNotifyEventType; + +typedef enum EStreamNotifyErrorHandleType { + SNOTIFY_ERROR_HANDLE_PAUSE, + SNOTIFY_ERROR_HANDLE_DROP, +} EStreamNotifyErrorHandleType; + +typedef struct SStreamNotifyOptions { + ENodeType type; + SNodeList* pAddrUrls; + EStreamNotifyEventType eventTypes; + EStreamNotifyErrorHandleType errorHandle; + bool notifyHistory; + EStreamNotifyOptionSetFlag setFlag; +} SStreamNotifyOptions; + typedef struct SCreateStreamStmt { - ENodeType type; - char streamName[TSDB_TABLE_NAME_LEN]; - char targetDbName[TSDB_DB_NAME_LEN]; - char targetTabName[TSDB_TABLE_NAME_LEN]; - bool ignoreExists; - SStreamOptions* pOptions; - SNode* pQuery; - SNode* pPrevQuery; - SNodeList* pTags; - SNode* pSubtable; - SNodeList* pCols; - SCMCreateStreamReq* pReq; + ENodeType type; + char streamName[TSDB_TABLE_NAME_LEN]; + char targetDbName[TSDB_DB_NAME_LEN]; + char targetTabName[TSDB_TABLE_NAME_LEN]; + bool ignoreExists; + SStreamOptions* pOptions; + SNode* pQuery; + SNode* pPrevQuery; + SNodeList* pTags; + SNode* pSubtable; + SNodeList* pCols; + SStreamNotifyOptions* pNotifyOptions; + SCMCreateStreamReq* pReq; } SCreateStreamStmt; typedef struct SDropStreamStmt { @@ -595,6 +625,12 @@ typedef struct SResumeStreamStmt { bool ignoreUntreated; } SResumeStreamStmt; +typedef struct SResetStreamStmt { + ENodeType type; + char streamName[TSDB_TABLE_NAME_LEN]; + bool ignoreNotExists; +} SResetStreamStmt; + typedef struct SCreateFunctionStmt { ENodeType type; bool orReplace; diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index a4d89dcdcc..9cd6dd13ca 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -65,10 +65,14 @@ typedef struct SStreamTaskSM SStreamTaskSM; typedef struct SStreamQueueItem SStreamQueueItem; typedef struct SActiveCheckpointInfo SActiveCheckpointInfo; -#define SSTREAM_TASK_VER 4 -#define SSTREAM_TASK_INCOMPATIBLE_VER 1 -#define SSTREAM_TASK_NEED_CONVERT_VER 2 -#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 +#define SSTREAM_TASK_VER 5 +#define SSTREAM_TASK_INCOMPATIBLE_VER 1 +#define SSTREAM_TASK_NEED_CONVERT_VER 2 +#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 // Append subtable name with groupId +#define SSTREAM_TASK_APPEND_STABLE_NAME_VER 4 // Append subtable name with stableName and groupId +#define SSTREAM_TASK_ADD_NOTIFY_VER 5 // Support event notification at window open/close + +#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && 
((_t)->subtableWithoutMd5 != 1)) extern int32_t streamMetaRefPool; extern int32_t streamTaskRefPool; @@ -427,6 +431,15 @@ typedef struct STaskCheckInfo { TdThreadMutex checkInfoLock; } STaskCheckInfo; +typedef struct SNotifyInfo { + SArray* pNotifyAddrUrls; + int32_t notifyEventTypes; + int32_t notifyErrorHandle; + char* streamName; + char* stbFullName; + SSchemaWrapper* pSchemaWrapper; +} SNotifyInfo; + struct SStreamTask { int64_t ver; SStreamTaskId id; @@ -449,6 +462,7 @@ struct SStreamTask { SStreamState* pState; // state backend SUpstreamInfo upstreamInfo; STaskCheckInfo taskCheckInfo; + SNotifyInfo notifyInfo; // the followings attributes don't be serialized SScanhistorySchedInfo schedHistoryInfo; diff --git a/include/util/taoserror.h b/include/util/taoserror.h index a24b3ca7cf..464dffa937 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -412,6 +412,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_MND_TRANS_CTX_SWITCH TAOS_DEF_ERROR_CODE(0, 0x03D8) #define TSDB_CODE_MND_TRANS_CONFLICT_COMPACT TAOS_DEF_ERROR_CODE(0, 0x03D9) #define TSDB_CODE_MND_TRANS_UNKNOW_ERROR TAOS_DEF_ERROR_CODE(0, 0x03DF) +#define TSDB_CODE_MND_TRANS_NOT_ABLE_TO_kILLED TAOS_DEF_ERROR_CODE(0, 0x03D2) // mnode-mq #define TSDB_CODE_MND_TOPIC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03E0) diff --git a/include/util/tconfig.h b/include/util/tconfig.h index b1bd144d77..7d00fe97ba 100644 --- a/include/util/tconfig.h +++ b/include/util/tconfig.h @@ -160,8 +160,6 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char *apolloUrl SArray *taosGetLocalCfg(SConfig *pCfg); SArray *taosGetGlobalCfg(SConfig *pCfg); -void taosSetLocalCfg(SConfig *pCfg, SArray *pArray); -void taosSetGlobalCfg(SConfig *pCfg, SArray *pArray); #ifdef __cplusplus } #endif diff --git a/include/util/tdef.h b/include/util/tdef.h index 0cfc7ab591..f08697b0d4 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -245,6 +245,7 @@ typedef enum ELogicConditionType { #define TSDB_OFFSET_LEN 64 // it is a null-terminated string #define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string #define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string +#define TSDB_STREAM_NOTIFY_URL_LEN 128 // it includes the terminating '\0' #define TSDB_DB_NAME_LEN 65 #define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN) #define TSDB_PRIVILEDGE_CONDITION_LEN 48 * 1024 @@ -329,7 +330,11 @@ typedef enum ELogicConditionType { #define TSDB_TRANS_STAGE_LEN 12 #define TSDB_TRANS_TYPE_LEN 16 -#define TSDB_TRANS_ERROR_LEN 512 +#define TSDB_TRANS_ERROR_LEN 512 +#define TSDB_TRANS_OBJTYPE_LEN 40 +#define TSDB_TRANS_RESULT_LEN 100 +#define TSDB_TRANS_TARGET_LEN 300 +#define TSDB_TRANS_DETAIL_LEN 100 #define TSDB_STEP_NAME_LEN 32 #define TSDB_STEP_DESC_LEN 128 diff --git a/include/util/tlog.h b/include/util/tlog.h index f573d61e73..60ddc29288 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -79,6 +79,9 @@ void taosResetLog(); void taosDumpData(uint8_t *msg, int32_t len); void taosSetNoNewFile(); +// Fast uint64_t to string conversion, equivalent to sprintf(buf, "%lu", val) but with 10x better performance. +char *u64toaFastLut(uint64_t val, char *buf); + void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) 
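/*
 * Usage note for u64toaFastLut() declared above: it is documented as equivalent to
 * sprintf(buf, "%lu", val), so the caller supplies the buffer. A uint64_t prints to at most
 * 20 digits plus the terminating '\0', so a 21-byte buffer always suffices. The return value
 * is a char*; whether it points at the start or the end of the written text is not stated in
 * this header, so the sketch ignores it:
 *
 *   char buf[21];
 *   (void)u64toaFastLut(18446744073709551615ULL, buf);  // buf now holds "18446744073709551615"
 */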
#ifdef __GNUC__ __attribute__((format(printf, 4, 5))) diff --git a/include/util/tutil.h b/include/util/tutil.h index 32fc9f215a..b17bdab1ac 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -187,6 +187,15 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, } \ } while (0) +#define TAOS_CHECK_RETURN_SET_CODE(CMD, CODE, ERRNO) \ + do { \ + int32_t __c = (CMD); \ + if (__c != TSDB_CODE_SUCCESS) { \ + (CODE) = (ERRNO); \ + TAOS_RETURN(__c); \ + } \ + } while (0) + #define TAOS_CHECK_RETURN_WITH_RELEASE(CMD, PTR1, PTR2) \ do { \ int32_t __c = (CMD); \ @@ -225,6 +234,16 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, } \ } while (0) +#define TAOS_CHECK_EXIT_SET_CODE(CMD, CODE, ERRNO) \ + do { \ + code = (CMD); \ + if (code < TSDB_CODE_SUCCESS) { \ + (CODE) = (ERRNO); \ + lino = __LINE__; \ + goto _exit; \ + } \ + } while (0) + #define TAOS_UNUSED(expr) (void)(expr) bool taosIsBigChar(char c); diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index fcd88ed8d7..86aecae5c3 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1435,7 +1435,7 @@ static int32_t askEp(tmq_t* pTmq, void* param, bool sync, bool updateEpSet) { tqDebugC("consumer:0x%" PRIx64 " ask ep from mnode,QID:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId); code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo); - END: +END: if (code != 0) { tqErrorC("%s failed at %d, msg:%s", __func__, lino, tstrerror(code)); } @@ -1650,6 +1650,7 @@ void tmqMgmtClose(void) { tmqMgmt.rsetId = -1; (void)taosThreadMutexUnlock(&tmqMgmt.lock); } + (void)taosThreadMutexUnlock(&tmqMgmt.lock); } tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { @@ -2271,7 +2272,7 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p pVg->seekUpdated = false; // reset this flag. pTmq->pollCnt++; - END: +END: if (code != 0){ tqErrorC("%s failed at %d msg:%s", __func__, lino, tstrerror(code)); } @@ -2496,7 +2497,7 @@ static SMqRspObj* processMqRsp(tmq_t* tmq, SMqRspWrapper* pRspWrapper){ pRspObj->resType = pRspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_META_RSP ? 
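/*
 * The two macros added to tutil.h above follow the existing TAOS_CHECK_* pattern but additionally
 * store a caller-chosen error into a second location before bailing out. Note the asymmetry in this
 * hunk: TAOS_CHECK_RETURN_SET_CODE fires on any code != TSDB_CODE_SUCCESS, while
 * TAOS_CHECK_EXIT_SET_CODE only fires on code < TSDB_CODE_SUCCESS. A minimal, purely hypothetical
 * use (SJob, loadHeader and loadBody are invented names):
 *
 *   int32_t loadJob(SJob *pJob) {
 *     int32_t code = 0;
 *     int32_t lino = 0;
 *     TAOS_CHECK_RETURN_SET_CODE(loadHeader(pJob), pJob->lastError, TSDB_CODE_INVALID_MSG);
 *     TAOS_CHECK_EXIT_SET_CODE(loadBody(pJob), pJob->lastError, TSDB_CODE_INVALID_MSG);
 *   _exit:
 *     return code;
 *   }
 */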
RES_TYPE__TMQ_META : RES_TYPE__TMQ_BATCH_META; } - END: +END: terrno = code; taosWUnLockLatch(&tmq->lock); return pRspObj; @@ -2534,7 +2535,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq) { } } - END: +END: terrno = code; return returnVal; } @@ -2577,7 +2578,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { } } - END: +END: terrno = code; if (tmq != NULL) { tqErrorC("consumer:0x%" PRIx64 " poll error at line:%d, msg:%s", tmq->consumerId, lino, tstrerror(terrno)); @@ -2646,7 +2647,7 @@ int32_t tmq_unsubscribe(tmq_t* tmq) { int32_t tmq_consumer_close(tmq_t* tmq) { if (tmq == NULL) return TSDB_CODE_INVALID_PARA; int32_t code = 0; - code = taosThreadMutexLock(&tmqMgmt.lock); + (void) taosThreadMutexLock(&tmqMgmt.lock); if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__CLOSED){ goto end; } @@ -2660,8 +2661,8 @@ int32_t tmq_consumer_close(tmq_t* tmq) { } } - end: - code = taosThreadMutexUnlock(&tmqMgmt.lock); +end: + (void)taosThreadMutexUnlock(&tmqMgmt.lock); return code; } diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index ac8fea90e5..4c7940432d 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -78,12 +78,28 @@ else() ) endif() +if(NOT ${TD_WINDOWS}) + target_include_directories( + common + PUBLIC "$ENV{HOME}/.cos-local.2/include" + ) + + find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + target_link_libraries( + common + PUBLIC ${CURL_LIBRARY} + PUBLIC ${SSL_LIBRARY} + PUBLIC ${CRYPTO_LIBRARY} + ) +endif() + if(${BUILD_S3}) if(${BUILD_WITH_S3}) set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2) find_library(S3_LIBRARY s3) - find_library(XML2_LIBRARY xml2) target_link_libraries( common @@ -92,7 +108,7 @@ if(${BUILD_S3}) PUBLIC ${CURL_LIBRARY} PUBLIC ${SSL_LIBRARY} PUBLIC ${CRYPTO_LIBRARY} - PUBLIC ${XML2_LIBRARY} + PUBLIC _libxml2 ) add_definitions(-DUSE_S3) @@ -103,7 +119,6 @@ if(${BUILD_S3}) find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) find_library(MINIXML_LIBRARY mxml) - find_library(CURL_LIBRARY curl) target_link_libraries( common diff --git a/source/common/src/msg/tmsg.c b/source/common/src/msg/tmsg.c index f1ebcb3f42..7a51669d46 100644 --- a/source/common/src/msg/tmsg.c +++ b/source/common/src/msg/tmsg.c @@ -9959,6 +9959,16 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS } TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->smaId)); + + int32_t addrSize = taosArrayGetSize(pReq->pNotifyAddrUrls); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, addrSize)); + for (int32_t i = 0; i < addrSize; ++i) { + const char *url = taosArrayGetP(pReq->pNotifyAddrUrls, i); + TAOS_CHECK_EXIT((tEncodeCStr(&encoder, url))); + } + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyEventTypes)); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyErrorHandle)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->notifyHistory)); tEndEncode(&encoder); _exit: @@ -10093,6 +10103,30 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->smaId)); } + if (!tDecodeIsEnd(&decoder)) { + int32_t addrSize = 0; + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &addrSize)); + pReq->pNotifyAddrUrls = taosArrayInit(addrSize, 
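/*
 * The notify fields are appended at the very end of the encoded SCMCreateStreamReq and, on the
 * decode side, read back behind a tDecodeIsEnd() check, so a request produced without the new
 * fields still decodes cleanly and leaves pNotifyAddrUrls/notifyEventTypes/notifyErrorHandle/
 * notifyHistory at their zero defaults. Any later addition would follow the same tail-append
 * pattern, e.g. (someFutureField is a hypothetical field, shown only to illustrate the guard):
 *
 *   if (!tDecodeIsEnd(&decoder)) {
 *     TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->someFutureField));
 *   }
 */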
POINTER_BYTES); + if (pReq->pNotifyAddrUrls == NULL) { + TAOS_CHECK_EXIT(terrno); + } + for (int32_t i = 0; i < addrSize; ++i) { + char *url = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(&decoder, &url)); + url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN); + if (url == NULL) { + TAOS_CHECK_EXIT(terrno); + } + if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) { + taosMemoryFree(url); + TAOS_CHECK_EXIT(terrno); + } + } + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyEventTypes)); + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyErrorHandle)); + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->notifyHistory)); + } + tEndDecode(&decoder); _exit: tDecoderClear(&decoder); @@ -10155,6 +10189,7 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) { taosArrayDestroy(pReq->fillNullCols); taosArrayDestroy(pReq->pVgroupVerList); taosArrayDestroy(pReq->pCols); + taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL); } int32_t tEncodeSRSmaParam(SEncoder *pCoder, const SRSmaParam *pRSmaParam) { @@ -12017,6 +12052,43 @@ _exit: return code; } +int32_t tSerializeSMResetStreamReq(void *buf, int32_t bufLen, const SMResetStreamReq *pReq) { + SEncoder encoder = {0}; + int32_t code = 0; + int32_t lino; + int32_t tlen; + tEncoderInit(&encoder, buf, bufLen); + TAOS_CHECK_EXIT(tStartEncode(&encoder)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->name)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->igNotExists)); + tEndEncode(&encoder); + +_exit: + if (code) { + tlen = code; + } else { + tlen = encoder.pos; + } + tEncoderClear(&encoder); + return tlen; +} + +int32_t tDeserializeSMResetStreamReq(void *buf, int32_t bufLen, SMResetStreamReq *pReq) { + SDecoder decoder = {0}; + int32_t code = 0; + int32_t lino; + + tDecoderInit(&decoder, buf, bufLen); + TAOS_CHECK_EXIT(tStartDecode(&decoder)); + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->name)); + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->igNotExists)); + tEndDecode(&decoder); + +_exit: + tDecoderClear(&decoder); + return code; +} + int32_t tEncodeMqSubTopicEp(void **buf, const SMqSubTopicEp *pTopicEp) { int32_t tlen = 0; tlen += taosEncodeString(buf, pTopicEp->topic); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 8bed8d23eb..1f018606a8 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -317,6 +317,8 @@ static const SSysDbTableSchema transSchema[] = { {.name = "oper", .bytes = TSDB_TRANS_OPER_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "stable", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "killable", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + //{.name = "kill_mnode", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, {.name = "last_action_info", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, @@ -406,6 +408,15 @@ static const SSysDbTableSchema userCompactsDetailSchema[] = { {.name = "remain_time(s)", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; +static const SSysDbTableSchema userTransactionDetailSchema[] = { + {.name = "transaction_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "action", 
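/*
 * SMResetStreamReq (declared in tmsg.h earlier in this change) has three fields -- name,
 * igNotExists and igUntreated -- but the serializer pair above only encodes the first two, so
 * igUntreated never crosses the wire. If the flag is meant to reach the mnode, both sides would
 * need one more field, mirroring the existing pattern (sketch only):
 *
 *   TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->igUntreated));    // after igNotExists in tSerializeSMResetStreamReq
 *   TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->igUntreated));   // after igNotExists in tDeserializeSMResetStreamReq
 */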
.bytes = 30 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "obj_type", .bytes = TSDB_TRANS_OBJTYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "result", .bytes = TSDB_TRANS_RESULT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "target", .bytes = TSDB_TRANS_TARGET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "detail", .bytes = TSDB_TRANS_DETAIL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, +}; + static const SSysDbTableSchema anodesSchema[] = { {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "url", .bytes = TSDB_ANALYTIC_ANODE_URL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, @@ -524,6 +535,7 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_ANODES_FULL, anodesFullSchema, tListLen(anodesFullSchema), true}, {TSDB_INS_DISK_USAGE, diskUsageSchema, tListLen(diskUsageSchema), false}, {TSDB_INS_TABLE_FILESETS, filesetsFullSchema, tListLen(filesetsFullSchema), false}, + {TSDB_INS_TABLE_TRANSACTION_DETAILS, userTransactionDetailSchema, tListLen(userTransactionDetailSchema), false}, }; static const SSysDbTableSchema connectionsSchema[] = { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index bd18c9ceb9..c3e0fff578 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3061,6 +3061,33 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha return code; } +int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule, + char** dstTableName) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + if (parTbName[0]) { + if (newSubTableRule && !isAutoTableName(parTbName) && !alreadyAddGroupId(parTbName, gid) && gid != 0 && + stbFullName) { + *dstTableName = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN); + TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno); + + tstrncpy(*dstTableName, parTbName, TSDB_TABLE_NAME_LEN); + code = buildCtbNameAddGroupId(stbFullName, *dstTableName, gid, TSDB_TABLE_NAME_LEN); + TSDB_CHECK_CODE(code, lino, _end); + } else { + *dstTableName = taosStrdup(parTbName); + TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno); + } + } else { + code = buildCtbNameByGroupId(stbFullName, gid, dstTableName); + TSDB_CHECK_CODE(code, lino, _end); + } + +_end: + return code; +} + // return length of encoded data, return -1 if failed int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) { int32_t code = blockDataCheck(pBlock); diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 84e0ffb313..83b1845fd4 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -832,7 +832,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsLogBufferMemoryAllowed = TRANGE(tsLogBufferMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); // clang-format off - TAOS_CHECK_RETURN(cfgAddDir(pCfg, "dataDir", tsDataDir, CFG_SCOPE_SERVER, CFG_DYN_NONE, CFG_CATEGORY_LOCAL)); + TAOS_CHECK_RETURN(cfgAddDir(pCfg, "dataDir", tsDataDir, CFG_SCOPE_SERVER, CFG_DYN_SERVER, CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddFloat(pCfg, "minimalDataDirGB", 2.0f, 0.001f, 10000000, CFG_SCOPE_SERVER, CFG_DYN_NONE, CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, 
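/*
 * buildSinkDestTableName() (implemented above in tdatablock.c) folds the three naming cases a
 * stream sink has to handle into one helper: a user-supplied subtable name that still needs the
 * groupId suffix under the new rule, a name that can be used as-is, and no name at all (derive it
 * from stbFullName + groupId). A minimal caller sketch; the surrounding sink code and the
 * SSDataBlock field names are assumptions based on the declarations in this diff, not part of it:
 *
 *   char   *dstName = NULL;
 *   int32_t code = buildSinkDestTableName(pDataBlock->info.parTbName, stbFullName,
 *                                         pDataBlock->info.id.groupId,
 *                                         IS_NEW_SUBTB_RULE(pTask), &dstName);
 *   if (code == TSDB_CODE_SUCCESS) {
 *     // the helper always heap-allocates *dstName, so the caller owns and frees it
 *     taosMemoryFree(dstName);
 *   }
 */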
CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER, CFG_CATEGORY_LOCAL)); @@ -865,9 +865,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * RPC_MEMORY_USAGE_RATIO * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncSnapReplMaxWaitN", tsSnapReplMaxWaitN, 16, (TSDB_SYNC_SNAP_BUFFER_SIZE >> 2), CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "syncLogBufferMemoryAllowed", tsLogBufferMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); @@ -918,7 +918,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "compactPullupInterval", tsCompactPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushIntervalSec, 1, 100000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlBatchDropNum", tsTtlBatchDropNum, 0, INT32_MAX, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "ttlChangeOnWrite", tsTtlChangeOnWrite, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); @@ -926,11 +926,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "trimVDbIntervalSec", tsTrimVDbIntervalSec, 1, 100000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3MigrateIntervalSec", tsS3MigrateIntervalSec, 600, 100000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "s3MigrateEnabled", tsS3MigrateEnabled, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); - 
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "timeseriesThreshold", tsTimeSeriesThreshold, 0, 2000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "udf", tsStartUdfd, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "udfdResFuncs", tsUdfdResFuncs, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_GLOBAL)); @@ -946,11 +946,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL)); - TAOS_CHECK_RETURN(cfgAddFloat(pCfg, "fPrecision", tsFPrecision, 0.0f, 100000.0f, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); - SConfigItem *pItem = NULL; - TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "fPrecision"); - tsFPrecision = pItem->fval; - TAOS_CHECK_RETURN(cfgAddFloat(pCfg, "dPrecision", tsDPrecision, 0.0f, 1000000.0f, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddFloat(pCfg, "fPrecision", tsFPrecision, 0.0f, 100000.0f, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddFloat(pCfg, "dPrecision", tsDPrecision, 0.0f, 1000000.0f, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxRange", tsMaxRange, 0, 65536, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "curRange", tsCurRange, 0, 65536, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "ifAdtFse", tsIfAdtFse, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_GLOBAL)); @@ -1940,7 +1937,7 @@ int32_t taosReadDataFolder(const char *cfgDir, const char **envCmd, const char * SConfig *pCfg = NULL; TAOS_CHECK_RETURN(cfgInit(&pCfg)); - TAOS_CHECK_GOTO(cfgAddDir(pCfg, "dataDir", tsDataDir, CFG_SCOPE_SERVER, CFG_DYN_NONE, CFG_CATEGORY_LOCAL), &lino, + TAOS_CHECK_GOTO(cfgAddDir(pCfg, "dataDir", tsDataDir, CFG_SCOPE_SERVER, CFG_DYN_SERVER, CFG_CATEGORY_LOCAL), &lino, _exit); TAOS_CHECK_GOTO( cfgAddInt32(pCfg, "debugFlag", dDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER, CFG_CATEGORY_LOCAL), &lino, diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 637713d2f9..0a3543ac07 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -681,6 +681,7 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { if (tDeserializeSRetrieveTableReq(pMsg->pCont, pMsg->contLen, &retrieveReq) != 0) { return 
TSDB_CODE_INVALID_MSG; } + dInfo("retrieve table:%s, user:%s, compactId:%" PRId64, retrieveReq.tb, retrieveReq.user, retrieveReq.compactId); #if 0 if (strcmp(retrieveReq.user, TSDB_DEFAULT_USER) != 0) { code = TSDB_CODE_MND_NO_RIGHTS; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 80ef0d31de..8f110dbcf3 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -185,6 +185,7 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_TB_WITH_TSMA, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_TTL_EXPIRED_TBS_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TABLE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_RESET_STREAM, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_RETRIEVE_ANAL_ALGO, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_RETRIEVE_IP_WHITE, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 6fefd47a6f..43231c7283 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -255,14 +255,8 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { pRpc->info.wrapper = pWrapper; EQItype itype = RPC_QITEM; // rsp msg is not restricted by tsQueueMemoryUsed - if (IsReq(pRpc)) { - if (pRpc->msgType == TDMT_SYNC_HEARTBEAT || pRpc->msgType == TDMT_SYNC_HEARTBEAT_REPLY) - itype = DEF_QITEM; - else - itype = RPC_QITEM; - } else { - itype = DEF_QITEM; - } + if (IsReq(pRpc) && pRpc->msgType != TDMT_SYNC_HEARTBEAT && pRpc->msgType != TDMT_SYNC_HEARTBEAT_REPLY) + itype = RPC_QITEM; code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg); if (code) goto _OVER; diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 90f1bd9b8e..9bed10ce99 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -133,6 +133,12 @@ typedef enum { TRN_EXEC_SERIAL = 1, } ETrnExec; +typedef enum { + TRN_KILL_MODE_SKIP = 0, + TRN_KILL_MODE_INTERUPT = 1, + //TRN_KILL_MODE_ROLLBACK = 2, +} ETrnKillMode; + typedef enum { DND_REASON_ONLINE = 0, DND_REASON_STATUS_MSG_TIMEOUT, @@ -201,6 +207,8 @@ typedef struct { SRWLatch lockRpcArray; int64_t mTraceId; TdThreadMutex mutex; + bool ableToBeKilled; + ETrnKillMode killMode; } STrans; typedef struct { diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h index 7f039bc21f..05280d0d68 100644 --- a/source/dnode/mnode/impl/inc/mndTrans.h +++ b/source/dnode/mnode/impl/inc/mndTrans.h @@ -54,6 +54,8 @@ typedef struct { SSdbRaw *pRaw; int64_t mTraceId; + int64_t startTime; + int64_t endTime; } STransAction; typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen); @@ -80,6 +82,8 @@ void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, voi void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbname); void mndTransAddArbGroupId(STrans *pTrans, int32_t groupId); void mndTransSetSerial(STrans *pTrans); +void mndTransSetBeKilled(STrans *pTrans, bool ableToBeKilled); +void mndTransSetKillMode(STrans *pTrans, ETrnKillMode killMode); void mndTransSetParallel(STrans *pTrans); void 
mndTransSetChangeless(STrans *pTrans); void mndTransSetOper(STrans *pTrans, EOperType oper); diff --git a/source/dnode/mnode/impl/src/mndCompactDetail.c b/source/dnode/mnode/impl/src/mndCompactDetail.c index 9a053066b2..0052f5de56 100644 --- a/source/dnode/mnode/impl/src/mndCompactDetail.c +++ b/source/dnode/mnode/impl/src/mndCompactDetail.c @@ -45,6 +45,8 @@ int32_t mndRetrieveCompactDetail(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB char *sep = NULL; SDbObj *pDb = NULL; + mInfo("retrieve compact detail"); + if (strlen(pShow->db) > 0) { sep = strchr(pShow->db, '.'); if (sep && diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 1d1f5744d4..43abc842f4 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1304,6 +1304,7 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p TAOS_RETURN(code); } mInfo("trans:%d, used to alter db:%s", pTrans->id, pOld->name); + mInfo("trans:%d, used to alter db, ableToBeKilled:%d, killMode:%d", pTrans->id, pTrans->ableToBeKilled, pTrans->killMode); mndTransSetDbName(pTrans, pOld->name, NULL); TAOS_CHECK_GOTO(mndTransCheckConflict(pMnode, pTrans), NULL, _OVER); @@ -1312,6 +1313,8 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p TAOS_CHECK_GOTO(mndSetAlterDbPrepareLogs(pMnode, pTrans, pOld, pNew), NULL, _OVER); TAOS_CHECK_GOTO(mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew), NULL, _OVER); TAOS_CHECK_GOTO(mndSetAlterDbRedoActions(pMnode, pTrans, pOld, pNew), NULL, _OVER); + + mInfo("trans:%d, used to alter db, ableToBeKilled:%d, killMode:%d", pTrans->id, pTrans->ableToBeKilled, pTrans->killMode); TAOS_CHECK_GOTO(mndTransPrepare(pMnode, pTrans), NULL, _OVER); code = 0; diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 49dc62d471..f19eabd885 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -132,6 +132,8 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_COMPACT; } else if (strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, len) == 0) { type = TSDB_MGMT_TABLE_COMPACT_DETAIL; + } else if (strncasecmp(name, TSDB_INS_TABLE_TRANSACTION_DETAILS, len) == 0) { + type = TSDB_MGMT_TABLE_TRANSACTION_DETAIL; } else if (strncasecmp(name, TSDB_INS_TABLE_GRANTS_FULL, len) == 0) { type = TSDB_MGMT_TABLE_GRANTS_FULL; } else if (strncasecmp(name, TSDB_INS_TABLE_GRANTS_LOGS, len) == 0) { @@ -236,7 +238,8 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { SRetrieveTableReq retrieveReq = {0}; TAOS_CHECK_RETURN(tDeserializeSRetrieveTableReq(pReq->pCont, pReq->contLen, &retrieveReq)); - mDebug("process to retrieve systable req db:%s, tb:%s", retrieveReq.db, retrieveReq.tb); + mDebug("process to retrieve systable req db:%s, tb:%s, compactId:%" PRId64, retrieveReq.db, retrieveReq.tb, + retrieveReq.compactId); if (retrieveReq.showId == 0) { STableMetaRsp *pMeta = taosHashGet(pMnode->infosMeta, retrieveReq.tb, strlen(retrieveReq.tb)); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 30953736eb..21516e6848 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -52,6 +52,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock static void mndCancelGetNextStreamTask(SMnode *pMnode, void *pIter); static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq); static int32_t 
mndProcessResumeStreamReq(SRpcMsg *pReq); +static int32_t mndProcessResetStreamReq(SRpcMsg *pReq); static int32_t mndBuildStreamCheckpointSourceReq(void **pBuf, int32_t *pLen, int32_t nodeId, int64_t checkpointId, int64_t streamId, int32_t taskId, int32_t transId, int8_t mndTrigger); static int32_t mndProcessNodeCheck(SRpcMsg *pReq); @@ -128,6 +129,7 @@ int32_t mndInitStream(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_PAUSE_STREAM, mndProcessPauseStreamReq); mndSetMsgHandle(pMnode, TDMT_MND_RESUME_STREAM, mndProcessResumeStreamReq); + mndSetMsgHandle(pMnode, TDMT_MND_RESET_STREAM, mndProcessResetStreamReq); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_STREAMS, mndRetrieveStream); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_STREAMS, mndCancelGetNextStream); @@ -749,6 +751,77 @@ static int32_t doStreamCheck(SMnode *pMnode, SStreamObj *pStreamObj) { return TSDB_CODE_SUCCESS; } +static void *notifyAddrDup(void *p) { return taosStrdup((char *)p); } + +static int32_t addStreamTaskNotifyInfo(const SCMCreateStreamReq *createReq, const SStreamObj *pStream, + SStreamTask *pTask) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA); + + pTask->notifyInfo.pNotifyAddrUrls = taosArrayDup(createReq->pNotifyAddrUrls, notifyAddrDup); + TSDB_CHECK_NULL(pTask->notifyInfo.pNotifyAddrUrls, code, lino, _end, terrno); + pTask->notifyInfo.notifyEventTypes = createReq->notifyEventTypes; + pTask->notifyInfo.notifyErrorHandle = createReq->notifyErrorHandle; + pTask->notifyInfo.streamName = taosStrdup(createReq->name); + TSDB_CHECK_NULL(pTask->notifyInfo.streamName, code, lino, _end, terrno); + pTask->notifyInfo.stbFullName = taosStrdup(createReq->targetStbFullName); + TSDB_CHECK_NULL(pTask->notifyInfo.stbFullName, code, lino, _end, terrno); + pTask->notifyInfo.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); + TSDB_CHECK_NULL(pTask->notifyInfo.pSchemaWrapper, code, lino, _end, terrno); + +_end: + if (code != TSDB_CODE_SUCCESS) { + mError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t addStreamNotifyInfo(SCMCreateStreamReq *createReq, SStreamObj *pStream) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t level = 0; + int32_t nTasks = 0; + SArray *pLevel = NULL; + + TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pStream, code, lino, _end, TSDB_CODE_INVALID_PARA); + + if (taosArrayGetSize(createReq->pNotifyAddrUrls) == 0) { + goto _end; + } + + level = taosArrayGetSize(pStream->tasks); + for (int32_t i = 0; i < level; ++i) { + pLevel = taosArrayGetP(pStream->tasks, i); + nTasks = taosArrayGetSize(pLevel); + for (int32_t j = 0; j < nTasks; ++j) { + code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j)); + TSDB_CHECK_CODE(code, lino, _end); + } + } + + if (pStream->conf.fillHistory && createReq->notifyHistory) { + level = taosArrayGetSize(pStream->pHTasksList); + for (int32_t i = 0; i < level; ++i) { + pLevel = taosArrayGetP(pStream->pHTasksList, i); + nTasks = taosArrayGetSize(pLevel); + for (int32_t j = 0; j < nTasks; ++j) { + code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j)); + TSDB_CHECK_CODE(code, lino, _end); + } + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + mError("%s for stream %s failed at line %d since %s", __func__, pStream->name, lino, tstrerror(code)); + } + 
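/*
 * addStreamTaskNotifyInfo() above gives every task private copies of the notify data (taosArrayDup
 * of the URL array, strdup'd stream/stable names, a cloned schema wrapper), so each task must also
 * release them when it is destroyed. That cleanup is not part of this hunk; a sketch of what it has
 * to cover (the function name and the schema destroy helper are assumed, not taken from this diff):
 *
 *   static void tClearStreamNotifyInfo(SNotifyInfo *pInfo) {
 *     taosArrayDestroyP(pInfo->pNotifyAddrUrls, NULL);   // same call tFreeSCMCreateStreamReq uses for the URL array
 *     taosMemoryFreeClear(pInfo->streamName);
 *     taosMemoryFreeClear(pInfo->stbFullName);
 *     tDeleteSchemaWrapper(pInfo->pSchemaWrapper);       // counterpart of tCloneSSchemaWrapper, name assumed
 *     pInfo->pSchemaWrapper = NULL;
 *   }
 */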
return code; +} + static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; @@ -853,6 +926,14 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } + // add notify info into all stream tasks + code = addStreamNotifyInfo(&createReq, &streamObj); + if (code != TSDB_CODE_SUCCESS) { + mError("stream:%s failed to add stream notify info since %s", createReq.name, tstrerror(code)); + mndTransDrop(pTrans); + goto _OVER; + } + // add stream to trans code = mndPersistStream(pTrans, &streamObj); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -1891,6 +1972,37 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { return TSDB_CODE_ACTION_IN_PROGRESS; } +static int32_t mndProcessResetStreamReq(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + SStreamObj *pStream = NULL; + int32_t code = 0; + + if ((code = grantCheckExpire(TSDB_GRANT_STREAMS)) < 0) { + return code; + } + + SMResetStreamReq resetReq = {0}; + if (tDeserializeSMResetStreamReq(pReq->pCont, pReq->contLen, &resetReq) < 0) { + TAOS_RETURN(TSDB_CODE_INVALID_MSG); + } + + mDebug("recv reset stream req, stream:%s", resetReq.name); + + code = mndAcquireStream(pMnode, resetReq.name, &pStream); + if (pStream == NULL || code != 0) { + if (resetReq.igNotExists) { + mInfo("stream:%s, not exist, not pause stream", resetReq.name); + return 0; + } else { + mError("stream:%s not exist, failed to pause stream", resetReq.name); + TAOS_RETURN(TSDB_CODE_MND_STREAM_NOT_EXIST); + } + } + + //todo(liao hao jun) + return TSDB_CODE_ACTION_IN_PROGRESS; +} + static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo, bool includeAllNodes) { SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = NULL; diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 718b7d0df6..83fad45919 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -14,19 +14,21 @@ */ #define _DEFAULT_SOURCE +#include "mndTrans.h" #include "mndDb.h" #include "mndPrivilege.h" #include "mndShow.h" #include "mndStb.h" #include "mndSubscribe.h" #include "mndSync.h" -#include "mndTrans.h" #include "mndUser.h" +#include "mndVgroup.h" +#include "osTime.h" #define TRANS_VER1_NUMBER 1 #define TRANS_VER2_NUMBER 2 #define TRANS_ARRAY_SIZE 8 -#define TRANS_RESERVE_SIZE 44 +#define TRANS_RESERVE_SIZE 42 static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans); static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *OldTrans, STrans *pOld); @@ -70,7 +72,7 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq); static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextTrans(SMnode *pMnode, void *pIter); - +static int32_t mndRetrieveTransDetail(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static int32_t tsMaxTransId = 0; int32_t mndInitTrans(SMnode *pMnode) { @@ -89,6 +91,7 @@ int32_t mndInitTrans(SMnode *pMnode) { mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_TRANS, mndRetrieveTrans); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TRANS, mndCancelGetNextTrans); + mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_TRANSACTION_DETAIL, mndRetrieveTransDetail); return sdbSetTable(pMnode->pSdb, table); } @@ -156,7 +159,7 @@ SSdbRaw *mndTransEncode(STrans *pTrans) { int32_t code = 0; int32_t lino = 0; terrno = TSDB_CODE_INVALID_MSG; - int8_t sver = taosArrayGetSize(pTrans->prepareActions) ? 
TRANS_VER2_NUMBER : TRANS_VER1_NUMBER; + int8_t sver = TRANS_VER2_NUMBER; int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE + pTrans->paramLen; rawDataLen += mndTransGetActionsSize(pTrans->prepareActions); @@ -220,6 +223,11 @@ SSdbRaw *mndTransEncode(STrans *pTrans) { pIter = taosHashIterate(pTrans->arbGroupIds, pIter); } + if (sver > TRANS_VER1_NUMBER) { + SDB_SET_INT8(pRaw, dataPos, pTrans->ableToBeKilled, _OVER) + SDB_SET_INT32(pRaw, dataPos, pTrans->killMode, _OVER) + } + SDB_SET_RESERVE(pRaw, dataPos, TRANS_RESERVE_SIZE, _OVER) SDB_SET_DATALEN(pRaw, dataPos, _OVER) @@ -310,7 +318,7 @@ SSdbRow *mndTransDecode(SSdbRaw *pRaw) { if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto _OVER; - if (sver != TRANS_VER1_NUMBER && sver != TRANS_VER2_NUMBER) { + if (sver > TRANS_VER2_NUMBER) { terrno = TSDB_CODE_SDB_INVALID_DATA_VER; goto _OVER; } @@ -389,6 +397,13 @@ SSdbRow *mndTransDecode(SSdbRaw *pRaw) { if ((terrno = taosHashPut(pTrans->arbGroupIds, &arbGroupId, sizeof(int32_t), NULL, 0)) != 0) goto _OVER; } + int8_t ableKill = 0; + int8_t killMode = 0; + SDB_GET_INT8(pRaw, dataPos, &ableKill, _OVER) + SDB_GET_INT8(pRaw, dataPos, &killMode, _OVER) + pTrans->ableToBeKilled = ableKill; + pTrans->killMode = killMode; + SDB_GET_RESERVE(pRaw, dataPos, TRANS_RESERVE_SIZE, _OVER) terrno = 0; @@ -430,12 +445,25 @@ static const char *mndTransStr(ETrnStage stage) { } } +static const char *mndTransTypeStr(ETrnAct actionType) { + switch (actionType) { + case TRANS_ACTION_MSG: + return "msg"; + case TRANS_ACTION_RAW: + return "sdb"; + default: + return "invalid"; + } +} + static void mndSetTransLastAction(STrans *pTrans, STransAction *pAction) { if (pAction != NULL) { - pTrans->lastAction = pAction->id; - pTrans->lastMsgType = pAction->msgType; - pTrans->lastEpset = pAction->epSet; - pTrans->lastErrorNo = pAction->errCode; + if (pAction->errCode != TSDB_CODE_ACTION_IN_PROGRESS) { + pTrans->lastAction = pAction->id; + pTrans->lastMsgType = pAction->msgType; + pTrans->lastEpset = pAction->epSet; + pTrans->lastErrorNo = pAction->errCode; + } } else { pTrans->lastAction = 0; pTrans->lastMsgType = 0; @@ -636,6 +664,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, pTrans->policy = policy; pTrans->conflict = conflict; pTrans->exec = TRN_EXEC_PARALLEL; + pTrans->ableToBeKilled = false; pTrans->createdTime = taosGetTimestampMs(); pTrans->prepareActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); @@ -804,6 +833,13 @@ void mndTransAddArbGroupId(STrans *pTrans, int32_t groupId) { void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; } +void mndTransSetBeKilled(STrans *pTrans, bool ableToBeKilled) { pTrans->ableToBeKilled = ableToBeKilled; } + +void mndTransSetKillMode(STrans *pTrans, ETrnKillMode killMode) { + pTrans->ableToBeKilled = true; + pTrans->killMode = killMode; +} + void mndTransSetParallel(STrans *pTrans) { pTrans->exec = TRN_EXEC_PARALLEL; } void mndTransSetChangeless(STrans *pTrans) { pTrans->changeless = true; } @@ -1043,6 +1079,39 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { return TSDB_CODE_INVALID_PARA; } + mInfo("trans:%d, action list:", pTrans->id); + int32_t index = 0; + for (int32_t i = 0; i < taosArrayGetSize(pTrans->prepareActions); ++i, ++index) { + STransAction *pAction = taosArrayGet(pTrans->prepareActions, i); + mInfo("trans:%d, action:%d, %s:%d sdbType:%s, sdbStatus:%s", pTrans->id, index, + mndTransStr(pAction->stage), pAction->id, 
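/*
 * With mndTransSetBeKilled()/mndTransSetKillMode() above, transactions stay non-killable by default
 * (mndTransCreate now sets ableToBeKilled = false); an operation that wants "kill transaction"
 * support opts in right after creating the trans, for example (illustrative only):
 *
 *   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "alter-db");
 *   if (pTrans == NULL) return terrno;
 *   mndTransSetKillMode(pTrans, TRN_KILL_MODE_SKIP);   // also flips ableToBeKilled to true
 *
 * One detail worth double-checking in this hunk: the encoder persists killMode with SDB_SET_INT32
 * while the decoder reads it back with SDB_GET_INT8, and TRANS_RESERVE_SIZE shrank by only the two
 * bytes a pair of int8 fields would occupy. Writing and reading killMode with the same width
 * (int8 on both sides matches the reserve math) keeps the sdb raw layout consistent.
 */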
sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status)); + } + + for (int32_t i = 0; i < taosArrayGetSize(pTrans->redoActions); ++i, ++index) { + STransAction *pAction = taosArrayGet(pTrans->redoActions, i); + mInfo("trans:%d, action:%d, %s:%d msgType:%s", pTrans->id, index, + mndTransStr(pAction->stage), pAction->id, TMSG_INFO(pAction->msgType));; + } + + for (int32_t i = 0; i < taosArrayGetSize(pTrans->commitActions); ++i, ++index) { + STransAction *pAction = taosArrayGet(pTrans->commitActions, i); + mInfo("trans:%d, action:%d, %s:%d sdbType:%s, sdbStatus:%s", pTrans->id, index, + mndTransStr(pAction->stage), i, sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status)); + } + + for (int32_t i = 0; i < taosArrayGetSize(pTrans->undoActions); ++i, ++index) { + STransAction *pAction = taosArrayGet(pTrans->undoActions, i); + if(pAction->actionType == TRANS_ACTION_MSG){ + mInfo("trans:%d, action:%d, %s:%d msgType:%s", pTrans->id, index, + mndTransStr(pAction->stage), pAction->id, TMSG_INFO(pAction->msgType));; + } + else{ + mInfo("trans:%d, action:%d, %s:%d sdbType:%s, sdbStatus:%s", pTrans->id, index, + mndTransStr(pAction->stage), pAction->id, sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status)); + } + } + + TAOS_CHECK_RETURN(mndTransCheckConflict(pMnode, pTrans)); TAOS_CHECK_RETURN(mndTransCheckParallelActions(pMnode, pTrans)); @@ -1260,7 +1329,10 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) { if (pAction != NULL) { pAction->msgReceived = 1; pAction->errCode = pRsp->code; - pTrans->lastErrorNo = pRsp->code; + pAction->endTime = taosGetTimestampMs(); + + // pTrans->lastErrorNo = pRsp->code; + mndSetTransLastAction(pTrans, pAction); mInfo("trans:%d, %s:%d response is received, received code:0x%x(%s), accept:0x%x(%s) retry:0x%x(%s)", transId, mndTransStr(pAction->stage), action, pRsp->code, tstrerror(pRsp->code), pAction->acceptableCode, @@ -1374,6 +1446,8 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio pAction->msgSent = 1; // pAction->msgReceived = 0; pAction->errCode = TSDB_CODE_ACTION_IN_PROGRESS; + pAction->startTime = taosGetTimestampMs(); + pAction->endTime = 0; mInfo("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail); mndSetTransLastAction(pTrans, pAction); @@ -1527,8 +1601,9 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr for (int32_t action = pTrans->actionPos; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pActions, action); - mInfo("trans:%d, current action:%d, stage:%s, actionType(1:msg,2:log):%d", pTrans->id, pTrans->actionPos, - mndTransStr(pAction->stage), pAction->actionType); + mInfo("trans:%d, current action:%d, stage:%s, actionType(1:msg,2:log):%d, msgSent:%d, msgReceived:%d", + pTrans->id, pTrans->actionPos, mndTransStr(pAction->stage), pAction->actionType, pAction->msgSent, + pAction->msgReceived); code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf); if (code == 0) { @@ -1924,13 +1999,25 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) { TAOS_RETURN(TSDB_CODE_MND_TRANS_INVALID_STAGE); } - for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { - STransAction *pAction = taosArrayGet(pArray, i); - mInfo("trans:%d, %s:%d set processed for kill msg received, errCode from %s to success", pTrans->id, - mndTransStr(pAction->stage), i, tstrerror(pAction->errCode)); - pAction->msgSent = 1; - pAction->msgReceived = 1; - pAction->errCode = 0; + if(pTrans->ableToBeKilled == 
false){
+    return TSDB_CODE_MND_TRANS_NOT_ABLE_TO_kILLED;
+  }
+
+  if(pTrans->killMode == TRN_KILL_MODE_SKIP){
+    for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
+      STransAction *pAction = taosArrayGet(pArray, i);
+      mInfo("trans:%d, %s:%d set processed for kill msg received, errCode from %s to success", pTrans->id,
+            mndTransStr(pAction->stage), i, tstrerror(pAction->errCode));
+      pAction->msgSent = 1;
+      pAction->msgReceived = 1;
+      pAction->errCode = 0;
+    }
+  }
+  else if(pTrans->killMode == TRN_KILL_MODE_INTERUPT){
+    pTrans->stage = TRN_STAGE_PRE_FINISH;
+  }
+  else{
+    return TSDB_CODE_MND_TRANS_NOT_ABLE_TO_kILLED;
   }
 
   mndTransExecute(pMnode, pTrans);
@@ -2002,6 +2089,114 @@ void mndTransPullup(SMnode *pMnode) {
   taosArrayDestroy(pArray);
 }
+static char *formatTimestamp(char *buf, int64_t val, int precision) {
+  time_t tt;
+  if (precision == TSDB_TIME_PRECISION_MICRO) {
+    tt = (time_t)(val / 1000000);
+  }
+  else if (precision == TSDB_TIME_PRECISION_NANO) {
+    tt = (time_t)(val / 1000000000);
+  } else {
+    tt = (time_t)(val / 1000);
+  }
+
+  struct tm tm;
+  if (taosLocalTime(&tt, &tm, NULL, 0, NULL) == NULL) {
+    mError("failed to get local time");
+    return NULL;
+  }
+  size_t pos = taosStrfTime(buf, 32, "%Y-%m-%d %H:%M:%S", &tm);
+
+  if (precision == TSDB_TIME_PRECISION_MICRO) {
+    sprintf(buf + pos, ".%06d", (int)(val % 1000000));
+  } else if (precision == TSDB_TIME_PRECISION_NANO) {
+    sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
+  } else {
+    sprintf(buf + pos, ".%03d", (int)(val % 1000));
+  }
+
+  return buf;
+}
+
+static void mndTransLogAction(STrans *pTrans) {
+  char detail[512] = {0};
+  int32_t len = 0;
+  int32_t index = 0;
+
+  if (pTrans->stage == TRN_STAGE_PREPARE) {
+    for (int32_t i = 0; i < taosArrayGetSize(pTrans->prepareActions); ++i, ++index) {
+      len = 0;
+      STransAction *pAction = taosArrayGet(pTrans->prepareActions, i);
+      len += snprintf(detail + len, sizeof(detail) - len, "action:%d, %s:%d sdbType:%s, sdbStatus:%s\n", index,
+                      mndTransStr(pAction->stage), pAction->id, sdbTableName(pAction->pRaw->type),
+                      sdbStatusName(pAction->pRaw->status));
+      mDebug("trans:%d, show tran action, detail:%s", pTrans->id, detail);
+    }
+  }
+
+  if (pTrans->stage == TRN_STAGE_REDO_ACTION) {
+    for (int32_t i = 0; i < taosArrayGetSize(pTrans->redoActions); ++i, ++index) {
+      len = 0;
+      STransAction *pAction = taosArrayGet(pTrans->redoActions, i);
+      if (pAction->actionType == TRANS_ACTION_MSG) {
+        char bufStart[40] = {0};
+        (void)formatTimestamp(bufStart, pAction->startTime, TSDB_TIME_PRECISION_MILLI);
+
+        char endStart[40] = {0};
+        (void)formatTimestamp(endStart, pAction->endTime, TSDB_TIME_PRECISION_MILLI);
+        len += snprintf(detail + len, sizeof(detail) - len,
+                        "action:%d, %s:%d msgType:%s,"
+                        "sent:%d, received:%d, startTime:%s, endTime:%s, ",
+                        index, mndTransStr(pAction->stage), pAction->id, TMSG_INFO(pAction->msgType), pAction->msgSent,
+                        pAction->msgReceived, bufStart, endStart);
+
+        SEpSet epset = pAction->epSet;
+        if (epset.numOfEps > 0) {
+          len += snprintf(detail + len, sizeof(detail) - len, "numOfEps:%d inUse:%d ", epset.numOfEps, epset.inUse);
+          for (int32_t i = 0; i < epset.numOfEps; ++i) {
+            len +=
+                snprintf(detail + len, sizeof(detail) - len, "ep:%d-%s:%u ", i, epset.eps[i].fqdn, epset.eps[i].port);
+          }
+        }
+
+        len += snprintf(detail + len, sizeof(detail) - len, ", errCode:0x%x(%s)\n", pAction->errCode & 0xFFFF,
+                        tstrerror(pAction->errCode));
+      } else {
+        len += snprintf(detail + len, sizeof(detail) - len, "action:%d, %s:%d sdbType:%s, sdbStatus:%s, written:%d\n",
+                        index,
mndTransStr(pAction->stage), pAction->id, sdbTableName(pAction->pRaw->type), + sdbStatusName(pAction->pRaw->status), pAction->rawWritten); + } + mDebug("trans:%d, show tran action, detail:%s", pTrans->id, detail); + } + } + + if (pTrans->stage == TRN_STAGE_COMMIT_ACTION) { + for (int32_t i = 0; i < taosArrayGetSize(pTrans->commitActions); ++i, ++index) { + len = 0; + STransAction *pAction = taosArrayGet(pTrans->commitActions, i); + len += snprintf(detail + len, sizeof(detail) - len, "action:%d, %s:%d sdbType:%s, sdbStatus:%s\n", index, + mndTransStr(pAction->stage), i, sdbTableName(pAction->pRaw->type), + sdbStatusName(pAction->pRaw->status)); + mDebug("trans:%d, show tran action, detail:%s", pTrans->id, detail); + } + + for (int32_t i = 0; i < taosArrayGetSize(pTrans->undoActions); ++i, ++index) { + len = 0; + STransAction *pAction = taosArrayGet(pTrans->undoActions, i); + if (pAction->actionType == TRANS_ACTION_MSG) { + len += snprintf(detail + len, sizeof(detail) - len, "action:%d, %s:%d msgType:%s\n", index, + mndTransStr(pAction->stage), pAction->id, TMSG_INFO(pAction->msgType)); + ; + } else { + len += snprintf(detail + len, sizeof(detail) - len, "action:%d, %s:%d sdbType:%s, sdbStatus:%s\n", index, + mndTransStr(pAction->stage), pAction->id, sdbTableName(pAction->pRaw->type), + sdbStatusName(pAction->pRaw->status)); + } + mDebug("trans:%d, show tran action, detail:%s", pTrans->id, detail); + } + } +} + static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; @@ -2044,6 +2239,20 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)stbname, false), pTrans, &lino, _OVER); + const char *killableStr = pTrans->ableToBeKilled ? "yes" : "no"; + char killableVstr[10 + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(killableVstr, killableStr, 10 + VARSTR_HEADER_SIZE); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)killableVstr, false), pTrans, &lino, _OVER); + + /* + const char *killModeStr = pTrans->killMode == TRN_KILL_MODE_SKIP ? 
"skip" : "interrupt"; + char killModeVstr[10 + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(killModeVstr, killModeStr, 24); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)killModeVstr, false), pTrans, &lino, _OVER); + */ + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false), pTrans, &lino, _OVER); @@ -2061,13 +2270,15 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl len += tsnprintf(detail + len, sizeof(detail) - len, "msgType:%s numOfEps:%d inUse:%d ", TMSG_INFO(pTrans->lastMsgType), epset.numOfEps, epset.inUse); for (int32_t i = 0; i < pTrans->lastEpset.numOfEps; ++i) { - len += tsnprintf(detail + len, sizeof(detail) - len, "ep:%d-%s:%u ", i, epset.eps[i].fqdn, epset.eps[i].port); + len += snprintf(detail + len, sizeof(detail) - len, "ep:%d-%s:%u ", i, epset.eps[i].fqdn, epset.eps[i].port); } } STR_WITH_MAXSIZE_TO_VARSTR(lastInfo, detail, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); RETRIEVE_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)lastInfo, false), pTrans, &lino, _OVER); + mndTransLogAction(pTrans); + numOfRows++; sdbRelease(pSdb, pTrans); } @@ -2078,6 +2289,239 @@ _OVER: return numOfRows; } +static int32_t mndShowTransCommonColumns(SShowObj *pShow, SSDataBlock *pBlock, STransAction *pAction, + int32_t transactionId, int32_t curActionId, int32_t numOfRows, int32_t *cols) { + int32_t code = 0; + int32_t lino = 0; + int32_t len = 0; + + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, (*cols)++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)&transactionId, false), &lino, _OVER); + + char action[30 + 1] = {0}; + if (curActionId == pAction->id) { + len += snprintf(action + len, sizeof(action) - len, "%s:%d(%s)<-last", mndTransStr(pAction->stage), pAction->id, + mndTransTypeStr(pAction->actionType)); + } else { + len += snprintf(action + len, sizeof(action) - len, "%s:%d(%s)", mndTransStr(pAction->stage), pAction->id, + mndTransTypeStr(pAction->actionType)); + } + char actionVStr[30 + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(actionVStr, action, pShow->pMeta->pSchemas[*cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, (*cols)++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)actionVStr, false), &lino, _OVER); +_OVER: + if (code != 0) mError("failed to retrieve at line:%d, since %s", lino, tstrerror(code)); + return code; +} + +static void mndShowTransAction(SShowObj *pShow, SSDataBlock *pBlock, STransAction *pAction, int32_t transactionId, + int32_t curActionId, int32_t rows, int32_t numOfRows) { + int32_t code = 0; + int32_t lino = 0; + int32_t len = 0; + int32_t cols = 0; + + cols = 0; + + if (mndShowTransCommonColumns(pShow, pBlock, pAction, transactionId, curActionId, numOfRows, &cols) != 0) return; + + if (pAction->actionType == TRANS_ACTION_MSG) { + int32_t len = 0; + + char objType[TSDB_TRANS_OBJTYPE_LEN + 1] = {0}; + len += snprintf(objType + len, sizeof(objType) - len, "%s(s:%d,r:%d)", TMSG_INFO(pAction->msgType), + pAction->msgSent, pAction->msgReceived); + char objTypeVStr[TSDB_TRANS_OBJTYPE_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(objTypeVStr, objType, pShow->pMeta->pSchemas[cols].bytes); + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + 
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)objTypeVStr, false), &lino, _OVER); + + char result[TSDB_TRANS_RESULT_LEN + 1] = {0}; + len = 0; + len += snprintf(result + len, sizeof(result) - len, "errCode:0x%x(%s)", pAction->errCode & 0xFFFF, + tstrerror(pAction->errCode)); + char resultVStr[TSDB_TRANS_RESULT_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(resultVStr, result, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)resultVStr, false), &lino, _OVER); + + char target[TSDB_TRANS_TARGET_LEN] = {0}; + len = 0; + SEpSet epset = pAction->epSet; + if (epset.numOfEps > 0) { + for (int32_t i = 0; i < epset.numOfEps; ++i) { + len += snprintf(target + len, sizeof(target) - len, "ep:%d-%s:%u,", i, epset.eps[i].fqdn, epset.eps[i].port); + } + len += snprintf(target + len, sizeof(target) - len, "(%d:%d) ", epset.numOfEps, epset.inUse); + } + char targetVStr[TSDB_TRANS_TARGET_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(targetVStr, target, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)targetVStr, false), &lino, _OVER); + + char detail[TSDB_TRANS_DETAIL_LEN] = {0}; + len = 0; + char bufStart[40] = {0}; + if (pAction->startTime > 0) (void)formatTimestamp(bufStart, pAction->startTime, TSDB_TIME_PRECISION_MILLI); + char bufEnd[40] = {0}; + if (pAction->endTime > 0) (void)formatTimestamp(bufEnd, pAction->endTime, TSDB_TIME_PRECISION_MILLI); + len += snprintf(detail + len, sizeof(detail) - len, "startTime:%s, endTime:%s, ", bufStart, bufEnd); + char detailVStr[TSDB_TRANS_DETAIL_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(detailVStr, detail, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)detailVStr, false), &lino, _OVER); + + } else { + int32_t len = 0; + + char objType[TSDB_TRANS_OBJTYPE_LEN + 1] = {0}; + if (pAction->pRaw->type == SDB_VGROUP) { + SSdbRow *pRow = mndVgroupActionDecode(pAction->pRaw); + SVgObj *pVgroup = sdbGetRowObj(pRow); + len += snprintf(objType + len, sizeof(objType) - len, "%s(%d)", sdbTableName(pAction->pRaw->type), pVgroup->vgId); + taosMemoryFreeClear(pRow); + } else { + strcpy(objType, sdbTableName(pAction->pRaw->type)); + } + char objTypeVStr[TSDB_TRANS_OBJTYPE_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(objTypeVStr, objType, pShow->pMeta->pSchemas[cols].bytes); + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)objTypeVStr, false), &lino, _OVER); + + char result[TSDB_TRANS_RESULT_LEN + 1] = {0}; + len = 0; + len += snprintf(result + len, sizeof(result) - len, "rawWritten:%d", pAction->rawWritten); + char resultVStr[TSDB_TRANS_RESULT_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(resultVStr, result, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)resultVStr, false), &lino, _OVER); + + char target[TSDB_TRANS_TARGET_LEN] = ""; + char targetVStr[TSDB_TRANS_TARGET_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(targetVStr, target, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + 
TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)targetVStr, false), &lino, _OVER); + + char detail[TSDB_TRANS_DETAIL_LEN] = {0}; + len = 0; + len += snprintf(detail + len, sizeof(detail) - len, "sdbStatus:%s", sdbStatusName(pAction->pRaw->status)); + char detailVStr[TSDB_TRANS_DETAIL_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(detailVStr, detail, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + TAOS_CHECK_GOTO(colDataSetVal(pColInfo, numOfRows, (const char *)detailVStr, false), &lino, _OVER); + } + +_OVER: + if (code != 0) mError("failed to retrieve at line:%d, since %s", lino, tstrerror(code)); +} + +static SArray *mndTransGetAction(STrans *pTrans, ETrnStage stage) { + if (stage == TRN_STAGE_PREPARE) { + return pTrans->prepareActions; + } + if (stage == TRN_STAGE_REDO_ACTION) { + return pTrans->redoActions; + } + if (stage == TRN_STAGE_COMMIT_ACTION) { + return pTrans->commitActions; + } + if (stage == TRN_STAGE_UNDO_ACTION) { + return pTrans->undoActions; + } + return NULL; +} + +typedef struct STransDetailIter { + void *pIter; + STrans *pTrans; + ETrnStage stage; + int32_t num; +} STransDetailIter; + +static void mndTransShowActions(SSdb *pSdb, STransDetailIter *pShowIter, SShowObj *pShow, SSDataBlock *pBlock, + int32_t rows, int32_t *numOfRows, SArray *pActions, int32_t end, int32_t start) { + int32_t actionNum = taosArrayGetSize(pActions); + mInfo("stage:%s, Actions num:%d", mndTransStr(pShowIter->stage), actionNum); + + for (int32_t i = start; i < actionNum; ++i) { + STransAction *pAction = taosArrayGet(pShowIter->pTrans->redoActions, i); + mndShowTransAction(pShow, pBlock, pAction, pShowIter->pTrans->id, pShowIter->pTrans->lastAction, rows, *numOfRows); + (*numOfRows)++; + if (*numOfRows >= rows) break; + } + + if (*numOfRows == end) { + sdbRelease(pSdb, pShowIter->pTrans); + pShowIter->pTrans = NULL; + pShowIter->num = 0; + } else { + pShowIter->pTrans = pShowIter->pTrans; + pShowIter->stage = pShowIter->pTrans->stage; + pShowIter->num += (*numOfRows); + } +} + +static int32_t mndRetrieveTransDetail(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; + int32_t numOfRows = 0; + + int32_t code = 0; + int32_t lino = 0; + + mInfo("start to mndRetrieveTransDetail, rows:%d, pShow->numOfRows:%d, pShow->pIter:%p", rows, pShow->numOfRows, + pShow->pIter); + + if (pShow->pIter == NULL) { + pShow->pIter = taosMemoryMalloc(sizeof(STransDetailIter)); + if (pShow->pIter == NULL) { + mError("failed to malloc for pShow->pIter"); + return 0; + } + memset(pShow->pIter, 0, sizeof(STransDetailIter)); + } + + STransDetailIter *pShowIter = (STransDetailIter *)pShow->pIter; + + while (numOfRows < rows) { + if (pShowIter->pTrans == NULL) { + pShowIter->pIter = sdbFetch(pSdb, SDB_TRANS, pShowIter->pIter, (void **)&(pShowIter->pTrans)); + mDebug("retrieve trans detail from fetch, pShow->pIter:%p, pTrans:%p", pShowIter->pIter, pShowIter->pTrans); + if (pShowIter->pIter == NULL) break; + mInfo("retrieve trans detail from fetch, id:%d, trans stage:%d, IterNum:%d", pShowIter->pTrans->id, + pShowIter->pTrans->stage, pShowIter->num); + + SArray *pActions = mndTransGetAction(pShowIter->pTrans, pShowIter->pTrans->stage); + + mndTransShowActions(pSdb, pShowIter, pShow, pBlock, rows, &numOfRows, pActions, taosArrayGetSize(pActions), 0); + break; + } else { + mInfo("retrieve trans detail from iter, id:%d, iterStage:%d, IterNum:%d", pShowIter->pTrans->id, 
pShowIter->stage, + pShowIter->num); + SArray *pActions = mndTransGetAction(pShowIter->pTrans, pShowIter->stage); + + mndTransShowActions(pSdb, pShowIter, pShow, pBlock, rows, &numOfRows, pActions, + taosArrayGetSize(pActions) - pShowIter->num, pShowIter->num); + break; + } + } + +_OVER: + pShow->numOfRows += numOfRows; + + if (code != 0) { + mError("failed to retrieve at line:%d, since %s", lino, tstrerror(code)); + } else { + mInfo("retrieve trans detail, numOfRows:%d, pShow->numOfRows:%d", numOfRows, pShow->numOfRows) + } + if (numOfRows == 0) { + taosMemoryFree(pShow->pIter); + pShow->pIter = NULL; + } + return numOfRows; +} + static void mndCancelGetNextTrans(SMnode *pMnode, void *pIter) { SSdb *pSdb = pMnode->pSdb; sdbCancelFetchByType(pSdb, pIter, SDB_TRANS); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 0bce21290b..e20afb7201 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -2766,12 +2766,14 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb mndTransSetSerial(pTrans); - if (pNewVgroup->replica == 1 && pNewDb->cfg.replications == 3) { + if (pNewDb->cfg.replications == 3) { mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId, pVgroup->vnodeGid[0].dnodeId); // add second - TAOS_CHECK_RETURN(mndAddVnodeToVgroup(pMnode, pTrans, pNewVgroup, pArray)); + if (pNewVgroup->replica == 1){ + TAOS_CHECK_RETURN(mndAddVnodeToVgroup(pMnode, pTrans, pNewVgroup, pArray)); + } // learner stage pNewVgroup->vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER; @@ -2790,7 +2792,9 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb TAOS_CHECK_RETURN(mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, pNewVgroup)); // add third - TAOS_CHECK_RETURN(mndAddVnodeToVgroup(pMnode, pTrans, pNewVgroup, pArray)); + if (pNewVgroup->replica == 2){ + TAOS_CHECK_RETURN (mndAddVnodeToVgroup(pMnode, pTrans, pNewVgroup, pArray)); + } pNewVgroup->vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER; pNewVgroup->vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER; @@ -2802,7 +2806,7 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb TAOS_CHECK_RETURN(mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, pNewVgroup, &pNewVgroup->vnodeGid[2])); TAOS_CHECK_RETURN(mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, pNewVgroup)); - } else if (pNewVgroup->replica == 3 && pNewDb->cfg.replications == 1) { + } else if (pNewDb->cfg.replications == 1) { mInfo("db:%s, vgId:%d, will remove 2 vnodes, vn:0 dnode:%d vn:1 dnode:%d vn:2 dnode:%d", pVgroup->dbName, pVgroup->vgId, pVgroup->vnodeGid[0].dnodeId, pVgroup->vnodeGid[1].dnodeId, pVgroup->vnodeGid[2].dnodeId); @@ -2819,9 +2823,9 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb TAOS_CHECK_RETURN(mndRemoveVnodeFromVgroup(pMnode, pTrans, pNewVgroup, pArray, &del2)); TAOS_CHECK_RETURN(mndAddDropVnodeAction(pMnode, pTrans, pNewDb, pNewVgroup, &del2, true)); TAOS_CHECK_RETURN( - mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, pNewVgroup, pNewVgroup->vnodeGid[0].dnodeId)); + mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, pNewVgroup, pNewVgroup->vnodeGid[0].dnodeId)); TAOS_CHECK_RETURN(mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, pNewVgroup)); - } else if (pNewVgroup->replica == 1 && pNewDb->cfg.replications == 2) { + } else if (pNewDb->cfg.replications == 2) { mInfo("db:%s, vgId:%d, will add 1 vnode, vn:0 dnode:%d", 
pVgroup->dbName, pVgroup->vgId, pVgroup->vnodeGid[0].dnodeId); diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 8f63cc8779..b90e1844ae 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -75,6 +75,7 @@ set( "src/tq/tqSnapshot.c" "src/tq/tqStreamStateSnap.c" "src/tq/tqStreamTaskSnap.c" + "src/tq/tqStreamNotify.c" ) aux_source_directory("src/tsdb/" TSDB_SOURCE_FILES) diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 12a803d1d8..e0bf51b333 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -159,6 +159,11 @@ int32_t buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t n SArray* pTagArray, bool newSubTableRule, SVCreateTbReq** pReq); int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type); +// tq send notifications +int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap); +void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap); +int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode); + #define TQ_ERR_GO_TO_END(c) \ do { \ code = c; \ diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 5bf0a9b199..5a61c1c124 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -81,6 +81,8 @@ typedef struct SCommitInfo SCommitInfo; typedef struct SCompactInfo SCompactInfo; typedef struct SQueryNode SQueryNode; +typedef struct SStreamNotifyHandleMap SStreamNotifyHandleMap; + #define VNODE_META_TMP_DIR "meta.tmp" #define VNODE_META_BACKUP_DIR "meta.backup" @@ -499,6 +501,9 @@ struct SVnode { int64_t blockSeq; SQHandle* pQuery; SVMonitorObj monitor; + + // Notification Handles + SStreamNotifyHandleMap* pNotifyHandleMap; }; #define TD_VID(PVNODE) ((PVNODE)->config.vgId) diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 7ba77cf813..98ea92125c 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -16,8 +16,6 @@ #include "tcommon.h" #include "tq.h" -#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) - typedef struct STableSinkInfo { uint64_t uid; tstr name; @@ -983,7 +981,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat tqDebug("s-task:%s append groupId:%" PRId64 " for generated dstTable:%s", id, groupId, dstTableName); if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) { code = buildCtbNameAddGroupId(NULL, dstTableName, groupId, sizeof(pDataBlock->info.parTbName)); - } else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER && stbFullName) { + } else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER && stbFullName) { code = buildCtbNameAddGroupId(stbFullName, dstTableName, groupId, sizeof(pDataBlock->info.parTbName)); } if (code != TSDB_CODE_SUCCESS) { @@ -1150,6 +1148,12 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { return; } + code = tqSendAllNotifyEvents(pBlocks, pTask, pVnode); + if (code != TSDB_CODE_SUCCESS) { + tqError("vgId: %d, s-task:%s failed to send all event notifications", vgId, id); + // continue processing even if notification fails + } + bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks); if (!onlySubmitData || pTask->subtableWithoutMd5 == 1) { tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete 
block, submit one-by-one", vgId, id, @@ -1173,6 +1177,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { continue; } else if (pDataBlock->info.type == STREAM_DROP_CHILD_TABLE && pTask->subtableWithoutMd5) { code = doBuildAndSendDropTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid); + } else if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) { + continue; } else { code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs); } @@ -1317,6 +1323,10 @@ void rebuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVno continue; } + if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) { + continue; + } + hasSubmit = true; pTask->execInfo.sink.numOfBlocks += 1; uint64_t groupId = pDataBlock->info.id.groupId; diff --git a/source/dnode/vnode/src/tq/tqStreamNotify.c b/source/dnode/vnode/src/tq/tqStreamNotify.c new file mode 100644 index 0000000000..46ee95d3b9 --- /dev/null +++ b/source/dnode/vnode/src/tq/tqStreamNotify.c @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "cmdnodes.h" +#include "tq.h" + +#ifndef WINDOWS +#include "curl/curl.h" +#endif + +#define STREAM_EVENT_NOTIFY_RETRY_MS 50 // 50ms + +typedef struct SStreamNotifyHandle { + TdThreadMutex mutex; +#ifndef WINDOWS + CURL* curl; +#endif + char* url; +} SStreamNotifyHandle; + +struct SStreamNotifyHandleMap { + TdThreadMutex gMutex; + SHashObj* handleMap; +}; + +static void stopStreamNotifyConn(SStreamNotifyHandle* pHandle) { +#ifndef WINDOWS + if (pHandle == NULL || pHandle->curl == NULL) { + return; + } + // status code 1000 means normal closure + size_t len = 0; + uint16_t status = htons(1000); + CURLcode res = curl_ws_send(pHandle->curl, &status, sizeof(status), &len, 0, CURLWS_CLOSE); + if (res != CURLE_OK) { + tqWarn("failed to send ws-close msg to %s for %d", pHandle->url ? 
pHandle->url : "", res); + } + // TODO: add wait mechanism for peer connection close response + curl_easy_cleanup(pHandle->curl); +#endif +} + +static void destroyStreamNotifyHandle(void* ptr) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamNotifyHandle** ppHandle = ptr; + + if (ppHandle == NULL || *ppHandle == NULL) { + return; + } + code = taosThreadMutexDestroy(&(*ppHandle)->mutex); + stopStreamNotifyConn(*ppHandle); + taosMemoryFreeClear((*ppHandle)->url); + taosMemoryFreeClear(*ppHandle); +} + +static void releaseStreamNotifyHandle(SStreamNotifyHandle** ppHandle) { + if (ppHandle == NULL || *ppHandle == NULL) { + return; + } + (void)taosThreadMutexUnlock(&(*ppHandle)->mutex); + *ppHandle = NULL; +} + +static int32_t acquireStreamNotifyHandle(SStreamNotifyHandleMap* pMap, const char* url, + SStreamNotifyHandle** ppHandle) { +#ifndef WINDOWS + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + bool gLocked = false; + SStreamNotifyHandle** ppFindHandle = NULL; + SStreamNotifyHandle* pNewHandle = NULL; + CURL* newCurl = NULL; + CURLcode res = CURLE_OK; + + TSDB_CHECK_NULL(pMap, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(url, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(ppHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *ppHandle = NULL; + + code = taosThreadMutexLock(&pMap->gMutex); + TSDB_CHECK_CODE(code, lino, _end); + gLocked = true; + + ppFindHandle = taosHashGet(pMap->handleMap, url, strlen(url)); + if (ppFindHandle == NULL) { + pNewHandle = taosMemoryCalloc(1, sizeof(SStreamNotifyHandle)); + TSDB_CHECK_NULL(pNewHandle, code, lino, _end, terrno); + code = taosThreadMutexInit(&pNewHandle->mutex, NULL); + TSDB_CHECK_CODE(code, lino, _end); + code = taosHashPut(pMap->handleMap, url, strlen(url), &pNewHandle, POINTER_BYTES); + TSDB_CHECK_CODE(code, lino, _end); + *ppHandle = pNewHandle; + pNewHandle = NULL; + } else { + *ppHandle = *ppFindHandle; + } + + code = taosThreadMutexLock(&(*ppHandle)->mutex); + TSDB_CHECK_CODE(code, lino, _end); + + (void)taosThreadMutexUnlock(&pMap->gMutex); + gLocked = false; + + if ((*ppHandle)->curl == NULL) { + newCurl = curl_easy_init(); + TSDB_CHECK_NULL(newCurl, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_URL, url); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYPEER, 0L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYHOST, 0L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_TIMEOUT, 3L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_CONNECT_ONLY, 2L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_perform(newCurl); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + (*ppHandle)->curl = newCurl; + newCurl = NULL; + } + + if ((*ppHandle)->url == NULL) { + (*ppHandle)->url = taosStrdup(url); + TSDB_CHECK_NULL((*ppHandle)->url, code, lino, _end, terrno); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code)); + if (*ppHandle) { + releaseStreamNotifyHandle(ppHandle); + } + *ppHandle = NULL; + } + if (newCurl) { + curl_easy_cleanup(newCurl); + } + if (pNewHandle) { + destroyStreamNotifyHandle(&pNewHandle); 
+ } + if (gLocked) { + (void)taosThreadMutexUnlock(&pMap->gMutex); + } + return code; +#else + tqError("stream notify events is not supported on windows"); + return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS; +#endif +} + +int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamNotifyHandleMap* pMap = NULL; + + TSDB_CHECK_NULL(ppMap, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *ppMap = NULL; + pMap = taosMemoryCalloc(1, sizeof(SStreamNotifyHandleMap)); + TSDB_CHECK_NULL(pMap, code, lino, _end, terrno); + code = taosThreadMutexInit(&pMap->gMutex, NULL); + TSDB_CHECK_CODE(code, lino, _end); + pMap->handleMap = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + TSDB_CHECK_NULL(pMap->handleMap, code, lino, _end, terrno); + taosHashSetFreeFp(pMap->handleMap, destroyStreamNotifyHandle); + *ppMap = pMap; + pMap = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (pMap != NULL) { + tqDestroyNotifyHandleMap(&pMap); + } + return code; +} + +void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + if (*ppMap == NULL) { + return; + } + taosHashCleanup((*ppMap)->handleMap); + code = taosThreadMutexDestroy(&(*ppMap)->gMutex); + taosMemoryFreeClear((*ppMap)); +} + +#define JSON_CHECK_ADD_ITEM(obj, str, item) \ + TSDB_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + +static int32_t getStreamNotifyEventHeader(const char* streamName, char** pHeader) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + cJSON* obj = NULL; + cJSON* streams = NULL; + cJSON* stream = NULL; + char msgId[37]; + + TSDB_CHECK_NULL(streamName, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pHeader, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pHeader = NULL; + + code = taosGetSystemUUIDLimit36(msgId, sizeof(msgId)); + TSDB_CHECK_CODE(code, lino, _end); + + stream = cJSON_CreateObject(); + TSDB_CHECK_NULL(stream, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(stream, "streamName", cJSON_CreateStringReference(streamName)); + JSON_CHECK_ADD_ITEM(stream, "events", cJSON_CreateArray()); + + streams = cJSON_CreateArray(); + TSDB_CHECK_CONDITION(cJSON_AddItemToArray(streams, stream), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + stream = NULL; + + obj = cJSON_CreateObject(); + TSDB_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(obj, "messageId", cJSON_CreateStringReference(msgId)); + JSON_CHECK_ADD_ITEM(obj, "timestamp", cJSON_CreateNumber(taosGetTimestampMs())); + JSON_CHECK_ADD_ITEM(obj, "streams", streams); + streams = NULL; + + *pHeader = cJSON_PrintUnformatted(obj); + TSDB_CHECK_NULL(*pHeader, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (stream != NULL) { + cJSON_Delete(stream); + } + if (streams != NULL) { + cJSON_Delete(streams); + } + if (obj != NULL) { + cJSON_Delete(obj); + } + return code; +} + +static int32_t packupStreamNotifyEvent(const char* streamName, const SArray* pBlocks, char** pMsg, + int32_t* nNotifyEvents) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t numOfBlocks = 0; + int32_t msgHeaderLen = 0; + int32_t msgTailLen = 0; + int32_t msgLen = 0; + char* msgHeader = NULL; + const char* 
msgTail = "]}]}"; + char* msg = NULL; + + TSDB_CHECK_NULL(pMsg, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pMsg = NULL; + numOfBlocks = taosArrayGetSize(pBlocks); + *nNotifyEvents = 0; + + for (int32_t i = 0; i < numOfBlocks; ++i) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) { + continue; + } + + SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { + char* val = colDataGetVarData(pEventStrCol, j); + msgLen += varDataLen(val) + 1; + } + *nNotifyEvents += pDataBlock->info.rows; + } + + if (msgLen == 0) { + // skip since no notification events found + goto _end; + } + + code = getStreamNotifyEventHeader(streamName, &msgHeader); + TSDB_CHECK_CODE(code, lino, _end); + msgHeaderLen = strlen(msgHeader); + msgTailLen = strlen(msgTail); + msgLen += msgHeaderLen; + + msg = taosMemoryMalloc(msgLen); + TSDB_CHECK_NULL(msg, code, lino, _end, terrno); + char* p = msg; + TAOS_STRNCPY(p, msgHeader, msgHeaderLen); + p += msgHeaderLen - msgTailLen; + + for (int32_t i = 0; i < numOfBlocks; ++i) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) { + continue; + } + + SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { + char* val = colDataGetVarData(pEventStrCol, j); + TAOS_STRNCPY(p, varDataVal(val), varDataLen(val)); + p += varDataLen(val); + *(p++) = ','; + } + } + + p -= 1; + TAOS_STRNCPY(p, msgTail, msgTailLen); + *(p + msgTailLen) = '\0'; + + *pMsg = msg; + msg = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (msgHeader != NULL) { + cJSON_free(msgHeader); + } + if (msg != NULL) { + taosMemoryFreeClear(msg); + } + return code; +} + +static int32_t sendSingleStreamNotify(SStreamNotifyHandle* pHandle, char* msg) { +#ifndef WINDOWS + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + CURLcode res = CURLE_OK; + uint64_t sentLen = 0; + uint64_t totalLen = 0; + size_t nbytes = 0; + + TSDB_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pHandle->curl, code, lino, _end, TSDB_CODE_INVALID_PARA); + + totalLen = strlen(msg); + while (sentLen < totalLen) { + res = curl_ws_send(pHandle->curl, msg + sentLen, totalLen - sentLen, &nbytes, 0, CURLWS_TEXT); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + sentLen += nbytes; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code)); + stopStreamNotifyConn(pHandle); + } + return code; +#else + tqError("stream notify events is not supported on windows"); + return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS; +#endif +} + +int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + char* msg = NULL; + int32_t nNotifyAddr = 0; + int32_t nNotifyEvents = 0; + SStreamNotifyHandle* pHandle = NULL; + + TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA); + + nNotifyAddr = taosArrayGetSize(pTask->notifyInfo.pNotifyAddrUrls); + if (nNotifyAddr == 0) { + goto _end; + } + + code = 
packupStreamNotifyEvent(pTask->notifyInfo.streamName, pBlocks, &msg, &nNotifyEvents); + TSDB_CHECK_CODE(code, lino, _end); + if (msg == NULL) { + goto _end; + } + + tqDebug("stream task %s prepare to send %d notify events, total msg length: %" PRIu64, pTask->notifyInfo.streamName, + nNotifyEvents, (uint64_t)strlen(msg)); + + for (int32_t i = 0; i < nNotifyAddr; ++i) { + if (streamTaskShouldStop(pTask)) { + break; + } + const char* url = taosArrayGetP(pTask->notifyInfo.pNotifyAddrUrls, i); + code = acquireStreamNotifyHandle(pVnode->pNotifyHandleMap, url, &pHandle); + if (code != TSDB_CODE_SUCCESS) { + tqError("failed to get stream notify handle of %s", url); + if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) { + // retry for event message sending in PAUSE error handling mode + taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS); + --i; + continue; + } else { + // simply ignore the failure in DROP error handling mode + code = TSDB_CODE_SUCCESS; + continue; + } + } + code = sendSingleStreamNotify(pHandle, msg); + if (code != TSDB_CODE_SUCCESS) { + tqError("failed to send stream notify handle to %s since %s", url, tstrerror(code)); + if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) { + // retry for event message sending in PAUSE error handling mode + taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS); + --i; + } else { + // simply ignore the failure in DROP error handling mode + code = TSDB_CODE_SUCCESS; + } + } else { + tqDebug("stream task %s send %d notify events to %s successfully", pTask->notifyInfo.streamName, nNotifyEvents, + url); + } + releaseStreamNotifyHandle(&pHandle); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (msg) { + taosMemoryFreeClear(msg); + } + return code; +} diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 06b7b33cd8..1880156f61 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -86,6 +86,14 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { if (code) { return code; } + + code = + qSetStreamNotifyInfo(pTask->exec.pExecutor, pTask->notifyInfo.notifyEventTypes, + pTask->notifyInfo.pSchemaWrapper, pTask->notifyInfo.stbFullName, IS_NEW_SUBTB_RULE(pTask)); + if (code) { + tqError("s-task:%s failed to set stream notify info, code:%s", pTask->id.idStr, tstrerror(code)); + return code; + } } streamSetupScheduleTrigger(pTask); @@ -1357,4 +1365,4 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { streamMetaReleaseTask(pMeta, pTask); return 0; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c index f51ffe0c83..818bce4d48 100644 --- a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#include "tsdbDataFileRW.h" #include "meta.h" +#include "tsdbDataFileRW.h" // SDataFileReader ============================================= struct SDataFileReader { @@ -299,6 +299,7 @@ extern int32_t tBlockDataDecompress(SBufferReader *br, SBlockData *blockData, SB int32_t tsdbDataFileReadBlockData(SDataFileReader *reader, const SBrinRecord *record, SBlockData *bData) { int32_t code = 0; int32_t lino = 0; + int32_t fid = reader->config->files[TSDB_FTYPE_DATA].file.fid; SBuffer *buffer = reader->buffers + 0; SBuffer *assist = reader->buffers + 1; @@ -321,8 +322,8 @@ int32_t tsdbDataFileReadBlockData(SDataFileReader *reader, const SBrinRecord *re _exit: if (code) { - tsdbError("vgId:%d %s failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, __FILE__, lino, - tstrerror(code)); + tsdbError("vgId:%d %s fid %d failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, fid, + __FILE__, lino, tstrerror(code)); } return code; } @@ -331,6 +332,7 @@ int32_t tsdbDataFileReadBlockDataByColumn(SDataFileReader *reader, const SBrinRe STSchema *pTSchema, int16_t cids[], int32_t ncid) { int32_t code = 0; int32_t lino = 0; + int32_t fid = reader->config->files[TSDB_FTYPE_DATA].file.fid; SDiskDataHdr hdr; SBuffer *buffer0 = reader->buffers + 0; @@ -505,8 +507,8 @@ int32_t tsdbDataFileReadBlockDataByColumn(SDataFileReader *reader, const SBrinRe _exit: if (code) { - tsdbError("vgId:%d %s failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, __FILE__, lino, - tstrerror(code)); + tsdbError("vgId:%d %s fid:%d failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, fid, + __FILE__, lino, tstrerror(code)); } return code; } diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 6de5298728..280ee527f7 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -15,6 +15,7 @@ #include "sync.h" #include "tcs.h" +#include "tq.h" #include "tsdb.h" #include "vnd.h" @@ -483,6 +484,14 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC ret = taosRealPath(tdir, NULL, sizeof(tdir)); TAOS_UNUSED(ret); + // init handle map for stream event notification + ret = tqInitNotifyHandleMap(&pVnode->pNotifyHandleMap); + if (ret != TSDB_CODE_SUCCESS) { + vError("vgId:%d, failed to init StreamNotifyHandleMap", TD_VID(pVnode)); + terrno = ret; + goto _err; + } + // open query vInfo("vgId:%d, start to open vnode query", TD_VID(pVnode)); if (vnodeQueryOpen(pVnode)) { @@ -555,6 +564,7 @@ void vnodeClose(SVnode *pVnode) { vnodeAWait(&pVnode->commitTask); vnodeSyncClose(pVnode); vnodeQueryClose(pVnode); + tqDestroyNotifyHandleMap(&pVnode->pNotifyHandleMap); tqClose(pVnode->pTq); walClose(pVnode->pWal); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 48afa78251..84eba69acb 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -449,9 +449,17 @@ typedef struct STimeWindowAggSupp { SColumnInfoData timeWindowData; // query time window info for scalar function execution. } STimeWindowAggSupp; +typedef struct SStreamNotifyEventSupp { + SArray* pWindowEvents; // Array of SStreamNotifyEvent, storing window events and trigger values. + SHashObj* pTableNameHashMap; // Hash map from groupid to the dest child table name. + SHashObj* pResultHashMap; // Hash map from groupid+skey to the window agg result. 
+ SSDataBlock* pEventBlock; // The datablock contains all window events and results. +} SStreamNotifyEventSupp; + typedef struct SSteamOpBasicInfo { - int32_t primaryPkIndex; - bool updateOperatorInfo; + int32_t primaryPkIndex; + bool updateOperatorInfo; + SStreamNotifyEventSupp windowEventSup; } SSteamOpBasicInfo; typedef struct SStreamFillSupporter { @@ -767,6 +775,8 @@ typedef struct SStreamEventAggOperatorInfo { SSHashObj* pPkDeleted; bool destHasPrimaryKey; struct SOperatorInfo* pOperator; + SNodeList* pStartCondCols; + SNodeList* pEndCondCols; } SStreamEventAggOperatorInfo; typedef struct SStreamCountAggOperatorInfo { diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index f726e4300f..86ee6f4124 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -71,6 +71,10 @@ typedef struct { SVersionRange fillHistoryVer; STimeWindow fillHistoryWindow; SStreamState* pState; + int32_t eventTypes; // event types to notify + SSchemaWrapper* notifyResultSchema; // agg result to notify + char* stbFullName; // used to generate dest child table name + bool newSubTableRule; // used to generate dest child table name } SStreamTaskInfo; struct SExecTaskInfo { diff --git a/source/libs/executor/inc/streamexecutorInt.h b/source/libs/executor/inc/streamexecutorInt.h index 0a69080314..7b3c828351 100644 --- a/source/libs/executor/inc/streamexecutorInt.h +++ b/source/libs/executor/inc/streamexecutorInt.h @@ -19,7 +19,10 @@ extern "C" { #endif +#include "cJSON.h" +#include "cmdnodes.h" #include "executorInt.h" +#include "querytask.h" #include "tutil.h" #define FILL_POS_INVALID 0 @@ -57,7 +60,8 @@ typedef struct SSlicePoint { void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type); bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo); void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo); -void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); +int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); +void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption); void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins); @@ -106,6 +110,13 @@ int32_t buildAllResultKey(SStateStore* pStateStore, SStreamState* pState, TSKEY int32_t initOffsetInfo(int32_t** ppOffset, SSDataBlock* pRes); TSKEY compareTs(void* pKey); +int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey, + const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri, + SStreamNotifyEventSupp* sup); +int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper, + SStreamNotifyEventSupp* sup); +int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup); + #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 1386b0b82f..39bef9c95f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -250,6 +250,28 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) { return code; } +int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper, + const char* stbFullName, bool newSubTableRule) { + int32_t code = TSDB_CODE_SUCCESS; + SStreamTaskInfo *pStreamInfo = NULL; + + if (tinfo == 0 || eventTypes == 0 || pSchemaWrapper == NULL || stbFullName == NULL) { + goto _end; + } + + pStreamInfo = 
&((SExecTaskInfo*)tinfo)->streamInfo; + pStreamInfo->eventTypes = eventTypes; + pStreamInfo->notifyResultSchema = tCloneSSchemaWrapper(pSchemaWrapper); + if (pStreamInfo->notifyResultSchema == NULL) { + code = terrno; + } + pStreamInfo->stbFullName = taosStrdup(stbFullName); + pStreamInfo->newSubTableRule = newSubTableRule; + +_end: + return code; +} + int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) { if (tinfo == NULL) { return TSDB_CODE_APP_ERROR; diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index c6a1900b41..20c80df4fa 100644 --- a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -262,6 +262,8 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) { static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) { tDeleteSchemaWrapper(pStreamInfo->schema); tOffsetDestroy(&pStreamInfo->currentOffset); + tDeleteSchemaWrapper(pStreamInfo->notifyResultSchema); + taosMemoryFree(pStreamInfo->stbFullName); } static void freeBlock(void* pParam) { diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index fa6008eba7..5f4d6b30fa 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -12,6 +12,8 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ + +#include "cmdnodes.h" #include "executorInt.h" #include "filter.h" #include "function.h" @@ -53,6 +55,8 @@ void destroyStreamEventOperatorInfo(void* param) { &pInfo->groupResInfo); pInfo->pOperator = NULL; } + + destroyStreamBasicInfo(&pInfo->basic); destroyStreamAggSupporter(&pInfo->streamAggSup); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); @@ -89,6 +93,16 @@ void destroyStreamEventOperatorInfo(void* param) { pInfo->pEndCondInfo = NULL; } + if (pInfo->pStartCondCols != NULL) { + nodesDestroyList(pInfo->pStartCondCols); + pInfo->pStartCondCols = NULL; + } + + if (pInfo->pEndCondCols != NULL) { + nodesDestroyList(pInfo->pEndCondCols); + pInfo->pEndCondCols = NULL; + } + taosMemoryFreeClear(param); } @@ -121,7 +135,7 @@ void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI) { } int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t groupId, bool* pStart, bool* pEnd, - int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey) { + int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey, int32_t* pWinCode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; int32_t winCode = TSDB_CODE_SUCCESS; @@ -143,6 +157,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro setEventWindowInfo(pAggSup, &leftWinKey, pVal, pCurWin); if (inWin || (pCurWin->pWinFlag->startFlag && !pCurWin->pWinFlag->endFlag)) { pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin); + (*pWinCode) = TSDB_CODE_SUCCESS; goto _end; } } @@ -156,6 +171,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro if (endi < 0 || pTs[endi] >= rightWinKey.win.skey) { setEventWindowInfo(pAggSup, &rightWinKey, pVal, pCurWin); pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin); + (*pWinCode) = TSDB_CODE_SUCCESS; goto _end; } } @@ -163,6 +179,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro SSessionKey winKey = 
{.win.skey = ts, .win.ekey = ts, .groupId = groupId}; code = pAggSup->stateStore.streamStateSessionAllocWinBuffByNextPosition(pAggSup->pState, pCur, &winKey, &pVal, &len); QUERY_CHECK_CODE(code, lino, _error); + (*pWinCode) = TSDB_CODE_FAILED; setEventWindowInfo(pAggSup, &winKey, pVal, pCurWin); pCurWin->pWinFlag->startFlag = start; @@ -373,10 +390,18 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl bool allEqual = true; SEventWindowInfo curWin = {0}; SSessionKey nextWinKey = {0}; + int32_t winCode = TSDB_CODE_SUCCESS; code = setEventOutputBuf(pAggSup, tsCols, groupId, (bool*)pColStart->pData, (bool*)pColEnd->pData, i, rows, &curWin, - &nextWinKey); + &nextWinKey, &winCode); QUERY_CHECK_CODE(code, lino, _end); + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_OPEN) && + *(bool*)colDataGetNumData(pColStart, i) && winCode != TSDB_CODE_SUCCESS) { + code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_OPEN, &curWin.winInfo.sessionWin, pSDataBlock, + pInfo->pStartCondCols, i, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } + setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo); bool rebuild = false; code = updateEventWindowInfo(pAggSup, &curWin, &nextWinKey, tsCols, (bool*)pColStart->pData, (bool*)pColEnd->pData, @@ -443,6 +468,12 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo)); QUERY_CHECK_CODE(code, lino, _end); } + + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) { + code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_CLOSE, &curWin.winInfo.sessionWin, pSDataBlock, + pInfo->pEndCondCols, i + winRows - 1, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } } _end: @@ -563,6 +594,7 @@ void doStreamEventSaveCheckpoint(SOperatorInfo* pOperator) { static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SStreamEventAggOperatorInfo* pInfo = pOperator->info; SOptrBasicInfo* pBInfo = &pInfo->binfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -577,10 +609,27 @@ static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { doBuildSessionResult(pOperator, pInfo->streamAggSup.pState, &pInfo->groupResInfo, pBInfo->pRes); if (pBInfo->pRes->info.rows > 0) { printDataBlock(pBInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) { + code = addAggResultNotifyEvent(pBInfo->pRes, pTaskInfo->streamInfo.notifyResultSchema, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } (*ppRes) = pBInfo->pRes; return code; } + + code = buildNotifyEventBlock(pTaskInfo, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + if (pInfo->basic.windowEventSup.pEventBlock->info.rows > 0) { + printDataBlock(pInfo->basic.windowEventSup.pEventBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->basic.windowEventSup.pEventBlock; + return code; + } + +_end: (*ppRes) = NULL; + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. 
task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } return code; } @@ -957,6 +1006,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimaryKey; + initStreamBasicInfo(&pInfo->basic); pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED, @@ -989,6 +1039,12 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = filterInitFromNode((SNode*)pEventNode->pEndCond, &pInfo->pEndCondInfo, 0); QUERY_CHECK_CODE(code, lino, _error); + code = + nodesCollectColumnsFromNode((SNode*)pEventNode->pStartCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pStartCondCols); + QUERY_CHECK_CODE(code, lino, _error); + code = nodesCollectColumnsFromNode((SNode*)pEventNode->pEndCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pEndCondCols); + QUERY_CHECK_CODE(code, lino, _error); + *pOptrInfo = pOperator; return TSDB_CODE_SUCCESS; diff --git a/source/libs/executor/src/streamexecutorInt.c b/source/libs/executor/src/streamexecutorInt.c index b94798934c..9cafdfff0c 100644 --- a/source/libs/executor/src/streamexecutorInt.c +++ b/source/libs/executor/src/streamexecutorInt.c @@ -13,7 +13,19 @@ * along with this program. If not, see . */ +#include "streamexecutorInt.h" + #include "executorInt.h" +#include "tdatablock.h" + +#define NOTIFY_EVENT_NAME_CACHE_LIMIT_MB 16 + +typedef struct SStreamNotifyEvent { + uint64_t gid; + TSKEY skey; + char* content; + bool isEnd; +} SStreamNotifyEvent; void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type) { if (type != STREAM_GET_ALL && type != STREAM_CHECKPOINT) { @@ -29,7 +41,509 @@ void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->updateOperatorInfo = false; } -void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { +static void destroyStreamWindowEvent(void* ptr) { + SStreamNotifyEvent* pEvent = ptr; + if (pEvent == NULL || pEvent->content == NULL) return; + cJSON_free(pEvent->content); +} + +static void destroyStreamNotifyEventSupp(SStreamNotifyEventSupp* sup) { + if (sup == NULL) return; + taosArrayDestroyEx(sup->pWindowEvents, destroyStreamWindowEvent); + taosHashCleanup(sup->pTableNameHashMap); + taosHashCleanup(sup->pResultHashMap); + blockDataDestroy(sup->pEventBlock); + *sup = (SStreamNotifyEventSupp){0}; +} + +static int32_t initStreamNotifyEventSupp(SStreamNotifyEventSupp *sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SSDataBlock* pBlock = NULL; + SColumnInfoData infoData = {0}; + + if (sup == NULL) { + goto _end; + } + + code = createDataBlock(&pBlock); + QUERY_CHECK_CODE(code, lino, _end); + + pBlock->info.type = STREAM_NOTIFY_EVENT; + pBlock->info.watermark = INT64_MIN; + + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + infoData.info.bytes = tDataTypes[infoData.info.type].bytes; + code = blockDataAppendColInfo(pBlock, &infoData); + QUERY_CHECK_CODE(code, lino, _end); + + sup->pWindowEvents = taosArrayInit(0, sizeof(SStreamNotifyEvent)); + QUERY_CHECK_NULL(sup->pWindowEvents, code, lino, _end, terrno); + sup->pTableNameHashMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK); + QUERY_CHECK_NULL(sup->pTableNameHashMap, code, lino, _end, terrno); + sup->pResultHashMap = taosHashInit(4096, 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + QUERY_CHECK_NULL(sup->pResultHashMap, code, lino, _end, terrno); + taosHashSetFreeFp(sup->pResultHashMap, destroyStreamWindowEvent); + sup->pEventBlock = pBlock; + pBlock = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + if (sup) { + destroyStreamNotifyEventSupp(sup); + } + } + if (pBlock != NULL) { + blockDataDestroy(pBlock); + } + return code; +} + +int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->primaryPkIndex = -1; pBasicInfo->updateOperatorInfo = false; + return initStreamNotifyEventSupp(&pBasicInfo->windowEventSup); +} + +void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { + destroyStreamNotifyEventSupp(&pBasicInfo->windowEventSup); +} + +static void streamNotifyGetEventWindowId(const SSessionKey* pSessionKey, char *buf) { + uint64_t hash = 0; + uint64_t ar[2]; + + ar[0] = pSessionKey->groupId; + ar[1] = pSessionKey->win.skey; + hash = MurmurHash3_64((char*)ar, sizeof(ar)); + buf = u64toaFastLut(hash, buf); +} + +#define JSON_CHECK_ADD_ITEM(obj, str, item) \ + QUERY_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + +static int32_t jsonAddColumnField(const char* colName, const SColumnInfoData* pColData, int32_t ri, cJSON* obj) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + char* temp = NULL; + + QUERY_CHECK_NULL(colName, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_INVALID_PARA); + + if (colDataIsNull_s(pColData, ri)) { + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNull()); + goto _end; + } + + switch (pColData->info.type) { + case TSDB_DATA_TYPE_BOOL: { + bool val = *(bool*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateBool(val)); + break; + } + + case TSDB_DATA_TYPE_TINYINT: { + int8_t val = *(int8_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { + int16_t val = *(int16_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_INT: { + int32_t val = *(int32_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: { + int64_t val = *(int64_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_FLOAT: { + float val = *(float*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_DOUBLE: { + double val = *(double*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_NCHAR: { + // cJSON requires null-terminated strings, but this data is not null-terminated, + // so we need to manually copy the string and add null termination. 
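      // (var-length column data is length-prefixed rather than null-terminated:
      //  varDataVal() points at the payload bytes and varDataLen() gives their length)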
+ const char* src = varDataVal(colDataGetVarData(pColData, ri)); + int32_t len = varDataLen(colDataGetVarData(pColData, ri)); + temp = cJSON_malloc(len + 1); + QUERY_CHECK_NULL(temp, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + memcpy(temp, src, len); + temp[len] = '\0'; + + cJSON* item = cJSON_CreateStringReference(temp); + JSON_CHECK_ADD_ITEM(obj, colName, item); + + // let the cjson object to free memory later + item->type &= ~cJSON_IsReference; + temp = NULL; + break; + } + + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t val = *(uint8_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t val = *(uint16_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_UINT: { + uint32_t val = *(uint32_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t val = *(uint64_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + default: { + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateStringReference("")); + break; + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (temp) { + cJSON_free(temp); + } + return code; +} + +int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey, + const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri, + SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SNode* node = NULL; + cJSON* event = NULL; + cJSON* fields = NULL; + cJSON* cond = NULL; + SStreamNotifyEvent item = {0}; + char windowId[32]; + + QUERY_CHECK_NULL(pSessionKey, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pInputBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pInputBlock->pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pCondCols, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA); + + qDebug("add stream notify event from event window, type: %s, start: %" PRId64 ", end: %" PRId64, + (eventType == SNOTIFY_EVENT_WINDOW_OPEN) ? 
"WINDOW_OPEN" : "WINDOW_CLOSE", pSessionKey->win.skey, + pSessionKey->win.ekey); + + event = cJSON_CreateObject(); + QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + + // add basic info + streamNotifyGetEventWindowId(pSessionKey, windowId); + if (eventType == SNOTIFY_EVENT_WINDOW_OPEN) { + JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_OPEN")); + } else if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) { + JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_CLOSE")); + } + JSON_CHECK_ADD_ITEM(event, "eventTime", cJSON_CreateNumber(taosGetTimestampMs())); + JSON_CHECK_ADD_ITEM(event, "windowId", cJSON_CreateStringReference(windowId)); + JSON_CHECK_ADD_ITEM(event, "windowType", cJSON_CreateStringReference("Event")); + JSON_CHECK_ADD_ITEM(event, "windowStart", cJSON_CreateNumber(pSessionKey->win.skey)); + if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) { + JSON_CHECK_ADD_ITEM(event, "windowEnd", cJSON_CreateNumber(pSessionKey->win.ekey)); + } + + // create fields object to store matched column values + fields = cJSON_CreateObject(); + QUERY_CHECK_NULL(fields, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + FOREACH(node, pCondCols) { + SColumnNode* pColDef = (SColumnNode*)node; + SColumnInfoData* pColData = taosArrayGet(pInputBlock->pDataBlock, pColDef->slotId); + code = jsonAddColumnField(pColDef->colName, pColData, ri, fields); + QUERY_CHECK_CODE(code, lino, _end); + } + + // add trigger condition + cond = cJSON_CreateObject(); + QUERY_CHECK_NULL(cond, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(cond, "conditionIndex", cJSON_CreateNumber(0)); + JSON_CHECK_ADD_ITEM(cond, "fieldValues", fields); + fields = NULL; + JSON_CHECK_ADD_ITEM(event, "triggerConditions", cond); + cond = NULL; + + // convert json object to string value + item.gid = pSessionKey->groupId; + item.skey = pSessionKey->win.skey; + item.isEnd = (eventType == SNOTIFY_EVENT_WINDOW_CLOSE); + item.content = cJSON_PrintUnformatted(event); + QUERY_CHECK_NULL(taosArrayPush(sup->pWindowEvents, &item), code, lino, _end, terrno); + item.content = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + destroyStreamWindowEvent(&item); + if (cond != NULL) { + cJSON_Delete(cond); + } + if (fields != NULL) { + cJSON_Delete(fields); + } + if (event != NULL) { + cJSON_Delete(event); + } + return code; +} + +int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper, + SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SNode * node = NULL; + cJSON* event = NULL; + cJSON* result = NULL; + SStreamNotifyEvent item = {0}; + SColumnInfoData* pWstartCol = NULL; + + QUERY_CHECK_NULL(pResultBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pSchemaWrapper, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA); + + qDebug("add %" PRId64 " stream notify results from window agg", pResultBlock->info.rows); + + pWstartCol = taosArrayGet(pResultBlock->pDataBlock, 0); + for (int32_t i = 0; i< pResultBlock->info.rows; ++i) { + event = cJSON_CreateObject(); + QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + + // convert the result row into json + result = cJSON_CreateObject(); + QUERY_CHECK_NULL(result, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + for (int32_t j = 0; j < pSchemaWrapper->nCols; ++j) { + SSchema *pCol = 
pSchemaWrapper->pSchema + j; + SColumnInfoData *pColData = taosArrayGet(pResultBlock->pDataBlock, pCol->colId - 1); + code = jsonAddColumnField(pCol->name, pColData, i, result); + QUERY_CHECK_CODE(code, lino, _end); + } + JSON_CHECK_ADD_ITEM(event, "result", result); + result = NULL; + + item.gid = pResultBlock->info.id.groupId; + item.skey = *(uint64_t*)colDataGetNumData(pWstartCol, i); + item.content = cJSON_PrintUnformatted(event); + code = taosHashPut(sup->pResultHashMap, &item.gid, sizeof(item.gid) + sizeof(item.skey), &item, sizeof(item)); + TSDB_CHECK_CODE(code, lino, _end); + item.content = NULL; + + cJSON_Delete(event); + event = NULL; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + destroyStreamWindowEvent(&item); + if (result != NULL) { + cJSON_Delete(result); + } + if (event != NULL) { + cJSON_Delete(event); + } + return code; +} + +static int32_t streamNotifyGetDestTableName(const SExecTaskInfo* pTaskInfo, uint64_t gid, char** pTableName) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + const SStorageAPI* pAPI = NULL; + void* tbname = NULL; + int32_t winCode = TSDB_CODE_SUCCESS; + char parTbName[TSDB_TABLE_NAME_LEN]; + const SStreamTaskInfo* pStreamInfo = NULL; + + QUERY_CHECK_NULL(pTaskInfo, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pTableName, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pTableName = NULL; + + pAPI = &pTaskInfo->storageAPI; + code = pAPI->stateStore.streamStateGetParName((void*)pTaskInfo->streamInfo.pState, gid, &tbname, false, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + if (winCode != TSDB_CODE_SUCCESS) { + parTbName[0] = '\0'; + } else { + tstrncpy(parTbName, tbname, sizeof(parTbName)); + } + pAPI->stateStore.streamStateFreeVal(tbname); + + pStreamInfo = &pTaskInfo->streamInfo; + code = buildSinkDestTableName(parTbName, pStreamInfo->stbFullName, gid, pStreamInfo->newSubTableRule, pTableName); + QUERY_CHECK_CODE(code, lino, _end); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t streamNotifyFillTableName(const char* tableName, const SStreamNotifyEvent* pEvent, + const SStreamNotifyEvent* pResult, char** pVal) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + static const char* prefix = "{\"tableName\":\""; + uint64_t prefixLen = 0; + uint64_t nameLen = 0; + uint64_t eventLen = 0; + uint64_t resultLen = 0; + uint64_t valLen = 0; + char* val = NULL; + char* p = NULL; + + QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pEvent, code, lino , _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pVal, code, lino , _end, TSDB_CODE_INVALID_PARA); + + *pVal = NULL; + prefixLen = strlen(prefix); + nameLen = strlen(tableName); + eventLen = strlen(pEvent->content); + + if (pResult != NULL) { + resultLen = strlen(pResult->content); + valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + resultLen; + } else { + valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + 1; + } + val = taosMemoryMalloc(valLen); + QUERY_CHECK_NULL(val, code, lino, _end, terrno); + varDataSetLen(val, valLen - VARSTR_HEADER_SIZE); + + p = varDataVal(val); + TAOS_STRNCPY(p, prefix, prefixLen); + p += prefixLen; + TAOS_STRNCPY(p, tableName, nameLen); + p += nameLen; + *(p++) = '\"'; + TAOS_STRNCPY(p, pEvent->content, eventLen); + *p = ','; + + if (pResult != NULL) { + p += eventLen - 1; + TAOS_STRNCPY(p, 
pResult->content, resultLen); + *p = ','; + } + *pVal = val; + val = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (val != NULL) { + taosMemoryFreeClear(val); + } + return code; +} + +int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SColumnInfoData* pEventStrCol = NULL; + int32_t nWindowEvents = 0; + int32_t nWindowResults = 0; + char* val = NULL; + + if (pTaskInfo == NULL || sup == NULL) { + goto _end; + } + + QUERY_CHECK_NULL(sup->pEventBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + blockDataCleanup(sup->pEventBlock); + nWindowEvents = taosArrayGetSize(sup->pWindowEvents); + nWindowResults = taosHashGetSize(sup->pResultHashMap); + qDebug("start to build stream notify event block, nWindowEvents: %d, nWindowResults: %d", nWindowEvents, + nWindowResults); + if (nWindowEvents == 0) { + goto _end; + } + + code = blockDataEnsureCapacity(sup->pEventBlock, nWindowEvents); + QUERY_CHECK_CODE(code, lino, _end); + + pEventStrCol = taosArrayGet(sup->pEventBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + QUERY_CHECK_NULL(pEventStrCol, code, lino, _end, terrno); + + for (int32_t i = 0; i < nWindowEvents; ++i) { + SStreamNotifyEvent* pResult = NULL; + SStreamNotifyEvent* pEvent = taosArrayGet(sup->pWindowEvents, i); + char* tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid)); + if (tableName == NULL) { + code = streamNotifyGetDestTableName(pTaskInfo, pEvent->gid, &tableName); + QUERY_CHECK_CODE(code, lino, _end); + code = taosHashPut(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid), tableName, strlen(tableName) + 1); + taosMemoryFreeClear(tableName); + QUERY_CHECK_CODE(code, lino, _end); + tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid)); + QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + } + if (pEvent->isEnd) { + pResult = taosHashGet(sup->pResultHashMap, &pEvent->gid, sizeof(pEvent->gid) + sizeof(pEvent->skey)); + QUERY_CHECK_NULL(pResult, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + } + code = streamNotifyFillTableName(tableName, pEvent, pResult, &val); + QUERY_CHECK_CODE(code, lino, _end); + code = colDataSetVal(pEventStrCol, i, val, false); + QUERY_CHECK_CODE(code, lino, _end); + taosMemoryFreeClear(val); + sup->pEventBlock->info.rows++; + } + + if (taosHashGetMemSize(sup->pTableNameHashMap) >= NOTIFY_EVENT_NAME_CACHE_LIMIT_MB * 1024 * 1024) { + taosHashClear(sup->pTableNameHashMap); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (val != NULL) { + taosMemoryFreeClear(val); + } + if (sup != NULL) { + taosArrayClearEx(sup->pWindowEvents, destroyStreamWindowEvent); + taosHashClear(sup->pResultHashMap); + } + return code; } diff --git a/source/libs/executor/src/streamintervalsliceoperator.c b/source/libs/executor/src/streamintervalsliceoperator.c index d038e4d82c..44799f193b 100644 --- a/source/libs/executor/src/streamintervalsliceoperator.c +++ b/source/libs/executor/src/streamintervalsliceoperator.c @@ -55,6 +55,7 @@ void destroyStreamIntervalSliceOperatorInfo(void* param) { pInfo->pOperator = NULL; } + destroyStreamBasicInfo(&pInfo->basic); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); pInfo->pUpdated = NULL; @@ -651,7 +652,8 @@ int32_t 
createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiN optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); setOperatorStreamStateFn(pOperator, streamIntervalSliceReleaseState, streamIntervalSliceReloadState); - initStreamBasicInfo(&pInfo->basic); + code = initStreamBasicInfo(&pInfo->basic); + QUERY_CHECK_CODE(code, lino, _error); if (downstream) { code = initIntervalSliceDownStream(downstream, &pInfo->streamAggSup, pPhyNode->type, pInfo->primaryTsIndex, &pInfo->twAggSup, &pInfo->basic, &pInfo->interval, pInfo->hasInterpoFunc); diff --git a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c index 44004a4c6b..4fe8efe397 100644 --- a/source/libs/executor/src/streamtimesliceoperator.c +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -150,6 +150,7 @@ void destroyStreamTimeSliceOperatorInfo(void* param) { &pInfo->groupResInfo); pInfo->pOperator = NULL; } + destroyStreamBasicInfo(&pInfo->basic); colDataDestroy(&pInfo->twAggSup.timeWindowData); destroyStreamAggSupporter(&pInfo->streamAggSup); resetPrevAndNextWindow(pInfo->pFillSup); @@ -2201,7 +2202,8 @@ int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); setOperatorStreamStateFn(pOperator, streamTimeSliceReleaseState, streamTimeSliceReloadState); - initStreamBasicInfo(&pInfo->basic); + code = initStreamBasicInfo(&pInfo->basic); + QUERY_CHECK_CODE(code, lino, _error); if (downstream) { code = initTimeSliceDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex, &pInfo->twAggSup, &pInfo->basic, pInfo->pFillSup); diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index a556f68c32..56060b0061 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -2545,7 +2545,8 @@ static int32_t doSysTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) if (pInfo->showRewrite) { getDBNameFromCondition(pInfo->pCondition, dbName); if (strncasecmp(name, TSDB_INS_TABLE_COMPACTS, TSDB_TABLE_FNAME_LEN) != 0 && - strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, TSDB_TABLE_FNAME_LEN) != 0) { + strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, TSDB_TABLE_FNAME_LEN) != 0 && + strncasecmp(name, TSDB_INS_TABLE_TRANSACTION_DETAILS, TSDB_TABLE_FNAME_LEN) != 0) { TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName)); } } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 9dcb2e67d4..6966f6a463 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -99,6 +99,8 @@ const char* nodesNodeName(ENodeType type) { return "CountWindow"; case QUERY_NODE_ANOMALY_WINDOW: return "AnomalyWindow"; + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return "StreamNotifyOptions"; case QUERY_NODE_SET_OPERATOR: return "SetOperator"; case QUERY_NODE_SELECT_STMT: @@ -193,6 +195,8 @@ const char* nodesNodeName(ENodeType type) { return "PauseStreamStmt"; case QUERY_NODE_RESUME_STREAM_STMT: return "ResumeStreamStmt"; + case QUERY_NODE_RESET_STREAM_STMT: + return "ResetStreamStmt"; case QUERY_NODE_BALANCE_VGROUP_STMT: return "BalanceVgroupStmt"; case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT: @@ -292,6 +296,8 @@ const char* nodesNodeName(ENodeType type) { return "ShowCompactsStmt"; case 
QUERY_NODE_SHOW_COMPACT_DETAILS_STMT: return "ShowCompactDetailsStmt"; + case QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT: + return "ShowTransactionDetailsStmt"; case QUERY_NODE_SHOW_GRANTS_FULL_STMT: return "ShowGrantsFullStmt"; case QUERY_NODE_SHOW_GRANTS_LOGS_STMT: @@ -5808,6 +5814,45 @@ static int32_t jsonToStreamOptions(const SJson* pJson, void* pObj) { return code; } +static const char* jkStreamNotifyOptionsAddrUrls = "AddrUrls"; +static const char* jkStreamNotifyOptionsEventType = "EventType"; +static const char* jkStreamNotifyOptionsErrorHandle = "ErrorHandle"; +static const char* jkStreamNotifyOptionsNotifyHistory = "NotifyHistory"; + +static int32_t streamNotifyOptionsToJson(const void* pObj, SJson* pJson) { + const SStreamNotifyOptions* pNotifyOption = (const SStreamNotifyOptions*)pObj; + int32_t code = nodeListToJson(pJson, jkStreamNotifyOptionsAddrUrls, pNotifyOption->pAddrUrls); + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsEventType, pNotifyOption->eventTypes); + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsErrorHandle, pNotifyOption->errorHandle); + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddBoolToObject(pJson, jkStreamNotifyOptionsNotifyHistory, pNotifyOption->notifyHistory); + } + + return code; +} + +static int32_t jsonToStreamNotifyOptions(const SJson* pJson, void* pObj) { + SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pObj; + int32_t code = jsonToNodeList(pJson, jkStreamNotifyOptionsAddrUrls, &pNotifyOption->pAddrUrls); + int32_t val = 0; + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsEventType, &val); + pNotifyOption->eventTypes = val; + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsErrorHandle, &val); + pNotifyOption->errorHandle = val; + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetBoolValue(pJson, jkStreamNotifyOptionsNotifyHistory, &pNotifyOption->notifyHistory); + } + return code; +} + static const char* jkWhenThenWhen = "When"; static const char* jkWhenThenThen = "Then"; @@ -7203,6 +7248,7 @@ static const char* jkCreateStreamStmtOptions = "Options"; static const char* jkCreateStreamStmtQuery = "Query"; static const char* jkCreateStreamStmtTags = "Tags"; static const char* jkCreateStreamStmtSubtable = "Subtable"; +static const char* jkCreateStreamStmtNotifyOptions = "NotifyOptions"; static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) { const SCreateStreamStmt* pNode = (const SCreateStreamStmt*)pObj; @@ -7229,6 +7275,9 @@ static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkCreateStreamStmtSubtable, nodeToJson, pNode->pSubtable); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkCreateStreamStmtNotifyOptions, nodeToJson, pNode->pNotifyOptions); + } return code; } @@ -7258,6 +7307,9 @@ static int32_t jsonToCreateStreamStmt(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkCreateStreamStmtSubtable, &pNode->pSubtable); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkCreateStreamStmtNotifyOptions, (SNode**)&pNode->pNotifyOptions); + } return code; } @@ -7287,6 +7339,32 @@ static int32_t jsonToDropStreamStmt(const SJson* pJson, void* pObj) { return code; } +static const char* jkResetStreamStmtStreamName = "StreamName"; +static const char* 
jkResetStreamStmtIgnoreNotExists = "IgnoreNotExists"; + +static int32_t resetStreamStmtToJson(const void* pObj, SJson* pJson) { + const SResetStreamStmt* pNode = (const SResetStreamStmt*)pObj; + + int32_t code = tjsonAddStringToObject(pJson, jkResetStreamStmtStreamName, pNode->streamName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkResetStreamStmtIgnoreNotExists, pNode->ignoreNotExists); + } + + return code; +} + +static int32_t jsonToResetStreamStmt(const SJson* pJson, void* pObj) { + SResetStreamStmt* pNode = (SResetStreamStmt*)pObj; + + int32_t code = tjsonGetStringValue(pJson, jkResetStreamStmtStreamName, pNode->streamName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkResetStreamStmtIgnoreNotExists, &pNode->ignoreNotExists); + } + + return code; +} + + static const char* jkMergeVgroupStmtVgroupId1 = "VgroupId1"; static const char* jkMergeVgroupStmtVgroupId2 = "VgroupId2"; @@ -7999,6 +8077,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return countWindowNodeToJson(pObj, pJson); case QUERY_NODE_ANOMALY_WINDOW: return anomalyWindowNodeToJson(pObj, pJson); + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return streamNotifyOptionsToJson(pObj, pJson); case QUERY_NODE_SET_OPERATOR: return setOperatorToJson(pObj, pJson); case QUERY_NODE_SELECT_STMT: @@ -8372,6 +8452,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToCountWindowNode(pJson, pObj); case QUERY_NODE_ANOMALY_WINDOW: return jsonToAnomalyWindowNode(pJson, pObj); + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return jsonToStreamNotifyOptions(pJson, pObj); case QUERY_NODE_SET_OPERATOR: return jsonToSetOperator(pJson, pObj); case QUERY_NODE_SELECT_STMT: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 47c6292a9a..f178b42988 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -467,6 +467,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { case QUERY_NODE_WINDOW_OFFSET: code = makeNode(type, sizeof(SWindowOffsetNode), &pNode); break; + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + code = makeNode(type, sizeof(SStreamNotifyOptions), &pNode); + break; case QUERY_NODE_SET_OPERATOR: code = makeNode(type, sizeof(SSetOperator), &pNode); break; @@ -615,6 +618,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { case QUERY_NODE_RESUME_STREAM_STMT: code = makeNode(type, sizeof(SResumeStreamStmt), &pNode); break; + case QUERY_NODE_RESET_STREAM_STMT: + code = makeNode(type, sizeof(SResetStreamStmt), &pNode); + break; case QUERY_NODE_BALANCE_VGROUP_STMT: code = makeNode(type, sizeof(SBalanceVgroupStmt), &pNode); break; @@ -714,6 +720,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { break; case QUERY_NODE_SHOW_COMPACT_DETAILS_STMT: code = makeNode(type, sizeof(SShowCompactDetailsStmt), &pNode); + break; + case QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT: + code = makeNode(type, sizeof(SShowTransactionDetailsStmt), &pNode); break; case QUERY_NODE_KILL_QUERY_STMT: code = makeNode(type, sizeof(SKillQueryStmt), &pNode); @@ -1265,6 +1274,11 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pAround->pTimepoint); break; } + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: { + SStreamNotifyOptions* pNotifyOptions = (SStreamNotifyOptions*)pNode; + nodesDestroyList(pNotifyOptions->pAddrUrls); + break; + } case QUERY_NODE_SET_OPERATOR: { SSetOperator* pStmt = (SSetOperator*)pNode; nodesDestroyList(pStmt->pProjectionList); @@ 
-1477,6 +1491,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pStmt->pQuery); nodesDestroyList(pStmt->pTags); nodesDestroyNode(pStmt->pSubtable); + nodesDestroyNode((SNode*)pStmt->pNotifyOptions); tFreeSCMCreateStreamReq(pStmt->pReq); taosMemoryFreeClear(pStmt->pReq); break; @@ -1484,6 +1499,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_DROP_STREAM_STMT: // no pointer field case QUERY_NODE_PAUSE_STREAM_STMT: // no pointer field case QUERY_NODE_RESUME_STREAM_STMT: // no pointer field + case QUERY_NODE_RESET_STREAM_STMT: // no pointer field case QUERY_NODE_BALANCE_VGROUP_STMT: // no pointer field case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT: // no pointer field case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT: // no pointer field @@ -1566,6 +1582,11 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pStmt->pCompactId); break; } + case QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT: { + SShowTransactionDetailsStmt* pStmt = (SShowTransactionDetailsStmt*)pNode; + nodesDestroyNode(pStmt->pTransactionId); + break; + } case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: taosMemoryFreeClear(((SShowCreateDatabaseStmt*)pNode)->pCfg); break; diff --git a/source/libs/parser/CMakeLists.txt b/source/libs/parser/CMakeLists.txt index bd2dd95ee0..088cdc4368 100644 --- a/source/libs/parser/CMakeLists.txt +++ b/source/libs/parser/CMakeLists.txt @@ -7,7 +7,7 @@ ENDIF() add_custom_command( OUTPUT ${TD_SOURCE_DIR}/source/libs/parser/src/sql.c ${TD_SOURCE_DIR}/include/common/ttokenauto.h COMMAND echo "Running lemon process in ${TD_SOURCE_DIR}/source/libs/parser/inc" - COMMAND ${TD_CONTRIB_DIR}/lemon/lemon sql.y || true + COMMAND ${TD_CONTRIB_DIR}/lemon/lemon sql.y COMMAND echo "copy sql.c from ${TD_SOURCE_DIR}/source/libs/parser/inc/sql.c to ${TD_SOURCE_DIR}/source/libs/parser/src/" COMMAND mv ${TD_SOURCE_DIR}/source/libs/parser/inc/sql.c ${TD_SOURCE_DIR}/source/libs/parser/src/sql.c COMMAND mv ${TD_SOURCE_DIR}/source/libs/parser/inc/sql.h ${TD_SOURCE_DIR}/include/common/ttokenauto.h diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 293649e06e..559009d215 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -296,11 +296,16 @@ SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con SNode* createStreamOptions(SAstCreateContext* pCxt); SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptionsSetFlag setflag, SToken* pToken, SNode* pNode); +SNode* createStreamNotifyOptions(SAstCreateContext *pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes); +SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag, + SToken* pToken); SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable, - SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols); + SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols, + SNode* pNotifyOptions); SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName); SNode* createPauseStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName); SNode* createResumeStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, bool ignoreUntreated, SToken* pStreamName); +SNode* createResetStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName); SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId); SNode* 
createKillQueryStmt(SAstCreateContext* pCxt, const SToken* pQueryId); SNode* createBalanceVgroupStmt(SAstCreateContext* pCxt); @@ -320,6 +325,7 @@ SNode* createCreateViewStmt(SAstCreateContext* pCxt, bool orReplace, SNode* pVie SNode* createDropViewStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SNode* pView); SNode* createShowCompactDetailsStmt(SAstCreateContext* pCxt, SNode* pCompactIdNode); SNode* createShowCompactsStmt(SAstCreateContext* pCxt, ENodeType type); +SNode* createShowTransactionDetailsStmt(SAstCreateContext* pCxt, SNode* pTransactionIdNode); SNode* createCreateTSMAStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* tsmaName, SNode* pOptions, SNode* pRealTable, SNode* pInterval); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 5c16da8665..e470d4517a 100755 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -24,6 +24,14 @@ #include "parAst.h" #define YYSTACKDEPTH 0 + +#define JOINED_TABLE_MK(jt, st, A, B, E, F, G, H) \ + { \ + A = createJoinTableNode(pCxt, jt, st, B, E, F); \ + A = addWindowOffsetClause(pCxt, A, G); \ + A = addJLimitClause(pCxt, A, H); \ + } + } %syntax_error { @@ -46,6 +54,8 @@ %left NK_STAR NK_SLASH NK_REM. %left NK_CONCAT. +%right INNER LEFT RIGHT FULL OUTER SEMI ANTI ASOF WINDOW JOIN ON WINDOW_OFFSET JLIMIT. + /************************************************ create/alter account *****************************************/ cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options. { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } cmd ::= ALTER ACCOUNT NK_ID alter_account_options. { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } @@ -589,6 +599,7 @@ cmd ::= SHOW BNODES. cmd ::= SHOW SNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT); } cmd ::= SHOW CLUSTER. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT); } cmd ::= SHOW TRANSACTIONS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); } +cmd ::= SHOW TRANSACTION NK_INTEGER(A). { pCxt->pRootNode = createShowTransactionDetailsStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &A)); } cmd ::= SHOW TABLE DISTRIBUTED full_table_name(A). { pCxt->pRootNode = createShowTableDistributedStmt(pCxt, A); } cmd ::= SHOW CONSUMERS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); } cmd ::= SHOW SUBSCRIPTIONS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); } @@ -774,10 +785,11 @@ full_view_name(A) ::= db_name(B) NK_DOT view_name(C). /************************************************ create/drop stream **************************************************/ cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) stream_options(B) INTO full_table_name(C) col_list_opt(H) tag_def_or_ref_opt(F) subtable_opt(G) - AS query_or_subquery(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H); } + AS query_or_subquery(D) notify_opt(I). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H, I); } cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); } cmd ::= PAUSE STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createPauseStreamStmt(pCxt, A, &B); } cmd ::= RESUME STREAM exists_opt(A) ignore_opt(C) stream_name(B). { pCxt->pRootNode = createResumeStreamStmt(pCxt, A, C, &B); } +cmd ::= RESET STREAM exists_opt(A) stream_name(B). 
{ pCxt->pRootNode = createResetStreamStmt(pCxt, A, &B); } %type col_list_opt { SNodeList* } %destructor col_list_opt { nodesDestroyList($$); } @@ -820,6 +832,26 @@ subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP. ignore_opt(A) ::= . { A = false; } ignore_opt(A) ::= IGNORE UNTREATED. { A = true; } +notify_opt(A) ::= . { A = NULL; } +notify_opt(A) ::= notify_def(B). { A = B; } + +notify_def(A) ::= NOTIFY NK_LP url_def_list(B) NK_RP ON NK_LP event_def_list(C) NK_RP. { A = createStreamNotifyOptions(pCxt, B, C); } +notify_def(A) ::= notify_def(B) ON_FAILURE DROP(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); } +notify_def(A) ::= notify_def(B) ON_FAILURE PAUSE(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); } +notify_def(A) ::= notify_def(B) NOTIFY_HISTORY NK_INTEGER(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_NOTIFY_HISTORY_SET, &C); } + +%type url_def_list { SNodeList* } +%destructor url_def_list { nodesDestroyList($$); } +url_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); } +url_def_list(A) ::= url_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); } + +%type event_def_list { SNodeList* } +%destructor event_def_list { nodesDestroyList($$); } +event_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); } +event_def_list(A) ::= event_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); } + + + /************************************************ kill connection/query ***********************************************/ cmd ::= KILL CONNECTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &A); } cmd ::= KILL QUERY NK_STRING(A). { pCxt->pRootNode = createKillQueryStmt(pCxt, &A); } @@ -1445,36 +1477,77 @@ parenthesized_joined_table(A) ::= NK_LP joined_table(B) NK_RP. parenthesized_joined_table(A) ::= NK_LP parenthesized_joined_table(B) NK_RP. { A = B; } /************************************************ joined_table ********************************************************/ -joined_table(A) ::= - table_reference(B) join_type(C) join_subtype(D) JOIN table_reference(E) join_on_clause_opt(F) - window_offset_clause_opt(G) jlimit_clause_opt(H). { - A = createJoinTableNode(pCxt, C, D, B, E, F); - A = addWindowOffsetClause(pCxt, A, G); - A = addJLimitClause(pCxt, A, H); - } +joined_table(A) ::= inner_joined(B). { A = B; } +joined_table(A) ::= outer_joined(B). { A = B; } +joined_table(A) ::= semi_joined(B). { A = B; } +joined_table(A) ::= anti_joined(B). { A = B; } +joined_table(A) ::= asof_joined(B). { A = B; } +joined_table(A) ::= win_joined(B). { A = B; } -%type join_type { EJoinType } -%destructor join_type { } -join_type(A) ::= . { A = JOIN_TYPE_INNER; } -join_type(A) ::= INNER. { A = JOIN_TYPE_INNER; } -join_type(A) ::= LEFT. { A = JOIN_TYPE_LEFT; } -join_type(A) ::= RIGHT. { A = JOIN_TYPE_RIGHT; } -join_type(A) ::= FULL. { A = JOIN_TYPE_FULL; } +/************************************************ inner join **********************************************************/ +inner_joined(A) ::= + table_reference(B) JOIN table_reference(E) join_on_clause_opt(F). { JOINED_TABLE_MK(JOIN_TYPE_INNER, JOIN_STYPE_NONE, A, B, E, F, NULL, NULL); } -%type join_subtype { EJoinSubType } -%destructor join_subtype { } -join_subtype(A) ::= . 
{ A = JOIN_STYPE_NONE; }
-join_subtype(A) ::= OUTER. { A = JOIN_STYPE_OUTER; }
-join_subtype(A) ::= SEMI. { A = JOIN_STYPE_SEMI; }
-join_subtype(A) ::= ANTI. { A = JOIN_STYPE_ANTI; }
-join_subtype(A) ::= ASOF. { A = JOIN_STYPE_ASOF; }
-join_subtype(A) ::= WINDOW. { A = JOIN_STYPE_WIN; }
+inner_joined(A) ::=
+  table_reference(B) INNER JOIN table_reference(E) join_on_clause_opt(F). { JOINED_TABLE_MK(JOIN_TYPE_INNER, JOIN_STYPE_NONE, A, B, E, F, NULL, NULL); }
-join_on_clause_opt(A) ::= . { A = NULL; }
-join_on_clause_opt(A) ::= ON search_condition(B). { A = B; }
+/************************************************ outer join **********************************************************/
+outer_joined(A) ::=
+  table_reference(B) LEFT JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_LEFT, JOIN_STYPE_OUTER, A, B, E, F, NULL, NULL); }
-window_offset_clause_opt(A) ::= . { A = NULL; }
-window_offset_clause_opt(A) ::= WINDOW_OFFSET NK_LP window_offset_literal(B)
+outer_joined(A) ::=
+  table_reference(B) RIGHT JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_RIGHT, JOIN_STYPE_OUTER, A, B, E, F, NULL, NULL); }
+
+outer_joined(A) ::=
+  table_reference(B) FULL JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_FULL, JOIN_STYPE_OUTER, A, B, E, F, NULL, NULL); }
+
+outer_joined(A) ::=
+  table_reference(B) LEFT OUTER JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_LEFT, JOIN_STYPE_OUTER, A, B, E, F, NULL, NULL); }
+
+outer_joined(A) ::=
+  table_reference(B) RIGHT OUTER JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_RIGHT, JOIN_STYPE_OUTER, A, B, E, F, NULL, NULL); }
+
+outer_joined(A) ::=
+  table_reference(B) FULL OUTER JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_FULL, JOIN_STYPE_OUTER, A, B, E, F, NULL, NULL); }
+
+/************************************************ semi join ***********************************************************/
+semi_joined(A) ::=
+  table_reference(B) LEFT SEMI JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_LEFT, JOIN_STYPE_SEMI, A, B, E, F, NULL, NULL); }
+
+semi_joined(A) ::=
+  table_reference(B) RIGHT SEMI JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_RIGHT, JOIN_STYPE_SEMI, A, B, E, F, NULL, NULL); }
+
+/************************************************ anti join ***********************************************************/
+anti_joined(A) ::=
+  table_reference(B) LEFT ANTI JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_LEFT, JOIN_STYPE_ANTI, A, B, E, F, NULL, NULL); }
+
+anti_joined(A) ::=
+  table_reference(B) RIGHT ANTI JOIN table_reference(E) join_on_clause(F). { JOINED_TABLE_MK(JOIN_TYPE_RIGHT, JOIN_STYPE_ANTI, A, B, E, F, NULL, NULL); }
+
+/************************************************ asof join ***********************************************************/
+asof_joined(A) ::=
+  table_reference(B) LEFT ASOF JOIN table_reference(E) join_on_clause_opt(F)
+  jlimit_clause_opt(H). { JOINED_TABLE_MK(JOIN_TYPE_LEFT, JOIN_STYPE_ASOF, A, B, E, F, NULL, H); }
+
+asof_joined(A) ::=
+  table_reference(B) RIGHT ASOF JOIN table_reference(E) join_on_clause_opt(F)
+  jlimit_clause_opt(H).
{ JOINED_TABLE_MK(JOIN_TYPE_RIGHT, JOIN_STYPE_ASOF, A, B, E, F, NULL, H); } + +/************************************************ window join *********************************************************/ +win_joined(A) ::= + table_reference(B) LEFT WINDOW JOIN table_reference(E) join_on_clause_opt(F) + window_offset_clause(G) jlimit_clause_opt(H). { JOINED_TABLE_MK(JOIN_TYPE_LEFT, JOIN_STYPE_WIN, A, B, E, F, G, H); } + +win_joined(A) ::= + table_reference(B) RIGHT WINDOW JOIN table_reference(E) join_on_clause_opt(F) + window_offset_clause(G) jlimit_clause_opt(H). { JOINED_TABLE_MK(JOIN_TYPE_RIGHT, JOIN_STYPE_WIN, A, B, E, F, G, H); } + +join_on_clause_opt(A) ::= . [ON] { A = NULL; } +join_on_clause_opt(A) ::= join_on_clause(B). { A = B; } + +join_on_clause(A) ::= ON search_condition(B). { A = B; } + +window_offset_clause(A) ::= WINDOW_OFFSET NK_LP window_offset_literal(B) NK_COMMA window_offset_literal(C) NK_RP. { A = createWindowOffsetNode(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); } window_offset_literal(A) ::= NK_VARIABLE(B). { A = createRawExprNode(pCxt, &B, createTimeOffsetValueNode(pCxt, &B)); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 708c8aa6eb..0c0c42def5 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1526,8 +1526,8 @@ SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhe pCaseWhen->pCase = pCase; pCaseWhen->pWhenThenList = pWhenThenList; pCaseWhen->pElse = pElse; - pCaseWhen->tz = pCxt->pQueryCxt->timezone; - pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt; + pCaseWhen->tz = pCxt->pQueryCxt->timezone; + pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt; return (SNode*)pCaseWhen; _err: nodesDestroyNode(pCase); @@ -2931,6 +2931,18 @@ _err: return NULL; } +SNode* createShowTransactionDetailsStmt(SAstCreateContext* pCxt, SNode* pTransactionIdNode) { + CHECK_PARSER_STATUS(pCxt); + SShowTransactionDetailsStmt* pStmt = NULL; + pCxt->errCode = nodesMakeNode(QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT, (SNode**)&pStmt); + CHECK_MAKE_NODE(pStmt); + pStmt->pTransactionId = pTransactionIdNode; + return (SNode*)pStmt; +_err: + nodesDestroyNode(pTransactionIdNode); + return NULL; +} + static int32_t getIpV4RangeFromWhitelistItem(char* ipRange, SIpV4Range* pIpRange) { int32_t code = TSDB_CODE_SUCCESS; char* ipCopy = taosStrdup(ipRange); @@ -3645,8 +3657,115 @@ SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptions return pOptions; } +static bool validateNotifyUrl(const char* url) { + const char* prefix[] = {"http://", "https://", "ws://", "wss://"}; + const char* host = NULL; + + if (!url || *url == '\0') return false; + + for (int32_t i = 0; i < ARRAY_SIZE(prefix); ++i) { + if (strncasecmp(url, prefix[i], strlen(prefix[i])) == 0) { + host = url + strlen(prefix[i]); + break; + } + } + + return (host != NULL) && (*host != '\0') && (*host != '/'); +} + +SNode* createStreamNotifyOptions(SAstCreateContext* pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes) { + SNode* pNode = NULL; + EStreamNotifyEventType eventTypes = 0; + const char* eWindowOpenStr = "WINDOW_OPEN"; + const char* eWindowCloseStr = "WINDOW_CLOSE"; + + CHECK_PARSER_STATUS(pCxt); + + if (LIST_LENGTH(pAddrUrls) == 0) { + pCxt->errCode = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "notification address cannot be empty"); + goto _err; + } + + FOREACH(pNode, pAddrUrls) { + char *url = ((SValueNode*)pNode)->literal; + if 
(strlen(url) >= TSDB_STREAM_NOTIFY_URL_LEN) {
+      pCxt->errCode =
+          generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
+                                  "notification address \"%s\" exceeds maximum length %d", url, TSDB_STREAM_NOTIFY_URL_LEN);
+      goto _err;
+    }
+    if (!validateNotifyUrl(url)) {
+      pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
+                                              "invalid notification address \"%s\"", url);
+      goto _err;
+    }
+  }
+
+  if (LIST_LENGTH(pEventTypes) == 0) {
+    pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
+                                            "event types must be specified for notification");
+    goto _err;
+  }
+
+  FOREACH(pNode, pEventTypes) {
+    char *eventStr = ((SValueNode *)pNode)->literal;
+    if (strncasecmp(eventStr, eWindowOpenStr, strlen(eWindowOpenStr) + 1) == 0) {
+      BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_OPEN);
+    } else if (strncasecmp(eventStr, eWindowCloseStr, strlen(eWindowCloseStr) + 1) == 0) {
+      BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE);
+    } else {
+      pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
+                                              "invalid event type '%s' for notification", eventStr);
+      goto _err;
+    }
+  }
+
+  SStreamNotifyOptions* pNotifyOptions = NULL;
+  pCxt->errCode = nodesMakeNode(QUERY_NODE_STREAM_NOTIFY_OPTIONS, (SNode**)&pNotifyOptions);
+  CHECK_MAKE_NODE(pNotifyOptions);
+  pNotifyOptions->pAddrUrls = pAddrUrls;
+  pNotifyOptions->eventTypes = eventTypes;
+  pNotifyOptions->errorHandle = SNOTIFY_ERROR_HANDLE_PAUSE;
+  pNotifyOptions->notifyHistory = false;
+  nodesDestroyList(pEventTypes);
+  return (SNode*)pNotifyOptions;
+_err:
+  nodesDestroyList(pAddrUrls);
+  nodesDestroyList(pEventTypes);
+  return NULL;
+}
+
+SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag,
+                              SToken* pToken) {
+  CHECK_PARSER_STATUS(pCxt);
+
+  SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pNode;
+  if (BIT_FLAG_TEST_MASK(pNotifyOption->setFlag, setFlag)) {
+    pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
+                                         "each stream notify option can only be set once");
+    goto _err;
+  }
+  switch (setFlag) {
+    case SNOTIFY_OPT_ERROR_HANDLE_SET:
+      pNotifyOption->errorHandle = (pToken->type == TK_DROP) ?
SNOTIFY_ERROR_HANDLE_DROP : SNOTIFY_ERROR_HANDLE_PAUSE; + break; + case SNOTIFY_OPT_NOTIFY_HISTORY_SET: + pNotifyOption->notifyHistory = taosStr2Int8(pToken->z, NULL, 10); + break; + default: + break; + } + BIT_FLAG_SET_MASK(pNotifyOption->setFlag, setFlag); + return pNode; +_err: + nodesDestroyNode(pNode); + return NULL; +} + SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable, - SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols) { + SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols, + SNode* pNotifyOptions) { CHECK_PARSER_STATUS(pCxt); CHECK_NAME(checkStreamName(pCxt, pStreamName)); SCreateStreamStmt* pStmt = NULL; @@ -3662,6 +3781,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken pStmt->pTags = pTags; pStmt->pSubtable = pSubtable; pStmt->pCols = pCols; + pStmt->pNotifyOptions = (SStreamNotifyOptions*)pNotifyOptions; return (SNode*)pStmt; _err: nodesDestroyNode(pRealTable); @@ -3670,6 +3790,7 @@ _err: nodesDestroyList(pTags); nodesDestroyNode(pSubtable); nodesDestroyList(pCols); + nodesDestroyNode(pNotifyOptions); return NULL; } @@ -3714,6 +3835,20 @@ _err: return NULL; } +SNode* createResetStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName) { + CHECK_PARSER_STATUS(pCxt); + CHECK_NAME(checkStreamName(pCxt, pStreamName)); + SPauseStreamStmt* pStmt = NULL; + pCxt->errCode = nodesMakeNode(QUERY_NODE_RESET_STREAM_STMT, (SNode**)&pStmt); + CHECK_MAKE_NODE(pStmt); + COPY_STRING_FORM_ID_TOKEN(pStmt->streamName, pStreamName); + pStmt->ignoreNotExists = ignoreNotExists; + return (SNode*)pStmt; +_err: + return NULL; +} + + SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId) { CHECK_PARSER_STATUS(pCxt); SKillStmt* pStmt = NULL; diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 1687916cb0..e876575f48 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -770,6 +770,12 @@ static int32_t collectMetaKeyFromShowCompactDetails(SCollectMetaKeyCxt* pCxt, SS return code; } +static int32_t collectMetaKeyFromShowTransactionDetails(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, + TSDB_INS_TABLE_TRANSACTION_DETAILS, pCxt->pMetaCache); + return code; +} + static int32_t collectMetaKeyFromShowGrantsFull(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_GRANTS_FULL, pCxt->pMetaCache); @@ -1094,6 +1100,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowCompacts(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_COMPACT_DETAILS_STMT: return collectMetaKeyFromShowCompactDetails(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT: + return collectMetaKeyFromShowTransactionDetails(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_GRANTS_FULL_STMT: return collectMetaKeyFromShowGrantsFull(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_GRANTS_LOGS_STMT: diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index ea2e9d712f..7ed438a7dc 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -355,6 +355,9 @@ static SKeyword keywordTable[] = { {"FORCE_WINDOW_CLOSE", 
TK_FORCE_WINDOW_CLOSE}, {"DISK_INFO", TK_DISK_INFO}, {"AUTO", TK_AUTO}, + {"NOTIFY", TK_NOTIFY}, + {"ON_FAILURE", TK_ON_FAILURE}, + {"NOTIFY_HISTORY", TK_NOTIFY_HISTORY}, }; // clang-format on diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 2a4fdb0136..ca65fc6e84 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -371,12 +371,40 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .pShowCols = {"*"} }, { + .showType = QUERY_NODE_CREATE_TSMA_STMT, + .pDbName = "", + .pTableName = "", + .numOfShowCols = 1, + .pShowCols = {"*"} + }, + { + .showType = QUERY_NODE_SHOW_CREATE_TSMA_STMT, + .pDbName = "", + .pTableName = "", + .numOfShowCols = 1, + .pShowCols = {"*"} + }, + { + .showType = QUERY_NODE_DROP_TSMA_STMT, + .pDbName = "", + .pTableName = "", + .numOfShowCols = 1, + .pShowCols = {"*"} + }, + { .showType = QUERY_NODE_SHOW_FILESETS_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, .pTableName = TSDB_INS_TABLE_FILESETS, .numOfShowCols = 1, .pShowCols = {"*"} - }, + }, + { + .showType = QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT, + .pDbName = TSDB_INFORMATION_SCHEMA_DB, + .pTableName = TSDB_INS_TABLE_TRANSACTION_DETAILS, + .numOfShowCols = 1, + .pShowCols = {"*"} + }, }; // clang-format on @@ -12191,6 +12219,45 @@ static int32_t translateStreamOptions(STranslateContext* pCxt, SCreateStreamStmt return TSDB_CODE_SUCCESS; } +static int32_t buildStreamNotifyOptions(STranslateContext* pCxt, SStreamNotifyOptions* pNotifyOptions, + SCMCreateStreamReq* pReq) { + int32_t code = TSDB_CODE_SUCCESS; + SNode* pNode = NULL; + + if (pNotifyOptions == NULL || pNotifyOptions->pAddrUrls->length == 0) { + return code; + } + + pReq->pNotifyAddrUrls = taosArrayInit(pNotifyOptions->pAddrUrls->length, POINTER_BYTES); + if (pReq->pNotifyAddrUrls != NULL) { + FOREACH(pNode, pNotifyOptions->pAddrUrls) { + char *url = taosStrndup(((SValueNode*)pNode)->literal, TSDB_STREAM_NOTIFY_URL_LEN); + if (url == NULL) { + code = terrno; + break; + } + if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) { + code = terrno; + taosMemoryFreeClear(url); + break; + } + } + } else { + code = terrno; + } + + if (code == TSDB_CODE_SUCCESS) { + pReq->notifyEventTypes = pNotifyOptions->eventTypes; + pReq->notifyErrorHandle = pNotifyOptions->errorHandle; + pReq->notifyHistory = pNotifyOptions->notifyHistory; + } else { + taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL); + pReq->pNotifyAddrUrls = NULL; + } + + return code; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -12237,6 +12304,10 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } } + if (TSDB_CODE_SUCCESS == code) { + code = buildStreamNotifyOptions(pCxt, pStmt->pNotifyOptions, pReq); + } + return code; } @@ -12389,6 +12460,16 @@ static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* return buildCmdMsg(pCxt, TDMT_MND_RESUME_STREAM, (FSerializeFunc)tSerializeSMResumeStreamReq, &req); } +static int32_t translateResetStream(STranslateContext* pCxt, SResetStreamStmt* pStmt) { + SMResetStreamReq req = {0}; + SName name; + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + if (TSDB_CODE_SUCCESS != code) return code; + (void)tNameGetFullDbName(&name, req.name); + req.igNotExists = pStmt->ignoreNotExists; + return buildCmdMsg(pCxt, TDMT_MND_RESET_STREAM, 
(FSerializeFunc)tSerializeSMResetStreamReq, &req); +} + static int32_t validateCreateView(STranslateContext* pCxt, SCreateViewStmt* pStmt) { if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) && QUERY_NODE_SET_OPERATOR != nodeType(pStmt->pQuery)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VIEW_QUERY, "Invalid view query type"); @@ -13423,6 +13504,9 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) { case QUERY_NODE_RESUME_STREAM_STMT: code = translateResumeStream(pCxt, (SResumeStreamStmt*)pNode); break; + case QUERY_NODE_RESET_STREAM_STMT: + code = translateResetStream(pCxt, (SResetStreamStmt*)pNode); + break; case QUERY_NODE_CREATE_FUNCTION_STMT: code = translateCreateFunction(pCxt, (SCreateFunctionStmt*)pNode); break; @@ -16433,6 +16517,24 @@ static int32_t rewriteShowCompactDetailsStmt(STranslateContext* pCxt, SQuery* pQ return code; } +static int32_t rewriteShowTransactionDetailsStmt(STranslateContext* pCxt, SQuery* pQuery) { + SShowTransactionDetailsStmt* pShow = (SShowTransactionDetailsStmt*)(pQuery->pRoot); + SSelectStmt* pStmt = NULL; + int32_t code = createSelectStmtForShow(QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT, &pStmt); + if (TSDB_CODE_SUCCESS == code) { + if (NULL != pShow->pTransactionId) { + code = createOperatorNode(OP_TYPE_EQUAL, "transaction_id", pShow->pTransactionId, &pStmt->pWhere); + } + } + if (TSDB_CODE_SUCCESS == code) { + pCxt->showRewrite = true; + pQuery->showRewrite = true; + nodesDestroyNode(pQuery->pRoot); + pQuery->pRoot = (SNode*)pStmt; + } + return code; +} + static int32_t createParWhenThenNode(SNode* pWhen, SNode* pThen, SNode** ppResWhenThen) { SWhenThenNode* pWThen = NULL; int32_t code = nodesMakeNode(QUERY_NODE_WHEN_THEN, (SNode**)&pWThen); @@ -17008,6 +17110,9 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_SHOW_COMPACT_DETAILS_STMT: code = rewriteShowCompactDetailsStmt(pCxt, pQuery); break; + case QUERY_NODE_SHOW_TRANSACTION_DETAILS_STMT: + code = rewriteShowTransactionDetailsStmt(pCxt, pQuery); + break; case QUERY_NODE_SHOW_DB_ALIVE_STMT: case QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT: code = rewriteShowAliveStmt(pCxt, pQuery); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 42d7f44b62..baf36d0453 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -735,7 +735,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S !alreadyAddGroupId(pDataBlock->info.parTbName, groupId) && groupId != 0) { if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) { code = buildCtbNameAddGroupId(NULL, pDataBlock->info.parTbName, groupId, sizeof(pDataBlock->info.parTbName)); - } else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER) { + } else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER) { code = buildCtbNameAddGroupId(pTask->outputInfo.shuffleDispatcher.stbFullName, pDataBlock->info.parTbName, groupId, sizeof(pDataBlock->info.parTbName)); } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 9a2eeb9311..c2a758f490 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -198,6 +198,7 @@ int32_t streamMetaCheckBackendCompatible(SStreamMeta* pMeta) { SCheckpointInfo info; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) { + tDecoderClear(&decoder); continue; } @@ -1032,6 +1033,7 @@ int64_t 
streamMetaGetLatestCheckpointId(SStreamMeta* pMeta) { SCheckpointInfo info; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) { + tDecoderClear(&decoder); continue; } tDecoderClear(&decoder); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index d27ed520c6..5ee8bd43f5 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -326,6 +326,11 @@ void tFreeStreamTask(void* pParam) { streamTaskDestroyActiveChkptInfo(pTask->chkInfo.pActiveInfo); pTask->chkInfo.pActiveInfo = NULL; + taosArrayDestroyP(pTask->notifyInfo.pNotifyAddrUrls, NULL); + taosMemoryFreeClear(pTask->notifyInfo.streamName); + taosMemoryFreeClear(pTask->notifyInfo.stbFullName); + tDeleteSchemaWrapper(pTask->notifyInfo.pSchemaWrapper); + taosMemoryFree(pTask); stDebug("s-task:0x%x free task completed", taskId); } @@ -1318,6 +1323,78 @@ void streamTaskFreeRefId(int64_t* pRefId) { metaRefMgtRemove(pRefId); } +static int32_t tEncodeStreamNotifyInfo(SEncoder* pEncoder, const SNotifyInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + QUERY_CHECK_NULL(pEncoder, code, lino, _exit, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA); + + int32_t addrSize = taosArrayGetSize(info->pNotifyAddrUrls); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, addrSize)); + for (int32_t i = 0; i < addrSize; ++i) { + const char* url = taosArrayGetP(info->pNotifyAddrUrls, i); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, url)); + } + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyEventTypes)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyErrorHandle)); + if (addrSize > 0) { + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->streamName)); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->stbFullName)); + TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, info->pSchemaWrapper)); + } + +_exit: + if (code != TSDB_CODE_SUCCESS) { + stError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t tDecodeStreamNotifyInfo(SDecoder* pDecoder, SNotifyInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + QUERY_CHECK_NULL(pDecoder, code, lino, _exit, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA); + + int32_t addrSize = 0; + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &addrSize)); + info->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES); + QUERY_CHECK_NULL(info->pNotifyAddrUrls, code, lino, _exit, terrno); + for (int32_t i = 0; i < addrSize; ++i) { + char *url = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &url)); + url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN); + QUERY_CHECK_NULL(url, code, lino, _exit, terrno); + if (taosArrayPush(info->pNotifyAddrUrls, &url) == NULL) { + taosMemoryFree(url); + TAOS_CHECK_EXIT(terrno); + } + } + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyEventTypes)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyErrorHandle)); + if (addrSize > 0) { + char* name = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name)); + info->streamName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1); + QUERY_CHECK_NULL(info->streamName, code, lino, _exit, terrno); + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name)); + info->stbFullName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1); + QUERY_CHECK_NULL(info->stbFullName, code, lino, _exit, terrno); + info->pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper)); + if (info->pSchemaWrapper == NULL) { + 
TAOS_CHECK_EXIT(terrno); + } + TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, info->pSchemaWrapper)); + } + +_exit: + if (code != TSDB_CODE_SUCCESS) { + stError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { int32_t code = 0; @@ -1388,6 +1465,10 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5)); TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1)); + if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) { + TAOS_CHECK_EXIT(tEncodeStreamNotifyInfo(pEncoder, &pTask->notifyInfo)); + } + tEndEncode(pEncoder); _exit: return code; @@ -1486,8 +1567,12 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { } TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve)); + if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) { + TAOS_CHECK_EXIT(tDecodeStreamNotifyInfo(pDecoder, &pTask->notifyInfo)); + } + tEndDecode(pDecoder); _exit: return code; -} \ No newline at end of file +} diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 1720935e9e..33cc543b09 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -601,8 +601,10 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap // write data sRInfo(pReceiver, "snapshot receiver write about to finish, blockLen:%d seq:%d", pMsg->dataLen, pMsg->seq); if (pMsg->dataLen > 0) { + (void)taosThreadMutexLock(&pReceiver->writerMutex); code = pReceiver->pSyncNode->pFsm->FpSnapshotDoWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, pMsg->data, pMsg->dataLen); + (void)taosThreadMutexUnlock(&pReceiver->writerMutex); if (code != 0) { sRError(pReceiver, "failed to finish snapshot receiver write since %s", tstrerror(code)); TAOS_RETURN(code); diff --git a/source/libs/wal/inc/walInt.h b/source/libs/wal/inc/walInt.h index 14f6503941..3bc69a6393 100644 --- a/source/libs/wal/inc/walInt.h +++ b/source/libs/wal/inc/walInt.h @@ -156,9 +156,11 @@ static inline void walResetVer(SWalVer* pVer) { int32_t walLoadMeta(SWal* pWal); int32_t walSaveMeta(SWal* pWal); int32_t walRemoveMeta(SWal* pWal); +int32_t walRollImpl(SWal* pWal); int32_t walRollFileInfo(SWal* pWal); int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* lastVer); int32_t walCheckAndRepairMeta(SWal* pWal); +int64_t walChangeWrite(SWal* pWal, int64_t ver); int32_t walCheckAndRepairIdx(SWal* pWal); diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 470a6b3f40..b4eaa467af 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -57,18 +57,10 @@ FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL, NULL) != 0) { - wError("vgId:%d, failed to stat file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); - code = terrno; - goto _err; - } + TAOS_CHECK_GOTO(taosStatFile(fnameStr, &fileSize, NULL, NULL), &lino, _err); TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); - if (pFile == NULL) { - wError("vgId:%d, failed to open file due to %s. 
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); - *lastVer = retVer; - TAOS_RETURN(terrno); - } + TSDB_CHECK_NULL(pFile, code, lino, _err, terrno); // ensure size as non-negative pFileInfo->fileSize = TMAX(0, pFileInfo->fileSize); @@ -102,9 +94,7 @@ FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* capacity = readSize + sizeof(magic); ptr = taosMemoryRealloc(buf, capacity); - if (ptr == NULL) { - TAOS_CHECK_GOTO(terrno, &lino, _err); - } + TSDB_CHECK_NULL(ptr, code, lino, _err, terrno); buf = ptr; int64_t ret = taosLSeekFile(pFile, offset, SEEK_SET); @@ -166,9 +156,7 @@ FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* if (capacity < readSize + extraSize + sizeof(magic)) { capacity += extraSize; void* ptr = taosMemoryRealloc(buf, capacity); - if (ptr == NULL) { - TAOS_CHECK_GOTO(terrno, &lino, _err); - } + TSDB_CHECK_NULL(ptr, code, lino, _err, terrno); buf = ptr; } int64_t ret = taosLSeekFile(pFile, offset + readSize, SEEK_SET); @@ -187,10 +175,7 @@ FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* } logContent = (SWalCkHead*)(buf + pos); - code = decryptBody(&pWal->cfg, logContent, logContent->head.bodyLen, __FUNCTION__); - if (code) { - break; - } + TAOS_CHECK_GOTO(decryptBody(&pWal->cfg, logContent, logContent->head.bodyLen, __FUNCTION__), &lino, _err); if (walValidBodyCksum(logContent) != 0) { code = TSDB_CODE_WAL_CHKSUM_MISMATCH; @@ -233,13 +218,16 @@ FORCE_INLINE int32_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx, int64_t* if (pWal->cfg.level != TAOS_WAL_SKIP && taosFsyncFile(pFile) < 0) { wError("failed to fsync file due to %s. file:%s", strerror(errno), fnameStr); - TAOS_CHECK_GOTO(TAOS_SYSTEM_ERROR(errno), &lino, _err); + TAOS_CHECK_GOTO(terrno, &lino, _err); } } pFileInfo->fileSize = lastEntryEndOffset; _err: + if (code != 0) { + wError("vgId:%d, failed to scan log file due to %s, file:%s", pWal->cfg.vgId, tstrerror(terrno), fnameStr); + } taosCloseFile(&pFile); taosMemoryFree(buf); *lastVer = retVer; @@ -371,45 +359,37 @@ static int32_t walLogEntriesComplete(const SWal* pWal) { } static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { - int32_t code = TSDB_CODE_SUCCESS; - TdFilePtr pFile = NULL; + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + TdFilePtr pFile = NULL; + SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); - if (!pFileInfo) { - TAOS_RETURN(TSDB_CODE_FAILED); - } + TSDB_CHECK_NULL(pFileInfo, code, lino, _exit, terrno); char fnameStr[WAL_FILE_LEN]; walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL, NULL) != 0) { - wError("vgId:%d, failed to stat file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); - code = terrno; - goto _exit; - } + TAOS_CHECK_EXIT(taosStatFile(fnameStr, &fileSize, NULL, NULL) != 0); + int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); int64_t lastEndOffset = records * sizeof(SWalIdxEntry); - if (fileSize <= lastEndOffset) { - TAOS_RETURN(TSDB_CODE_SUCCESS); - } + if (fileSize <= lastEndOffset) TAOS_RETURN(TSDB_CODE_SUCCESS); pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); - if (pFile == NULL) { - code = terrno; - goto _exit; - } + TSDB_CHECK_NULL(pFile, code, lino, _exit, terrno); wInfo("vgId:%d, trim idx file. 
file: %s, size: %" PRId64 ", offset: %" PRId64, pWal->cfg.vgId, fnameStr, fileSize, lastEndOffset); - code = taosFtruncateFile(pFile, lastEndOffset); - if (code < 0) { - wError("vgId:%d, failed to truncate file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); - goto _exit; - } + TAOS_CHECK_EXIT(taosFtruncateFile(pFile, lastEndOffset)); _exit: + if (code != TSDB_CODE_SUCCESS) { + wError("vgId:%d, failed to trim idx file %s due to %s", pWal->cfg.vgId, fnameStr, tstrerror(code)); + } + (void)taosCloseFile(&pFile); TAOS_RETURN(code); } @@ -425,35 +405,34 @@ static void printFileSet(int32_t vgId, SArray* fileSet, const char* str) { } } +void walRegfree(regex_t* ptr) { + if (ptr == NULL) { + return; + } + regfree(ptr); +} + int32_t walCheckAndRepairMeta(SWal* pWal) { // load log files, get first/snapshot/last version info int32_t code = 0; + int32_t lino = 0; const char* logPattern = "^[0-9]+.log$"; const char* idxPattern = "^[0-9]+.idx$"; - regex_t logRegPattern; - regex_t idxRegPattern; + regex_t logRegPattern, idxRegPattern; + TdDirPtr pDir = NULL; + SArray* actualLog = NULL; wInfo("vgId:%d, begin to repair meta, wal path:%s, firstVer:%" PRId64 ", lastVer:%" PRId64 ", snapshotVer:%" PRId64, pWal->cfg.vgId, pWal->path, pWal->vers.firstVer, pWal->vers.lastVer, pWal->vers.snapshotVer); - if (regcomp(&logRegPattern, logPattern, REG_EXTENDED) != 0) { - wError("failed to compile log pattern, error:%s", tstrerror(terrno)); - return terrno; - } - if (regcomp(&idxRegPattern, idxPattern, REG_EXTENDED) != 0) { - wError("failed to compile idx pattern"); - return terrno; - } + TAOS_CHECK_EXIT_SET_CODE(regcomp(&logRegPattern, logPattern, REG_EXTENDED), code, terrno); - TdDirPtr pDir = taosOpenDir(pWal->path); - if (pDir == NULL) { - regfree(&logRegPattern); - regfree(&idxRegPattern); - wError("vgId:%d, path:%s, failed to open since %s", pWal->cfg.vgId, pWal->path, strerror(errno)); - return terrno; - } + TAOS_CHECK_EXIT_SET_CODE(regcomp(&idxRegPattern, idxPattern, REG_EXTENDED), code, terrno); - SArray* actualLog = taosArrayInit(8, sizeof(SWalFileInfo)); + pDir = taosOpenDir(pWal->path); + TSDB_CHECK_NULL(pDir, code, lino, _exit, terrno); + + actualLog = taosArrayInit(8, sizeof(SWalFileInfo)); // scan log files and build new meta TdDirEntryPtr pDirEntry; @@ -464,28 +443,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { SWalFileInfo fileInfo; (void)memset(&fileInfo, -1, sizeof(SWalFileInfo)); (void)sscanf(name, "%" PRId64 ".log", &fileInfo.firstVer); - if (!taosArrayPush(actualLog, &fileInfo)) { - regfree(&logRegPattern); - regfree(&idxRegPattern); - int32_t ret = taosCloseDir(&pDir); - if (ret != 0) { - wError("failed to close dir, ret:%s", tstrerror(ret)); - return terrno; - } - - return terrno; - } + TSDB_CHECK_NULL(taosArrayPush(actualLog, &fileInfo), code, lino, _exit, terrno); } } - int32_t ret = taosCloseDir(&pDir); - if (ret != 0) { - wError("failed to close dir, ret:%s", tstrerror(ret)); - return terrno; - } - regfree(&logRegPattern); - regfree(&idxRegPattern); - taosArraySort(actualLog, compareWalFileInfo); wInfo("vgId:%d, actual log file, wal path:%s, num:%d", pWal->cfg.vgId, pWal->path, @@ -500,11 +461,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { bool updateMeta = (metaFileNum != actualFileNum); // rebuild meta of file info - code = walRebuildFileInfoSet(pWal->fileInfoSet, actualLog); - taosArrayDestroy(actualLog); - if (code) { - TAOS_RETURN(code); - } + TAOS_CHECK_EXIT(walRebuildFileInfoSet(pWal->fileInfoSet, actualLog)); wInfo("vgId:%d, log file in meta, wal path:%s, 
num:%d", pWal->cfg.vgId, pWal->path, (int32_t)taosArrayGetSize(pWal->fileInfoSet)); @@ -521,12 +478,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); - int32_t code = taosStatFile(fnameStr, &fileSize, NULL, NULL); - if (code < 0) { - wError("failed to stat file since %s. file:%s", terrstr(), fnameStr); - - TAOS_RETURN(terrno); - } + TAOS_CHECK_EXIT(taosStatFile(fnameStr, &fileSize, NULL, NULL)); if (pFileInfo->lastVer >= pFileInfo->firstVer && fileSize == pFileInfo->fileSize) { totSize += pFileInfo->fileSize; @@ -581,22 +533,24 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { wInfo("vgId:%d, success to repair meta, wal path:%s, firstVer:%" PRId64 ", lastVer:%" PRId64 ", snapshotVer:%" PRId64, pWal->cfg.vgId, pWal->path, pWal->vers.firstVer, pWal->vers.lastVer, pWal->vers.snapshotVer); +_exit: + if (code != TSDB_CODE_SUCCESS) { + wError("vgId:%d, failed to repair meta due to %s, at line:%d", pWal->cfg.vgId, tstrerror(code), lino); + } + taosArrayDestroy(actualLog); + TAOS_UNUSED(taosCloseDir(&pDir)); + walRegfree(&logRegPattern); + walRegfree(&idxRegPattern); return code; } static int32_t walReadLogHead(TdFilePtr pLogFile, int64_t offset, SWalCkHead* pCkHead) { - if (taosLSeekFile(pLogFile, offset, SEEK_SET) < 0) { - TAOS_RETURN(terrno); - } + if (taosLSeekFile(pLogFile, offset, SEEK_SET) < 0) return terrno; - if (taosReadFile(pLogFile, pCkHead, sizeof(SWalCkHead)) != sizeof(SWalCkHead)) { - TAOS_RETURN(terrno); - } + if (taosReadFile(pLogFile, pCkHead, sizeof(SWalCkHead)) != sizeof(SWalCkHead)) return terrno; - if (walValidHeadCksum(pCkHead) != 0) { - TAOS_RETURN(TSDB_CODE_WAL_CHKSUM_MISMATCH); - } + if (walValidHeadCksum(pCkHead) != 0) return TSDB_CODE_WAL_CHKSUM_MISMATCH; return TSDB_CODE_SUCCESS; } @@ -838,29 +792,17 @@ int32_t walMetaSerialize(SWal* pWal, char** serialized) { TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); } - if (!cJSON_AddItemToObject(pRoot, "meta", pMeta)) { - wInfo("vgId:%d, failed to add meta to root", pWal->cfg.vgId); - } + if (!cJSON_AddItemToObject(pRoot, "meta", pMeta)) goto _err; snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pWal->vers.firstVer); - if (cJSON_AddStringToObject(pMeta, "firstVer", buf) == NULL) { - wInfo("vgId:%d, failed to add firstVer to meta", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pMeta, "firstVer", buf) == NULL) goto _err; (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pWal->vers.snapshotVer); - if (cJSON_AddStringToObject(pMeta, "snapshotVer", buf) == NULL) { - wInfo("vgId:%d, failed to add snapshotVer to meta", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pMeta, "snapshotVer", buf) == NULL) goto _err; (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pWal->vers.commitVer); - if (cJSON_AddStringToObject(pMeta, "commitVer", buf) == NULL) { - wInfo("vgId:%d, failed to add commitVer to meta", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pMeta, "commitVer", buf) == NULL) goto _err; (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pWal->vers.lastVer); - if (cJSON_AddStringToObject(pMeta, "lastVer", buf) == NULL) { - wInfo("vgId:%d, failed to add lastVer to meta", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pMeta, "lastVer", buf) == NULL) goto _err; - if (!cJSON_AddItemToObject(pRoot, "files", pFiles)) { - wInfo("vgId:%d, failed to add files to root", pWal->cfg.vgId); - } + if (!cJSON_AddItemToObject(pRoot, "files", pFiles)) goto _err; SWalFileInfo* pData = pWal->fileInfoSet->pData; for (int i = 0; i < 
sz; i++) { SWalFileInfo* pInfo = &pData[i]; @@ -869,31 +811,20 @@ int32_t walMetaSerialize(SWal* pWal, char** serialized) { } if (pField == NULL) { cJSON_Delete(pRoot); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); } // cjson only support int32_t or double // string are used to prohibit the loss of precision (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pInfo->firstVer); - if (cJSON_AddStringToObject(pField, "firstVer", buf) == NULL) { - wInfo("vgId:%d, failed to add firstVer to field", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pField, "firstVer", buf) == NULL) goto _err; (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pInfo->lastVer); - if (cJSON_AddStringToObject(pField, "lastVer", buf) == NULL) { - wInfo("vgId:%d, failed to add lastVer to field", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pField, "lastVer", buf) == NULL) goto _err; (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pInfo->createTs); - if (cJSON_AddStringToObject(pField, "createTs", buf) == NULL) { - wInfo("vgId:%d, failed to add createTs to field", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pField, "createTs", buf) == NULL) goto _err; (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pInfo->closeTs); - if (cJSON_AddStringToObject(pField, "closeTs", buf) == NULL) { - wInfo("vgId:%d, failed to add closeTs to field", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pField, "closeTs", buf) == NULL) goto _err; (void)snprintf(buf, WAL_JSON_BUF_SIZE, "%" PRId64, pInfo->fileSize); - if (cJSON_AddStringToObject(pField, "fileSize", buf) == NULL) { - wInfo("vgId:%d, failed to add fileSize to field", pWal->cfg.vgId); - } + if (cJSON_AddStringToObject(pField, "fileSize", buf) == NULL) goto _err; } char* pSerialized = cJSON_Print(pRoot); cJSON_Delete(pRoot); @@ -901,6 +832,9 @@ int32_t walMetaSerialize(SWal* pWal, char** serialized) { *serialized = pSerialized; TAOS_RETURN(TSDB_CODE_SUCCESS); +_err: + cJSON_Delete(pRoot); + return TSDB_CODE_FAILED; } int32_t walMetaDeserialize(SWal* pWal, const char* bytes) { @@ -1109,8 +1043,12 @@ _err: } int32_t walLoadMeta(SWal* pWal) { - int32_t code = 0; - int n = 0; + int32_t code = 0; + int n = 0; + int32_t lino = 0; + char* buf = NULL; + TdFilePtr pFile = NULL; + // find existing meta file int metaVer = walFindCurMetaVer(pWal); if (metaVer == -1) { @@ -1125,11 +1063,7 @@ int32_t walLoadMeta(SWal* pWal) { } // read metafile int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL, NULL) != 0) { - wError("vgId:%d, failed to stat file due to %s. 
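The walMetaSerialize() hunk above replaces the per-field wInfo() warnings with a single goto _err path that deletes the root object once. A minimal stand-alone sketch of that shape, using plain cJSON (1.7-style helpers that return the created item) rather than the real TDengine types, might look like this:

/* build_meta_sketch.c -- illustrative only: "firstVer"/"lastVer" mirror the
 * fields serialized above, but this is not the actual walMetaSerialize().
 * Adjust the include path to match your cJSON installation. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <cjson/cJSON.h>

static int build_meta_json(int64_t firstVer, int64_t lastVer, char **out) {
  char   buf[32];
  cJSON *root = cJSON_CreateObject();
  if (root == NULL) return -1;

  cJSON *meta = cJSON_CreateObject();
  if (meta == NULL) goto _err;
  cJSON_AddItemToObject(root, "meta", meta); /* root now owns meta */

  snprintf(buf, sizeof(buf), "%" PRId64, firstVer);
  if (cJSON_AddStringToObject(meta, "firstVer", buf) == NULL) goto _err;
  snprintf(buf, sizeof(buf), "%" PRId64, lastVer);
  if (cJSON_AddStringToObject(meta, "lastVer", buf) == NULL) goto _err;

  *out = cJSON_PrintUnformatted(root);
  cJSON_Delete(root);
  return (*out != NULL) ? 0 : -1;

_err: /* single cleanup path, as in the hunk above */
  cJSON_Delete(root);
  return -1;
}

int main(void) {
  char *json = NULL;
  if (build_meta_json(100, 200, &json) == 0) {
    printf("%s\n", json);
    cJSON_free(json);
  }
  return 0;
}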
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); - code = terrno; - TAOS_RETURN(code); - } + TAOS_CHECK_EXIT(taosStatFile(fnameStr, &fileSize, NULL, NULL)); if (fileSize == 0) { code = taosRemoveFile(fnameStr); if (code) { @@ -1141,37 +1075,37 @@ int32_t walLoadMeta(SWal* pWal) { TAOS_RETURN(TSDB_CODE_FAILED); } - int size = (int)fileSize; - char* buf = taosMemoryMalloc(size + 5); - if (buf == NULL) { - TAOS_RETURN(terrno); - } + int size = (int)fileSize; + buf = taosMemoryMalloc(size + 5); + TSDB_CHECK_NULL(buf, code, lino, _exit, TSDB_CODE_OUT_OF_MEMORY); + (void)memset(buf, 0, size + 5); - TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ); - if (pFile == NULL) { - taosMemoryFree(buf); + pFile = taosOpenFile(fnameStr, TD_FILE_READ); + TSDB_CHECK_NULL(pFile, code, lino, _exit, terrno); - TAOS_RETURN(TSDB_CODE_WAL_FILE_CORRUPTED); - } if (taosReadFile(pFile, buf, size) != size) { - (void)taosCloseFile(&pFile); - taosMemoryFree(buf); - - TAOS_RETURN(terrno); + code = terrno; + goto _exit; } + // load into fileInfoSet code = walMetaDeserialize(pWal, buf); if (code < 0) { wError("failed to deserialize wal meta. file:%s", fnameStr); code = TSDB_CODE_WAL_FILE_CORRUPTED; } - (void)taosCloseFile(&pFile); - taosMemoryFree(buf); wInfo("vgId:%d, meta file loaded: %s, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileInfoSet size:%d", pWal->cfg.vgId, fnameStr, pWal->vers.firstVer, pWal->vers.lastVer, (int32_t)taosArrayGetSize(pWal->fileInfoSet)); printFileSet(pWal->cfg.vgId, pWal->fileInfoSet, "file in meta"); +_exit: + if (code != TSDB_CODE_SUCCESS) { + wError("vgId:%d, failed to load meta file due to %s, at line:%d", pWal->cfg.vgId, tstrerror(code), lino); + } + + taosMemoryFree(buf); + (void)taosCloseFile(&pFile); TAOS_RETURN(code); } diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 998668beac..4eaf0a9399 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -92,18 +92,15 @@ void walApplyVer(SWal *pWal, int64_t ver) { } int32_t walCommit(SWal *pWal, int64_t ver) { - if (ver < pWal->vers.commitVer) { - TAOS_RETURN(TSDB_CODE_SUCCESS); - } - if (ver > pWal->vers.lastVer || pWal->vers.commitVer < pWal->vers.snapshotVer) { - TAOS_RETURN(TSDB_CODE_WAL_INVALID_VER); - } + if (ver < pWal->vers.commitVer) TAOS_RETURN(TSDB_CODE_SUCCESS); + if (ver > pWal->vers.lastVer || pWal->vers.commitVer < pWal->vers.snapshotVer) TAOS_RETURN(TSDB_CODE_WAL_INVALID_VER); + pWal->vers.commitVer = ver; TAOS_RETURN(TSDB_CODE_SUCCESS); } -static int64_t walChangeWrite(SWal *pWal, int64_t ver) { +int64_t walChangeWrite(SWal *pWal, int64_t ver) { int code; TdFilePtr pIdxTFile, pLogTFile; char fnameStr[WAL_FILE_LEN]; @@ -161,6 +158,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) { TAOS_UNUSED(taosThreadRwlockWrlock(&pWal->mutex)); wInfo("vgId:%d, wal rollback for version %" PRId64, pWal->cfg.vgId, ver); int32_t code = 0; + int32_t lino = 0; int64_t ret; char fnameStr[WAL_FILE_LEN]; TdFilePtr pIdxFile = NULL, pLogFile = NULL; @@ -172,11 +170,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) { // find correct file if (ver < walGetLastFileFirstVer(pWal)) { // change current files - ret = walChangeWrite(pWal, ver); - if (ret < 0) { - code = terrno; - goto _exit; - } + TAOS_CHECK_EXIT_SET_CODE(walChangeWrite(pWal, ver), code, terrno); // delete files in descending order int fileSetSize = taosArrayGetSize(pWal->fileInfoSet); @@ -198,10 +192,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) { walBuildIdxName(pWal, walGetCurFileFirstVer(pWal), fnameStr); pIdxFile = 
taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); - if (pIdxFile == NULL) { - code = terrno; - goto _exit; - } + TSDB_CHECK_NULL(pIdxFile, code, lino, _exit, terrno); int64_t idxOff = walGetVerIdxOffset(pWal, ver); ret = taosLSeekFile(pIdxFile, idxOff, SEEK_SET); if (ret < 0) { @@ -218,11 +209,8 @@ int32_t walRollback(SWal *pWal, int64_t ver) { walBuildLogName(pWal, walGetCurFileFirstVer(pWal), fnameStr); pLogFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); wDebug("vgId:%d, wal truncate file %s", pWal->cfg.vgId, fnameStr); - if (pLogFile == NULL) { - // TODO - code = terrno; - goto _exit; - } + TSDB_CHECK_NULL(pLogFile, code, lino, _exit, terrno); + ret = taosLSeekFile(pLogFile, entry.offset, SEEK_SET); if (ret < 0) { // TODO @@ -238,35 +226,26 @@ int32_t walRollback(SWal *pWal, int64_t ver) { } code = walValidHeadCksum(&head); - if (code != 0) { - code = TSDB_CODE_WAL_FILE_CORRUPTED; - goto _exit; - } - if (head.head.version != ver) { + if (code != 0 || head.head.version != ver) { code = TSDB_CODE_WAL_FILE_CORRUPTED; goto _exit; } // truncate old files - code = taosFtruncateFile(pLogFile, entry.offset); - if (code < 0) { - goto _exit; - } - code = taosFtruncateFile(pIdxFile, idxOff); - if (code < 0) { - goto _exit; - } + if ((code = taosFtruncateFile(pLogFile, entry.offset)) < 0) goto _exit; + + if ((code = taosFtruncateFile(pIdxFile, idxOff)) < 0) goto _exit; + pWal->vers.lastVer = ver - 1; ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->lastVer = ver - 1; ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize = entry.offset; - code = walSaveMeta(pWal); - if (code < 0) { - wError("vgId:%d, failed to save meta since %s", pWal->cfg.vgId, terrstr()); - goto _exit; - } + TAOS_CHECK_EXIT(walSaveMeta(pWal)); _exit: + if (code != 0) { + wError("vgId:%d, %s failed at line %d since %s", pWal->cfg.vgId, __func__, lino, tstrerror(code)); + } TAOS_UNUSED(taosCloseFile(&pIdxFile)); TAOS_UNUSED(taosCloseFile(&pLogFile)); TAOS_UNUSED(taosThreadRwlockUnlock(&pWal->mutex)); @@ -274,7 +253,7 @@ _exit: TAOS_RETURN(code); } -static int32_t walRollImpl(SWal *pWal) { +int32_t walRollImpl(SWal *pWal) { int32_t code = 0, lino = 0; if (pWal->cfg.level == TAOS_WAL_SKIP && pWal->pIdxFile != NULL && pWal->pLogFile != NULL) { @@ -306,15 +285,12 @@ static int32_t walRollImpl(SWal *pWal) { char fnameStr[WAL_FILE_LEN]; walBuildIdxName(pWal, newFileFirstVer, fnameStr); pIdxFile = taosOpenFile(fnameStr, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); - if (pIdxFile == NULL) { - TAOS_CHECK_GOTO(terrno, &lino, _exit); - } + TSDB_CHECK_NULL(pIdxFile, code, lino, _exit, terrno); + walBuildLogName(pWal, newFileFirstVer, fnameStr); pLogFile = taosOpenFile(fnameStr, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); wDebug("vgId:%d, wal create new file for write:%s", pWal->cfg.vgId, fnameStr); - if (pLogFile == NULL) { - TAOS_CHECK_GOTO(terrno, &lino, _exit); - } + TSDB_CHECK_NULL(pLogFile, code, lino, _exit, terrno); TAOS_CHECK_GOTO(walRollFileInfo(pWal), &lino, _exit); @@ -358,6 +334,7 @@ static FORCE_INLINE int32_t walCheckAndRoll(SWal *pWal) { int32_t walBeginSnapshot(SWal *pWal, int64_t ver, int64_t logRetention) { int32_t code = 0; + int32_t lino = 0; if (pWal->cfg.level == TAOS_WAL_SKIP) { TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -375,16 +352,13 @@ int32_t walBeginSnapshot(SWal *pWal, int64_t ver, int64_t logRetention) { ", last ver %" PRId64, pWal->cfg.vgId, ver, pWal->vers.logRetention, pWal->vers.firstVer, pWal->vers.lastVer); // check file rolling - if 
(walGetLastFileSize(pWal) != 0) { - if ((code = walRollImpl(pWal)) < 0) { - wError("vgId:%d, failed to roll wal files since %s", pWal->cfg.vgId, terrstr()); - goto _exit; - } - } + if (walGetLastFileSize(pWal) != 0 && (code = walRollImpl(pWal)) < 0) goto _exit; _exit: + if (code) { + wError("vgId:%d, %s failed since %s at line %d", pWal->cfg.vgId, __func__, tstrerror(code), lino); + } TAOS_UNUSED(taosThreadRwlockUnlock(&pWal->mutex)); - TAOS_RETURN(code); } @@ -515,6 +489,13 @@ _exit: return code; } +static void walStopDnode(SWal *pWal) { + if (pWal->stopDnode != NULL) { + wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); + pWal->stopDnode(); + } +} + static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) { int32_t code = 0; @@ -528,12 +509,7 @@ static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) { int64_t size = taosWriteFile(pWal->pIdxFile, &entry, sizeof(SWalIdxEntry)); if (size != sizeof(SWalIdxEntry)) { wError("vgId:%d, failed to write idx entry due to %s. ver:%" PRId64, pWal->cfg.vgId, strerror(errno), ver); - - if (pWal->stopDnode != NULL) { - wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); - pWal->stopDnode(); - } - + walStopDnode(pWal); TAOS_RETURN(terrno); } @@ -579,12 +555,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy code = terrno; wError("vgId:%d, file:%" PRId64 ".log, failed to write since %s", pWal->cfg.vgId, walGetLastFileFirstVer(pWal), strerror(errno)); - - if (pWal->stopDnode != NULL) { - wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); - pWal->stopDnode(); - } - + walStopDnode(pWal); TAOS_CHECK_GOTO(code, &lino, _exit); } @@ -597,12 +568,8 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy cyptedBodyLen = ENCRYPTED_LEN(cyptedBodyLen); newBody = taosMemoryMalloc(cyptedBodyLen); - if (newBody == NULL) { - wError("vgId:%d, file:%" PRId64 ".log, failed to malloc since %s", pWal->cfg.vgId, walGetLastFileFirstVer(pWal), - strerror(errno)); + TSDB_CHECK_NULL(newBody, code, lino, _exit, terrno); - TAOS_CHECK_GOTO(terrno, &lino, _exit); - } (void)memset(newBody, 0, cyptedBodyLen); (void)memcpy(newBody, body, plainBodyLen); @@ -641,10 +608,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy taosMemoryFreeClear(newBodyEncrypted); } - if (pWal->stopDnode != NULL) { - wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); - pWal->stopDnode(); - } + walStopDnode(pWal); TAOS_CHECK_GOTO(code, &lino, _exit); } @@ -652,8 +616,6 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy if (pWal->cfg.encryptAlgorithm == DND_CA_SM4) { taosMemoryFreeClear(newBody); taosMemoryFreeClear(newBodyEncrypted); - // wInfo("vgId:%d, free newBody newBodyEncrypted %s", - // pWal->cfg.vgId, __FUNCTION__); } // set status @@ -668,6 +630,10 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy return 0; _exit: + if (code) { + wError("vgId:%d, %s failed at line %d since %s", pWal->cfg.vgId, __func__, lino, tstrerror(code)); + } + // recover in a reverse order if (taosFtruncateFile(pWal->pLogFile, offset) < 0) { wFatal("vgId:%d, failed to recover WAL logfile from write error since %s, offset:%" PRId64, pWal->cfg.vgId, diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index b2875bdca1..9e364717f4 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -627,6 +627,68 @@ TEST_F(WalKeepEnv, walRollback) { ASSERT_EQ(code, 
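The repeated "if (pWal->stopDnode != NULL) { wWarn(...); pWal->stopDnode(); }" blocks in the write path are folded into the new walStopDnode() helper, so the null check and the warning live in one place. The same idea reduced to a self-contained toy (DemoWal is illustrative, not the real SWal, whose stopDnode callback is wired up by the dnode layer):

/* stop_callback_sketch.c */
#include <stdio.h>
#include <stddef.h>

typedef void (*StopDnodeFp)(void);

typedef struct {
  int         vgId;
  StopDnodeFp stopDnode; /* optional: may legitimately be NULL */
} DemoWal;

/* single helper instead of repeating the NULL check at every error site */
static void demoStopDnode(DemoWal *pWal) {
  if (pWal->stopDnode != NULL) {
    fprintf(stderr, "vgId:%d, set stop dnode flag\n", pWal->vgId);
    pWal->stopDnode();
  }
}

static void fakeStop(void) { fprintf(stderr, "dnode asked to stop\n"); }

int main(void) {
  DemoWal w1 = {.vgId = 2, .stopDnode = fakeStop};
  DemoWal w2 = {.vgId = 3, .stopDnode = NULL};
  demoStopDnode(&w1); /* fires the callback */
  demoStopDnode(&w2); /* silently does nothing */
  return 0;
}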
0); } +TEST_F(WalKeepEnv, walChangeWrite) { + walResetEnv(); + int code; + + int i; + for (i = 0; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + + code = walChangeWrite(pWal, 50); + ASSERT_EQ(code, 0); +} + +TEST_F(WalCleanEnv, walRepairLogFileTs2) { + int code; + + int i; + for (i = 0; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + + code = walRollImpl(pWal); + ASSERT_EQ(code, 0); + + for (i = 100; i < 200; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + + code = walRollImpl(pWal); + ASSERT_EQ(code, 0); + + for (i = 200; i < 300; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + + code = walRollImpl(pWal); + ASSERT_EQ(code, 0); + + // Try to step in ts repair logic. + SWalFileInfo* pFileInfo = (SWalFileInfo*)taosArrayGet(pWal->fileInfoSet, 2); + pFileInfo->closeTs = -1; + + code = walCheckAndRepairMeta(pWal); + ASSERT_EQ(code, 0); +} + TEST_F(WalRetentionEnv, repairMeta1) { walResetEnv(); int code; diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index 9ca53c8202..54d9be4df6 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -46,24 +46,29 @@ int32_t cfgSetItemVal(SConfigItem *pItem, const char *name, const char *value, E extern char **environ; int32_t cfgInit(SConfig **ppCfg) { - SConfig *pCfg = taosMemoryCalloc(1, sizeof(SConfig)); - if (pCfg == NULL) { - TAOS_RETURN(terrno); - } + int32_t code = 0; + int32_t lino = 0; + SConfig *pCfg = NULL; + pCfg = taosMemoryCalloc(1, sizeof(SConfig)); + if (pCfg == NULL) return terrno; + + pCfg->localArray = NULL, pCfg->globalArray = NULL; pCfg->localArray = taosArrayInit(64, sizeof(SConfigItem)); - if (pCfg->localArray == NULL) { - taosMemoryFree(pCfg); - TAOS_RETURN(terrno); - } + TSDB_CHECK_NULL(pCfg->localArray, code, lino, _exit, terrno); + pCfg->globalArray = taosArrayInit(64, sizeof(SConfigItem)); - if (pCfg->globalArray == NULL) { - taosMemoryFree(pCfg); - TAOS_RETURN(terrno); - } + TSDB_CHECK_NULL(pCfg->globalArray, code, lino, _exit, terrno); TAOS_CHECK_RETURN(taosThreadMutexInit(&pCfg->lock, NULL)); *ppCfg = pCfg; + +_exit: + if (code != 0) { + uError("failed to init config, since %s ,at line %d", tstrerror(code), lino); + cfgCleanup(pCfg); + } + TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -187,14 +192,11 @@ int32_t cfgGetGlobalSize(SConfig *pCfg) { return taosArrayGetSize(pCfg->globalAr static int32_t cfgCheckAndSetConf(SConfigItem *pItem, const char *conf) { cfgItemFreeVal(pItem); - if (!(pItem->str == NULL)) { - return TSDB_CODE_INVALID_PARA; - } + if (!(pItem->str == NULL)) return TSDB_CODE_INVALID_PARA; pItem->str = taosStrdup(conf); - if (pItem->str == NULL) { - TAOS_RETURN(terrno); - } + + if (pItem->str == NULL) return terrno; TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -209,9 +211,8 @@ static int32_t cfgCheckAndSetDir(SConfigItem *pItem, const char *inputDir) { taosMemoryFreeClear(pItem->str); pItem->str = taosStrdup(fullDir); - if (pItem->str == NULL) { - TAOS_RETURN(terrno); - } + + if (pItem->str == NULL) return terrno; TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -219,9 
+220,8 @@ static int32_t cfgCheckAndSetDir(SConfigItem *pItem, const char *inputDir) { static int32_t cfgSetBool(SConfigItem *pItem, const char *value, ECfgSrcType stype) { int32_t code = 0; bool tmp = false; - if (strcasecmp(value, "true") == 0) { - tmp = true; - } + if (strcasecmp(value, "true") == 0) tmp = true; + int32_t val = 0; if ((code = taosStr2int32(value, &val)) == 0 && val > 0) { tmp = true; @@ -441,9 +441,7 @@ static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool rese // logflag names that should 'not' be set by 'debugFlag' if (pDebugFlagItem->array == NULL) { pDebugFlagItem->array = taosArrayInit(16, sizeof(SLogVar)); - if (pDebugFlagItem->array == NULL) { - TAOS_RETURN(terrno); - } + if (pDebugFlagItem->array == NULL) return terrno; } taosArrayClear(pDebugFlagItem->array); TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -454,9 +452,7 @@ static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool rese if (pDebugFlagItem->array != NULL) { SLogVar logVar = {0}; tstrncpy(logVar.name, name, TSDB_LOG_VAR_LEN); - if (NULL == taosArrayPush(pDebugFlagItem->array, &logVar)) { - TAOS_RETURN(terrno); - } + if (NULL == taosArrayPush(pDebugFlagItem->array, &logVar)) return terrno; } TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -518,9 +514,8 @@ _exit: int32_t cfgSetItemVal(SConfigItem *pItem, const char *name, const char *value, ECfgSrcType stype) { int32_t code = TSDB_CODE_SUCCESS; - if (pItem == NULL) { - TAOS_RETURN(TSDB_CODE_CFG_NOT_FOUND); - } + if (pItem == NULL) return TSDB_CODE_CFG_NOT_FOUND; + switch (pItem->dtype) { case CFG_DTYPE_BOOL: { code = cfgSetBool(pItem, value, stype); @@ -589,10 +584,7 @@ SConfigItem *cfgGetItem(SConfig *pCfg, const char *pName) { } void cfgLock(SConfig *pCfg) { - if (pCfg == NULL) { - return; - } - + if (pCfg == NULL) return; (void)taosThreadMutexLock(&pCfg->lock); } @@ -600,7 +592,7 @@ void cfgUnLock(SConfig *pCfg) { (void)taosThreadMutexUnlock(&pCfg->lock); } int32_t checkItemDyn(SConfigItem *pItem, bool isServer) { if (pItem->dynScope == CFG_DYN_NONE) { - return TSDB_CODE_SUCCESS; + return TSDB_CODE_INVALID_CFG; } if (isServer) { if (pItem->dynScope == CFG_DYN_CLIENT || pItem->dynScope == CFG_DYN_CLIENT_LAZY) { @@ -617,39 +609,33 @@ int32_t checkItemDyn(SConfigItem *pItem, bool isServer) { int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *pVal, bool isServer, CfgAlterType alterType) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; cfgLock(pCfg); SConfigItem *pItem = cfgGetItem(pCfg, name); - if (pItem == NULL) { - cfgUnLock(pCfg); - TAOS_RETURN(TSDB_CODE_CFG_NOT_FOUND); - } - int32_t code = checkItemDyn(pItem, isServer); - if (code != TSDB_CODE_SUCCESS) { - cfgUnLock(pCfg); - TAOS_RETURN(code); - } + TSDB_CHECK_NULL(pItem, code, lino, _exit, TSDB_CODE_CFG_NOT_FOUND); + + TAOS_CHECK_EXIT(checkItemDyn(pItem, isServer)); if ((pItem->category == CFG_CATEGORY_GLOBAL) && alterType == CFG_ALTER_DNODE) { uError("failed to config:%s, not support update global config on only one dnode", name); - cfgUnLock(pCfg); - TAOS_RETURN(TSDB_CODE_INVALID_CFG); + code = TSDB_CODE_INVALID_CFG; + goto _exit; } switch (pItem->dtype) { case CFG_DTYPE_STRING: { if (strcasecmp(name, "slowLogScope") == 0) { char *tmp = taosStrdup(pVal); if (!tmp) { - cfgUnLock(pCfg); - uError("failed to config:%s since %s", name, terrstr()); - TAOS_RETURN(terrno); + code = terrno; + goto _exit; } int32_t scope = 0; - int32_t code = taosSetSlowLogScope(tmp, &scope); + code = taosSetSlowLogScope(tmp, &scope); if (TSDB_CODE_SUCCESS != code) { 
- cfgUnLock(pCfg); taosMemoryFree(tmp); - TAOS_RETURN(code); + goto _exit; } taosMemoryFree(tmp); } @@ -659,13 +645,13 @@ int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *p code = taosStr2int32(pVal, &ival); if (code != 0 || (ival != 0 && ival != 1)) { uError("cfg:%s, type:%s value:%d out of range[0, 1]", pItem->name, cfgDtypeStr(pItem->dtype), ival); - cfgUnLock(pCfg); - TAOS_RETURN(TSDB_CODE_OUT_OF_RANGE); + code = TSDB_CODE_OUT_OF_RANGE; + goto _exit; } } break; case CFG_DTYPE_INT32: { int32_t ival; - int32_t code = (int32_t)taosStrHumanToInt32(pVal, &ival); + code = (int32_t)taosStrHumanToInt32(pVal, &ival); if (code != TSDB_CODE_SUCCESS) { cfgUnLock(pCfg); return code; @@ -673,13 +659,13 @@ int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *p if (ival < pItem->imin || ival > pItem->imax) { uError("cfg:%s, type:%s value:%d out of range[%" PRId64 ", %" PRId64 "]", pItem->name, cfgDtypeStr(pItem->dtype), ival, pItem->imin, pItem->imax); - cfgUnLock(pCfg); - TAOS_RETURN(TSDB_CODE_OUT_OF_RANGE); + code = TSDB_CODE_OUT_OF_RANGE; + goto _exit; } } break; case CFG_DTYPE_INT64: { int64_t ival; - int32_t code = taosStrHumanToInt64(pVal, &ival); + code = taosStrHumanToInt64(pVal, &ival); if (code != TSDB_CODE_SUCCESS) { cfgUnLock(pCfg); TAOS_RETURN(code); @@ -687,31 +673,32 @@ int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *p if (ival < pItem->imin || ival > pItem->imax) { uError("cfg:%s, type:%s value:%" PRId64 " out of range[%" PRId64 ", %" PRId64 "]", pItem->name, cfgDtypeStr(pItem->dtype), ival, pItem->imin, pItem->imax); - cfgUnLock(pCfg); - TAOS_RETURN(TSDB_CODE_OUT_OF_RANGE); + code = TSDB_CODE_OUT_OF_RANGE; + goto _exit; } } break; case CFG_DTYPE_FLOAT: case CFG_DTYPE_DOUBLE: { - float dval = 0; - int32_t code = parseCfgReal(pVal, &dval); - if (code != TSDB_CODE_SUCCESS) { - cfgUnLock(pCfg); - TAOS_RETURN(code); - } + float dval = 0; + TAOS_CHECK_EXIT(parseCfgReal(pVal, &dval)); + if (dval < pItem->fmin || dval > pItem->fmax) { uError("cfg:%s, type:%s value:%g out of range[%g, %g]", pItem->name, cfgDtypeStr(pItem->dtype), dval, pItem->fmin, pItem->fmax); - cfgUnLock(pCfg); - TAOS_RETURN(TSDB_CODE_OUT_OF_RANGE); + code = TSDB_CODE_OUT_OF_RANGE; + goto _exit; } } break; default: break; } +_exit: + if (code != TSDB_CODE_SUCCESS) { + uError("failed to check range for cfg:%s, value:%s, since %s at line:%d", name, pVal, tstrerror(code), __LINE__); + } cfgUnLock(pCfg); - TAOS_RETURN(TSDB_CODE_SUCCESS); + TAOS_RETURN(code); } static int32_t cfgAddItem(SConfig *pCfg, SConfigItem *pItem, const char *name) { @@ -720,9 +707,7 @@ static int32_t cfgAddItem(SConfig *pCfg, SConfigItem *pItem, const char *name) { pItem->stype = CFG_STYPE_DEFAULT; pItem->name = taosStrdup(name); - if (pItem->name == NULL) { - TAOS_RETURN(terrno); - } + if (pItem->name == NULL) return terrno; int32_t size = taosArrayGetSize(array); for (int32_t i = 0; i < size; ++i) { @@ -819,9 +804,8 @@ int32_t cfgAddString(SConfig *pCfg, const char *name, const char *defaultVal, in int8_t category) { SConfigItem item = {.dtype = CFG_DTYPE_STRING, .scope = scope, .dynScope = dynScope, .category = category}; item.str = taosStrdup(defaultVal); - if (item.str == NULL) { - TAOS_RETURN(terrno); - } + if (item.str == NULL) return terrno; + return cfgAddItem(pCfg, &item, name); } @@ -943,13 +927,9 @@ int32_t cfgDumpItemValue(SConfigItem *pItem, char *buf, int32_t bufSize, int32_t break; } - if (len < 0) { - TAOS_RETURN(TAOS_SYSTEM_ERROR(errno)); - } + if 
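cfgCheckRangeForDynUpdate() used to call cfgUnLock() on every early return; after this change the function locks once and every failure path funnels through _exit, leaving exactly one unlock site. A stand-alone sketch of that lock-once/unlock-once shape, with pthread_mutex_t standing in for TDengine's mutex wrapper (build with -pthread):

/* locked_validation_sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int checkRange(int val, int min, int max) {
  int code = 0;

  (void)pthread_mutex_lock(&lock);

  if (val < min || val > max) {
    code = -1; /* out of range */
    goto _exit;
  }
  /* ... further validation would go here ... */

_exit:
  if (code != 0) {
    fprintf(stderr, "value %d out of range [%d, %d]\n", val, min, max);
  }
  (void)pthread_mutex_unlock(&lock); /* exactly one unlock site */
  return code;
}

int main(void) {
  return (checkRange(5, 0, 10) == 0 && checkRange(42, 0, 10) != 0) ? 0 : 1;
}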
(len < 0) return terrno; - if (len > bufSize) { - len = bufSize; - } + if (len > bufSize) len = bufSize; *pLen = len; TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -1310,9 +1290,7 @@ int32_t cfgLoadFromEnvFile(SConfig *pConfig, const char *envFile) { } TdFilePtr pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_STREAM); - if (pFile == NULL) { - TAOS_RETURN(terrno); - } + if (pFile == NULL) return terrno; while (!taosEOFFile(pFile)) { name = value = value2 = value3 = value4 = NULL; @@ -1466,8 +1444,10 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) { } int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) { - char *cfgLineBuf = NULL, *name, *value, *value2, *value3, *value4; - SJson *pJson = NULL; + char *cfgLineBuf = NULL, *buf = NULL, *name, *value, *value2, *value3, *value4; + SJson *pJson = NULL; + TdFilePtr pFile = NULL; + int32_t olen, vlen, vlen2, vlen3, vlen4; int32_t code = 0, lino = 0; if (url == NULL || strlen(url) == 0) { @@ -1490,36 +1470,28 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) { } TdFilePtr pFile = taosOpenFile(filepath, TD_FILE_READ); - if (pFile == NULL) { - TAOS_CHECK_EXIT(terrno); - } + TSDB_CHECK_NULL(pFile, code, lino, _exit, terrno); + size_t fileSize = taosLSeekFile(pFile, 0, SEEK_END); if (fileSize <= 0) { - (void)taosCloseFile(&pFile); - (void)printf("load json file error: %s\n", filepath); - TAOS_CHECK_EXIT(terrno); - } - char *buf = taosMemoryMalloc(fileSize + 1); - if (!buf) { - (void)taosCloseFile(&pFile); - (void)printf("load json file error: %s, failed to alloc memory\n", filepath); - TAOS_RETURN(terrno); + code = terrno; + goto _exit; } + buf = taosMemoryMalloc(fileSize + 1); + TSDB_CHECK_NULL(buf, code, lino, _exit, terrno); + buf[fileSize] = 0; if (taosLSeekFile(pFile, 0, SEEK_SET) < 0) { - (void)taosCloseFile(&pFile); - (void)printf("load json file error: %s\n", filepath); - taosMemoryFreeClear(buf); - TAOS_RETURN(terrno); + code = terrno; + goto _exit; } + if (taosReadFile(pFile, buf, fileSize) <= 0) { - (void)taosCloseFile(&pFile); - (void)printf("load json file error: %s\n", filepath); - taosMemoryFreeClear(buf); - TAOS_RETURN(TSDB_CODE_INVALID_DATA_FMT); + code = TSDB_CODE_INVALID_DATA_FMT; + goto _exit; } - (void)taosCloseFile(&pFile); + pJson = tjsonParse(buf); if (NULL == pJson) { const char *jsonParseError = tjsonGetError(); @@ -1529,7 +1501,6 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) { taosMemoryFreeClear(buf); TAOS_CHECK_EXIT(TSDB_CODE_INVALID_DATA_FMT); } - taosMemoryFreeClear(buf); int32_t jsonArraySize = tjsonGetArraySize(pJson); for (int32_t i = 0; i < jsonArraySize; i++) { @@ -1596,17 +1567,20 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) { TAOS_RETURN(TSDB_CODE_INVALID_PARA); } - taosMemoryFree(cfgLineBuf); uInfo("load from apoll url not implemented yet"); - TAOS_RETURN(TSDB_CODE_SUCCESS); _exit: taosMemoryFree(cfgLineBuf); + taosMemoryFree(buf); + (void)taosCloseFile(&pFile); tjsonDelete(pJson); - if (code != 0) { - (void)printf("failed to load from apollo url:%s at line %d since %s\n", url, lino, tstrerror(code)); + if (code == TSDB_CODE_CFG_NOT_FOUND) { + uTrace("load from apoll url success"); + TAOS_RETURN(TSDB_CODE_SUCCESS); + } else { + (void)printf("failed to load from apoll url:%s at line %d since %s\n", url, lino, tstrerror(code)); + TAOS_RETURN(code); } - TAOS_RETURN(code); } int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char *apolloUrl) { @@ -1701,9 +1675,7 @@ struct SConfigIter { int32_t cfgCreateIter(SConfig 
*pConf, SConfigIter **ppIter) { SConfigIter *pIter = taosMemoryCalloc(1, sizeof(SConfigIter)); - if (pIter == NULL) { - TAOS_RETURN(terrno); - } + if (pIter == NULL) return terrno; pIter->pConf = pConf; @@ -1721,14 +1693,10 @@ SConfigItem *cfgNextIter(SConfigIter *pIter) { } void cfgDestroyIter(SConfigIter *pIter) { - if (pIter == NULL) { - return; - } + if (pIter == NULL) return; taosMemoryFree(pIter); } SArray *taosGetLocalCfg(SConfig *pCfg) { return pCfg->localArray; } -SArray *taosGetGlobalCfg(SConfig *pCfg) { return pCfg->globalArray; } -void taosSetLocalCfg(SConfig *pCfg, SArray *pArray) { pCfg->localArray = pArray; }; -void taosSetGlobalCfg(SConfig *pCfg, SArray *pArray) { pCfg->globalArray = pArray; }; \ No newline at end of file +SArray *taosGetGlobalCfg(SConfig *pCfg) { return pCfg->globalArray; } \ No newline at end of file diff --git a/source/util/src/terror.c b/source/util/src/terror.c index c57d278c3b..ba2d471ccf 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -327,6 +327,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_SYNC_TIMEOUT, "Sync timeout While ex TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CTX_SWITCH, "Wrong transaction execution context") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CONFLICT_COMPACT, "Transaction not completed due to conflict with compact") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR, "Unknown transaction error") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NOT_ABLE_TO_kILLED, "The transaction is not able to be killed") // mnode-mq TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_ALREADY_EXIST, "Topic already exists") diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 4f5ca8d789..03ef00a0c0 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -1490,3 +1490,32 @@ bool taosAssertRelease(bool condition) { return true; } #endif + +char* u64toaFastLut(uint64_t val, char* buf) { + static const char* lut = + "0001020304050607080910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455" + "5657585960616263646566676869707172737475767778798081828384858687888990919293949596979899"; + + char temp[24]; + char* p = temp; + + while (val >= 100) { + strncpy(p, lut + (val % 100) * 2, 2); + val /= 100; + p += 2; + } + + if (val >= 10) { + strncpy(p, lut + val * 2, 2); + p += 2; + } else if (val > 0 || p == temp) { + *(p++) = val + '0'; + } + + while (p != temp) { + *buf++ = *--p; + } + + *buf = '\0'; + return buf; +} diff --git a/source/util/test/cfgTest.cpp b/source/util/test/cfgTest.cpp index 74c34f5c91..27343c9531 100644 --- a/source/util/test/cfgTest.cpp +++ b/source/util/test/cfgTest.cpp @@ -337,6 +337,12 @@ TEST_F(CfgTest, cfgLoadFromEnvFile) { ASSERT_EQ(code, TSDB_CODE_SUCCESS); ASSERT_NE(pConfig, nullptr); + EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 0, 0), 0); + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 1, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 2, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0, 0, 0), 0); + TdFilePtr envFile = NULL; const char *envFilePath = TD_TMP_DIR_PATH "envFile"; envFile = taosOpenFile(envFilePath, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); @@ -355,6 +361,12 @@ TEST_F(CfgTest, cfgLoadFromApollUrl) { ASSERT_EQ(code, TSDB_CODE_SUCCESS); ASSERT_NE(pConfig, nullptr); + EXPECT_EQ(cfgAddBool(pConfig, "test_bool", 0, 0, 0, 0), 0); + EXPECT_EQ(cfgAddInt32(pConfig, "test_int32", 1, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddInt64(pConfig, 
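The new u64toaFastLut() in tlog.c builds the decimal string from a two-digit lookup table and returns a pointer to the terminating NUL, so a caller can keep appending after it. A small harness along the following lines (assuming it is compiled together with source/util/src/tlog.c so the symbol resolves; the prototype matches the declaration added above) can sanity-check the output against snprintf for a few representative values:

/* u64toa_check.c */
#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

char *u64toaFastLut(uint64_t val, char *buf); /* as added in tlog.c */

int main(void) {
  uint64_t samples[] = {0, 7, 42, 100, 1234, 98765432109876ULL, UINT64_MAX};
  char     got[32], want[32];
  int      bad = 0;

  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
    char *end = u64toaFastLut(samples[i], got);
    snprintf(want, sizeof(want), "%" PRIu64, samples[i]);
    printf("%20" PRIu64 " -> %-22s (len %td) %s\n", samples[i], got,
           (ptrdiff_t)(end - got), strcmp(got, want) == 0 ? "ok" : "MISMATCH");
    if (strcmp(got, want) != 0) bad++;
  }
  return bad != 0;
}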
"test_int64", 2, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0, 0, 0), 0); + EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0, 0, 0), 0); + TdFilePtr jsonFile = NULL; const char *jsonFilePath = TD_TMP_DIR_PATH "envJson.json"; jsonFile = taosOpenFile(jsonFilePath, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); diff --git a/tests/army/cluster/tsdbSnapshot.py b/tests/army/cluster/tsdbSnapshot.py new file mode 100644 index 0000000000..c008261b00 --- /dev/null +++ b/tests/army/cluster/tsdbSnapshot.py @@ -0,0 +1,264 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from datetime import timedelta +import sys +import time +import random + +import taos +import frame +import frame.etool + + +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * +from frame.srvCtl import * +from frame.clusterCommonCheck import clusterComCheck + + +class TDTestCase(TBase): + def init(self, conn, logSql, replicaVar=3): + # super(TDTestCase, self).init(conn, logSql, replicaVar=3, db="db") + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), logSql) + self.vgroupNum = 3 + self.dbName = 'test' + self.dnode1Path = tdCom.getTaosdPath() + self.dnode1Cfg = f'{self.dnode1Path}/cfg' + self.dnode1Log = f'{self.dnode1Path}/log' + + def _write_bulk_data(self): + tdLog.info("============== write bulk data ===============") + json_content = f""" +{{ + "filetype": "insert", + "cfgdir": "{self.dnode1Cfg}", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "thread_count": 16, + "create_table_thread_count": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 5, + "num_of_records_per_req": 1540, + "prepared_rand": 10000, + "chinese": "no", + "databases": [ + {{ + "dbinfo": {{ + "name": "{self.dbName}", + "drop": "yes", + "vgroups": 5, + "duration": "10d", + "wal_retention_period": 0, + "replica": 3, + "stt_trigger": 2 + }}, + "super_tables": [ + {{ + "name": "stb", + "child_table_exists": "no", + "childtable_count": 100, + "childtable_prefix": "ctb", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 500, + "data_source": "rand", + "insert_mode": "taosc", + "non_stop_mode": "no", + "line_protocol": "line", + "insert_rows": 10000, + "interlace_rows": 0, + "insert_interval": 0, + "partial_col_num": 0, + "disorder_ratio": 0, + "disorder_range": 0, + "timestamp_step": 1000, + "start_timestamp": "{(datetime.now() - timedelta(days=1)).replace(hour=10, minute=0, second=0, microsecond=0).strftime('%Y-%m-%d %H:%M:%S')}", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + {{ + "type": "bigint", + "count": 10 + }} + ], + "tags": [ + {{ + "type": "TINYINT", + "name": "groupid", + "max": 10, + "min": 1 + }}, + {{ + "name": "location", + "type": "BINARY", + "len": 16, + "values": [ + "beijing", + "shanghai" + ] + }} + ] + }} + ] + }} + ] +}} +""" + json_file = '/tmp/test.json' + with 
open(json_file, 'w') as f: + f.write(json_content) + # Use subprocess.run() to wait for the command to finish + subprocess.run(f'taosBenchmark -f {json_file}', shell=True, check=True) + + def _write_bulk_data2(self): + tdLog.info("============== write bulk data ===============") + json_content = f""" +{{ + "filetype": "insert", + "cfgdir": "{self.dnode1Cfg}", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "thread_count": 16, + "create_table_thread_count": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 5, + "num_of_records_per_req": 1540, + "prepared_rand": 10000, + "chinese": "no", + "databases": [ + {{ + "dbinfo": {{ + "name": "{self.dbName}", + "drop": "no", + "vgroups": 5, + "duration": "10d", + "wal_retention_period": 0, + "replica": 3, + "stt_trigger": 2 + }}, + "super_tables": [ + {{ + "name": "stb", + "child_table_exists": "yes", + "childtable_count": 100, + "childtable_prefix": "ctb", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 500, + "data_source": "rand", + "insert_mode": "taosc", + "non_stop_mode": "no", + "line_protocol": "line", + "insert_rows": 10000, + "interlace_rows": 0, + "insert_interval": 0, + "partial_col_num": 0, + "disorder_ratio": 0, + "disorder_range": 0, + "timestamp_step": 1000, + "start_timestamp": "{(datetime.now() - timedelta(days=1)).replace(hour=14, minute=0, second=0, microsecond=0).strftime('%Y-%m-%d %H:%M:%S')}", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + {{ + "type": "bigint", + "count": 10 + }} + ], + "tags": [ + {{ + "type": "TINYINT", + "name": "groupid", + "max": 10, + "min": 1 + }}, + {{ + "name": "location", + "type": "BINARY", + "len": 16, + "values": [ + "beijing", + "shanghai" + ] + }} + ] + }} + ] + }} + ] +}} +""" + json_file = '/tmp/test.json' + with open(json_file, 'w') as f: + f.write(json_content) + # Use subprocess.run() to wait for the command to finish + subprocess.run(f'taosBenchmark -f {json_file}', shell=True, check=True) + + def run(self): + tdLog.info("============== write bulk data ===============") + self._write_bulk_data() + + tdSql.execute(f'flush database {self.dbName}') + tdLog.sleep(10) + + tdLog.info("============== stop dnode 3 ===============") + cluster.dnodes[2].stoptaosd() + tdLog.sleep(10) + + # tdLog.info("============== write more data ===============") + self._write_bulk_data2() + + tdSql.execute(f'flush database {self.dbName}') + tdLog.sleep(10) + + cluster.dnodes[0].stoptaosd() + cluster.dnodes[1].stoptaosd() + + dnode1_wal = f'{self.dnode1Path}/data/vnode/vnode2/wal' + dnode2_wal = f'{self.dnode1Path}/../dnode2/data/vnode/vnode2/wal' + + tdLog.info("============== remove wal files ===============") + tdLog.info(f"{dnode1_wal}") + tdLog.info(f"{dnode2_wal}") + os.system(f'rm -rf {dnode1_wal}/*') + os.system(f'rm -rf {dnode2_wal}/*') + + tdLog.info("============== restart cluster ===============") + cluster.dnodes[0].starttaosd() + cluster.dnodes[1].starttaosd() + cluster.dnodes[2].starttaosd() + + tdLog.sleep(60) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index 21dc568713..2a7fcfdb9c 100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -214,6 +214,16 @@ class TDCom: telnet_url = 
"http://127.0.0.1:6041/opentsdb/v1/put/telnet" return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url + def getTaosdPath(self, dnodeID="dnode1"): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + taosdPath = buildPath + "/../sim/" + dnodeID + tdLog.info("taosdPath: %s" % taosdPath) + return taosdPath + def genTcpParam(self): MaxBytes = 1024*1024 host ='127.0.0.1' diff --git a/tests/ci/Dockerfile b/tests/ci/Dockerfile index 1caa6fea9e..226b0193f6 100644 --- a/tests/ci/Dockerfile +++ b/tests/ci/Dockerfile @@ -7,7 +7,7 @@ RUN apt-get install -y locales psmisc sudo tree libgeos-dev libgflags2.2 libgfl RUN sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen && locale-gen RUN pip3 config set global.index-url http://admin:123456@192.168.0.212:3141/admin/dev/+simple/ RUN pip3 config set global.trusted-host 192.168.0.212 -RUN pip3 install taospy==2.7.16 taos-ws-py==0.3.5 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog +RUN pip3 install taospy==2.7.21 taos-ws-py==0.3.8 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8 RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py index 9725c36bee..b5c6140481 100644 --- a/tests/develop-test/2-query/table_count_scan.py +++ b/tests/develop-test/2-query/table_count_scan.py @@ -68,7 +68,7 @@ class TDTestCase: for i in range(0, 3): db_name = tdSql.getData(i, 1) if db_name == 'information_schema': - tdSql.checkData(i, 0, 36) + tdSql.checkData(i, 0, 37) tdSql.checkData(i, 2, None) elif db_name == 'performance_schema': tdSql.checkData(i, 0, 5) @@ -81,7 +81,7 @@ class TDTestCase: tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;') tdSql.checkRows(3) - tdSql.checkData(0, 0, 36) + tdSql.checkData(0, 0, 37) tdSql.checkData(0, 1, 'information_schema') tdSql.checkData(0, 2, None) tdSql.checkData(1, 0, 5) @@ -97,7 +97,7 @@ class TDTestCase: tdSql.checkData(1, 1, 'performance_schema') tdSql.checkData(0, 0, 3) tdSql.checkData(0, 1, 'tbl_count') - tdSql.checkData(2, 0, 36) + tdSql.checkData(2, 0, 37) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -110,7 +110,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 44) + tdSql.checkData(0, 0, 45) tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') @@ -193,7 +193,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 36) + tdSql.checkData(3, 0, 37) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -208,7 +208,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - 
tdSql.checkData(3, 0, 36) + tdSql.checkData(3, 0, 37) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -219,7 +219,7 @@ class TDTestCase: tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(2, 0, 36) + tdSql.checkData(2, 0, 37) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -232,7 +232,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 45) + tdSql.checkData(0, 0, 46) tdSql.execute('drop database tbl_count') diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryQps.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryQps.json index 9e75d52a6c..a80c2caceb 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryQps.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryQps.json @@ -20,16 +20,5 @@ "sql": "select last_row(*) from stb00_9 ", "result": "./query_res1.txt" }] - }, - "super_table_query": { - "stblname": "stb1", - "query_interval":20, - "threads": 4, - "sqls": [ - { - "sql": "select last_row(ts) from xxxx", - "result": "./query_res2.txt" - } - ] } } diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryQps1.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryQps1.json new file mode 100644 index 0000000000..fe72004e47 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryQps1.json @@ -0,0 +1,22 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 1, + "super_table_query": { + "stblname": "stb1", + "query_interval":20, + "threads": 4, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryRestful.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryRestful.json index 5de560fd21..6cb83bc2e1 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryRestful.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryRestful.json @@ -22,17 +22,6 @@ "result": "./query_res1.txt" } ] - }, - "super_table_query": { - "stblname": "stb1", - "query_interval": 1, - "threads": 3, - "sqls": [ - { - "sql": "select last_row(ts) from xxxx", - "result": "./query_res2.txt" - } - ] } } diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryRestful1.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryRestful1.json new file mode 100644 index 0000000000..54d2589ce9 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryRestful1.json @@ -0,0 +1,24 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "query_mode": "rest", + "super_table_query": { + "stblname": "stb1", + "query_interval": 1, + "threads": 3, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryTaosc.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryTaosc.json index 9ce4237660..08a63803bd 100644 --- 
a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryTaosc.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryTaosc.json @@ -22,16 +22,5 @@ "result": "./query_res1.txt" } ] - }, - "super_table_query": { - "stblname": "stb1", - "query_interval": 1, - "threads": 3, - "sqls": [ - { - "sql": "select last_row(ts) from xxxx", - "result": "./query_res2.txt" - } - ] } } diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/queryTaosc1.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryTaosc1.json new file mode 100644 index 0000000000..a3caa1c5e8 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/queryTaosc1.json @@ -0,0 +1,23 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "query_mode": "taosc", + "super_table_query": { + "stblname": "stb1", + "query_interval": 1, + "threads": 3, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json index 459e496f0b..817d733202 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json @@ -14,14 +14,5 @@ "sql": "select count(*) from db.stb", "result": "rest_query_specified" }] - }, - "super_table_query": { - "stblname": "stb", - "sqls": [ - { - "sql": "select count(*) from xxxx", - "result": "rest_query_super" - } - ] } } \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query1.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query1.json new file mode 100644 index 0000000000..e09112737e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query1.json @@ -0,0 +1,18 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_mode": "rest", + "connection_pool_size": 10, + "response_buffer": 10000, + "super_table_query": { + "stblname": "stb", + "sqls": [ + { + "sql": "select count(*) from xxxx", + "result": "rest_query_super" + } + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json index c8ff2e9275..2cf8f648a6 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json @@ -18,16 +18,5 @@ "sql": "select count(*) from db.stb", "result": "taosc_query_specified" }] - }, - "super_table_query": { - "stblname": "stb", - "query_interval": 1, - "concurrent": 1, - "sqls": [ - { - "sql": "select count(*) from xxxx", - "result": "taosc_query_super" - } - ] } } \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query1.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query1.json new file mode 100644 index 0000000000..90f25a4b4e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query1.json @@ -0,0 +1,23 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 1, 
+ "reset_query_cache": "yes", + "super_table_query": { + "stblname": "stb", + "query_interval": 1, + "concurrent": 1, + "sqls": [ + { + "sql": "select count(*) from xxxx", + "result": "taosc_query_super" + } + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py index f300e65203..118468ad5a 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py @@ -74,6 +74,9 @@ class TDTestCase: cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_query.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_query1.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) with open("%s" % "taosc_query_specified-0", 'r+') as f1: for line in f1.readlines(): queryTaosc = line.strip().split()[0] @@ -84,9 +87,13 @@ class TDTestCase: queryTaosc = line.strip().split()[0] assert queryTaosc == '1', "result is %s != expect: 1" % queryTaosc + # split two cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_query.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_query1.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) times = 0 with open("rest_query_super-0", 'r+') as f1: diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py b/tests/develop-test/5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py index 91ebf1b1df..433ea17541 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py @@ -118,6 +118,8 @@ class TDTestCase: # taosc query: query specified table and query super table os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertdata.json" % binPath) os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryTaosc.json" % binPath) + # forbid parallel spec query with super query + os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryTaosc1.json" % binPath) os.system("cat query_res0.txt* > all_query_res0_taosc.txt") os.system("cat query_res1.txt* > all_query_res1_taosc.txt") os.system("cat query_res2.txt* > all_query_res2_taosc.txt") @@ -144,6 +146,7 @@ class TDTestCase: # use restful api to query os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertrestdata.json" % binPath) os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryRestful.json" % binPath) + os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryRestful1.json" % binPath) os.system("cat query_res0.txt* > all_query_res0_rest.txt") os.system("cat query_res1.txt* > all_query_res1_rest.txt") os.system("cat query_res2.txt* > all_query_res2_rest.txt") @@ -191,6 +194,8 @@ class TDTestCase: os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertdata.json" % binPath) exceptcode = os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryQps.json" % binPath) assert exceptcode == 0 + exceptcode = os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryQps1.json" % binPath) + assert exceptcode == 0 # 2021.02.09 need modify taosBenchmakr code # use illegal or out of range parameters query json file diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index ebec0ad38e..637894d793 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -44,6 +44,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f 
cluster/test_drop_table_by_uid.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f cluster/incSnapshot.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f cluster/clusterBasic.py -N 5 +,,y,army,./pytest.sh python3 ./test.py -f cluster/tsdbSnapshot.py -N 3 -M 3 ,,y,army,./pytest.sh python3 ./test.py -f query/query_basic.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f query/accuracy/test_query_accuracy.py ,,y,army,./pytest.sh python3 ./test.py -f query/accuracy/test_ts5400.py @@ -367,6 +368,7 @@ ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-column-false.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-db.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-db-false.py -N 3 -n 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/walRemoveLog.py -N 3 ,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeReplicate.py -M 3 -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-19201.py @@ -426,6 +428,9 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/test_hot_refresh_configurations.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/subscribe_stream_privilege.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/empty_identifier.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show_transaction_detail.py -N 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/kill_balance_leader.py -N 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/kill_restore_dnode.py -N 5 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/persisit_config.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/qmemCtrl.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/compact_vgroups.py diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh index c3b3b993e6..b72a7cacca 100755 --- a/tests/parallel_test/run_case.sh +++ b/tests/parallel_test/run_case.sh @@ -76,7 +76,7 @@ ulimit -c unlimited md5sum /usr/lib/libtaos.so.1 md5sum /home/TDinternal/debug/build/lib/libtaos.so -#get python connector and update: taospy 2.7.16 taos-ws-py 0.3.5 +#get python connector and update: taospy and taos-ws-py to latest pip3 install taospy==2.7.21 pip3 install taos-ws-py==0.3.8 $TIMEOUT_CMD $cmd diff --git a/tests/pytest/util/tserror.py b/tests/pytest/util/tserror.py index 35d74153c3..0d88d78e80 100644 --- a/tests/pytest/util/tserror.py +++ b/tests/pytest/util/tserror.py @@ -7,7 +7,11 @@ TAOS_DEF_ERROR_CODE = ctypes.c_int32(0x80000000).value TSDB_CODE_MND_FUNC_NOT_EXIST = (TAOS_DEF_ERROR_CODE | 0x0374) +TSDB_CODE_TSC_INVALID_OPERATION = (TAOS_DEF_ERROR_CODE | 0x0200) + TSDB_CODE_UDF_FUNC_EXEC_FAILURE = (TAOS_DEF_ERROR_CODE | 0x290A) TSDB_CODE_TSC_INTERNAL_ERROR = (TAOS_DEF_ERROR_CODE | 0x02FF) + +TSDB_CODE_PAR_SYNTAX_ERROR = (TAOS_DEF_ERROR_CODE | 0x2600) diff --git a/tests/script/repair/repair_tsdb_data.py b/tests/script/repair/repair_tsdb_data.py new file mode 100644 index 0000000000..d8aad23802 --- /dev/null +++ b/tests/script/repair/repair_tsdb_data.py @@ -0,0 +1,46 @@ +import json +import sys +import shutil +import time +import os +import argparse + +def main(): + parser = argparse.ArgumentParser(description='Repair TSDB data by removing specified fid.') + parser.add_argument('fid', type=int, help='The fid to be removed') + parser.add_argument('file_path', nargs='?', default='current.json', help='The path to the JSON file (default: current.json)') + args = parser.parse_args() + + target_fid = args.fid + file_path = 
args.file_path + + # Read file content + with open(file_path, 'r') as file: + data = json.load(file) + + # Check if the fid exists + fid_exists = any(item.get('fid') == target_fid for item in data['fset']) + if not fid_exists: + print(f"Error: fid {target_fid} does not exist in the file.") + sys.exit(1) + + # Generate backup file name + timestamp = time.strftime("%Y%m%d%H%M%S") + parent_directory = os.path.dirname(os.path.dirname(file_path)) + backup_file_path = os.path.join(parent_directory, f"current.json.{timestamp}") + + # Backup file + shutil.copy(file_path, backup_file_path) + print(f"Backup created: {backup_file_path}") + + # Remove objects with the specified fid from the fset list + data['fset'] = [item for item in data['fset'] if item.get('fid') != target_fid] + + # Write the updated content back to the file, preserving the original format + with open(file_path, 'w') as file: + json.dump(data, file, separators=(',', ':'), ensure_ascii=False) + + print(f"Removed content with fid {target_fid}.") + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim index f8c1d6a986..c6c979e97c 100644 --- a/tests/script/tsim/query/sys_tbname.sim +++ b/tests/script/tsim/query/sys_tbname.sim @@ -58,7 +58,7 @@ endi sql select tbname from information_schema.ins_tables; print $rows $data00 -if $rows != 45 then +if $rows != 46 then return -1 endi if $data00 != @ins_tables@ then diff --git a/tests/script/tsim/query/tableCount.sim b/tests/script/tsim/query/tableCount.sim index 49e9126361..19568b42b6 100644 --- a/tests/script/tsim/query/tableCount.sim +++ b/tests/script/tsim/query/tableCount.sim @@ -53,7 +53,7 @@ sql select stable_name,count(table_name) from information_schema.ins_tables grou if $rows != 3 then return -1 endi -if $data01 != 42 then +if $data01 != 43 then return -1 endi if $data11 != 10 then @@ -72,7 +72,7 @@ endi if $data11 != 5 then return -1 endi -if $data21 != 36 then +if $data21 != 37 then return -1 endi if $data31 != 5 then @@ -97,7 +97,7 @@ endi if $data42 != 3 then return -1 endi -if $data52 != 36 then +if $data52 != 37 then return -1 endi if $data62 != 5 then diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index d4a1aa938d..390bf3d9dd 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -61,7 +61,7 @@ class TDTestCase: self.ins_list = ['ins_dnodes','ins_mnodes','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ 'ins_indexes','ins_stables','ins_tables','ins_tags','ins_columns','ins_users','ins_grants','ins_vgroups','ins_configs','ins_dnode_variables',\ 'ins_topics','ins_subscriptions','ins_streams','ins_stream_tasks','ins_vnodes','ins_user_privileges','ins_views', - 'ins_compacts', 'ins_compact_details', 'ins_grants_full','ins_grants_logs', 'ins_machines', 'ins_arbgroups', 'ins_tsmas', "ins_encryptions", "ins_anodes", "ins_anodes_full", "ins_disk_usagea", "ins_filesets"] + 'ins_compacts', 'ins_compact_details', 'ins_grants_full','ins_grants_logs', 'ins_machines', 'ins_arbgroups', 'ins_tsmas', "ins_encryptions", "ins_anodes", "ins_anodes_full", "ins_disk_usagea", "ins_filesets", "ins_transaction_details"] self.perf_list = ['perf_connections','perf_queries','perf_consumers','perf_trans','perf_apps'] def insert_data(self,column_dict,tbname,row_num): insert_sql = 
self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str) @@ -222,10 +222,10 @@ class TDTestCase: tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'") tdLog.info(len(tdSql.queryResult)) - tdSql.checkEqual(True, len(tdSql.queryResult) in range(306, 307)) + tdSql.checkEqual(True, len(tdSql.queryResult) in range(312, 313)) tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'") - tdSql.checkEqual(60, len(tdSql.queryResult)) + tdSql.checkEqual(61, len(tdSql.queryResult)) def ins_dnodes_check(self): tdSql.execute('drop database if exists db2') diff --git a/tests/system-test/0-others/kill_balance_leader.py b/tests/system-test/0-others/kill_balance_leader.py new file mode 100644 index 0000000000..be86336661 --- /dev/null +++ b/tests/system-test/0-others/kill_balance_leader.py @@ -0,0 +1,64 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from util.log import * +from util.cases import * +from util.dnodes import * +from util.sql import * + + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + tdLog.debug(f"start to init {__file__}") + self.replicaVar = int(replicaVar) + tdSql.init(conn.cursor(), logSql) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + tdSql.execute('CREATE DATABASE db vgroups 160 replica 3;') + + tdSql.execute('balance vgroup leader') + + sql ="show transactions;" + rows = tdSql.query(sql) + + if rows > 0: + tranId = tdSql.getData(0, 0) + tdLog.info('kill transaction %d'%tranId) + tdSql.execute('kill transaction %d'%tranId, queryTimes=1 ) + + if self.waitTransactionZero() is False: + tdLog.exit(f"{sql} transaction not finished") + return False + + def waitTransactionZero(self, seconds = 300, interval = 1): + # wait end + for i in range(seconds): + sql ="show transactions;" + rows = tdSql.query(sql) + if rows == 0: + tdLog.info("transaction count became zero.") + return True + #tdLog.info(f"i={i} wait ...") + time.sleep(interval) + + return False + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/0-others/show_transaction_detail.py b/tests/system-test/0-others/show_transaction_detail.py new file mode 100644 index 0000000000..ba588f8f01 --- /dev/null +++ b/tests/system-test/0-others/show_transaction_detail.py @@ -0,0 +1,115 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+from util.log import *
+from util.cases import *
+from util.dnodes import *
+from util.sql import *
+from util.cluster import *
+import threading
+
+class TDTestCase:
+    def init(self, conn, logSql, replicaVar=1):
+        tdLog.debug(f"start to init {__file__}")
+        self.replicaVar = int(replicaVar)
+        tdSql.init(conn.cursor(), logSql)
+        self.dnodes = cluster.dnodes
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+    def run(self):
+        tdLog.debug(f"start to execute {__file__}")
+
+        tdLog.info("CREATE DATABASE db1 vgroups 16 replica 1;")
+        tdSql.execute('CREATE DATABASE db1 vgroups 16 replica 1;')
+
+        if self.waitTransactionZero() is False:
+            tdLog.exit("create database db1 transaction not finished")
+            return False
+
+        newTdSql1=tdCom.newTdSql()
+        t1 = threading.Thread(target=self.alterDbThread, args=('', newTdSql1))
+
+        newTdSql2=tdCom.newTdSql()
+        t2 = threading.Thread(target=self.createDbThread, args=('', newTdSql2))
+
+        t1.start()
+        t2.start()
+
+        #time.sleep(1)
+
+        dnode = self.dnodes[2]
+
+        # stop dnode
+        tdLog.info(f"stop dnode 2")
+        dnode.stoptaosd()
+
+        tdLog.info(f"show transactions;")
+        rows = tdSql.query("show transactions;", queryTimes=1)
+        tdLog.info(f"rows={rows}")
+        if rows > 0:
+            tranId1 = tdSql.getData(0, 0)
+            oper1 = tdSql.getData(0, 3)
+            tdLog.info(f"show transaction {tranId1}, {oper1}")
+
+            #tranId2 = tdSql.getData(1, 0)
+            #oper2 = tdSql.getData(1, 3)
+            #tdLog.info(f"show transaction {tranId2}, {oper2}")
+
+            rows = tdSql.query(f"show transaction {tranId1}", queryTimes=1)
+            if rows != 120 and rows != 176:
+                tdLog.exit(f"show transaction detail error, rows={rows}")
+                return False
+
+            #rows = tdSql.query(f"show transaction {tranId2}", queryTimes=1)
+            #if rows != 176 and rows != 120:
+            #    tdLog.exit(f"show transaction detail error, rows={rows}")
+            #    return False
+
+            tdLog.info(f"select * from ins_transaction_details")
+            rows = tdSql.query(f"select * from information_schema.ins_transaction_details", queryTimes=1)
+
+            #if rows != 296:
+            if rows != 176:
+                tdLog.exit(f"show transaction detail error, rows={rows}")
+                return False
+
+        dnode.starttaosd()
+
+        t1.join()
+        t2.join()
+
+    def createDbThread(self, sql, newTdSql):
+        tdLog.info("CREATE DATABASE db2 vgroups 40 replica 3;")
+        newTdSql.execute('CREATE DATABASE db2 vgroups 40 replica 3;', queryTimes=1)
+
+    def alterDbThread(self, sql, newTdSql):
+        tdLog.info("alter DATABASE db1 replica 3;")
+        newTdSql.execute('alter DATABASE db1 replica 3;', queryTimes=1)
+
+    def waitTransactionZero(self, seconds = 300, interval = 1):
+        # wait end
+        for i in range(seconds):
+            sql ="show transactions;"
+            rows = tdSql.query(sql)
+            if rows == 0:
+                tdLog.info("transaction count became zero.")
+                return True
+            #tdLog.info(f"i={i} wait ...")
+            time.sleep(interval)
+
+        return False
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py
index 12cbdea484..e230798cac 100644
--- a/tests/system-test/2-query/join.py
+++ b/tests/system-test/2-query/join.py
@@ -6,6 +6,7 @@ from util.log import *
 from util.sql import *
 from util.cases import *
 from util.dnodes import *
+from util.tserror import *
 
 PRIMARY_COL = "ts"
 
@@
-352,6 +353,70 @@ class TDTestCase: tdSql.execute( f"insert into {dbname}.nt1 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )" ) tdSql.execute( f"insert into {dbname}.nt1 values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )" ) + def join_semantic_test(self, dbname=DBNAME): + tdSql.query("select ct1.c_int from db.ct1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.error("select ct1.c_int from db.ct1 as ct1 semi join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.error("select ct1.c_int from db.ct1 as ct1 anti join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.error("select ct1.c_int from db.ct1 as ct1 outer join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.error("select ct1.c_int from db.ct1 as ct1 asof join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.error("select ct1.c_int from db.ct1 as ct1 window join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + + tdSql.query("select ct1.c_int from db.ct1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 left join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 left semi join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 left anti join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(0) + tdSql.query("select ct1.c_int from db.ct1 as ct1 left outer join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 left asof join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.error("select ct1.c_int from db.ct1 as ct1 left window join db1.ct1 as cy1 on ct1.ts=cy1.ts") + + tdSql.query("select ct1.c_int from db.ct1 as ct1 right join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 right semi join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 right anti join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(0) + tdSql.query("select ct1.c_int from db.ct1 as ct1 right outer join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 right asof join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.error("select ct1.c_int from db.ct1 as ct1 right window join db1.ct1 as cy1 on ct1.ts=cy1.ts") + + tdSql.query("select ct1.c_int from db.ct1 as ct1 full join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.checkRows(self.rows) + + + tdSql.query("select ct1.c_int from db.ct1 as ct1 full join db1.ct1 as cy1 on ct1.ts=cy1.ts join db1.ct1 as cy2 on ct1.ts=cy2.ts") + tdSql.checkRows(self.rows) + tdSql.error("select ct1.c_int from db.ct1 as ct1 full semi join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.error("select ct1.c_int from db.ct1 as ct1 full anti join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.query("select ct1.c_int from db.ct1 as ct1 full outer join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.query("select * from db.ct1 join db.ct2 join db.ct3 on ct2.ts=ct3.ts on ct1.ts=ct2.ts") + tdSql.checkRows(0) + tdSql.execute(f'create table db.ct1_2 using db.stb1 tags ( 102 )') + 
tdSql.execute(f'create table db.ct1_3 using db.stb1 tags ( 103 )') + tdSql.execute(f'insert into db.ct1_2 (select * from db.ct1)') + tdSql.execute(f'insert into db.ct1_3 (select * from db.ct1)') + tdSql.query("select * from db.ct1 join db.ct1_2 join db.ct1_3 on ct1_2.ts=ct1_3.ts on ct1.ts=ct1_2.ts") + tdSql.checkRows(self.rows) + tdSql.error("select ct1.c_int from db.ct1 as ct1 full asof join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + tdSql.error("select ct1.c_int from db.ct1 as ct1 full window join db1.ct1 as cy1 on ct1.ts=cy1.ts", TSDB_CODE_PAR_SYNTAX_ERROR) + + tdSql.query("select ct1.c_int from db.ct1 as ct1 left join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + tdSql.query("select ct1.c_int from db.ct1 as ct1 right join db1.ct1 as cy1 on ct1.ts=cy1.ts") + tdSql.checkRows(self.rows) + + tdSql.execute("drop table db.ct1_2") + tdSql.execute("drop table db.ct1_3") + def ts5863(self, dbname=DBNAME): tdSql.execute(f"CREATE STABLE {dbname}.`st_quality` (`ts` TIMESTAMP, `quality` INT, `val` NCHAR(64), `rts` TIMESTAMP) \ TAGS (`cx` VARCHAR(10), `gyd` VARCHAR(10), `gx` VARCHAR(10), `lx` VARCHAR(10)) SMA(`ts`,`quality`,`val`)") @@ -469,6 +534,8 @@ class TDTestCase: tdSql.execute(f"use {dbname1}") self.__create_tb(dbname=dbname1) self.__insert_data(dbname=dbname1) + + self.join_semantic_test({dbname1}) tdSql.query("select ct1.c_int from db.ct1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts") tdSql.checkRows(self.rows) diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index dd510459b6..40d752e40e 100644 --- a/tests/system-test/2-query/last_row.py +++ b/tests/system-test/2-query/last_row.py @@ -969,6 +969,11 @@ class TDTestCase: tdSql.checkData(0, 0, 6) tdSql.checkData(0, 1, 1734574929004) tdSql.checkData(0, 2, 4) + tdSql.query(f'select count(1), last_row(ts), last_row(c0) from (select * from (select * from {dbname}.meters))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 6) + tdSql.checkData(0, 1, 1734574929004) + tdSql.checkData(0, 2, 4) tdSql.query(f'select tbname, last_row(ts), last_row(c0) from (select *, tbname from {dbname}.meters) group by tbname order by tbname') tdSql.checkRows(2) tdSql.checkData(0, 0, 'd0') @@ -977,6 +982,14 @@ class TDTestCase: tdSql.checkData(1, 0, 'd1') tdSql.checkData(1, 1, 1734574929000) tdSql.checkData(1, 2, 1) + tdSql.query(f'select tbname, last_row(ts), last_row(c0) from (select * from (select *, tbname from {dbname}.meters)) group by tbname order by tbname') + tdSql.checkRows(2) + tdSql.checkData(0, 0, 'd0') + tdSql.checkData(0, 1, 1734574929004) + tdSql.checkData(0, 2, 4) + tdSql.checkData(1, 0, 'd1') + tdSql.checkData(1, 1, 1734574929000) + tdSql.checkData(1, 2, 1) tdSql.query(f'select count(1), last_row(ts), last_row(c0) from (select * from {dbname}.d0)') tdSql.checkRows(1) tdSql.checkData(0, 0, 5) @@ -988,6 +1001,43 @@ class TDTestCase: tdSql.checkData(0, 1, 1734574929004) tdSql.checkData(0, 2, 4) + tdSql.execute(f'insert into {dbname}.d0 values(1734574930000, 1, 1, "c2", true)') + tdSql.execute(f'insert into {dbname}.d0 values(1734574931000, 1, 1, "c2", true)') + tdSql.execute(f'insert into {dbname}.d0 values(1734574932000, 1, 1, "c2", true)') + tdSql.query(f'select last_row(_wstart) from (select _wstart, _wend, count(1) from {dbname}.meters interval(1s))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574932000) + tdSql.query(f'select last_row(_wstart), count(1) from (select _wstart, _wend, count(1) from {dbname}.meters interval(1s))') + tdSql.checkRows(1) + tdSql.checkData(0, 
0, 1734574932000) + tdSql.checkData(0, 1, 4) + tdSql.query(f'select last_row(_wstart) from (select _wstart, _wend, count(1) from {dbname}.meters partition by tbname interval(1s))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574932000) + tdSql.query(f'select last_row(_wstart), count(1) from (select _wstart, _wend, count(1) from {dbname}.meters partition by tbname interval(1s))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574932000) + tdSql.checkData(0, 1, 5) + tdSql.query(f'select first(_wstart), count(1) from (select _wstart, _wend, count(1) from {dbname}.meters interval(1s))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574929000) + tdSql.checkData(0, 1, 4) + + tdSql.query(f'select last_row(_wstart) from (select * from (select _wstart, _wend, count(1) from {dbname}.meters interval(1s)))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574932000) + tdSql.query(f'select last_row(_wstart), count(1) from (select * from (select _wstart, _wend, count(1) from {dbname}.meters interval(1s)))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574932000) + tdSql.checkData(0, 1, 4) + tdSql.query(f'select last_row(_wstart) from (select * from (select _wstart, _wend, count(1) from {dbname}.meters partition by tbname interval(1s)))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574932000) + tdSql.query(f'select last_row(_wstart), count(1) from (select * from (select _wstart, _wend, count(1) from {dbname}.meters partition by tbname interval(1s)))') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1734574932000) + tdSql.checkData(0, 1, 5) + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring # tdSql.prepare() @@ -1023,7 +1073,6 @@ class TDTestCase: self.lastrow_in_subquery("db1") - def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 380b7879c4..1abe2b1a65 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -441,7 +441,7 @@ class TDTestCase: tdSql.checkRows(2) sql = "select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, stable_name `TABLE_NAME`, 'TABLE' `TABLE_TYPE`, table_comment `REMARKS` from information_schema.ins_stables union all select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, table_name `TABLE_NAME`, case when `type`='SYSTEM_TABLE' then 'TABLE' when `type`='NORMAL_TABLE' then 'TABLE' when `type`='CHILD_TABLE' then 'TABLE' else 'UNKNOWN' end `TABLE_TYPE`, table_comment `REMARKS` from information_schema.ins_tables union all select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, view_name `TABLE_NAME`, 'VIEW' `TABLE_TYPE`, NULL `REMARKS` from information_schema.ins_views" tdSql.query(sql, queryTimes=1) - tdSql.checkRows(49) + tdSql.checkRows(50) sql = "select null union select null" tdSql.query(sql, queryTimes=1) diff --git a/tests/system-test/3-enterprise/restore/kill_restore_dnode.py b/tests/system-test/3-enterprise/restore/kill_restore_dnode.py new file mode 100644 index 0000000000..d29a11f159 --- /dev/null +++ b/tests/system-test/3-enterprise/restore/kill_restore_dnode.py @@ -0,0 +1,90 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+sys.path.append("./3-enterprise/restore")
+from restoreBasic import *
+from util.common import tdCom
+import threading
+
+
+class TDTestCase:
+    # init
+    def init(self, conn, logSql, replicaVar=1):
+        tdLog.debug("start to execute %s" % __file__)
+        self.basic = RestoreBasic()
+        self.basic.init(conn, logSql, replicaVar)
+
+    # run
+    def run(self):
+        self.basic.restore_dnode_prepare(2)
+
+        self.execute()
+
+    def execute(self):
+        newTdSql=tdCom.newTdSql()
+        t0 = threading.Thread(target=self.restoreDnodeThread, args=('', newTdSql))
+        t0.start()
+
+        time.sleep(2)
+        sql ="show transactions;"
+        tdLog.info(sql)
+        rows = tdSql.query(sql)
+
+        if rows > 0:
+            self.basic.stop_dnode(2)
+
+            tranId = tdSql.getData(0, 0)
+
+            tdLog.info('show transaction %d'%tranId)
+            rows=tdSql.query('show transaction %d'%tranId, queryTimes=1)
+            if rows != 11:
+                tdLog.exit(f"restore transaction detail error, rows={rows}")
+                return False
+
+            tdLog.info('kill transaction %d'%tranId)
+            tdSql.execute('kill transaction %d'%tranId, queryTimes=1 )
+
+            time.sleep(3)
+            sql ="show transactions;"
+            tdLog.info(sql)
+            rows = tdSql.query(sql)
+            if rows > 0:
+                tdLog.info(f"{sql} transaction not finished")
+                return False
+
+            self.basic.restore_dnode_exec(2)
+        else:
+            tdLog.exit(f"{sql} no transaction exist")
+            return False
+
+    def restoreDnodeThread(self, p, newTdSql):
+        sleep(1)
+
+        sql = f"restore dnode 2"
+        tdLog.info(sql)
+        newTdSql.error(sql, expectErrInfo="Wrong transaction execution context")
+        tdLog.info(f"{sql} finished")
+
+    # stop
+    def stop(self):
+        self.basic.stop()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/3-enterprise/restore/restoreBasic.py b/tests/system-test/3-enterprise/restore/restoreBasic.py
index 77fa606b9c..74cf572018 100644
--- a/tests/system-test/3-enterprise/restore/restoreBasic.py
+++ b/tests/system-test/3-enterprise/restore/restoreBasic.py
@@ -143,6 +143,34 @@ class RestoreBasic:
         tdSql.execute(sql)
         self.check_corrent()
 
+    def restore_dnode_prepare(self, index):
+        tdLog.info(f"start restore dnode {index}")
+        dnode = self.dnodes[index - 1]
+
+        # stop dnode
+        tdLog.info(f"stop dnode {index}")
+        dnode.stoptaosd()
+
+        # remove dnode folder
+        try:
+            shutil.rmtree(dnode.dataDir)
+            tdLog.info(f"delete dir {dnode.dataDir} successful")
+        except OSError as x:
+            tdLog.exit(f"remove path {dnode.dataDir} error : {x.strerror}")
+
+        dnode.starttaosd()
+
+    def restore_dnode_exec(self, index):
+        # exec restore
+        sql = f"restore dnode {index}"
+        tdLog.info(sql)
+        tdSql.execute(sql)
+        self.check_corrent()
+
+    def stop_dnode(self, index):
+        dnode = self.dnodes[index - 1]
+
+        dnode.starttaosd()
     # restore vnode
     def restore_vnode(self, index):
         tdLog.info(f"start restore vnode on dnode {index}")
diff --git a/tests/system-test/7-tmq/walRemoveLog.py b/tests/system-test/7-tmq/walRemoveLog.py
new file mode 100644
index 0000000000..3a69a31d14
--- /dev/null
+++ b/tests/system-test/7-tmq/walRemoveLog.py
@@ -0,0 +1,188 @@
+import taos
+import sys
+import time
+import socket
+import os
+import platform +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.cluster import * +from taos.tmq import * +from taos import * + +sys.path.append("./7-tmq") +from tmqCommon import * + + +class TDTestCase: + global cmd_list + cmd_list = [] + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def prepareData(self): + tdLog.info("create database db_repl_1 and insert data") + cmd1 = "taosBenchmark -y -a 1 -n 100 -t 100 -v 1 -d %s" % ("db_repl_1") + os.system(cmd1) + + tdLog.info("create database db_repl_2 and insert data") + cmd2 = "taosBenchmark -y -a 2 -n 100 -t 100 -v 1 -d %s" % ("db_repl_2") + os.system(cmd2) + + tdLog.info("create database db_repl_3 and insert data") + cmd3 = "taosBenchmark -y -a 3 -n 100 -t 100 -v 1 -d %s" % ("db_repl_3") + os.system(cmd3) + + def insertData(self): + tdLog.info("insert one record into db_repl_*.d0") + tdSql.execute("insert into db_repl_1.d0 values(now(),6.8358979,250,148.5000000);") + tdSql.execute("insert into db_repl_2.d0 values(now(),6.8358979,250,148.5000000);") + tdSql.execute("insert into db_repl_3.d0 values(now(),6.8358979,250,148.5000000);") + + def flushDatabase(self): + tdLog.info("flush database db_repl_1") + tdSql.execute("flush database db_repl_1") + + tdLog.info("flush database db_repl_2") + tdSql.execute("flush database db_repl_2") + + tdLog.info("flush database db_repl_3") + tdSql.execute("flush database db_repl_3") + + def checkDatacount(self,expCount): + tdLog.info("select data count from db_repl_1") + tdSql.query("select count(*) from db_repl_1.meters") + actCount = tdSql.getData(0, 0) + assert actCount == expCount, f"db_repl_1.meters count is {actCount}, expect {expCount}" + + tdLog.info("select data count from db_repl_2") + tdSql.query("select count(*) from db_repl_2.meters") + actCount = tdSql.getData(0, 0) + assert actCount == expCount, f"db_repl_2.meters count is {actCount}, expect {expCount}" + + + tdLog.info("select data count from db_repl_3") + tdSql.query("select count(*) from db_repl_3.meters") + actCount = tdSql.getData(0, 0) + assert actCount == expCount, f"db_repl_3.meters count is {actCount}, expect {expCount}" + + + def collect_rm_wal_cmds(self): + global cmd_list + buildPath = self.getBuildPath() + rowLen = tdSql.query('show vnodes on dnode 1') + for i in range(rowLen): + vgroupId = tdSql.getData(i, 1) + walPath = buildPath + "/../sim/dnode1/data/vnode/vnode{}/wal/*".format(vgroupId) + cmd = "rm -rf %s" % walPath + cmd_list.append(cmd) + + rowLen = tdSql.query('show vnodes on dnode 2') + for i in range(rowLen): + vgroupId = tdSql.getData(i, 1) + walPath = buildPath + "/../sim/dnode2/data/vnode/vnode{}/wal/*".format(vgroupId) + cmd = "rm -rf %s" % walPath + cmd_list.append(cmd) + + rowLen = tdSql.query('show vnodes on dnode 3') + for i in range(rowLen): + vgroupId = tdSql.getData(i, 1) + walPath = buildPath + 
"/../sim/dnode3/data/vnode/vnode{}/wal/*".format(vgroupId) + cmd = "rm -rf %s" % walPath + cmd_list.append(cmd) + + def execute_rm_wal_cmds(self): + for cmd in cmd_list: + print(cmd) + os.system(cmd) + + def run(self): + print("======== run remove wal test ========") + self.prepareData() + self.flushDatabase() + + self.collect_rm_wal_cmds() + tdSql.execute(f'create topic data_repl_1 as select ts,current from db_repl_1.meters') + tdSql.execute(f'create topic data_repl_2 as select ts,current from db_repl_2.meters') + tdSql.execute(f'create topic data_repl_3 as select ts,current from db_repl_3.meters') + + tdDnodes=cluster.dnodes + tdDnodes[0].stoptaosd() + tdDnodes[1].stoptaosd() + tdDnodes[2].stoptaosd() + + + time.sleep(10) + + self.execute_rm_wal_cmds() + + tdDnodes[0].starttaosd() + tdDnodes[1].starttaosd() + tdDnodes[2].starttaosd() + + self.checkDatacount(10000) + self.insertData() + self.checkDatacount(10001) + + consumer_dict = { + "group.id": "g1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "earliest", + } + consumer = Consumer(consumer_dict) + + try: + consumer.subscribe(["data_repl_1", "data_repl_2", "data_repl_3"]) + except TmqError: + tdLog.exit(f"subscribe error") + + cnt = 0 + try: + while True: + res = consumer.poll(1) + print(res) + if not res: + print("cnt:",cnt) + if cnt == 0 or cnt != 3: + tdLog.exit("consume error") + break + val = res.value() + if val is None: + continue + for block in val: + print(block.fetchall(),len(block.fetchall())) + cnt += len(block.fetchall()) + finally: + consumer.close() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase())