From 4237da3c8472c3f1e99c9be475a96750ead789ed Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 16:43:06 +0800 Subject: [PATCH 001/267] enh(query):add xxhash and do some internal refactor. --- include/util/xxhash.h | 328 +++++++++++ source/util/src/thash.c | 6 +- source/util/src/thashutil.c | 10 +- source/util/src/tpagedbuf.c | 4 +- source/util/src/xxhash.c | 1030 +++++++++++++++++++++++++++++++++++ 5 files changed, 1372 insertions(+), 6 deletions(-) create mode 100644 include/util/xxhash.h create mode 100644 source/util/src/xxhash.c diff --git a/include/util/xxhash.h b/include/util/xxhash.h new file mode 100644 index 0000000000..d6bad94335 --- /dev/null +++ b/include/util/xxhash.h @@ -0,0 +1,328 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. + +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. 
+Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* **************************** +* Definitions +******************************/ +#include /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** + * API modifier + ******************************/ +/** XXH_INLINE_ALL (and XXH_PRIVATE_API) + * This is useful to include xxhash functions in `static` mode + * in order to inline them, and remove their symbol from the public list. + * Inlining can offer dramatic performance improvement on small keys. + * Methodology : + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * `xxhash.c` is automatically included. + * It's not useful to compile and link it as a separate module. + */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/*! XXH_NAMESPACE, aka Namespace Emulation : + * + * If you want to include _and expose_ xxHash functions from within your own library, + * but also want to avoid symbol collisions with other libraries which may also include xxHash, + * + * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library + * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * + * Note that no change is required within the calling program as long as it includes `xxhash.h` : + * regular symbol name will be automatically translated by this header. 
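+ *
+ * For illustration (the TD_ prefix here is only an example value), building with
+ *     cc -DXXH_NAMESPACE=TD_ -c xxhash.c
+ * makes the object file export TD_XXH32, TD_XXH64, and so on, while calling code that
+ * includes this header keeps writing XXH32() / XXH64(); the macros below perform the renaming.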
+ */ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 5 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/*-********************************************************************** +* 32-bit hash +************************************************************************/ +typedef unsigned int XXH32_hash_t; + +/*! XXH32() : + Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); + +/*====== Streaming ======*/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +/* + * Streaming functions generate the xxHash of an input provided in multiple segments. + * Note that, for small input, they are slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * XXH state must first be allocated, using XXH*_createState() . + * + * Start a new hash by initializing state with a seed, using XXH*_reset(). + * + * Then, feed the hash state by calling XXH*_update() as many times as necessary. 
+ * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using XXH*_digest(). + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a digest, + * and generate some new hashes later on, by calling again XXH*_digest(). + * + * When done, free XXH state space if it was allocated dynamically. + */ + +/*====== Canonical representation ======*/ + +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); + +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. + * The canonical representation uses human-readable write convention, aka big-endian (large digits first). + * These functions allow transformation of hash result into and from its canonical format. + * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. + */ + + +#ifndef XXH_NO_LONG_LONG +/*-********************************************************************** +* 64-bit hash +************************************************************************/ +typedef unsigned long long XXH64_hash_t; + +/*! XXH64() : + Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). +*/ +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*====== Streaming ======*/ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/*====== Canonical representation ======*/ +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); +#endif /* XXH_NO_LONG_LONG */ + + + +#ifdef XXH_STATIC_LINKING_ONLY + +/* ================================================================================================ + This section contains declarations which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + These declarations should only be used with static linking. + Never use them in association with dynamic linking ! +=================================================================================================== */ + +/* These definitions are only present to allow + * static allocation of XXH state, on stack or in a struct for example. + * Never **ever** use members directly. 
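+ *
+ * A minimal sketch of the static-allocation usage this enables (buf / buf_len stand
+ * for any caller-provided bytes; XXH_STATIC_LINKING_ONLY must be defined before
+ * including xxhash.h so that these struct definitions are visible):
+ *     XXH64_state_t st;                          (on the stack, no XXH64_createState())
+ *     XXH64_reset(&st, 0);
+ *     XXH64_update(&st, buf, buf_len);
+ *     unsigned long long h = XXH64_digest(&st);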
*/ + +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + +struct XXH32_state_s { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; + uint32_t reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +struct XXH64_state_s { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ + +# else + +struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; + unsigned memsize; + unsigned reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ +struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; + unsigned memsize; + unsigned reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ +# endif + +# endif + + +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ +#endif + +#endif /* XXH_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* XXHASH_H_5627135585666179 */ diff --git a/source/util/src/thash.c b/source/util/src/thash.c index e9548613aa..926dc304a4 100644 --- a/source/util/src/thash.c +++ b/source/util/src/thash.c @@ -421,7 +421,11 @@ int32_t taosHashGetDup_m(SHashObj *pHashObj, const void *key, size_t keyLen, voi } void *taosHashGetImpl(SHashObj *pHashObj, const void *key, size_t keyLen, void **d, int32_t *size, bool addRef) { - if (pHashObj == NULL || taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) { + if (pHashObj == NULL || keyLen == 0 || key == NULL) { + return NULL; + } + + if ((atomic_load_64((int64_t *)&pHashObj->size) == 0)) { return NULL; } diff --git a/source/util/src/thashutil.c b/source/util/src/thashutil.c index 59f7d389c2..97e29712e0 100644 --- a/source/util/src/thashutil.c +++ b/source/util/src/thashutil.c @@ -17,6 +17,7 @@ #include "tcompare.h" #include "thash.h" #include "types.h" +#include "xxhash.h" #define ROTL32(x, r) ((x) << (r) | (x) >> (32u - (r))) @@ -49,6 +50,11 @@ uint32_t taosDJB2Hash(const char *key, uint32_t len) { return hash; } +uint32_t xxHash(const char *key, uint32_t len) { + int32_t seed = 0xcc9e2d51; + return XXH32(key, len, seed); +} + uint32_t MurmurHash3_32(const char *key, uint32_t len) { const uint8_t *data = (const uint8_t *)key; const int32_t nblocks = len >> 2u; @@ -192,10 +198,8 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) { fn = taosIntHash_64; break; case TSDB_DATA_TYPE_BINARY: - fn = MurmurHash3_32; - break; case TSDB_DATA_TYPE_NCHAR: - fn = MurmurHash3_32; + fn = xxHash; break; case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index ced5b4f25e..7e7712ec8c 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -407,8 +407,8 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { // allocate 
buf if (availablePage == NULL) { - pi->pData = - taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); // add extract bytes in case of zipped buffer increased. + // add extract bytes in case of zipped buffer increased. + pi->pData = taosMemoryMalloc(getAllocPageSize(pBuf->pageSize)); } else { pi->pData = availablePage; } diff --git a/source/util/src/xxhash.c b/source/util/src/xxhash.c new file mode 100644 index 0000000000..ff28749e31 --- /dev/null +++ b/source/util/src/xxhash.c @@ -0,0 +1,1030 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. 
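+ * For illustration, the choice can also be made on the build line, e.g.
+ *     cc -DXXH_FORCE_MEMORY_ACCESS=2 -c xxhash.c
+ * on a target known to tolerate unaligned loads; the value shown is only an example,
+ * and method 0 remains the safe, portable default.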
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7S__) )) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. + * When this macro is enabled, xxHash actively checks input for null pointer. + * It it is, result for null input pointers is the same as a null-length input. + */ +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +# define XXH_ACCEPT_NULL_INPUT_POINTER 0 +#endif + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independence be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. + * This option has no impact on Little_Endian CPU. + */ +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +# define XXH_FORCE_NATIVE_FORMAT 0 +#endif + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. + * The check costs one initial branch per hash; + * set it to 0 when the input is guaranteed to be aligned, + * or when alignment doesn't matter for performance. + */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/*! Modify the local functions below should you wish to use some other memory routines +* for malloc(), free() */ +#include +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/*! 
and for memcpy() */ +#include +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#include /* assert */ + +#define XXH_STATIC_LINKING_ONLY +#include "xxhash.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# define FORCE_INLINE static __forceinline +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************* +* Basic Types +***************************************/ +#ifndef MEM_MODULE +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; +# else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; +# endif +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; } __attribute__((packed)) unalign; +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN +static int XXH_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +/* mix all bits */ +static U32 XXH32_avalanche(U32 h32) +{ + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + return(h32); +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +static U32 +XXH32_finalize(U32 h32, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) + +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1 \ + h32 += (*p++) * PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + +#define PROCESS4 \ + h32 += XXH_get32bits(p) * PRIME32_3; \ + p+=4; \ + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + + switch(len&15) /* or switch(bEnd - p) */ + { + case 12: PROCESS4; + /* fallthrough */ + case 8: PROCESS4; + /* fallthrough */ + case 4: PROCESS4; + return XXH32_avalanche(h32); + + case 13: PROCESS4; + /* fallthrough */ + case 9: PROCESS4; + /* fallthrough */ + case 5: PROCESS4; + PROCESS1; + return XXH32_avalanche(h32); + + case 14: PROCESS4; + /* fallthrough */ + case 10: PROCESS4; + /* fallthrough */ + case 6: PROCESS4; + PROCESS1; + PROCESS1; + return XXH32_avalanche(h32); + + case 15: PROCESS4; + /* fallthrough */ + case 11: PROCESS4; + /* fallthrough */ + case 7: PROCESS4; + /* fallthrough */ + case 3: PROCESS1; + /* fallthrough */ + case 2: PROCESS1; + /* fallthrough */ + case 1: PROCESS1; + /* fallthrough */ + case 0: return XXH32_avalanche(h32); + } + assert(0); + return h32; /* reaching this point is deemed impossible */ +} + + +FORCE_INLINE U32 +XXH32_endian_align(const void* input, size_t len, U32 seed, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) { + const BYTE* const limit = bEnd - 15; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32)len; + + return XXH32_finalize(h32, p, len&15, endian, align); +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, 
seed); + XXH32_update(&state, input, len); + return XXH32_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + + +/*====== Hash streaming ======*/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + + +FORCE_INLINE XXH_errorcode +XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH32_update 
(XXH32_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + +FORCE_INLINE U32 +XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical representation ======*/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, remaining comparable across different systems. +*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ + +/*====== Memory access ======*/ + +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint64_t U64; +# else + /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ + typedef unsigned long long U64; +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; +static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/*====== xxh64 ======*/ + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +static U64 XXH64_avalanche(U64 h64) +{ + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +static U64 +XXH64_finalize(U64 h64, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1_64 \ + h64 ^= (*p++) * PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + +#define PROCESS4_64 \ + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ + p+=4; \ + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + +#define PROCESS8_64 { \ + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ + p+=8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ +} + + switch(len&31) { + case 24: PROCESS8_64; + /* fallthrough */ + case 16: PROCESS8_64; + /* fallthrough */ + case 8: PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: PROCESS8_64; + /* fallthrough */ + case 20: PROCESS8_64; + /* fallthrough */ + case 12: PROCESS8_64; + /* fallthrough */ + case 4: PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: PROCESS8_64; + /* fallthrough */ + case 17: PROCESS8_64; + /* fallthrough */ + case 9: PROCESS8_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 29: PROCESS8_64; + /* fallthrough */ + case 21: PROCESS8_64; + /* fallthrough */ + case 13: PROCESS8_64; + /* fallthrough */ + case 5: PROCESS4_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: PROCESS8_64; + /* fallthrough */ + case 18: PROCESS8_64; + /* fallthrough */ + case 10: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + return 
XXH64_avalanche(h64); + + case 30: PROCESS8_64; + /* fallthrough */ + case 22: PROCESS8_64; + /* fallthrough */ + case 14: PROCESS8_64; + /* fallthrough */ + case 6: PROCESS4_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 27: PROCESS8_64; + /* fallthrough */ + case 19: PROCESS8_64; + /* fallthrough */ + case 11: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: PROCESS8_64; + /* fallthrough */ + case 23: PROCESS8_64; + /* fallthrough */ + case 15: PROCESS8_64; + /* fallthrough */ + case 7: PROCESS4_64; + /* fallthrough */ + case 3: PROCESS1_64; + /* fallthrough */ + case 2: PROCESS1_64; + /* fallthrough */ + case 1: PROCESS1_64; + /* fallthrough */ + case 0: return XXH64_avalanche(h64); + } + + /* impossible to reach */ + assert(0); + return 0; /* unreachable, but some compilers complain without it */ +} + +FORCE_INLINE U64 +XXH64_endian_align(const void* input, size_t len, U64 seed, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U64 h64; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + return XXH64_finalize(h64, p, len, endian, align); +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, input, len); + return XXH64_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + +/*====== Hash Streaming ======*/ + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + 
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + +FORCE_INLINE XXH_errorcode +XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 /*seed*/ + PRIME64_5; + } + + h64 += (U64) state->total_len; + + return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned); +} + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical 
representation ======*/ + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#endif /* XXH_NO_LONG_LONG */ From fbf728f8079c0c741cabd2303f29f966403b27af Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 17:02:56 +0800 Subject: [PATCH 002/267] enh(query):init result row size. --- source/libs/executor/src/executorimpl.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index ecd6f382f4..56f1e890b7 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -172,6 +172,11 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num); pResultRow->pageId = pageId; pResultRow->offset = (int32_t)pData->num; + pResultRow->numOfRows = 0; + pResultRow->closed = false; + pResultRow->startInterp = false; + pResultRow->endInterp = false; + *currentPageId = pageId; pData->num += interBufSize; From c7020e0b82b246c46dfe4bfc655285958d51f12b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 17:08:40 +0800 Subject: [PATCH 003/267] fix(query): add error check. --- source/libs/executor/src/executorimpl.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 56f1e890b7..b7d3aa83e9 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1159,9 +1159,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS } if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) { - ASSERT(pBlock->info.rows > 0); releaseBufPage(pBuf, page); - break; + + if (pBlock->info.rows <= 0 || pRow->numOfRows > pBlock->info.capacity) { + qError("error in copy data to ssdatablock, existed rows in block:%d, rows in pRow:%d, capacity:%d, %s", + pBlock->info.rows, pRow->numOfRows, pBlock->info.capacity, GET_TASKID(pTaskInfo)); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR); + } else { + break; + } } pGroupResInfo->index += 1; From c6512b09e87a763c15900ba6353173e9532224c3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 17:29:02 +0800 Subject: [PATCH 004/267] fix(query): memset the resultrow info --- source/libs/executor/src/executorimpl.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index b7d3aa83e9..593c29041a 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -170,15 +170,12 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i // set the number of rows in current disk page SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num); + + memset((char*) pResultRow, 0, interBufSize); pResultRow->pageId = pageId; pResultRow->offset = (int32_t)pData->num; - pResultRow->numOfRows = 0; - pResultRow->closed = false; - pResultRow->startInterp = false; - pResultRow->endInterp = false; *currentPageId = pageId; - pData->num += interBufSize; return pResultRow; } From 
9cc248ec57e6bad4b720cd574aec244fc96b9843 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 18:10:38 +0800 Subject: [PATCH 005/267] fix(query): update the hash function for varchar data. --- source/util/src/thashutil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/thashutil.c b/source/util/src/thashutil.c index 97e29712e0..21b9359076 100644 --- a/source/util/src/thashutil.c +++ b/source/util/src/thashutil.c @@ -199,7 +199,7 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) { break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - fn = xxHash; + fn = MurmurHash3_32; break; case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: From b0885dddbaf38db288132e111f2ba5805ab896f0 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 18:47:12 +0800 Subject: [PATCH 006/267] fix(query): update the hash function for varchar data. --- source/libs/executor/src/executorimpl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 593c29041a..f43ad99966 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1701,12 +1701,12 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey) { int32_t code = 0; - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); +// _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pAggSup->currentPageId = -1; pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput); pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t)); - pAggSup->pResultRowHashTable = tSimpleHashInit(100, hashFn); + pAggSup->pResultRowHashTable = tSimpleHashInit(100, taosFastHash); if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) { return TSDB_CODE_OUT_OF_MEMORY; From 3373324668f676a97bfde5e39172103b5a3d5f74 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 19:46:02 +0800 Subject: [PATCH 007/267] enh(query): opt decomp performance. --- source/util/src/tcompression.c | 80 ++++++++++++++++++++++------------ 1 file changed, 52 insertions(+), 28 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 7cf4a7f510..7b3ebec931 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -274,46 +274,70 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int32_t elems = selector_to_elems[(int32_t)selector]; // Optimize the performance, by remove the constantly switch operation. 
- int32_t v = 0; - uint64_t zigzag_value; + int32_t v = 0; + uint64_t zigzag_value = 0; + uint64_t mask = INT64MASK(bit); switch (type) { case TSDB_DATA_TYPE_BIGINT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); + if (selector == 0 || selector == 1) { + zigzag_value = 0; + + for (int32_t i = 0; i < elems; i++) { + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + int64_t curr_value = diff + prev_value; + prev_value = curr_value; + + *((int64_t *)output + _pos) = (int64_t)curr_value; + _pos++; + + v += bit; + if ((++count) == nelements) break; } + } else { + for (int32_t i = 0; i < elems; i++) { + zigzag_value = ((w >> (4 + v)) & mask); - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + int64_t curr_value = diff + prev_value; + prev_value = curr_value; - *((int64_t *)output + _pos) = (int64_t)curr_value; - _pos++; + *((int64_t *)output + _pos) = (int64_t)curr_value; + _pos++; - v += bit; - if ((++count) == nelements) break; + v += bit; + if ((++count) == nelements) break; + } } } break; case TSDB_DATA_TYPE_INT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); + if (selector == 0 || selector == 1) { + zigzag_value = 0; + for (int32_t i = 0; i < elems; i++) { + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + int64_t curr_value = diff + prev_value; + prev_value = curr_value; + + *((int32_t *)output + _pos) = (int32_t)curr_value; + _pos++; + + v += bit; + if ((++count) == nelements) break; } + } else { + for (int32_t i = 0; i < elems; i++) { + zigzag_value = ((w >> (4 + v)) & mask); - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + int64_t curr_value = diff + prev_value; + prev_value = curr_value; - *((int32_t *)output + _pos) = (int32_t)curr_value; - _pos++; + *((int32_t *)output + _pos) = (int32_t)curr_value; + _pos++; - v += bit; - if ((++count) == nelements) break; + v += bit; + if ((++count) == nelements) break; + } } } break; case TSDB_DATA_TYPE_SMALLINT: { @@ -321,7 +345,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha if (selector == 0 || selector == 1) { zigzag_value = 0; } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); + zigzag_value = ((w >> (4 + v)) & mask); } int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); @@ -341,7 +365,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha if (selector == 0 || selector == 1) { zigzag_value = 0; } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); + zigzag_value = ((w >> (4 + v)) & mask); } int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); From 5e4141a76fe2900d88b869d4b54a3cf84426da74 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 4 Jan 2023 23:08:30 +0800 Subject: [PATCH 008/267] enh(query): opt decomp performance. 
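Roughly, this patch and the previous one rework the integer decode loop in tsDecompressINTImp: the test for selector == 0 || selector == 1 and the INT64MASK(bit) mask are invariant within one 64-bit code word, so each type case now decides once, keeps the mask in a local, and runs a tight per-element loop that writes through a typed pointer (int64_t *p, int32_t *p, ...) instead of casting output on every store. The original per-element switch implementation is kept under the #else branch for comparison. A simplified sketch of the new BIGINT path, using the variable names that appear in the diff below (not a verbatim excerpt):

    uint64_t mask = INT64MASK(bit);    /* hoisted: computed once per code word */
    int64_t *p = (int64_t *)output;    /* typed output pointer */
    if (selector == 0 || selector == 1) {
      /* every delta in this word is zero: repeat the previous value */
      for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
        p[_pos++] = prev_value;
      }
    } else {
      for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
        uint64_t zigzag_value = (w >> (4 + v)) & mask;
        prev_value += ZIGZAG_DECODE(int64_t, zigzag_value);
        p[_pos++] = prev_value;
        v += bit;
      }
    }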
--- source/util/src/tcompression.c | 198 +++++++++++++++++++++++---------- 1 file changed, 140 insertions(+), 58 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 7b3ebec931..96c18b6e48 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -228,6 +228,7 @@ int32_t tsCompressINTImp(const char *const input, const int32_t nelements, char } int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, char *const output, const char type) { +#if 1 int32_t word_length = 0; switch (type) { case TSDB_DATA_TYPE_BIGINT: @@ -280,103 +281,96 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha switch (type) { case TSDB_DATA_TYPE_BIGINT: { + int64_t* p = (int64_t*) output; + if (selector == 0 || selector == 1) { zigzag_value = 0; - for (int32_t i = 0; i < elems; i++) { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; - - *((int64_t *)output + _pos) = (int64_t)curr_value; - _pos++; - - v += bit; - if ((++count) == nelements) break; + prev_value = diff + prev_value; + p[_pos++] = prev_value; } } else { - for (int32_t i = 0; i < elems; i++) { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; - - *((int64_t *)output + _pos) = (int64_t)curr_value; - _pos++; + prev_value = diff + prev_value; + p[_pos++] = prev_value; v += bit; - if ((++count) == nelements) break; } } } break; case TSDB_DATA_TYPE_INT: { + int32_t* p = (int32_t*) output; + if (selector == 0 || selector == 1) { zigzag_value = 0; - for (int32_t i = 0; i < elems; i++) { + + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; - - *((int32_t *)output + _pos) = (int32_t)curr_value; - _pos++; - - v += bit; - if ((++count) == nelements) break; + prev_value = diff + prev_value; + p[_pos++] = (int32_t)prev_value; } } else { - for (int32_t i = 0; i < elems; i++) { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; - - *((int32_t *)output + _pos) = (int32_t)curr_value; - _pos++; + prev_value = diff + prev_value; + p[_pos++] = (int32_t)prev_value; v += bit; - if ((++count) == nelements) break; } } } break; case TSDB_DATA_TYPE_SMALLINT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & mask); + int16_t* p = (int16_t*) output; + + if (selector == 0 || selector == 1) { + zigzag_value = 0; + + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; + p[_pos++] = (int16_t)prev_value; } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + zigzag_value = ((w >> (4 + v)) & mask); - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; 
- *((int16_t *)output + _pos) = (int16_t)curr_value; - _pos++; - - v += bit; - if ((++count) == nelements) break; + p[_pos++] = (int16_t)prev_value; + v += bit; + } } } break; case TSDB_DATA_TYPE_TINYINT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & mask); + int8_t *p = (int8_t *)output; + + if (selector == 0 || selector == 1) { + zigzag_value = 0; + + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; + p[_pos++] = (int8_t)prev_value; } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + zigzag_value = ((w >> (4 + v)) & mask); - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; - *((int8_t *)output + _pos) = (int8_t)curr_value; - _pos++; - - v += bit; - if ((++count) == nelements) break; + p[_pos++] = (int8_t)prev_value; + v += bit; + } } } break; } @@ -385,6 +379,94 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha } return nelements * word_length; +#else + + int32_t word_length = 0; + switch (type) { + case TSDB_DATA_TYPE_BIGINT: + word_length = LONG_BYTES; + break; + case TSDB_DATA_TYPE_INT: + word_length = INT_BYTES; + break; + case TSDB_DATA_TYPE_SMALLINT: + word_length = SHORT_BYTES; + break; + case TSDB_DATA_TYPE_TINYINT: + word_length = CHAR_BYTES; + break; + default: + uError("Invalid decompress integer type:%d", type); + return -1; + } + + // If not compressed. + if (input[0] == 1) { + memcpy(output, input + 1, nelements * word_length); + return nelements * word_length; + } + + // Selector value: 0 1 2 3 4 5 6 7 8 9 10 11 + // 12 13 14 15 + char bit_per_integer[] = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60}; + int32_t selector_to_elems[] = {240, 120, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1}; + + const char *ip = input + 1; + int32_t count = 0; + int32_t _pos = 0; + int64_t prev_value = 0; + + while (1) { + if (count == nelements) break; + + uint64_t w = 0; + memcpy(&w, ip, LONG_BYTES); + + char selector = (char)(w & INT64MASK(4)); // selector = 4 + char bit = bit_per_integer[(int32_t)selector]; // bit = 3 + int32_t elems = selector_to_elems[(int32_t)selector]; + + for (int32_t i = 0; i < elems; i++) { + uint64_t zigzag_value; + + if (selector == 0 || selector == 1) { + zigzag_value = 0; + } else { + zigzag_value = ((w >> (4 + bit * i)) & INT64MASK(bit)); + } + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + int64_t curr_value = diff + prev_value; + prev_value = curr_value; + + switch (type) { + case TSDB_DATA_TYPE_BIGINT: + *((int64_t *)output + _pos) = (int64_t)curr_value; + _pos++; + break; + case TSDB_DATA_TYPE_INT: + *((int32_t *)output + _pos) = (int32_t)curr_value; + _pos++; + break; + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t *)output + _pos) = (int16_t)curr_value; + _pos++; + break; + case TSDB_DATA_TYPE_TINYINT: + *((int8_t *)output + _pos) = (int8_t)curr_value; + _pos++; + break; + default: + perror("Wrong integer types.\n"); + return -1; + } + count++; + if (count == nelements) break; + } + ip += LONG_BYTES; + } + + return nelements * word_length; +#endif } /* ----------------------------------------------Bool Compression From 44e103a6a9c0d472dc9c537e4b7f93809926b907 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: 
Thu, 5 Jan 2023 09:38:01 +0800 Subject: [PATCH 009/267] enh(query): remove unnecessary malloc. --- include/libs/function/function.h | 4 ++-- source/libs/executor/src/executil.c | 2 +- source/libs/executor/src/executorimpl.c | 4 ++++ source/util/src/tpagedbuf.c | 12 +++++++----- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 32b8cc7389..0ef094b92e 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -136,11 +136,11 @@ typedef struct SqlFunctionCtx { uint8_t scanFlag; // record current running step, default: 0 int16_t functionId; // function id char *pOutput; // final result output buffer, point to sdata->data - int32_t numOfParams; // input parameter, e.g., top(k, 20), the number of results of top query is kept in param SFunctParam *param; // corresponding output buffer for timestamp of each result, e.g., diff/csum SColumnInfoData *pTsOutput; + int32_t numOfParams; int32_t offset; SResultRowEntryInfo *resultInfo; SSubsidiaryResInfo subsidiaries; @@ -152,7 +152,7 @@ typedef struct SqlFunctionCtx { struct SSDataBlock *pSrcBlock; struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity SSerializeDataHandle saveHandle; - char udfName[TSDB_FUNC_NAME_LEN]; + char *udfName; } SqlFunctionCtx; typedef struct tExprNode { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index fc3cfbd0f6..cad6c33f17 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1513,7 +1513,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet); } else { char* udfName = pExpr->pExpr->_function.pFunctNode->functionName; - tstrncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN); + pCtx->udfName = strdup(udfName); fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet); } pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index f43ad99966..2c192bb9bb 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1785,6 +1785,10 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) { taosMemoryFreeClear(pCtx[i].subsidiaries.buf); taosMemoryFree(pCtx[i].input.pData); taosMemoryFree(pCtx[i].input.pColumnDataAgg); + + if (pCtx[i].udfName != NULL) { + taosMemoryFree(pCtx[i].udfName); + } } taosMemoryFreeClear(pCtx); diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 7e7712ec8c..984474ceaa 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -360,16 +360,13 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem // init id hash table _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); pPBuf->pIdList = taosArrayInit(4, POINTER_BYTES); - - pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES pPBuf->all = taosHashInit(10, fn, true, false); pPBuf->prefix = (char*) dir; pPBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t)); // qDebug("QInfo:0x%"PRIx64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, - // pPBuf->pageSize, - // pPBuf->inMemPages, pPBuf->path); + // pPBuf->pageSize, pPBuf->inMemPages, pPBuf->path); return TSDB_CODE_SUCCESS; } @@ -593,7 +590,12 @@ void setBufPageDirty(void* pPage, bool dirty) { ppi->dirty = dirty; } -void 
setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp) { pBuf->comp = comp; } +void setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp) { + pBuf->comp = comp; + if (comp && (pBuf->assistBuf == NULL)) { + pBuf->assistBuf = taosMemoryMalloc(pBuf->pageSize + 2); // EXTRA BYTES + } +} void dBufSetBufPageRecycled(SDiskbasedBuf* pBuf, void* pPage) { SPageInfo* ppi = getPageInfoFromPayload(pPage); From 4fd8954c9ca2f95daa79d6dc221fd3131d4e0c22 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 5 Jan 2023 10:13:05 +0800 Subject: [PATCH 010/267] enh(query): opt decompress perf. --- source/util/src/tcompression.c | 55 ++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 96c18b6e48..382a4baddf 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -292,6 +292,60 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha p[_pos++] = prev_value; } } else { + int32_t batch = elems >> 2; + int32_t globalBatch = (nelements - count) >> 2; + + int32_t minBatch = TMIN(batch, globalBatch); + +#if 1 + // manual unrolling, to erase the hotspot + for (int32_t i = 0; i < minBatch; ++i, count += 4) { + zigzag_value = ((w >> (4 + v)) & mask); + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; + + p[_pos++] = prev_value; + v += bit; + + zigzag_value = ((w >> (4 + v)) & mask); + diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; + + p[_pos++] = prev_value; + v += bit; + + zigzag_value = ((w >> (4 + v)) & mask); + + diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; + + p[_pos++] = prev_value; + v += bit; + + zigzag_value = ((w >> (4 + v)) & mask); + + diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; + + p[_pos++] = prev_value; + v += bit; + } + + // handle the remain + int32_t remain = elems % 4; + int32_t globalRemain = (nelements - count); + int32_t minRemain = TMIN(globalRemain,remain); + + for (int32_t i = 0; i < minRemain; i++, count++) { + zigzag_value = ((w >> (4 + v)) & mask); + + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + prev_value = diff + prev_value; + + p[_pos++] = prev_value; + v += bit; + } +#else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); @@ -301,6 +355,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha p[_pos++] = prev_value; v += bit; } +#endif } } break; case TSDB_DATA_TYPE_INT: { From 264383c2be45cb8d5c780eb64270feff05ef026a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 5 Jan 2023 18:59:36 +0800 Subject: [PATCH 011/267] enh(query): disable an error log. 
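
Background for the integer decompression work running through this series: each stored value is a zigzag-encoded delta against the previous value, and every decode loop above and below expands it with the ZIGZAG_DECODE macro. A minimal standalone sketch of the encode/decode pair, with illustrative names that are not part of any patch here:

    #include <assert.h>
    #include <stdint.h>

    /* Zigzag maps signed integers to unsigned ones so that values of small
     * magnitude, positive or negative, need only a few significant bits:
     *   0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...                      */
    static uint64_t zigzag_encode(int64_t v) { return ((uint64_t)v << 1) ^ (uint64_t)(v >> 63); }

    /* Mirrors ZIGZAG_DECODE(T, v): (((v) >> 1) ^ -((T)((v) & 1))) */
    static int64_t zigzag_decode(uint64_t z) { return (int64_t)(z >> 1) ^ -(int64_t)(z & 1); }

    int main(void) {
      for (int64_t v = -5; v <= 5; ++v) {
        assert(zigzag_decode(zigzag_encode(v)) == v);
      }
      return 0;
    }

Small deltas therefore zigzag to small unsigned numbers, which is what lets a whole group of them be packed into a single 64-bit word in the loops these patches keep reworking.
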
--- source/libs/executor/src/executil.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index cad6c33f17..1e4a97a0e2 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1066,8 +1066,8 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SIdxFltStatus status = SFLT_NOT_INDEX; code = doFilterTag(pTagIndexCond, &metaArg, res, &status); - if (code != 0 || status == SFLT_NOT_INDEX) { - qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); + if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake +// qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); code = TDB_CODE_SUCCESS; } } From 29155ad4604bdc93f9ca5e8f5d22bd584567b977 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 5 Jan 2023 19:11:08 +0800 Subject: [PATCH 012/267] enh(query): disable the file length check. --- source/dnode/vnode/src/tsdb/tsdbReaderWriter.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index c7bce6182a..5583d68fd6 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -47,6 +47,8 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd taosMemoryFree(pFD); goto _exit; } + +#if 0 // temporarily disable it for performance evaluation. if (taosStatFile(path, &pFD->szFile, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); @@ -54,6 +56,8 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd taosMemoryFree(pFD); goto _exit; } +#endif + ASSERT(pFD->szFile % szPage == 0); pFD->szFile = pFD->szFile / szPage; *ppFD = pFD; From 5f17d81b189bdbf426cf351f624b8b9ff7aa37fe Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 5 Jan 2023 19:14:42 +0800 Subject: [PATCH 013/267] enh(query): do some internal refactor. 
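
The loops being reshaped in the diff below all walk the same packed-word layout: the low 4 bits of each 64-bit word hold a selector, and the selector picks how many values the word carries and how wide each one is (the bit_per_integer / selector_to_elems tables shown in the reference path earlier). A minimal sketch of pulling the i-th packed value out of one word, for orientation only; the helper name is illustrative and not part of the patch:

    #include <stdint.h>

    static const char    bit_per_integer[]   = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60};
    static const int32_t selector_to_elems[] = {240, 120, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1};

    /* Extract the i-th zigzag value from one word, i < selector_to_elems[selector]. */
    static uint64_t nth_packed_value(uint64_t w, int32_t i) {
      int32_t selector = (int32_t)(w & 0x0F);        /* low 4 bits               */
      int32_t bit      = bit_per_integer[selector];  /* width of each value      */
      if (bit == 0) {
        return 0;                                    /* selectors 0/1: all zeros */
      }
      uint64_t mask = (((uint64_t)1) << bit) - 1;
      return (w >> (4 + bit * i)) & mask;            /* skip the 4 selector bits */
    }

The decode loops keep a running shift instead of recomputing 4 + bit * i per element, which is exactly the hot spot the surrounding patches chip away at.
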
--- source/util/src/tcompression.c | 40 ++++++++++------------------------ 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 382a4baddf..5352692162 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -287,8 +287,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = prev_value; } } else { @@ -301,31 +300,25 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha // manual unrolling, to erase the hotspot for (int32_t i = 0; i < minBatch; ++i, count += 4) { zigzag_value = ((w >> (4 + v)) & mask); - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = prev_value; v += bit; zigzag_value = ((w >> (4 + v)) & mask); - diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = prev_value; v += bit; zigzag_value = ((w >> (4 + v)) & mask); - - diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = prev_value; v += bit; zigzag_value = ((w >> (4 + v)) & mask); - - diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = prev_value; v += bit; @@ -340,7 +333,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = ((w >> (4 + v)) & mask); int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = prev_value; v += bit; @@ -365,16 +358,13 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = (int32_t)prev_value; } } else { for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = (int32_t)prev_value; v += bit; @@ -388,17 +378,14 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = (int16_t)prev_value; } } else { for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; - + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = (int16_t)prev_value; v += bit; } @@ -412,16 +399,13 @@ int32_t 
tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = (int8_t)prev_value; } } else { for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = (int8_t)prev_value; v += bit; From cbc421825364126b01f1802b097fd1fed2d1b17e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 5 Jan 2023 19:15:18 +0800 Subject: [PATCH 014/267] enh(query): do some internal refactor. --- source/util/src/tcompression.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 5352692162..0bdce44772 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -331,8 +331,6 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha for (int32_t i = 0; i < minRemain; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = prev_value; From 4f89bf78d82b3bc2aa46053256d9b3135689b188 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 5 Jan 2023 19:18:24 +0800 Subject: [PATCH 015/267] enh(query): do some internal refactor. --- source/dnode/vnode/src/tsdb/tsdbReaderWriter.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 5583d68fd6..cd8454ade0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -48,7 +48,6 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd goto _exit; } -#if 0 // temporarily disable it for performance evaluation. if (taosStatFile(path, &pFD->szFile, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); @@ -56,7 +55,6 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd taosMemoryFree(pFD); goto _exit; } -#endif ASSERT(pFD->szFile % szPage == 0); pFD->szFile = pFD->szFile / szPage; From cf4d60a76c92fa764cd3feab0d80f0063ce6b374 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 11:18:44 +0800 Subject: [PATCH 016/267] enh(query): do some internal refactor. --- source/util/src/tcompression.c | 78 +++++++++++++++++----------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 0bdce44772..b46d0ebc3d 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -275,7 +275,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int32_t elems = selector_to_elems[(int32_t)selector]; // Optimize the performance, by remove the constantly switch operation. 
- int32_t v = 0; + int32_t v = 4; uint64_t zigzag_value = 0; uint64_t mask = INT64MASK(bit); @@ -287,7 +287,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = prev_value; } } else { @@ -298,44 +298,44 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha #if 1 // manual unrolling, to erase the hotspot - for (int32_t i = 0; i < minBatch; ++i, count += 4) { - zigzag_value = ((w >> (4 + v)) & mask); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + for (int32_t i = 0; i < minBatch; ++i, count += 4) { + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; - v += bit; + p[_pos++] = prev_value; + v += bit; - zigzag_value = ((w >> (4 + v)) & mask); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; - v += bit; + p[_pos++] = prev_value; + v += bit; - zigzag_value = ((w >> (4 + v)) & mask); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; - v += bit; + p[_pos++] = prev_value; + v += bit; - zigzag_value = ((w >> (4 + v)) & mask); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; - v += bit; - } + p[_pos++] = prev_value; + v += bit; + } - // handle the remain - int32_t remain = elems % 4; - int32_t globalRemain = (nelements - count); - int32_t minRemain = TMIN(globalRemain,remain); + // handle the remain + int32_t remain = elems & 0x03; + int32_t globalRemain = (nelements - count); + int32_t minRemain = TMIN(globalRemain, remain); - for (int32_t i = 0; i < minRemain; i++, count++) { - zigzag_value = ((w >> (4 + v)) & mask); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + for (int32_t i = 0; i < minRemain; i++, count++) { + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; - v += bit; - } + p[_pos++] = prev_value; + v += bit; + } #else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); @@ -356,13 +356,13 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int32_t)prev_value; } } else { for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - zigzag_value = ((w >> (4 + v)) & mask); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int32_t)prev_value; v += bit; @@ -376,14 +376,14 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + prev_value += 
ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int16_t)prev_value; } } else { for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - zigzag_value = ((w >> (4 + v)) & mask); + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; p[_pos++] = (int16_t)prev_value; v += bit; } @@ -397,13 +397,13 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha zigzag_value = 0; for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int8_t)prev_value; } } else { for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - zigzag_value = ((w >> (4 + v)) & mask); - prev_value = ZIGZAG_DECODE(int64_t, zigzag_value) + prev_value; + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int8_t)prev_value; v += bit; From d208282a2b387fe4fa9cacafd572961ef1a9dc22 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 13:17:22 +0800 Subject: [PATCH 017/267] refactor: do some internal refactor. --- source/libs/executor/src/tlinearhash.c | 7 ------ source/libs/function/src/tpercentile.c | 2 -- source/util/src/tpagedbuf.c | 35 +++++++++++++++----------- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c index d97f81c994..2cba3855c7 100644 --- a/source/libs/executor/src/tlinearhash.c +++ b/source/libs/executor/src/tlinearhash.c @@ -123,8 +123,6 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t } static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket* pBucket) { - ASSERT(pPage != NULL && pNode != NULL && pBucket->size >= 1); - int32_t len = GET_LHASH_NODE_LEN(pNode); char* p = (char*)pNode + len; @@ -301,8 +299,6 @@ void* tHashCleanup(SLHashObj* pHashObj) { } int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data, size_t size) { - ASSERT(pHashObj != NULL && key != NULL); - if (pHashObj->bits == 0) { SLHashBucket* pBucket = pHashObj->pBucket[0]; doAddToBucket(pHashObj, pBucket, 0, key, keyLen, data, size); @@ -363,14 +359,12 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data if (v1 != splitBucketId) { // place it into the new bucket ASSERT(v1 == newBucketId); // printf("move key:%d to 0x%x bucket, remain items:%d\n", *(int32_t*)k, v1, pBucket->size - 1); - SLHashBucket* pNewBucket = pHashObj->pBucket[newBucketId]; doAddToBucket(pHashObj, pNewBucket, newBucketId, (void*)GET_LHASH_NODE_KEY(pNode), pNode->keyLen, GET_LHASH_NODE_KEY(pNode), pNode->dataLen); doRemoveFromBucket(p, pNode, pBucket); } else { // printf("check key:%d, located into: %d, skip it\n", *(int*) k, v1); - int32_t nodeSize = GET_LHASH_NODE_LEN(pStart); pStart += nodeSize; } @@ -385,7 +379,6 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data } char* tHashGet(SLHashObj* pHashObj, const void* key, size_t keyLen) { - ASSERT(pHashObj != NULL && key != NULL && keyLen > 0); int32_t hashv = pHashObj->hashFn(key, keyLen); int32_t bucketId = doGetBucketIdFromHashVal(hashv, pHashObj->bits); diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 04472c42ec..163107f483 100644 --- a/source/libs/function/src/tpercentile.c +++ 
b/source/libs/function/src/tpercentile.c @@ -346,8 +346,6 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT * in memory bucket, we only accept data array list */ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { - ASSERT(pBucket != NULL && data != NULL && size > 0); - int32_t count = 0; int32_t bytes = pBucket->bytes; for (int32_t i = 0; i < size; ++i) { diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index f84da15110..1ddb934668 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -132,7 +132,6 @@ static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { if (pg->offset == -1 || pg->dirty) { void* payload = GET_DATA_PAYLOAD(pg); t = doCompressData(payload, pBuf->pageSize, &size, pBuf); - ASSERTS(size >= 0, "size is negative"); } // this page is flushed to disk for the first time @@ -272,13 +271,15 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) { SListNode* pn = NULL; while ((pn = tdListNext(&iter)) != NULL) { SPageInfo* pageInfo = *(SPageInfo**)pn->data; - ASSERT(pageInfo->pageId >= 0 && pageInfo->pn == pn); + if (pageInfo->pageId < 0 || pageInfo->pn != pn) { + uError("data in consistent in paged buffer, %s", pBuf->id); + return NULL; + } if (!pageInfo->used) { - // printf("%d is chosen\n", pageInfo->pageId); break; } else { - // printf("page %d is used, dirty:%d\n", pageInfo->pageId, pageInfo->dirty); + // printf("page %d is used, dirty:%d\n", pageInfo->pageId, pageInfo->dirty); } } @@ -353,7 +354,9 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem pPBuf->freePgList = tdListNew(POINTER_BYTES); // at least more than 2 pages must be in memory - ASSERT(inMemBufSize >= pagesize * 2); + if (pPBuf->inMemPages < 2) { + pPBuf->inMemPages = 2; + } pPBuf->lruList = tdListNew(POINTER_BYTES); @@ -418,11 +421,17 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { } void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) { - ASSERT(pBuf != NULL && id >= 0); + if (id < 0) { + return NULL; + } + pBuf->statis.getPages += 1; SPageInfo** pi = taosHashGet(pBuf->all, &id, sizeof(int32_t)); - ASSERT(pi != NULL && *pi != NULL); + if (pi == NULL || *pi == NULL) { + uError("no pages exist, id:%d, %s", id, pBuf->id); + return NULL; + } if ((*pi)->pData != NULL) { // it is in memory // no need to update the LRU list if only one page exists @@ -432,7 +441,10 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) { } SPageInfo** pInfo = (SPageInfo**)((*pi)->pn->data); - ASSERT(*pInfo == *pi); + if (*pInfo != *pi) { + uError("data inconsistent in paged buf, %s", pBuf->id); + return NULL; + } lruListMoveToFront(pBuf->lruList, (*pi)); (*pi)->used = true; @@ -479,9 +491,6 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) { } void releaseBufPage(SDiskbasedBuf* pBuf, void* page) { - if (ASSERTS(pBuf != NULL && page != NULL, "pBuf or page is NULL")) { - return; - } SPageInfo* ppi = getPageInfoFromPayload(page); releaseBufPageInfo(pBuf, ppi); } @@ -490,7 +499,7 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) { #ifdef BUF_PAGE_DEBUG uDebug("page_releaseBufPageInfo pageId:%d, used:%d, offset:%" PRId64, pi->pageId, pi->used, pi->offset); #endif - if (ASSERTS(pi->pData != NULL, "pi->pData is NULL")) { + if (pi->pData == NULL) { return; } @@ -501,7 +510,6 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) { size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; } SArray* 
getDataBufPagesIdList(SDiskbasedBuf* pBuf) { - ASSERT(pBuf != NULL); return pBuf->pIdList; } @@ -579,7 +587,6 @@ SPageInfo* getLastPageInfo(SArray* pList) { } int32_t getPageId(const SPageInfo* pPgInfo) { - ASSERT(pPgInfo != NULL); return pPgInfo->pageId; } From d8dd3d44afc5e36dee57a73ad3c99b4a8ab6d522 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 13:21:06 +0800 Subject: [PATCH 018/267] refactor: do some internal refactor. --- include/util/tlog.h | 2 +- source/util/src/tutil.c | 13 ------------- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/include/util/tlog.h b/include/util/tlog.h index 6e9b304e1d..e403ce6df2 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -89,7 +89,7 @@ bool taosAssertRelease(bool condition); // Disable all asserts that may compromise the performance. #if defined DISABLE_ASSERT #define ASSERT(condition) -#define ASSERTS(condition, ...) +#define ASSERTS(condition, ...) (0) #else #define ASSERTS(condition, ...) taosAssertDebug(condition, __FILE__, __LINE__, __VA_ARGS__) #ifdef NDEBUG diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index e94f94a00d..5fe25a8e5e 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -159,10 +159,6 @@ char *strtolower(char *dst, const char *src) { int32_t esc = 0; char quote = 0, *p = dst, c; - if (ASSERTS(dst != NULL, "dst is NULL")) { - return NULL; - } - for (c = *src++; c; c = *src++) { if (esc) { esc = 0; @@ -188,10 +184,6 @@ char *strntolower(char *dst, const char *src, int32_t n) { int32_t esc = 0; char quote = 0, *p = dst, c; - if (ASSERTS(dst != NULL, "dst is NULL")) { - return NULL; - } - if (n == 0) { *p = 0; return dst; @@ -219,11 +211,6 @@ char *strntolower(char *dst, const char *src, int32_t n) { char *strntolower_s(char *dst, const char *src, int32_t n) { char *p = dst, c; - - if (ASSERTS(dst != NULL, "dst is NULL")) { - return NULL; - } - if (n == 0) { return NULL; } From 18738ecdd6108931a99a0619fd00191e86ae846c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 15:45:15 +0800 Subject: [PATCH 019/267] refactor: do some internal refactor. 
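
The branch optimized in the diff below is the selector 0/1 case: those selectors encode zero-bit values, so every zigzag delta in the word is 0 and the decoder only has to repeat the previous value. A minimal standalone rendering of that idea, unrolled by four like the patch; names are illustrative only:

    #include <stdint.h>

    /* Write `num` copies of the running value, four at a time, then the leftovers.
     * `num` is already clamped to the number of output elements still needed.     */
    static int32_t repeat_prev_value(int64_t *p, int32_t pos, int64_t prev_value, int32_t num) {
      int32_t batch     = num >> 2;    /* groups of four       */
      int32_t remainder = num & 0x03;  /* 0..3 leftover values */

      for (int32_t i = 0; i < batch; ++i) {
        p[pos++] = prev_value;
        p[pos++] = prev_value;
        p[pos++] = prev_value;
        p[pos++] = prev_value;
      }
      for (int32_t i = 0; i < remainder; ++i) {
        p[pos++] = prev_value;
      }
      return pos;
    }

Since prev_value never changes inside this branch, the only remaining work is the stores themselves, which is why the later patches in the series keep trimming everything else out of it.
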
--- source/util/src/tcompression.c | 37 ++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index e2f8cab281..dd8f36a372 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -284,21 +284,38 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int64_t* p = (int64_t*) output; if (selector == 0 || selector == 1) { - zigzag_value = 0; + int32_t batch = elems >> 2; + int32_t remainder = elems & 0x3; - for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + int32_t gRemainder = nelements - count; + int32_t gBatch = gRemainder >> 2; + + int32_t minBatch = TMIN(batch, gBatch); + int32_t minRemain = TMIN(remainder, gRemainder); + for(int32_t i = 0; i < minBatch; ++i) { + p[_pos++] = prev_value; + p[_pos++] = prev_value; + p[_pos++] = prev_value; p[_pos++] = prev_value; } + + for (int32_t i = 0; i < minRemain; i++) { + p[_pos++] = prev_value; + } + + count += ((minBatch << 2)+ minRemain); } else { int32_t batch = elems >> 2; - int32_t globalBatch = (nelements - count) >> 2; + int32_t remain = elems & 0x03; + + int32_t globalRemain = (nelements - count); + int32_t globalBatch = globalRemain >> 2; int32_t minBatch = TMIN(batch, globalBatch); - + int32_t minRemain = TMIN(remain, globalRemain); #if 1 // manual unrolling, to erase the hotspot - for (int32_t i = 0; i < minBatch; ++i, count += 4) { + for (int32_t i = 0; i < minBatch; ++i) { zigzag_value = ((w >> v) & mask); prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); @@ -325,17 +342,15 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha } // handle the remain - int32_t remain = elems & 0x03; - int32_t globalRemain = (nelements - count); - int32_t minRemain = TMIN(globalRemain, remain); - - for (int32_t i = 0; i < minRemain; i++, count++) { + for (int32_t i = 0; i < minRemain; i++) { zigzag_value = ((w >> v) & mask); prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = prev_value; v += bit; } + + count += ((minBatch << 2)+ minRemain); #else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); From 0966528a2ba99bf31c5c08693dfbf0ad121bd7bd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 16:05:13 +0800 Subject: [PATCH 020/267] refactor: do some internal refactor. --- source/libs/function/src/builtinsimpl.c | 39 +++++++++++++++++------ source/libs/function/src/detail/tminmax.c | 30 ++++++++++++++++- 2 files changed, 59 insertions(+), 10 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 8fde27e046..c524e39532 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -789,11 +789,37 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0) ? 
1 : 0; - if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) { - float v = GET_FLOAT_VAL(&pRes->v); - colDataAppend(pCol, currentRow, (const char*)&v, pEntryInfo->isNullRes); + // NOTE: do nothing change it, for performance issue + if (!pEntryInfo->isNullRes) { + switch (pCol->info.type) { + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_BIGINT: + colDataAppendInt64(pCol, currentRow, &pRes->v); + break; + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_INT: + colDataAppendInt32(pCol, currentRow, (int32_t*)&pRes->v); + break; + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_SMALLINT: + colDataAppendInt16(pCol, currentRow, (int16_t*)&pRes->v); + break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + colDataAppendInt8(pCol, currentRow, (int8_t*)&pRes->v); + break; + case TSDB_DATA_TYPE_DOUBLE: + colDataAppendDouble(pCol, currentRow, (double*)&pRes->v); + break; + case TSDB_DATA_TYPE_FLOAT: { + float v = GET_FLOAT_VAL(&pRes->v); + colDataAppendFloat(pCol, currentRow, &v); + break; + } + } } else { - colDataAppend(pCol, currentRow, (const char*)&pRes->v, pEntryInfo->isNullRes); + colDataAppendNULL(pCol, currentRow); } if (pEntryInfo->numOfRes > 0) { @@ -1674,11 +1700,6 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { } tMemBucketDestroy(pMemBucket); - - if (ppInfo->result < 0) { - return TSDB_CODE_NO_AVAIL_DISK; - } - return functionFinalize(pCtx, pBlock); } diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index 847c738655..33ee33899d 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -836,7 +836,35 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) int32_t i = findFirstValPosition(pCol, start, numOfRows); if ((i < end) && (!pBuf->assign)) { - memcpy(&pBuf->v, pCol->pData + (pCol->info.bytes * i), pCol->info.bytes); + char* p = pCol->pData + pCol->info.bytes * i; + + switch (pCol->info.type) { + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_BIGINT: + pBuf->v = *(int64_t*)p; + break; + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_INT: + pBuf->v = *(int32_t*)p; + break; + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_SMALLINT: + pBuf->v = *(int16_t*)p; + break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + pBuf->v = *(int8_t*)p; + break; + case TSDB_DATA_TYPE_FLOAT: { + *(float*)&pBuf->v = *(float*)p; + break; + } + default: + memcpy(&pBuf->v, p, pCol->info.bytes); + break; + } if (pCtx->subsidiaries.num > 0) { int32_t code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); From 7f29a4a62d493e9f22a0aa70b85f20b6ef87465a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 17:10:52 +0800 Subject: [PATCH 021/267] refactor: do some internal refactor. 
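
One invariant all of these rewrites preserve: a word's selector may advertise more slots than output values remain (the last word of a block is rarely full), so the amount taken from each word has to be clamped by both the per-word element count and the global element count. A tiny sketch of the clamp, with hypothetical names:

    #include <stdint.h>

    /* elems: slots advertised by the selector of the current word.
     * count / nelements: values decoded so far and values requested in total.
     * Returns how many values may actually be taken from this word.          */
    static int32_t values_from_word(int32_t elems, int32_t count, int32_t nelements) {
      int32_t left = nelements - count;
      return (left < elems) ? left : elems;
    }

Splitting that clamped amount into groups of four plus a remainder is what lets the unrolled bodies drop the per-element `count < nelements` check.
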
--- source/util/src/tcompression.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index dd8f36a372..e4ca936d22 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -286,12 +286,9 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha if (selector == 0 || selector == 1) { int32_t batch = elems >> 2; int32_t remainder = elems & 0x3; - - int32_t gRemainder = nelements - count; - int32_t gBatch = gRemainder >> 2; + int32_t gBatch = (nelements - count) >> 2; int32_t minBatch = TMIN(batch, gBatch); - int32_t minRemain = TMIN(remainder, gRemainder); for(int32_t i = 0; i < minBatch; ++i) { p[_pos++] = prev_value; p[_pos++] = prev_value; @@ -299,20 +296,21 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha p[_pos++] = prev_value; } + count += (minBatch << 2); + int32_t gRemainder = nelements - count; + int32_t minRemain = TMIN(remainder, gRemainder); + for (int32_t i = 0; i < minRemain; i++) { p[_pos++] = prev_value; } - count += ((minBatch << 2)+ minRemain); + count += minRemain; } else { int32_t batch = elems >> 2; int32_t remain = elems & 0x03; - - int32_t globalRemain = (nelements - count); - int32_t globalBatch = globalRemain >> 2; + int32_t globalBatch = (nelements - count) >> 2; int32_t minBatch = TMIN(batch, globalBatch); - int32_t minRemain = TMIN(remain, globalRemain); #if 1 // manual unrolling, to erase the hotspot for (int32_t i = 0; i < minBatch; ++i) { @@ -342,6 +340,10 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha } // handle the remain + count += (minBatch << 2); + int32_t globalRemain = (nelements - count); + int32_t minRemain = TMIN(remain, globalRemain); + for (int32_t i = 0; i < minRemain; i++) { zigzag_value = ((w >> v) & mask); prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); @@ -350,7 +352,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha v += bit; } - count += ((minBatch << 2)+ minRemain); + count += minRemain; #else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); From 65e7dbda0cf45e16b18e2b1065fee2073643865e Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Fri, 6 Jan 2023 16:40:00 +0800 Subject: [PATCH 022/267] enh: enlarge state buffer size --- source/libs/stream/src/streamState.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 6670bf463e..327de89660 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -107,8 +107,8 @@ static inline int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, } SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages) { - szPage = szPage < 0 ? 4096 : szPage; - pages = pages < 0 ? 256 : pages; + szPage = szPage < 0 ? 4096 * 8 : szPage; + pages = pages < 0 ? 256 * 32 : pages; SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState)); if (pState == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; From e73129fda5ad59ec36ac47fc1ea9532dd698f995 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 17:38:18 +0800 Subject: [PATCH 023/267] refactor: do some internal refactor. 
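
A sizing note on the stream-state defaults raised in the previous patch, as a rough estimate only:

    szPage : 4096 * 8  = 32768 bytes (32 KiB)
    pages  : 256 * 32  = 8192
    budget : 32768 * 8192 = 268435456 bytes (~256 MiB), up from 4096 * 256 = 1 MiB

If the backend keeps roughly szPage * pages bytes resident, that is the order of magnitude of the new default; the exact residency depends on how the underlying store accounts its pages, so treat these figures as an estimate rather than a hard limit.
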
--- source/util/src/tcompression.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index e4ca936d22..b4c3ecd12a 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -284,6 +284,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int64_t* p = (int64_t*) output; if (selector == 0 || selector == 1) { +#if 0 int32_t batch = elems >> 2; int32_t remainder = elems & 0x3; int32_t gBatch = (nelements - count) >> 2; @@ -305,6 +306,10 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha } count += minRemain; +#endif + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + p[_pos++] = prev_value; + } } else { int32_t batch = elems >> 2; int32_t remain = elems & 0x03; From db48c2350d2a6be3cd79e356c21df8f0d0941529 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 17:44:36 +0800 Subject: [PATCH 024/267] refactor: do some internal refactor. --- source/util/src/tcompression.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index b4c3ecd12a..e2d42385ab 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -284,9 +284,9 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int64_t* p = (int64_t*) output; if (selector == 0 || selector == 1) { -#if 0 +#if 1 int32_t batch = elems >> 2; - int32_t remainder = elems & 0x3; + int32_t remainder = elems & 0x03; int32_t gBatch = (nelements - count) >> 2; int32_t minBatch = TMIN(batch, gBatch); @@ -301,15 +301,16 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int32_t gRemainder = nelements - count; int32_t minRemain = TMIN(remainder, gRemainder); - for (int32_t i = 0; i < minRemain; i++) { + for (int32_t i = 0; i < minRemain; ++i) { p[_pos++] = prev_value; } count += minRemain; -#endif +#else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { p[_pos++] = prev_value; } +#endif } else { int32_t batch = elems >> 2; int32_t remain = elems & 0x03; From f408c795da1a9a20cf7854edac4c64d08d3e8784 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 6 Jan 2023 18:41:22 +0800 Subject: [PATCH 025/267] refactor: do some internal refactor. --- source/util/src/tcompression.c | 47 +++++++++++++++------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index e2d42385ab..a72f14f57f 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -285,41 +285,38 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha if (selector == 0 || selector == 1) { #if 1 - int32_t batch = elems >> 2; - int32_t remainder = elems & 0x03; - int32_t gBatch = (nelements - count) >> 2; - - int32_t minBatch = TMIN(batch, gBatch); - for(int32_t i = 0; i < minBatch; ++i) { - p[_pos++] = prev_value; - p[_pos++] = prev_value; - p[_pos++] = prev_value; - p[_pos++] = prev_value; - } - - count += (minBatch << 2); int32_t gRemainder = nelements - count; - int32_t minRemain = TMIN(remainder, gRemainder); + int32_t num = gRemainder > elems? 
elems:gRemainder; - for (int32_t i = 0; i < minRemain; ++i) { + int32_t batch = num >> 2; + int32_t remainder = num & 0x03; + for (int32_t i = 0; i < batch; ++i) { + p[_pos++] = prev_value; + p[_pos++] = prev_value; + p[_pos++] = prev_value; p[_pos++] = prev_value; } - count += minRemain; + for (int32_t i = 0; i < remainder; ++i) { + p[_pos++] = prev_value; + } + + count += num; #else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { p[_pos++] = prev_value; } #endif } else { - int32_t batch = elems >> 2; - int32_t remain = elems & 0x03; - int32_t globalBatch = (nelements - count) >> 2; + int32_t gRemainder = (nelements - count); - int32_t minBatch = TMIN(batch, globalBatch); + int32_t num = gRemainder > elems? elems:gRemainder; + + int32_t batch = num >> 2; + int32_t remain = num & 0x03; #if 1 // manual unrolling, to erase the hotspot - for (int32_t i = 0; i < minBatch; ++i) { + for (int32_t i = 0; i < batch; ++i) { zigzag_value = ((w >> v) & mask); prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); @@ -346,11 +343,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha } // handle the remain - count += (minBatch << 2); - int32_t globalRemain = (nelements - count); - int32_t minRemain = TMIN(remain, globalRemain); - - for (int32_t i = 0; i < minRemain; i++) { + for (int32_t i = 0; i < remain; i++) { zigzag_value = ((w >> v) & mask); prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); @@ -358,7 +351,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha v += bit; } - count += minRemain; + count += num; #else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> (4 + v)) & mask); From 972f9b694809611403b9c61f83f8851e934f1148 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 7 Jan 2023 00:59:05 +0800 Subject: [PATCH 026/267] refactor: do some internal refactor. 
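
Part of the diff below replaces an snprintf-formatted task id with hand-rolled hex conversion (tintToHex / buildTaskId). A minimal standalone sketch of the same idea, assuming hypothetical helper names; it is illustrative only and intentionally simpler than the patch's implementation:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* uint64 -> lower-case hex without leading zeros; returns chars written (no terminator). */
    static int32_t u64_to_hex(uint64_t val, char *hex) {
      const char *digits = "0123456789abcdef";
      char    tmp[16];
      int32_t n = 0;
      do {
        tmp[n++] = digits[val & 0xF];
        val >>= 4;
      } while (val != 0);
      for (int32_t i = 0; i < n; ++i) {
        hex[i] = tmp[n - 1 - i];   /* digits were produced low-nibble first */
      }
      return n;
    }

    int main(void) {
      char     id[64];
      int32_t  off = 0;
      uint64_t taskId = 0x1234, queryId = 0xabcdef;

      memcpy(id + off, "TID:0x", 6);  off += 6;
      off += u64_to_hex(taskId, id + off);
      memcpy(id + off, " QID:0x", 7); off += 7;
      off += u64_to_hex(queryId, id + off);
      id[off] = 0;

      printf("%s\n", id);   /* prints: TID:0x1234 QID:0xabcdef */
      return 0;
    }

Skipping printf-style formatting only pays off on a hot path; for a one-off string the snprintf form is simpler and just as good.
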
--- include/util/tutil.h | 2 ++ source/dnode/vnode/src/meta/metaCache.c | 28 ++++++++++++++++++------- source/libs/executor/src/executorimpl.c | 23 ++++++++++++++++---- source/util/src/tcompression.c | 16 -------------- source/util/src/tutil.c | 16 ++++++++++++++ 5 files changed, 58 insertions(+), 27 deletions(-) diff --git a/include/util/tutil.h b/include/util/tutil.h index 9fb68aebdc..82d4c86f12 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -46,6 +46,8 @@ char *paGetToken(char *src, char **token, int32_t *tokenLen); int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]); int32_t taosHexStrToByteArray(char hexstr[], char bytes[]); +int32_t tintToHex(uint64_t val, char hex[]); + char *taosIpStr(uint32_t ipInt); uint32_t ip2uint(const char *const ip_addr); void taosIp2String(uint32_t ip, char *str); diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index d68658b0d9..33ff438490 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -32,7 +32,7 @@ typedef struct SMetaStbStatsEntry { } SMetaStbStatsEntry; typedef struct STagFilterResEntry { - uint64_t suid; // uid for super table +// uint64_t suid; // uid for super table SList list; // the linked list of md5 digest, extracted from the serialized tag query condition uint32_t qTimes; // queried times for current super table } STagFilterResEntry; @@ -533,6 +533,11 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int SHashObj* pTableEntry = pMeta->pCache->sTagFilterResCache.pTableEntry; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; + uint64_t buf[3] = {0}; + buf[0] = suid; + memcpy(&buf[1], pKey, keyLen); + ASSERT(sizeof(uint64_t) + keyLen == 24); + taosThreadMutexLock(pLock); STagFilterResEntry** pEntry = taosHashGet(pTableEntry, &suid, sizeof(uint64_t)); @@ -543,15 +548,24 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES); tdListAppend(&p->list, pKey); } else { + // check if it exists or not + SListIter iter = {0}; + tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); + + SListNode* pNode = NULL; + while ((pNode = tdListNext(&iter)) != NULL) { + uint64_t* p = (uint64_t*) pNode->data; + + // key already exists in cache, quit + if (p[1] == ((uint64_t*)pKey)[1] && p[2] == ((uint64_t*)pKey)[2]) { + taosThreadMutexUnlock(pLock); + return TSDB_CODE_SUCCESS; + } + } + tdListAppend(&(*pEntry)->list, pKey); } - uint64_t buf[3] = {0}; - buf[0] = suid; - - memcpy(&buf[1], pKey, keyLen); - ASSERT(sizeof(uint64_t) + keyLen == 24); - // add to cache. 
taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL, TAOS_LRU_PRIORITY_LOW); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 3f5ac211c0..f37a31d5a9 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1962,6 +1962,22 @@ void destroyAggOperatorInfo(void* param) { taosMemoryFreeClear(param); } +static char* buildTaskId(uint64_t taskId, uint64_t queryId) { + char* p = taosMemoryMalloc(64); + + int32_t offset = 6; + memcpy(p, "TID:0x", offset); + offset += tintToHex(taskId, &p[offset]); + + memcpy(&p[offset], " QID:0x", 7); + offset += 7; + offset += tintToHex(queryId, &p[offset]); + + p[offset] = 0; + + return p; +} + static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPTR_EXEC_MODEL model, char* dbFName) { SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo)); if (pTaskInfo == NULL) { @@ -1978,10 +1994,9 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo)); pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES); - char* p = taosMemoryCalloc(1, 128); - snprintf(p, 128, "TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, queryId); - pTaskInfo->id.str = p; - +// char* p = taosMemoryMalloc(64); +// snprintf(p, 64, "TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, queryId); + pTaskInfo->id.str = buildTaskId(taskId, queryId); return pTaskInfo; } diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index a72f14f57f..4f0d27850d 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -284,7 +284,6 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int64_t* p = (int64_t*) output; if (selector == 0 || selector == 1) { -#if 1 int32_t gRemainder = nelements - count; int32_t num = gRemainder > elems? elems:gRemainder; @@ -302,14 +301,8 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha } count += num; -#else - for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - p[_pos++] = prev_value; - } -#endif } else { int32_t gRemainder = (nelements - count); - int32_t num = gRemainder > elems? 
elems:gRemainder; int32_t batch = num >> 2; @@ -369,10 +362,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int32_t* p = (int32_t*) output; if (selector == 0 || selector == 1) { - zigzag_value = 0; - for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int32_t)prev_value; } } else { @@ -389,10 +379,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int16_t* p = (int16_t*) output; if (selector == 0 || selector == 1) { - zigzag_value = 0; - for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int16_t)prev_value; } } else { @@ -410,10 +397,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int8_t *p = (int8_t *)output; if (selector == 0 || selector == 1) { - zigzag_value = 0; - for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = (int8_t)prev_value; } } else { diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index 5fe25a8e5e..d7980bce64 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -320,6 +320,22 @@ char *strbetween(char *string, char *begin, char *end) { return result; } +int32_t tintToHex(uint64_t val, char hex[]) { + const char hexstr[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + int32_t j = 0; + int32_t k = 0; + while((val & (((uint64_t)0xfL) << ((15 - k) * 4))) == 0) { + k += 1; + } + + for (j = 0; k < 16; ++k, ++j) { + hex[j] = hexstr[(val & (((uint64_t)0xfL) << ((15 - k) * 4))) >> (15 - k) * 4]; + } + + return j; +} + int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]) { int32_t i; char hexval[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; From f90fa07ea9ca54a73fb3aff87d14ddbe725396e3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 8 Jan 2023 12:34:18 +0800 Subject: [PATCH 027/267] refactor: add avx support zigzag decode. 
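
The AVX2 path added below decodes four packed values per step, but after the vectorized zigzag step it still has to turn four deltas into four running values, i.e. a four-element prefix sum seeded with prev_value. A scalar reference for what that step must compute, to make the vector store pattern easier to follow (the function name is illustrative, not part of the patch):

    #include <stdint.h>

    /* Given the running value and four decoded deltas, produce the four outputs
     * and return the new running value (which seeds the next group of four).   */
    static int64_t prefix_sum4(int64_t prev, const int64_t d[4], int64_t out[4]) {
      out[0] = prev   + d[0];
      out[1] = out[0] + d[1];
      out[2] = out[1] + d[2];
      out[3] = out[2] + d[3];
      return out[3];
    }

In the intrinsic version this dependency chain is the awkward part: _mm256_slli_si256 shifts within each 128-bit lane, so an in-lane shift-and-add only builds partial sums per lane, and the running sum from the low lane still has to be carried into the high lane separately, which is why the code stores the vector and then fixes up the halves with 128-bit adds.
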
--- include/util/tutil.h | 1 + source/dnode/vnode/src/inc/tsdb.h | 1 + source/dnode/vnode/src/tsdb/tsdbFile.c | 28 ++++++++- source/util/src/tcompression.c | 79 +++++++++++++++++++++----- source/util/src/tutil.c | 23 ++++++++ 5 files changed, 115 insertions(+), 17 deletions(-) diff --git a/include/util/tutil.h b/include/util/tutil.h index 82d4c86f12..513806459d 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -47,6 +47,7 @@ int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]); int32_t taosHexStrToByteArray(char hexstr[], char bytes[]); int32_t tintToHex(uint64_t val, char hex[]); +int32_t tintToStr(uint64_t val, size_t radix, char str[]); char *taosIpStr(uint32_t ipInt); uint32_t ip2uint(const char *const ip_addr); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 5a2e462c8c..2536ec621f 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -235,6 +235,7 @@ void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]); void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]); void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]); + // SDelFile void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]); // tsdbFS.c ============================================================================================== diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index 3c944584de..5b27497998 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -93,8 +93,32 @@ static int32_t tGetSmaFile(uint8_t *p, SSmaFile *pSmaFile) { // EXPOSED APIS ================================================== void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pHeadF->commitID, ".head"); + const char* p1 = tfsGetDiskPath(pTsdb->pVnode->pTfs, did); + int32_t len = strlen(p1); + + char* p = memcpy(fname, p1, len); + p += len; + + *(p++) = TD_DIRSEP[0]; + len = strlen(pTsdb->path); + + memcpy(p, pTsdb->path, len); + p += len; + + *(p++) = TD_DIRSEP[0]; + *(p++) = 'v'; + + p += tintToStr(TD_VID(pTsdb->pVnode), 10, p); + *(p++) = 'f'; + + p += tintToStr(fid, 10, p); + + memcpy(p, "ver", 3); + p += 3; + + p += tintToStr(pHeadF->commitID, 10, p); + memcpy(p, ".head", 5); + p[5] = 0; } void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]) { diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 4f0d27850d..5c738c0ed7 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -308,30 +308,79 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int32_t batch = num >> 2; int32_t remain = num & 0x03; #if 1 +#if 1 + __m256i base = _mm256_set1_epi64x(w); + __m256i mask_ = _mm256_set1_epi64x(mask); + + __m256i shiftBits = _mm256_set_epi64x(bit * 3 + 4, bit * 2 + 4, bit + 4, 4); + __m256i inc = _mm256_set1_epi64x(bit << 2); + + for(int32_t i = 0; i < batch; ++i) { + __m256i after = _mm256_srlv_epi64(base, shiftBits); + __m256i zz = _mm256_and_si256(after, mask_); + printf("1\n"); + + //#define 
ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) // zigzag decode + __m256i signmask = _mm256_and_si256(_mm256_set_epi64x(1, 1, 1, 1), zz); + signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask); + + // now here we get the four zigzag value + __m256i final = _mm256_xor_si256(_mm256_srli_epi64(zz, 1), signmask); + + // calculate the cumulative sum (prefix sum) + // decode[0] = prev_value + final[0] + // decode[1] = decode[0] + final[1] -----> prev_value + final[0] + final[1] + // decode[2] = decode[1] + final[1] -----> prev_value + final[0] + final[1] + final[2] + // decode[3] = decode[2] + final[1] -----> prev_value + final[0] + final[1] + final[2] + final[3] + + printf("2\n"); + + __m128i prev = _mm_set1_epi64x(prev_value); + final = _mm256_add_epi64(final, _mm256_slli_si256(final, 8)); + // x = 1, 2, 3, 4 + // + 0, 1, 2, 3 + // = 1, 3, 5, 7 + _mm256_storeu_si256((__m256i *)&p[_pos], final); + + __m128i first = _mm_loadu_si128((__m128i *)&p[_pos]); + __m128i sec = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), first); + sec = _mm_add_epi64(sec, prev); + first = _mm_add_epi64(first, prev); + + _mm_storeu_si128((__m128i *)&p[_pos], first); + _mm_storeu_si128((__m128i *)&p[_pos + 2], sec); + + shiftBits = _mm256_add_epi64(shiftBits, inc); + prev_value = p[_pos + 3]; + _pos += 4; + + printf("3\n"); + } +#else // manual unrolling, to erase the hotspot + uint64_t zz[4]; + for (int32_t i = 0; i < batch; ++i) { zigzag_value = ((w >> v) & mask); - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + zz[0] = ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; v += bit; - zigzag_value = ((w >> v) & mask); - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + zz[1] = ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; v += bit; - zigzag_value = ((w >> v) & mask); - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + zz[2] = ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; v += bit; - zigzag_value = ((w >> v) & mask); - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + zz[3] = ZIGZAG_DECODE(int64_t, zigzag_value); - p[_pos++] = prev_value; + p[_pos] = prev_value + zz[0]; + p[_pos + 1] = p[_pos] + zz[1]; + p[_pos + 2] = p[_pos + 1] + zz[2]; + p[_pos + 3] = p[_pos + 2] + zz[3]; + prev_value = p[_pos + 3]; v += bit; } @@ -345,12 +394,12 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha } count += num; +#endif + #else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - zigzag_value = ((w >> (4 + v)) & mask); - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - prev_value = diff + prev_value; + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = prev_value; v += bit; diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index d7980bce64..780dfe9105 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -336,6 +336,29 @@ int32_t tintToHex(uint64_t val, char hex[]) { return j; } +int32_t tintToStr(uint64_t val, size_t radix, char str[]) { + if (radix < 2 || radix > 16) { + return 0; + } + + const char* s = "0123456789abcdef"; + char buf[65] = {0}; + + int32_t i = 0; + uint64_t v = val; + while(v > 0) { + buf[i++] = s[v % radix]; + v /= radix; + } + + // reverse order + for(int32_t j = 0; j < i; ++j) { + str[j] = buf[i - j - 1]; + } + + return i; +} + int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]) { int32_t i; char hexval[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 
'd', 'e', 'f'}; From c79b7223721702d16185b8602df52fda2f38a355 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 9 Jan 2023 10:12:24 +0800 Subject: [PATCH 028/267] remove state commit --- source/libs/executor/src/timewindowoperator.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index d78e9c4edf..449c52d77f 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -119,8 +119,8 @@ static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsL pRowSup->groupId = groupId; } -FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, - int32_t pos, int32_t order, int64_t* pData) { +FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int32_t pos, + int32_t order, int64_t* pData) { int32_t forwardRows = 0; if (order == TSDB_ORDER_ASC) { @@ -639,7 +639,7 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num if (NULL == pr) { T_LONG_JMP(pTaskInfo->env, terrno); } - + ASSERT(pr->offset == p1->offset && pr->pageId == p1->pageId); if (pr->closed) { @@ -1318,11 +1318,11 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type } static void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprSupp* pSup, int32_t numOfOutput) { - SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false); + SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false); if (NULL == pResult) { return; } - + SqlFunctionCtx* pCtx = pSup->pCtx; for (int32_t i = 0; i < numOfOutput; ++i) { pCtx[i].resultInfo = getResultEntryInfo(pResult, i, pSup->rowEntryInfoOffset); @@ -2534,7 +2534,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } else { deleteIntervalDiscBuf(pInfo->pState, pInfo->pPullDataMap, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark, &pInfo->interval, &pInfo->delKey); - streamStateCommit(pTaskInfo->streamInfo.pState); + // streamStateCommit(pTaskInfo->streamInfo.pState); } return NULL; } else { @@ -4734,7 +4734,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { deleteIntervalDiscBuf(pInfo->pState, NULL, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark, &pInfo->interval, &pInfo->delKey); setOperatorCompleted(pOperator); - streamStateCommit(pTaskInfo->streamInfo.pState); + // streamStateCommit(pTaskInfo->streamInfo.pState); return NULL; } From c7560202f12e58c77923e81a24c59a3470ae5728 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 9 Jan 2023 14:06:31 +0800 Subject: [PATCH 029/267] refactor: do some internal refactor. 
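
This refactor keeps the AVX2 path and drops the temporary debug output. The in-register cumulative sum follows the classic two-round shift-and-add pattern described in the comments (1,2,3,4 -> 1,3,5,7 -> 1,3,6,10). Below is a minimal scalar sketch of that arithmetic, assuming a batch of four 64-bit deltas; prefix_sum4 is an illustrative helper, not code from this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Log-step prefix sum over 4 lanes, the scalar analogue of the
     * _mm256_slli_si256 / _mm_add_epi64 sequence used in the patch. */
    static void prefix_sum4(int64_t v[4], int64_t prev) {
      /* round 1: add the value one lane to the left (lane 0 gets +0) */
      for (int i = 3; i >= 1; --i) v[i] += v[i - 1];
      /* round 2: add the value two lanes to the left */
      for (int i = 3; i >= 2; --i) v[i] += v[i - 2];
      /* finally fold in the carry from the previous batch */
      for (int i = 0; i < 4; ++i) v[i] += prev;
    }

    int main() {
      int64_t d[4] = {1, 2, 3, 4};
      prefix_sum4(d, 0);
      printf("%lld %lld %lld %lld\n", (long long)d[0], (long long)d[1], (long long)d[2], (long long)d[3]);
      /* prints 1 3 6 10 */
      return 0;
    }
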
--- source/util/src/tcompression.c | 94 +++++++++++----------------------- 1 file changed, 31 insertions(+), 63 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 5c738c0ed7..da67e6c397 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -265,7 +265,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int64_t prev_value = 0; while (1) { - if (count == nelements) break; + if (_pos == nelements) break; uint64_t w = 0; memcpy(&w, ip, LONG_BYTES); @@ -284,8 +284,8 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int64_t* p = (int64_t*) output; if (selector == 0 || selector == 1) { - int32_t gRemainder = nelements - count; - int32_t num = gRemainder > elems? elems:gRemainder; + int32_t gRemainder = nelements - _pos; + int32_t num = gRemainder < elems? gRemainder:elems; int32_t batch = num >> 2; int32_t remainder = num & 0x03; @@ -302,100 +302,68 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha count += num; } else { - int32_t gRemainder = (nelements - count); - int32_t num = gRemainder > elems? elems:gRemainder; + int32_t gRemainder = (nelements - _pos); + int32_t num = (gRemainder > elems)? elems:gRemainder; int32_t batch = num >> 2; int32_t remain = num & 0x03; -#if 1 #if 1 __m256i base = _mm256_set1_epi64x(w); - __m256i mask_ = _mm256_set1_epi64x(mask); + __m256i maskVal = _mm256_set1_epi64x(mask); __m256i shiftBits = _mm256_set_epi64x(bit * 3 + 4, bit * 2 + 4, bit + 4, 4); __m256i inc = _mm256_set1_epi64x(bit << 2); for(int32_t i = 0; i < batch; ++i) { __m256i after = _mm256_srlv_epi64(base, shiftBits); - __m256i zz = _mm256_and_si256(after, mask_); - printf("1\n"); + __m256i zigzagVal= _mm256_and_si256(after, maskVal); - //#define ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) // zigzag decode - __m256i signmask = _mm256_and_si256(_mm256_set_epi64x(1, 1, 1, 1), zz); + // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) + __m256i signmask = _mm256_and_si256(_mm256_set1_epi64x(1), zigzagVal); signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask); + // get the four zigzag values here + __m256i delta = _mm256_xor_si256(_mm256_srli_epi64(zigzagVal, 1), signmask); - // now here we get the four zigzag value - __m256i final = _mm256_xor_si256(_mm256_srli_epi64(zz, 1), signmask); - - // calculate the cumulative sum (prefix sum) + // calculate the cumulative sum (prefix sum) for each number // decode[0] = prev_value + final[0] // decode[1] = decode[0] + final[1] -----> prev_value + final[0] + final[1] // decode[2] = decode[1] + final[1] -----> prev_value + final[0] + final[1] + final[2] // decode[3] = decode[2] + final[1] -----> prev_value + final[0] + final[1] + final[2] + final[3] - printf("2\n"); - + // 1, 2, 3, 4 + //+ 0, 1, 2, 3 + // 1, 3, 5, 7 + // shift and add for the first round __m128i prev = _mm_set1_epi64x(prev_value); - final = _mm256_add_epi64(final, _mm256_slli_si256(final, 8)); - // x = 1, 2, 3, 4 - // + 0, 1, 2, 3 - // = 1, 3, 5, 7 - _mm256_storeu_si256((__m256i *)&p[_pos], final); + delta = _mm256_add_epi64(delta, _mm256_slli_si256(delta, 8)); + _mm256_storeu_si256((__m256i *)&p[_pos], delta); - __m128i first = _mm_loadu_si128((__m128i *)&p[_pos]); - __m128i sec = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), first); - sec = _mm_add_epi64(sec, prev); - first = _mm_add_epi64(first, prev); + // 1, 3, 5, 7 + //+ 0, 0, 1, 3 + // 1, 3, 6, 10 + // shift and add operation for the second round 
+ __m128i firstPart = _mm_loadu_si128((__m128i *)&p[_pos]); + __m128i secPart = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), firstPart); + firstPart = _mm_add_epi64(firstPart, prev); + secPart = _mm_add_epi64(secPart, prev); - _mm_storeu_si128((__m128i *)&p[_pos], first); - _mm_storeu_si128((__m128i *)&p[_pos + 2], sec); + // save it in the memory + _mm_storeu_si128((__m128i *)&p[_pos], firstPart); + _mm_storeu_si128((__m128i *)&p[_pos + 2], secPart); shiftBits = _mm256_add_epi64(shiftBits, inc); prev_value = p[_pos + 3]; _pos += 4; - - printf("3\n"); - } -#else - // manual unrolling, to erase the hotspot - uint64_t zz[4]; - - for (int32_t i = 0; i < batch; ++i) { - zigzag_value = ((w >> v) & mask); - zz[0] = ZIGZAG_DECODE(int64_t, zigzag_value); - - v += bit; - zigzag_value = ((w >> v) & mask); - zz[1] = ZIGZAG_DECODE(int64_t, zigzag_value); - - v += bit; - zigzag_value = ((w >> v) & mask); - zz[2] = ZIGZAG_DECODE(int64_t, zigzag_value); - - v += bit; - zigzag_value = ((w >> v) & mask); - zz[3] = ZIGZAG_DECODE(int64_t, zigzag_value); - - p[_pos] = prev_value + zz[0]; - p[_pos + 1] = p[_pos] + zz[1]; - p[_pos + 2] = p[_pos + 1] + zz[2]; - p[_pos + 3] = p[_pos + 2] + zz[3]; - prev_value = p[_pos + 3]; - v += bit; } - // handle the remain + // handle the remain value for (int32_t i = 0; i < remain; i++) { - zigzag_value = ((w >> v) & mask); + zigzag_value = ((w >> (v + (batch * bit))) & mask); prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); p[_pos++] = prev_value; v += bit; } - - count += num; -#endif - #else for (int32_t i = 0; i < elems && count < nelements; i++, count++) { zigzag_value = ((w >> v) & mask); From 642651c1bdc420e81e5ce3428e1fd54936cbd55b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 9 Jan 2023 14:50:24 +0800 Subject: [PATCH 030/267] refactor: do some internal refactor. --- source/util/src/tcompression.c | 151 ++++++++++++++++----------------- 1 file changed, 75 insertions(+), 76 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index da67e6c397..d3605cd02c 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -283,96 +283,95 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha case TSDB_DATA_TYPE_BIGINT: { int64_t* p = (int64_t*) output; + int32_t gRemainder = (nelements - _pos); + int32_t num = (gRemainder > elems)? elems:gRemainder; + + int32_t batch = num >> 2; + int32_t remain = num & 0x03; if (selector == 0 || selector == 1) { - int32_t gRemainder = nelements - _pos; - int32_t num = gRemainder < elems? gRemainder:elems; + if (tsAVX2Enable && tsSIMDBuiltins) { + for (int32_t i = 0; i < batch; ++i) { + __m256i prev = _mm256_set1_epi64x(prev_value); + _mm256_storeu_si256((__m256i *)&p[_pos], prev); + _pos += 4; + } - int32_t batch = num >> 2; - int32_t remainder = num & 0x03; - for (int32_t i = 0; i < batch; ++i) { - p[_pos++] = prev_value; - p[_pos++] = prev_value; - p[_pos++] = prev_value; - p[_pos++] = prev_value; + for (int32_t i = 0; i < remain; ++i) { + p[_pos++] = prev_value; + } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + p[_pos++] = prev_value; + v += bit; + } } - - for (int32_t i = 0; i < remainder; ++i) { - p[_pos++] = prev_value; - } - - count += num; } else { - int32_t gRemainder = (nelements - _pos); - int32_t num = (gRemainder > elems)? 
elems:gRemainder; + if (tsAVX2Enable && tsSIMDBuiltins) { + __m256i base = _mm256_set1_epi64x(w); + __m256i maskVal = _mm256_set1_epi64x(mask); - int32_t batch = num >> 2; - int32_t remain = num & 0x03; -#if 1 - __m256i base = _mm256_set1_epi64x(w); - __m256i maskVal = _mm256_set1_epi64x(mask); + __m256i shiftBits = _mm256_set_epi64x(bit * 3 + 4, bit * 2 + 4, bit + 4, 4); + __m256i inc = _mm256_set1_epi64x(bit << 2); - __m256i shiftBits = _mm256_set_epi64x(bit * 3 + 4, bit * 2 + 4, bit + 4, 4); - __m256i inc = _mm256_set1_epi64x(bit << 2); + for (int32_t i = 0; i < batch; ++i) { + __m256i after = _mm256_srlv_epi64(base, shiftBits); + __m256i zigzagVal = _mm256_and_si256(after, maskVal); - for(int32_t i = 0; i < batch; ++i) { - __m256i after = _mm256_srlv_epi64(base, shiftBits); - __m256i zigzagVal= _mm256_and_si256(after, maskVal); + // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) + __m256i signmask = _mm256_and_si256(_mm256_set1_epi64x(1), zigzagVal); + signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask); + // get the four zigzag values here + __m256i delta = _mm256_xor_si256(_mm256_srli_epi64(zigzagVal, 1), signmask); - // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) - __m256i signmask = _mm256_and_si256(_mm256_set1_epi64x(1), zigzagVal); - signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask); - // get the four zigzag values here - __m256i delta = _mm256_xor_si256(_mm256_srli_epi64(zigzagVal, 1), signmask); + // calculate the cumulative sum (prefix sum) for each number + // decode[0] = prev_value + final[0] + // decode[1] = decode[0] + final[1] -----> prev_value + final[0] + final[1] + // decode[2] = decode[1] + final[1] -----> prev_value + final[0] + final[1] + final[2] + // decode[3] = decode[2] + final[1] -----> prev_value + final[0] + final[1] + final[2] + final[3] - // calculate the cumulative sum (prefix sum) for each number - // decode[0] = prev_value + final[0] - // decode[1] = decode[0] + final[1] -----> prev_value + final[0] + final[1] - // decode[2] = decode[1] + final[1] -----> prev_value + final[0] + final[1] + final[2] - // decode[3] = decode[2] + final[1] -----> prev_value + final[0] + final[1] + final[2] + final[3] + // 1, 2, 3, 4 + //+ 0, 1, 2, 3 + // 1, 3, 5, 7 + // shift and add for the first round + __m128i prev = _mm_set1_epi64x(prev_value); + delta = _mm256_add_epi64(delta, _mm256_slli_si256(delta, 8)); + _mm256_storeu_si256((__m256i *)&p[_pos], delta); - // 1, 2, 3, 4 - //+ 0, 1, 2, 3 - // 1, 3, 5, 7 - // shift and add for the first round - __m128i prev = _mm_set1_epi64x(prev_value); - delta = _mm256_add_epi64(delta, _mm256_slli_si256(delta, 8)); - _mm256_storeu_si256((__m256i *)&p[_pos], delta); + // 1, 3, 5, 7 + //+ 0, 0, 1, 3 + // 1, 3, 6, 10 + // shift and add operation for the second round + __m128i firstPart = _mm_loadu_si128((__m128i *)&p[_pos]); + __m128i secPart = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), firstPart); + firstPart = _mm_add_epi64(firstPart, prev); + secPart = _mm_add_epi64(secPart, prev); - // 1, 3, 5, 7 - //+ 0, 0, 1, 3 - // 1, 3, 6, 10 - // shift and add operation for the second round - __m128i firstPart = _mm_loadu_si128((__m128i *)&p[_pos]); - __m128i secPart = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), firstPart); - firstPart = _mm_add_epi64(firstPart, prev); - secPart = _mm_add_epi64(secPart, prev); + // save it in the memory + _mm_storeu_si128((__m128i *)&p[_pos], firstPart); + _mm_storeu_si128((__m128i *)&p[_pos + 2], secPart); - // save it in the memory - 
_mm_storeu_si128((__m128i *)&p[_pos], firstPart); - _mm_storeu_si128((__m128i *)&p[_pos + 2], secPart); + shiftBits = _mm256_add_epi64(shiftBits, inc); + prev_value = p[_pos + 3]; + _pos += 4; + } - shiftBits = _mm256_add_epi64(shiftBits, inc); - prev_value = p[_pos + 3]; - _pos += 4; + // handle the remain value + for (int32_t i = 0; i < remain; i++) { + zigzag_value = ((w >> (v + (batch * bit))) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + + p[_pos++] = prev_value; + v += bit; + } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + + p[_pos++] = prev_value; + v += bit; + } } - - // handle the remain value - for (int32_t i = 0; i < remain; i++) { - zigzag_value = ((w >> (v + (batch * bit))) & mask); - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - - p[_pos++] = prev_value; - v += bit; - } -#else - for (int32_t i = 0; i < elems && count < nelements; i++, count++) { - zigzag_value = ((w >> v) & mask); - prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); - - p[_pos++] = prev_value; - v += bit; - } -#endif } } break; case TSDB_DATA_TYPE_INT: { From b2a70c0a34e86dd9acffa19df6fbb33d9e544ea2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 13:33:27 +0800 Subject: [PATCH 031/267] enh(query): jump out of loop once queried tables are all found. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 91690af4c8..0f0803fb4c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -680,6 +680,9 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, } taosArrayPush(pIndexList, pBlockIdx); + if (taosArrayGetSize(pIndexList) == taosHashGetSize(pReader->status.pTableMap)) { + break; + } } int64_t et2 = taosGetTimestampUs(); From 08321f3c6d547b2d170b28074bd521d4e13131bc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 13:35:25 +0800 Subject: [PATCH 032/267] enh(query): update the log. 
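
The log now reports how many of the queried tables were actually matched against the block index, alongside the total number of index entries. The counting relies on the early-exit scan added in the previous commit; a minimal standalone sketch of that pattern follows, using illustrative names and plain integer uids instead of the tsdb structures.

    #include <stdio.h>

    /* Sketch of the early-exit scan: stop walking the on-disk block index as soon as
     * every queried table has been matched (names are illustrative, not the tsdb API). */
    static int collectIndexes(const int *blockIdxUids, int numIdx, const int *queryUids, int numQuery,
                              int *out) {
      int found = 0;
      for (int i = 0; i < numIdx; ++i) {
        for (int j = 0; j < numQuery; ++j) {
          if (blockIdxUids[i] == queryUids[j]) {
            out[found++] = blockIdxUids[i];
            break;
          }
        }
        if (found == numQuery) break;  /* all queried tables found, no need to scan further */
      }
      return found;
    }

    int main() {
      int idx[] = {1, 2, 3, 4, 5, 6};
      int query[] = {2, 3};
      int out[2];
      int n = collectIndexes(idx, 6, query, 2, out);
      printf("matched %d of 2 index entries\n", n);
      return 0;
    }
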
--- source/dnode/vnode/src/tsdb/tsdbRead.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0f0803fb4c..e88d38a20e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -651,6 +651,8 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, goto _end; } + int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap); + size_t num = taosArrayGetSize(aBlockIdx); if (num == 0) { taosArrayDestroy(aBlockIdx); @@ -680,14 +682,15 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, } taosArrayPush(pIndexList, pBlockIdx); - if (taosArrayGetSize(pIndexList) == taosHashGetSize(pReader->status.pTableMap)) { + if (taosArrayGetSize(pIndexList) == numOfTables) { break; } } int64_t et2 = taosGetTimestampUs(); - tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s", - (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, pReader->idStr); + tsdbDebug("load block index for %d/%d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s", + numOfTables, (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, + pReader->idStr); pReader->cost.headFileLoadTime += (et1 - st) / 1000.0; From e7d017863407f4ac5abd700429be7fdbdd78e5c2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 15:48:01 +0800 Subject: [PATCH 033/267] enh(query): dynamic invoke the intrinsic instruction according to different length for different data type. --- source/libs/function/src/detail/tminmax.c | 96 +++++++++-------------- 1 file changed, 37 insertions(+), 59 deletions(-) diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index 33ee33899d..c2efe5b7ea 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -61,6 +61,8 @@ } \ } +static int32_t getInvokeThreshold(int32_t bits, int32_t bytes) { return bits / (bytes << 3u); } + static void calculateRounds(int32_t numOfRows, int32_t bytes, int32_t* remainder, int32_t* rounds, int32_t* width) { const int32_t bitWidth = 256; @@ -700,8 +702,29 @@ static void doExtractVal(SColumnInfoData* pCol, int32_t i, int32_t end, SqlFunct } } +static int32_t saveRelatedTuple(SqlFunctionCtx* pCtx, SInputColumnInfoData* pInput, int32_t index, void* tval) { + SColumnInfoData* pCol = pInput->pData[0]; + + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + SMinmaxResInfo* pBuf = GET_ROWCELL_INTERBUF(pResInfo); + + int32_t code = 0; + if (pCtx->subsidiaries.num > 0) { + index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); + if (index >= 0) { + code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + } + + return code; +} + int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) { int32_t numOfElems = 0; + int32_t code = TSDB_CODE_SUCCESS; SInputColumnInfoData* pInput = &pCtx->input; SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0]; @@ -720,20 +743,15 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) // data in current data block are qualified to the query if (pInput->colDataSMAIsSet) { - numOfElems = pInput->numOfRows - pAgg->numOfNull; + numOfElems = pInput->numOfRows - 
pAgg->numOfNull; if (numOfElems == 0) { goto _over; } void* tval = NULL; int16_t index = 0; - - if (isMinFunc) { - tval = &pInput->pColumnDataAgg[0]->min; - } else { - tval = &pInput->pColumnDataAgg[0]->max; - } + tval = isMinFunc? &pInput->pColumnDataAgg[0]->min: &pInput->pColumnDataAgg[0]->max; if (!pBuf->assign) { if (type == TSDB_DATA_TYPE_FLOAT) { @@ -742,15 +760,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) pBuf->v = GET_INT64_VAL(tval); } - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } else { if (IS_SIGNED_NUMERIC_TYPE(type)) { int64_t prev = 0; @@ -759,15 +769,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) int64_t val = GET_INT64_VAL(tval); if ((prev < val) ^ isMinFunc) { GET_INT64_VAL(&pBuf->v) = val; - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { uint64_t prev = 0; @@ -776,15 +778,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) uint64_t val = GET_UINT64_VAL(tval); if ((prev < val) ^ isMinFunc) { GET_UINT64_VAL(&pBuf->v) = val; - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } else if (type == TSDB_DATA_TYPE_DOUBLE) { double prev = 0; @@ -793,15 +787,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) double val = GET_DOUBLE_VAL(tval); if ((prev < val) ^ isMinFunc) { GET_DOUBLE_VAL(&pBuf->v) = val; - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } else if (type == TSDB_DATA_TYPE_FLOAT) { float prev = 0; @@ -810,35 +796,26 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) float val = GET_DOUBLE_VAL(tval); if ((prev < val) ^ isMinFunc) { GET_FLOAT_VAL(&pBuf->v) = val; - } - - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } } pBuf->assign = true; - return TSDB_CODE_SUCCESS; + return code; } int32_t start = pInput->startRowIndex; int32_t numOfRows = pInput->numOfRows; int32_t end = start + numOfRows; - if (pCol->hasNull || numOfRows < 32 || pCtx->subsidiaries.num > 0) { + if (pCol->hasNull || numOfRows < getInvokeThreshold(256, type) || pCtx->subsidiaries.num > 0) { int32_t 
i = findFirstValPosition(pCol, start, numOfRows); if ((i < end) && (!pBuf->assign)) { char* p = pCol->pData + pCol->info.bytes * i; - switch (pCol->info.type) { + switch (type) { case TSDB_DATA_TYPE_DOUBLE: case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_BIGINT: @@ -867,7 +844,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) } if (pCtx->subsidiaries.num > 0) { - int32_t code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -884,7 +861,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) } else { numOfElems = numOfRows; - switch (pCol->info.type) { + switch (type) { case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: { handleInt8Col(pCol->pData, start, numOfRows, pBuf, isMinFunc, true); @@ -933,10 +910,11 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) _over: if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) { - int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos); + code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos); if (code != TSDB_CODE_SUCCESS) { return code; } + pBuf->nullTupleSaved = true; } From f67808a227402a377e5197a9f930bd4312d7de9e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 16:43:15 +0800 Subject: [PATCH 034/267] refactor: do some internal refactor. --- source/libs/function/src/detail/tminmax.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index c2efe5b7ea..e8cacb837b 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -61,7 +61,7 @@ } \ } -static int32_t getInvokeThreshold(int32_t bits, int32_t bytes) { return bits / (bytes << 3u); } +#define GET_INVOKE_INTRINSIC_THRESHOLD(_bits, _bytes) ((_bits) / ((_bytes) << 3u)) static void calculateRounds(int32_t numOfRows, int32_t bytes, int32_t* remainder, int32_t* rounds, int32_t* width) { const int32_t bitWidth = 256; @@ -809,7 +809,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) int32_t numOfRows = pInput->numOfRows; int32_t end = start + numOfRows; - if (pCol->hasNull || numOfRows < getInvokeThreshold(256, type) || pCtx->subsidiaries.num > 0) { + if (pCol->hasNull || numOfRows < GET_INVOKE_INTRINSIC_THRESHOLD(256, pCol->info.bytes) || pCtx->subsidiaries.num > 0) { int32_t i = findFirstValPosition(pCol, start, numOfRows); if ((i < end) && (!pBuf->assign)) { From 0b4f1298d74bb918d65af64db979224da2317a75 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 18:48:20 +0800 Subject: [PATCH 035/267] enh(query): opt agg. 
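
The hard-coded row count of 32 is replaced by a per-type threshold: the SIMD path is only worth taking when at least one full 256-bit register can be filled, i.e. 256 / (8 * bytes) rows. A minimal sketch of that arithmetic, assuming AVX2 registers (invokeThreshold is an illustrative helper); types that have no vectorized path keep INT32_MAX in the table below so they always fall back to the scalar loop.

    #include <stdio.h>

    /* The SIMD min/max path only pays off when at least one full 256-bit register
     * can be filled, i.e. 256 / (8 * bytes) elements. */
    static int invokeThreshold(int typeBytes) { return 256 / (typeBytes * 8); }

    int main() {
      printf("int8  : %d\n", invokeThreshold(1));   /* 32 */
      printf("int16 : %d\n", invokeThreshold(2));   /* 16 */
      printf("int32 : %d\n", invokeThreshold(4));   /* 8  */
      printf("int64 : %d\n", invokeThreshold(8));   /* 4  */
      return 0;
    }
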
--- source/libs/function/src/detail/tminmax.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index e8cacb837b..1388fca8fa 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -809,7 +809,16 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) int32_t numOfRows = pInput->numOfRows; int32_t end = start + numOfRows; - if (pCol->hasNull || numOfRows < GET_INVOKE_INTRINSIC_THRESHOLD(256, pCol->info.bytes) || pCtx->subsidiaries.num > 0) { + // clang-format off + int32_t threshold[] = { + //NULL, BOOL, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, VARCHAR, TIMESTAMP, NCHAR, + INT32_MAX, INT32_MAX, 32, 16, 8, 4, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX, + // UTINYINT,USMALLINT, UINT, UBIGINT, JSON, VARBINARY, DECIMAL, BLOB, MEDIUMBLOB, BINARY + 32, 16, 8, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, + }; + // clang-format on + + if (pCol->hasNull || numOfRows < threshold[pCol->info.type] || pCtx->subsidiaries.num > 0) { int32_t i = findFirstValPosition(pCol, start, numOfRows); if ((i < end) && (!pBuf->assign)) { From 94c416eb309f24ac3f9ab6e4d4c5d1f8aaed5299 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 18:48:55 +0800 Subject: [PATCH 036/267] enh(query): opt agg. --- source/libs/function/src/detail/tminmax.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index 1388fca8fa..257b27f8dd 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -814,7 +814,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) //NULL, BOOL, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, VARCHAR, TIMESTAMP, NCHAR, INT32_MAX, INT32_MAX, 32, 16, 8, 4, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX, // UTINYINT,USMALLINT, UINT, UBIGINT, JSON, VARBINARY, DECIMAL, BLOB, MEDIUMBLOB, BINARY - 32, 16, 8, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, + 32, 16, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, }; // clang-format on From 2d73a50469c42b2999595e9759b78a3d10f3bd5e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Jan 2023 19:31:30 +0800 Subject: [PATCH 037/267] enh(query): opt filter check. 
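
The duplicate check on the per-suid key list now special-cases empty and single-element lists so the common case avoids setting up a list iterator. A minimal sketch of that fast path, assuming each cached key is a 16-byte digest; SDigest and containsDigest are illustrative, not the meta cache API.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fast-path sketch: with 0 or 1 entries in the per-suid list there is no need
     * for a full iterator walk over the linked list (illustrative types). */
    typedef struct { uint64_t w[2]; } SDigest;

    static bool containsDigest(const SDigest *list, int n, const SDigest *key) {
      if (n == 0) return false;
      if (n == 1) return memcmp(&list[0], key, sizeof(SDigest)) == 0;  /* single compare, no iterator */
      for (int i = 0; i < n; ++i) {
        if (memcmp(&list[i], key, sizeof(SDigest)) == 0) return true;
      }
      return false;
    }

    int main() {
      SDigest cached[2] = {{{0x11, 0x22}}, {{0x33, 0x44}}};
      SDigest key = {{0x33, 0x44}};
      printf("found: %d\n", containsDigest(cached, 2, &key));  /* 1 */
      return 0;
    }
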
--- source/dnode/vnode/src/meta/metaCache.c | 32 ++++++++++++++++++------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 33ff438490..5cf079e414 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -549,21 +549,35 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int tdListAppend(&p->list, pKey); } else { // check if it exists or not - SListIter iter = {0}; - tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); - - SListNode* pNode = NULL; - while ((pNode = tdListNext(&iter)) != NULL) { + size_t size = listNEles(&(*pEntry)->list); + if (size == 0) { + tdListAppend(&(*pEntry)->list, pKey); + } else if (size == 1) { + SListNode* pNode = listHead(&(*pEntry)->list); uint64_t* p = (uint64_t*) pNode->data; - - // key already exists in cache, quit if (p[1] == ((uint64_t*)pKey)[1] && p[2] == ((uint64_t*)pKey)[2]) { taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; + } else { // not equal, append it + tdListAppend(&(*pEntry)->list, pKey); } - } + } else { // more than one element + SListIter iter = {0}; + tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); - tdListAppend(&(*pEntry)->list, pKey); + SListNode* pNode = NULL; + while ((pNode = tdListNext(&iter)) != NULL) { + uint64_t* p = (uint64_t*)pNode->data; + + // key already exists in cache, quit + if (p[1] == ((uint64_t*)pKey)[1] && p[2] == ((uint64_t*)pKey)[2]) { + taosThreadMutexUnlock(pLock); + return TSDB_CODE_SUCCESS; + } + } + + tdListAppend(&(*pEntry)->list, pKey); + } } // add to cache. From e9fc109edf06d48c8677ee2b751938c335e90f5d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 11 Jan 2023 18:47:45 +0800 Subject: [PATCH 038/267] enh(query): opt query perf. 
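
Instead of one small allocation per result row, the group result info now carves fixed-size items out of a single buffer that is released with one free. A minimal sketch of that allocation pattern, with illustrative names (SSlab is not an executor type).

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Sketch of the allocation change: reserve a single buffer and hand out
     * fixed-size slots from it instead of calling malloc once per item. */
    typedef struct {
      char  *buf;      /* one allocation backing every item */
      size_t itemSize;
      int    count;
    } SSlab;

    static void *slabAt(SSlab *s, int i) { return s->buf + (size_t)i * s->itemSize; }

    int main() {
      int    numRows  = 4;
      size_t itemSize = 24; /* e.g. group id + row position + key prefix */
      SSlab  slab     = {.buf = malloc(numRows * itemSize), .itemSize = itemSize, .count = numRows};
      if (slab.buf == NULL) return 1;

      for (int i = 0; i < numRows; ++i) {
        memset(slabAt(&slab, i), 0, itemSize);  /* fill each slot in place */
      }

      printf("allocated %d items with a single malloc of %zu bytes\n", numRows, numRows * itemSize);
      free(slab.buf);  /* one free releases everything, mirroring taosMemoryFreeClear(pBuf) */
      return 0;
    }
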
--- source/libs/executor/inc/executil.h | 5 +--- source/libs/executor/src/executil.c | 27 ++++++++++--------- source/libs/executor/src/timewindowoperator.c | 5 ++-- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index e0d2276e6f..523957b54d 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -44,6 +44,7 @@ typedef struct SGroupResInfo { int32_t index; SArray* pRows; // SArray + char* pBuf; } SGroupResInfo; typedef struct SResultRow { @@ -115,10 +116,6 @@ struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t i static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos, bool forUpdate) { SFilePage* bufPage = (SFilePage*)getBufPage(pBuf, pos->pageId); - if (NULL == bufPage) { - return NULL; - } - if (forUpdate) { setBufPageDirty(bufPage, true); } diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 857d57c6b1..06c90481aa 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -89,13 +89,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { } void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) { - assert(pGroupResInfo != NULL); - - for (int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) { - SResKeyPos* pRes = taosArrayGetP(pGroupResInfo->pRows, i); - taosMemoryFree(pRes); - } - + taosMemoryFreeClear(pGroupResInfo->pBuf); pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows); pGroupResInfo->index = 0; } @@ -126,20 +120,28 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in } // extract the result rows information from the hash map - void* pData = NULL; - pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES); + int32_t size = tSimpleHashGetSize(pHashmap); + + void* pData = NULL; + pGroupResInfo->pRows = taosArrayInit(size, POINTER_BYTES); - // todo avoid repeated malloc memory size_t keyLen = 0; - int32_t iter = 0; + int32_t num = 0, iter = 0, itemSize = 0; + while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) { void* key = tSimpleHashGetKey(pData, &keyLen); - SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition)); + if (pGroupResInfo->pBuf == NULL) { + itemSize = keyLen + sizeof(SResultRowPosition); + pGroupResInfo->pBuf = taosMemoryMalloc(size * itemSize); + } + + SResKeyPos* p = (SResKeyPos*)(pGroupResInfo->pBuf + num * itemSize); p->groupId = *(uint64_t*)key; p->pos = *(SResultRowPosition*)pData; memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t)); + taosArrayPush(pGroupResInfo->pRows, &p); } @@ -172,7 +174,6 @@ bool hasRemainResults(SGroupResInfo* pGroupResInfo) { } int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) { - assert(pGroupResInfo != NULL); if (pGroupResInfo->pRows == 0) { return 0; } diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index d78e9c4edf..4cac29cec8 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3392,9 +3392,11 @@ static void copyDeleteWindowInfo(SArray* pResWins, SSHashObj* pStDeleted) { } } +// the allocated memory comes from outer function. 
void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) { pGroupResInfo->pRows = pArrayList; pGroupResInfo->index = 0; + pGroupResInfo->pBuf = NULL; } void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroupResInfo* pGroupResInfo, @@ -3405,8 +3407,7 @@ void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroup blockDataCleanup(pBlock); if (!hasRemainResults(pGroupResInfo)) { - taosArrayDestroy(pGroupResInfo->pRows); - pGroupResInfo->pRows = NULL; + cleanupGroupResInfo(pGroupResInfo); return; } From fc42143c47908623d9adf01242492072d2ba4190 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 11 Jan 2023 18:53:20 +0800 Subject: [PATCH 039/267] fix(query): fix error in generated the group results. --- source/libs/executor/src/executil.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 06c90481aa..cfaeaebe41 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -141,13 +141,13 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in p->groupId = *(uint64_t*)key; p->pos = *(SResultRowPosition*)pData; memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t)); - taosArrayPush(pGroupResInfo->pRows, &p); + num += 1; } if (order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC) { __compar_fn_t fn = (order == TSDB_ORDER_ASC) ? resultrowComparAsc : resultrowComparDesc; - int32_t size = POINTER_BYTES; + size = POINTER_BYTES; taosSort(pGroupResInfo->pRows->pData, taosArrayGetSize(pGroupResInfo->pRows), size, fn); } From a2e35ce13b705688324571849e465f058922a2ee Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 12 Jan 2023 11:26:13 +0800 Subject: [PATCH 040/267] streamState/tdb: use NULL for tdbBegin's heap source --- source/libs/stream/src/streamState.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 327de89660..1952d9ab52 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -192,7 +192,7 @@ void streamStateClose(SStreamState* pState) { } int32_t streamStateBegin(SStreamState* pState) { - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { tdbAbort(pState->pTdbState->db, pState->pTdbState->txn); return -1; @@ -208,7 +208,7 @@ int32_t streamStateCommit(SStreamState* pState) { return -1; } - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } @@ -220,7 +220,7 @@ int32_t streamStateAbort(SStreamState* pState) { return -1; } - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } From ef12805c3b6162e5126110483e588cf285e5a3e6 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 12 Jan 2023 14:15:29 +0800 Subject: [PATCH 041/267] fix: concurrency issue --- include/libs/stream/tstream.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff 
--git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index c00625c51c..c5352eee46 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -354,7 +354,8 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask); void tFreeSStreamTask(SStreamTask* pTask); static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) { - if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { + int8_t type = pItem->type; + if (type == STREAM_INPUT__DATA_SUBMIT) { SStreamDataSubmit* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit*)pItem); if (pSubmitClone == NULL) { qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask); @@ -365,19 +366,19 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data); taosWriteQitem(pTask->inputQueue->queue, pSubmitClone); // qStreamInput(pTask->exec.executor, pSubmitClone); - } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE || - pItem->type == STREAM_INPUT__REF_DATA_BLOCK) { + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE || + type == STREAM_INPUT__REF_DATA_BLOCK) { taosWriteQitem(pTask->inputQueue->queue, pItem); // qStreamInput(pTask->exec.executor, pItem); - } else if (pItem->type == STREAM_INPUT__CHECKPOINT) { + } else if (type == STREAM_INPUT__CHECKPOINT) { taosWriteQitem(pTask->inputQueue->queue, pItem); // qStreamInput(pTask->exec.executor, pItem); - } else if (pItem->type == STREAM_INPUT__GET_RES) { + } else if (type == STREAM_INPUT__GET_RES) { taosWriteQitem(pTask->inputQueue->queue, pItem); // qStreamInput(pTask->exec.executor, pItem); } - if (pItem->type != STREAM_INPUT__GET_RES && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { + if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE); } From effcfc057db4d44f5960f8856b8472366b1e11e2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Jan 2023 15:00:07 +0800 Subject: [PATCH 042/267] fix(query): fix compare error. 
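
The per-suid list nodes store only the 16-byte key digest, while the 24-byte LRU key additionally carries the suid in its first word, so the duplicate check must compare words 0 and 1 of the digest rather than words 1 and 2 of the LRU layout. A minimal sketch of the two layouts, with made-up digest values.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* The LRU key is {suid, digest[0], digest[1]}, while each list node stores only
     * the 16-byte digest, so a node is compared against words 0 and 1 of the
     * incoming digest, not words 1 and 2 of the LRU key. */
    int main() {
      uint64_t digest[2] = {0xaaaaULL, 0xbbbbULL};  /* filter-condition digest (illustrative values) */
      uint64_t suid      = 42;

      uint64_t lruKey[3];
      lruKey[0] = suid;
      memcpy(&lruKey[1], digest, sizeof(digest));

      const uint64_t *nodeKey = digest;  /* what the per-suid list node holds */

      int match = (nodeKey[0] == digest[0]) && (nodeKey[1] == digest[1]);
      printf("digest matches: %d (lru key = %llu/%llx/%llx)\n", match,
             (unsigned long long)lruKey[0], (unsigned long long)lruKey[1], (unsigned long long)lruKey[2]);
      return 0;
    }
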
--- source/dnode/vnode/src/meta/metaCache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 5cf079e414..f52944d5e5 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -555,7 +555,7 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int } else if (size == 1) { SListNode* pNode = listHead(&(*pEntry)->list); uint64_t* p = (uint64_t*) pNode->data; - if (p[1] == ((uint64_t*)pKey)[1] && p[2] == ((uint64_t*)pKey)[2]) { + if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } else { // not equal, append it @@ -570,7 +570,7 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int uint64_t* p = (uint64_t*)pNode->data; // key already exists in cache, quit - if (p[1] == ((uint64_t*)pKey)[1] && p[2] == ((uint64_t*)pKey)[2]) { + if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } From 96feaaadbec81a502f93d1bf4098d337317c5ce5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Jan 2023 19:24:23 +0800 Subject: [PATCH 043/267] fix(query): add the check of item when putting in cache. --- source/dnode/vnode/src/meta/metaCache.c | 38 +++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index f52944d5e5..3b1c0f2c82 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -431,7 +431,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK SHashObj* pTableMap = pMeta->pCache->sTagFilterResCache.pTableEntry; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; - uint64_t buf[3] = {0}; + uint64_t buf[3]; uint32_t times = 0; *acquireRes = 0; @@ -465,7 +465,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK taosThreadMutexUnlock(pLock); // check if scanning all items are necessary or not - if (times >= 5000 && TD_DLIST_NELES(&(*pEntry)->list) > 10) { + if (times >= 5000 && TD_DLIST_NELES(&(*pEntry)->list) > 100) { taosThreadMutexLock(pLock); SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES); @@ -549,6 +549,8 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int tdListAppend(&p->list, pKey); } else { // check if it exists or not + int32_t times = atomic_add_fetch_32(&(*pEntry)->qTimes, 1); + size_t size = listNEles(&(*pEntry)->list); if (size == 0) { tdListAppend(&(*pEntry)->list, pKey); @@ -562,6 +564,17 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int tdListAppend(&(*pEntry)->list, pKey); } } else { // more than one element + bool checkCacheEntry = false; + SArray* pInvalidRes = NULL; + uint64_t keyBuf[3]; + + if (size >= 100 || times > 5000) { + // if the threshold value is reached, need to check the value. + checkCacheEntry = true; + keyBuf[0] = suid; + pInvalidRes = taosArrayInit(64, POINTER_BYTES); + } + SListIter iter = {0}; tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); @@ -574,6 +587,27 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } + + // check whether it is existed in LRU cache, and remove it from linked list if not. 
+ if (checkCacheEntry) { + keyBuf[1] = p[1]; + keyBuf[2] = p[2]; + + LRUHandle* pRes = taosLRUCacheLookup(pCache, keyBuf, 24); + if (pRes == NULL) { // remove the item in the linked list + taosArrayPush(pInvalidRes, &pNode); + } else { + taosLRUCacheRelease(pCache, pRes, false); + } + } + } + + // do remove invalid entry in hash + size_t s = taosArrayGetSize(pInvalidRes); + for (int32_t i = 0; i < s; ++i) { + SListNode** p1 = taosArrayGet(pInvalidRes, i); + tdListPopNode(&(*pEntry)->list, *p1); + taosMemoryFree(*p1); } tdListAppend(&(*pEntry)->list, pKey); From d36e68bd9b807f740269b4679c5fda2d81f51bf1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Jan 2023 19:25:28 +0800 Subject: [PATCH 044/267] fix(query): add the check of item when putting in cache. --- source/dnode/vnode/src/meta/metaCache.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 3b1c0f2c82..f8361c5d0c 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -584,6 +584,14 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int // key already exists in cache, quit if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { + // do remove invalid entry in hash + size_t s = taosArrayGetSize(pInvalidRes); + for (int32_t i = 0; i < s; ++i) { + SListNode** p1 = taosArrayGet(pInvalidRes, i); + tdListPopNode(&(*pEntry)->list, *p1); + taosMemoryFree(*p1); + } + taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } From a96de23363ffee6d32897bf01a3b9a94b74b8580 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Jan 2023 22:50:02 +0800 Subject: [PATCH 045/267] fix(query): add the check of item when putting in cache. --- source/dnode/vnode/src/meta/metaCache.c | 96 ++++++++++++++----------- 1 file changed, 55 insertions(+), 41 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index f8361c5d0c..3767d5fdb3 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -424,6 +424,50 @@ int32_t metaStatsCacheGet(SMeta* pMeta, int64_t uid, SMetaStbStats* pInfo) { return code; } +static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInvalidRes, int32_t keyLen, SLRUCache* pCache, uint64_t suid) { + SListIter iter = {0}; + tdListInitIter((SList*)&(pEntry->list), &iter, TD_LIST_FORWARD); + + SListNode* pNode = NULL; + uint64_t buf[3]; + buf[0] = suid; + + int32_t len = sizeof(uint64_t) * tListLen(buf); + + while ((pNode = tdListNext(&iter)) != NULL) { + memcpy(&buf[1], pNode->data, keyLen); + + // check whether it is existed in LRU cache, and remove it from linked list if not. + LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len); + if (pRes == NULL) { // remove the item in the linked list + taosArrayPush(pInvalidRes, &pNode); + } else { + taosLRUCacheRelease(pCache, pRes, false); + } + } + + return 0; +} + +#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 100 || (_acc_times) > 5000) + +static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry) { + if (pInvalidRes == NULL) { + return; + } + + // remove the keys, of which query uid lists have been replaced already. 
+ size_t s = taosArrayGetSize(pInvalidRes); + for (int32_t i = 0; i < s; ++i) { + SListNode** p1 = taosArrayGet(pInvalidRes, i); + tdListPopNode(&(pEntry->list), *p1); + taosMemoryFree(*p1); + } + + atomic_store_32(&(pEntry->qTimes), 0); // reset the query times + taosArrayDestroy(pInvalidRes); +} + int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1, bool* acquireRes) { // generate the composed key for LRU cache @@ -465,38 +509,13 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK taosThreadMutexUnlock(pLock); // check if scanning all items are necessary or not - if (times >= 5000 && TD_DLIST_NELES(&(*pEntry)->list) > 100) { + if (NEED_CHECK_CACHE_ITEM(listNEles(&(*pEntry)->list), times)) { taosThreadMutexLock(pLock); SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES); + checkAllEntriesInCache(*pEntry, pInvalidRes, keyLen, pCache, suid); - SListIter iter = {0}; - tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); - - SListNode* pNode = NULL; - while ((pNode = tdListNext(&iter)) != NULL) { - memcpy(&buf[1], pNode->data, keyLen); - - // check whether it is existed in LRU cache, and remove it from linked list if not. - LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len); - if (pRes == NULL) { // remove the item in the linked list - taosArrayPush(pInvalidRes, &pNode); - } else { - taosLRUCacheRelease(pCache, pRes, false); - } - } - - // remove the keys, of which query uid lists have been replaced already. - size_t s = taosArrayGetSize(pInvalidRes); - for (int32_t i = 0; i < s; ++i) { - SListNode** p1 = taosArrayGet(pInvalidRes, i); - tdListPopNode(&(*pEntry)->list, *p1); - taosMemoryFree(*p1); - } - - atomic_store_32(&(*pEntry)->qTimes, 0); // reset the query times - taosArrayDestroy(pInvalidRes); - + removeInvalidCacheItem(pInvalidRes, *pEntry); // remove the keys, of which query uid lists have been replaced already. taosThreadMutexUnlock(pLock); } @@ -568,8 +587,8 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int SArray* pInvalidRes = NULL; uint64_t keyBuf[3]; - if (size >= 100 || times > 5000) { - // if the threshold value is reached, need to check the value. + // if the threshold value is reached, need to check the value. + if (NEED_CHECK_CACHE_ITEM(size, times)) { checkCacheEntry = true; keyBuf[0] = suid; pInvalidRes = taosArrayInit(64, POINTER_BYTES); @@ -585,18 +604,16 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int // key already exists in cache, quit if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { // do remove invalid entry in hash - size_t s = taosArrayGetSize(pInvalidRes); - for (int32_t i = 0; i < s; ++i) { - SListNode** p1 = taosArrayGet(pInvalidRes, i); - tdListPopNode(&(*pEntry)->list, *p1); - taosMemoryFree(*p1); + if (pInvalidRes != NULL) { + removeInvalidCacheItem(pInvalidRes, *pEntry); } taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } - // check whether it is existed in LRU cache, and remove it from linked list if not. + // check whether it is existed in LRU cache, and remove it from linked list if not + // we record every invalid items and remove when the loop is over. 
if (checkCacheEntry) { keyBuf[1] = p[1]; keyBuf[2] = p[2]; @@ -611,11 +628,8 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int } // do remove invalid entry in hash - size_t s = taosArrayGetSize(pInvalidRes); - for (int32_t i = 0; i < s; ++i) { - SListNode** p1 = taosArrayGet(pInvalidRes, i); - tdListPopNode(&(*pEntry)->list, *p1); - taosMemoryFree(*p1); + if (pInvalidRes != NULL) { + removeInvalidCacheItem(pInvalidRes, *pEntry); } tdListAppend(&(*pEntry)->list, pKey); From 8bed2612a6ac27078e4676da556b768411389e4f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Jan 2023 23:24:41 +0800 Subject: [PATCH 046/267] refactor: do some internal refactor. --- source/dnode/vnode/src/meta/metaCache.c | 41 ++++++++++++++++--------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 3767d5fdb3..1ef112700d 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -464,7 +464,7 @@ static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntr taosMemoryFree(*p1); } - atomic_store_32(&(pEntry->qTimes), 0); // reset the query times + pEntry->qTimes = 0; // reset the query times taosArrayDestroy(pInvalidRes); } @@ -476,7 +476,6 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; uint64_t buf[3]; - uint32_t times = 0; *acquireRes = 0; buf[0] = suid; @@ -502,14 +501,14 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK // set the result into the buffer taosArrayAddBatch(pList1, p + sizeof(int32_t), size); - times = atomic_add_fetch_32(&(*pEntry)->qTimes, 1); + (*pEntry)->qTimes += 1; taosLRUCacheRelease(pCache, pHandle, false); // unlock meta taosThreadMutexUnlock(pLock); // check if scanning all items are necessary or not - if (NEED_CHECK_CACHE_ITEM(listNEles(&(*pEntry)->list), times)) { + if (NEED_CHECK_CACHE_ITEM(listNEles(&(*pEntry)->list), (*pEntry)->qTimes)) { taosThreadMutexLock(pLock); SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES); @@ -529,6 +528,19 @@ static void freePayload(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); } +static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyLen, uint64_t suid) { + STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry)); + if (p == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + p->qTimes = 1; + tdListInit(&p->list, keyLen); + taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES); + tdListAppend(&p->list, pKey); + return 0; +} + // check both the payload size and selectivity ratio int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen, double selectivityRatio) { @@ -556,19 +568,19 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int buf[0] = suid; memcpy(&buf[1], pKey, keyLen); ASSERT(sizeof(uint64_t) + keyLen == 24); + int32_t code = 0; taosThreadMutexLock(pLock); STagFilterResEntry** pEntry = taosHashGet(pTableEntry, &suid, sizeof(uint64_t)); if (pEntry == NULL) { - STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry)); - p->qTimes = 0; - tdListInit(&p->list, keyLen); - taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES); - tdListAppend(&p->list, pKey); + code = addNewEntry(pTableEntry, pKey, keyLen, suid); + if (code 
!= TSDB_CODE_SUCCESS) { + goto _end; + } } else { // check if it exists or not - int32_t times = atomic_add_fetch_32(&(*pEntry)->qTimes, 1); + (*pEntry)->qTimes += 1; size_t size = listNEles(&(*pEntry)->list); if (size == 0) { @@ -577,6 +589,7 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int SListNode* pNode = listHead(&(*pEntry)->list); uint64_t* p = (uint64_t*) pNode->data; if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { + // we have already found the existed items, no need to added to cache anymore. taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; } else { // not equal, append it @@ -588,7 +601,7 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int uint64_t keyBuf[3]; // if the threshold value is reached, need to check the value. - if (NEED_CHECK_CACHE_ITEM(size, times)) { + if (NEED_CHECK_CACHE_ITEM(size, (*pEntry)->qTimes)) { checkCacheEntry = true; keyBuf[0] = suid; pInvalidRes = taosArrayInit(64, POINTER_BYTES); @@ -619,7 +632,7 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int keyBuf[2] = p[2]; LRUHandle* pRes = taosLRUCacheLookup(pCache, keyBuf, 24); - if (pRes == NULL) { // remove the item in the linked list + if (pRes == NULL) { // add the invalid item in the array list to be removed. taosArrayPush(pInvalidRes, &pNode); } else { taosLRUCacheRelease(pCache, pRes, false); @@ -639,13 +652,13 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int // add to cache. taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL, TAOS_LRU_PRIORITY_LOW); - + _end: taosThreadMutexUnlock(pLock); metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid, (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry)); - return TSDB_CODE_SUCCESS; + return code; } // remove the lru cache that are expired due to the tags value update, or creating, or dropping, of child tables From a71f029eec82c502d0e062edaa56b4afda70f8e4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 10:36:05 +0800 Subject: [PATCH 047/267] refactor: do some internal refactor. --- source/dnode/vnode/src/meta/metaCache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 1ef112700d..40abcaf5c3 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -126,7 +126,7 @@ int32_t metaCacheOpen(SMeta* pMeta) { goto _err2; } - pCache->sTagFilterResCache.pUidResCache = taosLRUCacheInit(5 * 1024 * 1024, -1, 0.5); + pCache->sTagFilterResCache.pUidResCache = taosLRUCacheInit(25 * 1024 * 1024, -1, 0.5); if (pCache->sTagFilterResCache.pUidResCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err2; @@ -449,7 +449,7 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv return 0; } -#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 100 || (_acc_times) > 5000) +#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 250 || (_acc_times) > 5000) static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry) { if (pInvalidRes == NULL) { From 28baef10519fbfd65900e18485fefcc36e1a21ad Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 10:40:12 +0800 Subject: [PATCH 048/267] refactor: do some internal refactor. 
--- source/dnode/vnode/src/meta/metaCache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 40abcaf5c3..9dddcbc70a 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -449,7 +449,7 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv return 0; } -#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 250 || (_acc_times) > 5000) +#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 100 || (_acc_times) > 5000) static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry) { if (pInvalidRes == NULL) { From 6bffcfaad82c429ae749670bd6abc44e92516af6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 10:45:07 +0800 Subject: [PATCH 049/267] refactor: do some internal refactor. --- source/dnode/vnode/src/meta/metaCache.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 9dddcbc70a..b6780af9c2 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -457,13 +457,14 @@ static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntr } // remove the keys, of which query uid lists have been replaced already. - size_t s = taosArrayGetSize(pInvalidRes); + int32_t s = taosArrayGetSize(pInvalidRes); for (int32_t i = 0; i < s; ++i) { SListNode** p1 = taosArrayGet(pInvalidRes, i); tdListPopNode(&(pEntry->list), *p1); taosMemoryFree(*p1); } + metaInfo("clear %d items in cache, remain:%d", s, TD_DLIST_NELES(&pEntry->list)); pEntry->qTimes = 0; // reset the query times taosArrayDestroy(pInvalidRes); } From 71ca1d84ef450bd8f9d17c214d537b4a2b518e5c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 10:53:19 +0800 Subject: [PATCH 050/267] refactor: do some internal refactor. --- source/dnode/vnode/src/meta/metaCache.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index b6780af9c2..0a592ad073 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -451,7 +451,7 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv #define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 100 || (_acc_times) > 5000) -static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry) { +static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry, bool triggerByGet) { if (pInvalidRes == NULL) { return; } @@ -464,7 +464,8 @@ static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntr taosMemoryFree(*p1); } - metaInfo("clear %d items in cache, remain:%d", s, TD_DLIST_NELES(&pEntry->list)); + metaInfo("clear %d items in cache, remain:%d, acctime:%d, trigger by get:%d", s, listNEles(&pEntry->list), + pEntry->qTimes, triggerByGet); pEntry->qTimes = 0; // reset the query times taosArrayDestroy(pInvalidRes); } @@ -515,7 +516,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES); checkAllEntriesInCache(*pEntry, pInvalidRes, keyLen, pCache, suid); - removeInvalidCacheItem(pInvalidRes, *pEntry); // remove the keys, of which query uid lists have been replaced already. 
+ removeInvalidCacheItem(pInvalidRes, *pEntry, true); // remove the keys, of which query uid lists have been replaced already. taosThreadMutexUnlock(pLock); } @@ -619,7 +620,7 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { // do remove invalid entry in hash if (pInvalidRes != NULL) { - removeInvalidCacheItem(pInvalidRes, *pEntry); + removeInvalidCacheItem(pInvalidRes, *pEntry, false); } taosThreadMutexUnlock(pLock); @@ -643,7 +644,7 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int // do remove invalid entry in hash if (pInvalidRes != NULL) { - removeInvalidCacheItem(pInvalidRes, *pEntry); + removeInvalidCacheItem(pInvalidRes, *pEntry, false); } tdListAppend(&(*pEntry)->list, pKey); From 5ff84757fe375e5f87c25a436a0379a636ecc66c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 10:53:36 +0800 Subject: [PATCH 051/267] refactor: do some internal refactor. --- source/dnode/vnode/src/meta/metaCache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 0a592ad073..5bc8292227 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -449,7 +449,7 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv return 0; } -#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 100 || (_acc_times) > 5000) +#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 150 || (_acc_times) > 5000) static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry, bool triggerByGet) { if (pInvalidRes == NULL) { From 7dcad6277501da7f8636205dc4c679626c68ed03 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 10:56:47 +0800 Subject: [PATCH 052/267] refactor: do some internal refactor. --- source/dnode/vnode/src/meta/metaCache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 5bc8292227..7934153753 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -449,7 +449,7 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv return 0; } -#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 150 || (_acc_times) > 5000) +#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 300 || (_acc_times) > 5000) static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry, bool triggerByGet) { if (pInvalidRes == NULL) { From bb5fb42c2fd077db0c17540f996e051389b36fb4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 13:48:20 +0800 Subject: [PATCH 053/267] refactor: do some internal refactor. 
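Patches 047 through 052 above only move the NEED_CHECK_CACHE_ITEM thresholds around (a list of 100 to 300 digests, or more than 5000 accumulated lookups) to tune how often the per-super-table digest list gets rescanned for stale keys. Reduced to a hedged sketch with made-up names and numbers, the gating logic is simply:

#include <stdbool.h>
#include <stdint.h>

/* Rescan either when the list grows large or after many lookups, whichever
 * comes first; the exact numbers are a tuning knob, not a hard rule. */
#define DEMO_NEED_CHECK(_size, _accTimes) ((_size) >= 100 || (_accTimes) > 5000)

typedef struct DemoEntry {
  int32_t  numOfKeys;  /* cached digests for one super table */
  uint32_t qTimes;     /* lookups since the last cleanup     */
} DemoEntry;

/* Returns true when a full validity scan of the entry should run now. */
static bool demoShouldScan(DemoEntry* pEntry) {
  pEntry->qTimes += 1;
  if (DEMO_NEED_CHECK(pEntry->numOfKeys, pEntry->qTimes)) {
    pEntry->qTimes = 0;  /* reset so the access-count trigger stays periodic */
    return true;
  }
  return false;
}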
--- source/dnode/vnode/src/meta/metaCache.c | 166 ++++++++++++++---------- 1 file changed, 96 insertions(+), 70 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 7934153753..30656a77a4 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -32,7 +32,6 @@ typedef struct SMetaStbStatsEntry { } SMetaStbStatsEntry; typedef struct STagFilterResEntry { -// uint64_t suid; // uid for super table SList list; // the linked list of md5 digest, extracted from the serialized tag query condition uint32_t qTimes; // queried times for current super table } STagFilterResEntry; @@ -126,7 +125,7 @@ int32_t metaCacheOpen(SMeta* pMeta) { goto _err2; } - pCache->sTagFilterResCache.pUidResCache = taosLRUCacheInit(25 * 1024 * 1024, -1, 0.5); + pCache->sTagFilterResCache.pUidResCache = taosLRUCacheInit(5 * 1024 * 1024, -1, 0.5); if (pCache->sTagFilterResCache.pUidResCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err2; @@ -477,15 +476,17 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK SHashObj* pTableMap = pMeta->pCache->sTagFilterResCache.pTableEntry; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; - uint64_t buf[3]; + uint64_t buf[4]; *acquireRes = 0; - buf[0] = suid; - memcpy(&buf[1], pKey, keyLen); + + buf[0] = (uint64_t) pTableMap; + buf[1] = suid; + memcpy(&buf[2], pKey, keyLen); taosThreadMutexLock(pLock); - int32_t len = keyLen + sizeof(uint64_t); + int32_t len = keyLen + sizeof(uint64_t) * 2; LRUHandle* pHandle = taosLRUCacheLookup(pCache, buf, len); if (pHandle == NULL) { taosThreadMutexUnlock(pLock); @@ -508,18 +509,6 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK // unlock meta taosThreadMutexUnlock(pLock); - - // check if scanning all items are necessary or not - if (NEED_CHECK_CACHE_ITEM(listNEles(&(*pEntry)->list), (*pEntry)->qTimes)) { - taosThreadMutexLock(pLock); - - SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES); - checkAllEntriesInCache(*pEntry, pInvalidRes, keyLen, pCache, suid); - - removeInvalidCacheItem(pInvalidRes, *pEntry, true); // remove the keys, of which query uid lists have been replaced already. 
- taosThreadMutexUnlock(pLock); - } - return TSDB_CODE_SUCCESS; } @@ -527,6 +516,36 @@ static void freePayload(const void* key, size_t keyLen, void* value) { if (value == NULL) { return; } + + const uint64_t* p = key; + if (keyLen != sizeof(int64_t) * 4) { + metaError("key length is invalid, length:%d, expect:%d", (int32_t) keyLen, (int32_t) sizeof(uint64_t)*2); + return; + } + + SHashObj* pHashObj = (SHashObj*)p[0]; + STagFilterResEntry** pEntry = taosHashGet(pHashObj, &p[1], sizeof(uint64_t)); + + { + int64_t st = taosGetTimestampUs(); + + SListIter iter = {0}; + tdListInitIter((SList*)&((*pEntry)->list), &iter, TD_LIST_FORWARD); + + SListNode* pNode = NULL; + while ((pNode = tdListNext(&iter)) != NULL) { + uint64_t* digest = (uint64_t*)pNode->data; + if (digest[0] == p[2] && digest[1] == p[3]) { + tdListPopNode(&((*pEntry)->list), pNode); + + int64_t et = taosGetTimestampUs(); + metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms, acc count:%d", listNEles(&((*pEntry)->list)), + (et - st)/1000.0, (*pEntry)->qTimes); + return; + } + } + } + taosMemoryFree(value); } @@ -566,12 +585,16 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int SHashObj* pTableEntry = pMeta->pCache->sTagFilterResCache.pTableEntry; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; - uint64_t buf[3] = {0}; - buf[0] = suid; - memcpy(&buf[1], pKey, keyLen); - ASSERT(sizeof(uint64_t) + keyLen == 24); - int32_t code = 0; + // the format of key: + // hash table address(8bytes) + suid(8bytes) + MD5 digest(16bytes) + uint64_t buf[4] = {0}; + buf[0] = (uint64_t) pTableEntry; + buf[1] = suid; + memcpy(&buf[2], pKey, keyLen); + ASSERT(keyLen == 16); + + int32_t code = 0; taosThreadMutexLock(pLock); STagFilterResEntry** pEntry = taosHashGet(pTableEntry, &suid, sizeof(uint64_t)); @@ -587,72 +610,73 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int size_t size = listNEles(&(*pEntry)->list); if (size == 0) { tdListAppend(&(*pEntry)->list, pKey); - } else if (size == 1) { + } else { SListNode* pNode = listHead(&(*pEntry)->list); - uint64_t* p = (uint64_t*) pNode->data; + uint64_t* p = (uint64_t*)pNode->data; if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { // we have already found the existed items, no need to added to cache anymore. taosThreadMutexUnlock(pLock); return TSDB_CODE_SUCCESS; - } else { // not equal, append it + } else { // not equal, append it tdListAppend(&(*pEntry)->list, pKey); } - } else { // more than one element - bool checkCacheEntry = false; - SArray* pInvalidRes = NULL; - uint64_t keyBuf[3]; + /*} else { // more than one element + bool checkCacheEntry = false; + SArray* pInvalidRes = NULL; + uint64_t keyBuf[3]; - // if the threshold value is reached, need to check the value. - if (NEED_CHECK_CACHE_ITEM(size, (*pEntry)->qTimes)) { - checkCacheEntry = true; - keyBuf[0] = suid; - pInvalidRes = taosArrayInit(64, POINTER_BYTES); - } + // if the threshold value is reached, need to check the value. 
+ // if (NEED_CHECK_CACHE_ITEM(size, (*pEntry)->qTimes)) { + // checkCacheEntry = true; + // keyBuf[0] = suid; + // pInvalidRes = taosArrayInit(64, POINTER_BYTES); + // } - SListIter iter = {0}; - tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); + SListIter iter = {0}; + tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); - SListNode* pNode = NULL; - while ((pNode = tdListNext(&iter)) != NULL) { - uint64_t* p = (uint64_t*)pNode->data; + SListNode* pNode = NULL; + while ((pNode = tdListNext(&iter)) != NULL) { + uint64_t* p = (uint64_t*)pNode->data; - // key already exists in cache, quit - if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { - // do remove invalid entry in hash - if (pInvalidRes != NULL) { - removeInvalidCacheItem(pInvalidRes, *pEntry, false); + // key already exists in cache, quit + if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { + // do remove invalid entry in hash + if (pInvalidRes != NULL) { + removeInvalidCacheItem(pInvalidRes, *pEntry, false); + } + + taosThreadMutexUnlock(pLock); + return TSDB_CODE_SUCCESS; } - taosThreadMutexUnlock(pLock); - return TSDB_CODE_SUCCESS; - } + // check whether it is existed in LRU cache, and remove it from linked list if not + // we record every invalid items and remove when the loop is over. + if (checkCacheEntry) { + keyBuf[1] = p[1]; + keyBuf[2] = p[2]; - // check whether it is existed in LRU cache, and remove it from linked list if not - // we record every invalid items and remove when the loop is over. - if (checkCacheEntry) { - keyBuf[1] = p[1]; - keyBuf[2] = p[2]; - - LRUHandle* pRes = taosLRUCacheLookup(pCache, keyBuf, 24); - if (pRes == NULL) { // add the invalid item in the array list to be removed. - taosArrayPush(pInvalidRes, &pNode); - } else { - taosLRUCacheRelease(pCache, pRes, false); + LRUHandle* pRes = taosLRUCacheLookup(pCache, keyBuf, 24); + if (pRes == NULL) { // add the invalid item in the array list to be removed. + taosArrayPush(pInvalidRes, &pNode); + } else { + taosLRUCacheRelease(pCache, pRes, false); + } } } - } - // do remove invalid entry in hash - if (pInvalidRes != NULL) { - removeInvalidCacheItem(pInvalidRes, *pEntry, false); - } + // do remove invalid entry in hash + if (pInvalidRes != NULL) { + removeInvalidCacheItem(pInvalidRes, *pEntry, false); + } - tdListAppend(&(*pEntry)->list, pKey); + tdListAppend(&(*pEntry)->list, pKey); + }*/ } } // add to cache. 
- taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL, + taosLRUCacheInsert(pCache, buf, sizeof(uint64_t)*2 + keyLen, pPayload, payloadLen, freePayload, NULL, TAOS_LRU_PRIORITY_LOW); _end: taosThreadMutexUnlock(pLock); @@ -666,8 +690,10 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int // remove the lru cache that are expired due to the tags value update, or creating, or dropping, of child tables int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) { int32_t keyLen = sizeof(uint64_t) * 3; - uint64_t p[3] = {0}; - p[0] = suid; + uint64_t p[4] = {0}; + + p[0] = (uint64_t) pMeta->pCache->sTagFilterResCache.pTableEntry; + p[1] = suid; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; @@ -683,7 +709,7 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) { SListNode* pNode = NULL; while ((pNode = tdListNext(&iter)) != NULL) { - memcpy(&p[1], pNode->data, 16); + memcpy(&p[2], pNode->data, 16); taosLRUCacheErase(pMeta->pCache->sTagFilterResCache.pUidResCache, p, keyLen); } From 6332c982b50914c83809146f576f9f6c84ca8d7b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 14:16:59 +0800 Subject: [PATCH 054/267] fix(query): fix invalid read --- source/dnode/vnode/src/meta/metaCache.c | 54 +------------------------ 1 file changed, 1 insertion(+), 53 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 30656a77a4..2803dbe6c3 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -158,9 +158,9 @@ void metaCacheClose(SMeta* pMeta) { entryCacheClose(pMeta); statsCacheClose(pMeta); - taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry); taosLRUCacheCleanup(pMeta->pCache->sTagFilterResCache.pUidResCache); taosThreadMutexDestroy(&pMeta->pCache->sTagFilterResCache.lock); + taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry); taosMemoryFree(pMeta->pCache); pMeta->pCache = NULL; @@ -620,58 +620,6 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int } else { // not equal, append it tdListAppend(&(*pEntry)->list, pKey); } - /*} else { // more than one element - bool checkCacheEntry = false; - SArray* pInvalidRes = NULL; - uint64_t keyBuf[3]; - - // if the threshold value is reached, need to check the value. - // if (NEED_CHECK_CACHE_ITEM(size, (*pEntry)->qTimes)) { - // checkCacheEntry = true; - // keyBuf[0] = suid; - // pInvalidRes = taosArrayInit(64, POINTER_BYTES); - // } - - SListIter iter = {0}; - tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); - - SListNode* pNode = NULL; - while ((pNode = tdListNext(&iter)) != NULL) { - uint64_t* p = (uint64_t*)pNode->data; - - // key already exists in cache, quit - if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { - // do remove invalid entry in hash - if (pInvalidRes != NULL) { - removeInvalidCacheItem(pInvalidRes, *pEntry, false); - } - - taosThreadMutexUnlock(pLock); - return TSDB_CODE_SUCCESS; - } - - // check whether it is existed in LRU cache, and remove it from linked list if not - // we record every invalid items and remove when the loop is over. - if (checkCacheEntry) { - keyBuf[1] = p[1]; - keyBuf[2] = p[2]; - - LRUHandle* pRes = taosLRUCacheLookup(pCache, keyBuf, 24); - if (pRes == NULL) { // add the invalid item in the array list to be removed. 
- taosArrayPush(pInvalidRes, &pNode); - } else { - taosLRUCacheRelease(pCache, pRes, false); - } - } - } - - // do remove invalid entry in hash - if (pInvalidRes != NULL) { - removeInvalidCacheItem(pInvalidRes, *pEntry, false); - } - - tdListAppend(&(*pEntry)->list, pKey); - }*/ } } From eea9d4f21db84b34e8b151f5e01ecac1e59275bf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 14:34:15 +0800 Subject: [PATCH 055/267] enh(query): prepare the buffer on the stack, instead of heap. --- source/libs/executor/src/executil.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index cfaeaebe41..7b26df0091 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -481,14 +481,15 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { colDataAppend(pColInfo, i, p, false); } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + char* tmp = alloca(tagVal.nData + VARSTR_HEADER_SIZE + 1); +// char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); varDataSetLen(tmp, tagVal.nData); memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); colDataAppend(pColInfo, i, tmp, false); #if TAG_FILTER_DEBUG qDebug("tagfilter varch:%s", tmp + 2); #endif - taosMemoryFree(tmp); +// taosMemoryFree(tmp); } else { colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); #if TAG_FILTER_DEBUG From cd6532ca7e009ffda6b2a0691b9121f9173951f1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 14:49:07 +0800 Subject: [PATCH 056/267] enh(query): prepare the buffer on the stack, instead of heap. --- source/dnode/vnode/src/meta/metaCache.c | 44 +++++++++---------------- 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 2803dbe6c3..234c8066e1 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -33,7 +33,8 @@ typedef struct SMetaStbStatsEntry { typedef struct STagFilterResEntry { SList list; // the linked list of md5 digest, extracted from the serialized tag query condition - uint32_t qTimes; // queried times for current super table + uint32_t hitTimes; // queried times for current super table + uint32_t accTime; } STagFilterResEntry; struct SMetaCache { @@ -54,6 +55,7 @@ struct SMetaCache { // query cache struct STagFilterResCache { TdThreadMutex lock; + uint32_t accTimes; SHashObj* pTableEntry; SLRUCache* pUidResCache; } sTagFilterResCache; @@ -448,27 +450,6 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv return 0; } -#define NEED_CHECK_CACHE_ITEM(_size, _acc_times) ((_size) >= 300 || (_acc_times) > 5000) - -static void removeInvalidCacheItem(SArray* pInvalidRes, struct STagFilterResEntry* pEntry, bool triggerByGet) { - if (pInvalidRes == NULL) { - return; - } - - // remove the keys, of which query uid lists have been replaced already. 
- int32_t s = taosArrayGetSize(pInvalidRes); - for (int32_t i = 0; i < s; ++i) { - SListNode** p1 = taosArrayGet(pInvalidRes, i); - tdListPopNode(&(pEntry->list), *p1); - taosMemoryFree(*p1); - } - - metaInfo("clear %d items in cache, remain:%d, acctime:%d, trigger by get:%d", s, listNEles(&pEntry->list), - pEntry->qTimes, triggerByGet); - pEntry->qTimes = 0; // reset the query times - taosArrayDestroy(pInvalidRes); -} - int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1, bool* acquireRes) { // generate the composed key for LRU cache @@ -485,6 +466,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK memcpy(&buf[2], pKey, keyLen); taosThreadMutexLock(pLock); + pMeta->pCache->sTagFilterResCache.accTimes += 1; int32_t len = keyLen + sizeof(uint64_t) * 2; LRUHandle* pHandle = taosLRUCacheLookup(pCache, buf, len); @@ -504,7 +486,13 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK // set the result into the buffer taosArrayAddBatch(pList1, p + sizeof(int32_t), size); - (*pEntry)->qTimes += 1; + (*pEntry)->hitTimes += 1; + + int32_t acc = pMeta->pCache->sTagFilterResCache.accTimes; + if ((*pEntry)->hitTimes % 5000 == 8 && (*pEntry)->hitTimes > 0) { + metaInfo("cache hit:%d, total acc:%d, rate:%.2f", (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes)/acc); + } + taosLRUCacheRelease(pCache, pHandle, false); // unlock meta @@ -539,8 +527,8 @@ static void freePayload(const void* key, size_t keyLen, void* value) { tdListPopNode(&((*pEntry)->list), pNode); int64_t et = taosGetTimestampUs(); - metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms, acc count:%d", listNEles(&((*pEntry)->list)), - (et - st)/1000.0, (*pEntry)->qTimes); + metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)), + (et - st)/1000.0); return; } } @@ -555,7 +543,7 @@ static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyL return TSDB_CODE_OUT_OF_MEMORY; } - p->qTimes = 1; + p->hitTimes = 0; tdListInit(&p->list, keyLen); taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES); tdListAppend(&p->list, pKey); @@ -605,8 +593,6 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int } } else { // check if it exists or not - (*pEntry)->qTimes += 1; - size_t size = listNEles(&(*pEntry)->list); if (size == 0) { tdListAppend(&(*pEntry)->list, pKey); @@ -661,7 +647,7 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) { taosLRUCacheErase(pMeta->pCache->sTagFilterResCache.pUidResCache, p, keyLen); } - (*pEntry)->qTimes = 0; + (*pEntry)->hitTimes = 0; tdListEmpty(&(*pEntry)->list); taosThreadMutexUnlock(pLock); From 5386fe7cc4052a2e708d5dd781af5330b005fb3d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 14:57:09 +0800 Subject: [PATCH 057/267] fix(query): init the value. 
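Patch 056 above drops the threshold-driven rescan altogether and keeps only lightweight statistics: each super-table entry counts its cache hits (hitTimes), the whole filter cache counts lookups (accTimes), and the hit rate is logged once every few thousand hits; patches 057 and 058 below then initialize accTimes and correct the sampling condition to hitTimes % 5000 == 0. A standalone sketch of that sampled-statistics idea, with illustrative names:

#include <stdint.h>
#include <stdio.h>

typedef struct DemoCacheStats {
  uint32_t hitTimes;  /* lookups answered from the cache  */
  uint32_t accTimes;  /* all lookup attempts, hit or miss */
} DemoCacheStats;

static void demoOnCacheLookup(DemoCacheStats* pStats) {
  pStats->accTimes += 1;  /* bumped for every lookup attempt */
}

/* Log the hit rate only on every 5000th hit so the hot path stays cheap.
 * Assumes demoOnCacheLookup ran first, so accTimes >= hitTimes and the
 * division below is safe. */
static void demoOnCacheHit(DemoCacheStats* pStats) {
  pStats->hitTimes += 1;
  if (pStats->hitTimes > 0 && pStats->hitTimes % 5000 == 0) {
    printf("cache hit:%u, total acc:%u, rate:%.2f\n", pStats->hitTimes,
           pStats->accTimes, (double)pStats->hitTimes / pStats->accTimes);
  }
}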
--- source/dnode/vnode/src/meta/metaCache.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 234c8066e1..b2d41b9b34 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -133,6 +133,7 @@ int32_t metaCacheOpen(SMeta* pMeta) { goto _err2; } + pCache->sTagFilterResCache.accTimes = 0; pCache->sTagFilterResCache.pTableEntry = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK); if (pCache->sTagFilterResCache.pTableEntry == NULL) { @@ -488,7 +489,7 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK (*pEntry)->hitTimes += 1; - int32_t acc = pMeta->pCache->sTagFilterResCache.accTimes; + uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes; if ((*pEntry)->hitTimes % 5000 == 8 && (*pEntry)->hitTimes > 0) { metaInfo("cache hit:%d, total acc:%d, rate:%.2f", (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes)/acc); } From 7aee35c457f0f1c599ab5fbcda274bce68b379f5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Jan 2023 15:54:23 +0800 Subject: [PATCH 058/267] fix(query): init the value. --- source/dnode/vnode/src/meta/metaCache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index b2d41b9b34..21a1014e87 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -490,8 +490,8 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK (*pEntry)->hitTimes += 1; uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes; - if ((*pEntry)->hitTimes % 5000 == 8 && (*pEntry)->hitTimes > 0) { - metaInfo("cache hit:%d, total acc:%d, rate:%.2f", (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes)/acc); + if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) { + metaInfo("cache hit:%d, total acc:%d, rate:%.2f", (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc); } taosLRUCacheRelease(pCache, pHandle, false); From 82f4db302e8b389d14794269ef5a0dc62a849a6a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 14 Jan 2023 23:17:57 +0800 Subject: [PATCH 059/267] refactor: do some internal refactor. 
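The metaQuery.c hunk in the next patch sizes its temporary uid lookup map as len / 0.7, that is the expected element count divided by the target load factor, so the map does not need to rehash while it is being filled. The same sizing rule in a generic, hedged form (standalone code, not the TDengine hash API); real hash tables usually round the result up further, for example to a power of two:

#include <stddef.h>

/* Pick an initial bucket count so that inserting `expected` items keeps the
 * table below the given load factor, avoiding rehashes during the fill. */
static size_t demoInitialCapacity(size_t expected, double loadFactor) {
  if (expected == 0) {
    return 16;  /* small default when the input count is unknown or zero */
  }
  return (size_t)((double)expected / loadFactor) + 1;
}

/* e.g. demoInitialCapacity(1000, 0.7) == 1429: room for 1000 entries at a
 * 0.7 load factor before any resize becomes necessary. */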
--- source/dnode/vnode/src/meta/metaQuery.c | 26 ++++++----- source/libs/executor/src/executil.c | 59 ++++++++++++++----------- 2 files changed, 48 insertions(+), 37 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index cfdb4ab8d1..8e932e0c73 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1368,7 +1368,7 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHas taosHashPut(tags, id, sizeof(tb_uid_t), val, len); tdbFree(val); } else { - metaError("vgId:%d, failed to table IDs, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid, + metaError("vgId:%d, failed to table tags, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid, *id); } } @@ -1381,31 +1381,35 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHas int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) { SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid, 1); - SHashObj *uHash = NULL; - size_t len = taosArrayGetSize(uidList); // len > 0 means there already have uids + // If len > 0 means there already have uids, and we only want the + // tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept + // in the hash map, that may require a lot of memory + SHashObj *pSepecifiedUidMap = NULL; + size_t len = taosArrayGetSize(uidList); if (len > 0) { - uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + pSepecifiedUidMap = taosHashInit(len / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); for (int i = 0; i < len; i++) { int64_t *uid = taosArrayGet(uidList, i); - taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i)); + taosHashPut(pSepecifiedUidMap, uid, sizeof(int64_t), 0, 0); } } + while (1) { - tb_uid_t id = metaCtbCursorNext(pCur); - if (id == 0) { + tb_uid_t uid = metaCtbCursorNext(pCur); + if (uid == 0) { break; } - if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) { + if (len > 0 && taosHashGet(pSepecifiedUidMap, &uid, sizeof(int64_t)) == NULL) { continue; } else if (len == 0) { - taosArrayPush(uidList, &id); + taosArrayPush(uidList, &uid); } - taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen); + taosHashPut(tags, &uid, sizeof(uint64_t), pCur->pVal, pCur->vLen); } - taosHashCleanup(uHash); + taosHashCleanup(pSepecifiedUidMap); metaCloseCtbCursor(pCur, 1); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 7b26df0091..900f64ba5b 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -43,9 +43,9 @@ typedef struct tagFilterAssist { SArray* cInfoList; } tagFilterAssist; -static int32_t removeInvalidTable(SArray* uids, SHashObj* tags); +static int32_t removeInvalidUid(SArray* uids, SHashObj* tags); static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SHashObj* tags); -static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond); +static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* pExistedUidList, SNode* pTagCond); static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo); @@ -433,6 +433,7 @@ static SColumnInfoData* getColInfoResult(void* 
metaHandle, int64_t suid, SArray* int32_t filter = optimizeTbnameInCond(metaHandle, suid, uidList, pTagCond, tags); if (filter == -1) { + // here we retrieve all tags from the vnode table-meta store code = metaGetTableTags(metaHandle, suid, uidList, tags); if (code != TSDB_CODE_SUCCESS) { qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); @@ -440,22 +441,23 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* goto end; } } + if (suid != 0) { - removeInvalidTable(uidList, tags); + removeInvalidUid(uidList, tags); } - int32_t rows = taosArrayGetSize(uidList); - if (rows == 0) { + int32_t size = taosArrayGetSize(uidList); + if (size == 0) { goto end; } - code = blockDataEnsureCapacity(pResBlock, rows); + code = blockDataEnsureCapacity(pResBlock, size); if (code != TSDB_CODE_SUCCESS) { terrno = code; goto end; } - for (int32_t i = 0; i < rows; i++) { + for (int32_t i = 0; i < size; i++) { int64_t* uid = taosArrayGet(uidList, i); for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); @@ -468,13 +470,14 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); #endif } else { - void* tag = taosHashGet(tags, uid, sizeof(int64_t)); - if (tag == NULL) { + void* pTagsVal = taosHashGet(tags, uid, sizeof(uint64_t)); + if (pTagsVal == NULL) { continue; } + STagVal tagVal = {0}; tagVal.cid = pColInfo->info.colId; - const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); + const char* p = metaGetTableTagVal(pTagsVal, pColInfo->info.type, &tagVal); if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { colDataAppend(pColInfo, i, p, true); @@ -504,7 +507,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* } } - pResBlock->info.rows = rows; + pResBlock->info.rows = size; // int64_t st1 = taosGetTimestampUs(); // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); @@ -513,7 +516,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* taosArrayPush(pBlockList, &pResBlock); SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; - code = createResultData(&type, rows, &output); + code = createResultData(&type, size, &output); if (code != TSDB_CODE_SUCCESS) { terrno = code; qError("failed to create result, reason:%s", tstrerror(code)); @@ -850,7 +853,7 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list ret = optimizeTbnameInCondImpl(metaHandle, suid, list, cond); if (ret != -1) { metaGetTableTagsByUids(metaHandle, suid, list, tags); - removeInvalidTable(list, tags); + removeInvalidUid(list, tags); } } @@ -882,7 +885,7 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list if (hasTbnameCond) { ret = metaGetTableTagsByUids(metaHandle, suid, list, tags); - removeInvalidTable(list, tags); + removeInvalidUid(list, tags); } return ret; @@ -891,12 +894,15 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list /* * handle invalid uid */ -static int32_t removeInvalidTable(SArray* uids, SHashObj* tags) { - if (taosArrayGetSize(uids) <= 0) return 0; +static int32_t removeInvalidUid(SArray* uids, SHashObj* tags) { + int32_t size = taosArrayGetSize(uids); + if (size <= 0) { + return 0; + } - SArray* validUid = 
taosArrayInit(taosArrayGetSize(uids), sizeof(int64_t)); + SArray* validUid = taosArrayInit(size, sizeof(int64_t)); - for (int32_t i = 0; i < taosArrayGetSize(uids); i++) { + for (int32_t i = 0; i < size; i++) { int64_t* uid = taosArrayGet(uids, i); if (taosHashGet(tags, uid, sizeof(int64_t)) != NULL) { taosArrayPush(validUid, uid); @@ -908,7 +914,8 @@ static int32_t removeInvalidTable(SArray* uids, SHashObj* tags) { return 0; } -static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond) { +// only return uid that does not contained in pExistedUidList +static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* pExistedUidList, SNode* pTagCond) { if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) { return -1; } @@ -931,11 +938,11 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* SArray* pTbList = getTableNameList(pList); int32_t numOfTables = taosArrayGetSize(pTbList); SHashObj* uHash = NULL; - size_t listlen = taosArrayGetSize(list); // len > 0 means there already have uids - if (listlen > 0) { - uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - for (int i = 0; i < listlen; i++) { - int64_t* uid = taosArrayGet(list, i); + size_t numOfExisted = taosArrayGetSize(pExistedUidList); // len > 0 means there already have uids + if (numOfExisted > 0) { + uHash = taosHashInit(numOfExisted / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + for (int i = 0; i < numOfExisted; i++) { + int64_t* uid = taosArrayGet(pExistedUidList, i); taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i)); } } @@ -948,7 +955,7 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* ETableType tbType = TSDB_TABLE_MAX; if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) { if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) { - taosArrayPush(list, &uid); + taosArrayPush(pExistedUidList, &uid); } } else { taosArrayDestroy(pTbList); @@ -1057,7 +1064,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, goto _end; } - if (!pTagCond) { // no tag condition exists, let's fetch all tables of this super table + if (!pTagCond) { // no tag filter condition exists, let's fetch all tables of this super table ASSERT(pTagIndexCond == NULL); vnodeGetCtbIdList(pVnode, pScanNode->suid, res); } else { From 138d1d26f01fd9bc2e5da9dab702adf8ce8bdfc8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 16 Jan 2023 17:55:25 +0800 Subject: [PATCH 060/267] refactor: opt hash perf. 
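The tsimplehash.c changes in the following patch stop allocating each hash node with its own malloc: nodes are carved out of 1 KB pages collected in an array, and only the write offset into the newest page is tracked. A minimal bump-allocator sketch of that technique; it is deliberately simplified (no alignment handling) and, like the patch, it only ever frees pages in bulk:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PAGE_SIZE 1024

typedef struct DemoArena {
  char**  pages;     /* every page ever handed out; freed together */
  int32_t numPages;
  int32_t offset;    /* next free byte inside the newest page      */
} DemoArena;

/* Serve `size` bytes from the newest page; open a new page when the request
 * does not fit, and give oversized requests a dedicated page of their own. */
static void* demoArenaAlloc(DemoArena* a, int32_t size) {
  if (a->numPages == 0 || a->offset + size > DEMO_PAGE_SIZE) {
    int32_t pageSize = size > DEMO_PAGE_SIZE ? size : DEMO_PAGE_SIZE;
    char*   page = malloc(pageSize);
    if (page == NULL) return NULL;

    char** tmp = realloc(a->pages, sizeof(char*) * (a->numPages + 1));
    if (tmp == NULL) { free(page); return NULL; }

    a->pages = tmp;
    a->pages[a->numPages++] = page;
    /* For an oversized page, offset ends up past DEMO_PAGE_SIZE, so the
     * very next allocation opens a fresh page rather than reusing it. */
    a->offset = size;
    return page;
  }

  void* p = a->pages[a->numPages - 1] + a->offset;
  a->offset += size;
  return p;
}

/* Tearing the arena down is just freeing the pages: no per-node free(). */
static void demoArenaDestroy(DemoArena* a) {
  for (int32_t i = 0; i < a->numPages; ++i) free(a->pages[i]);
  free(a->pages);
  memset(a, 0, sizeof(*a));
}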
--- source/libs/executor/src/tsimplehash.c | 64 +++++++++++++++++++++----- source/util/src/tarray.c | 8 +++- 2 files changed, 59 insertions(+), 13 deletions(-) diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c index fd6215e3a1..a995c257cf 100644 --- a/source/libs/executor/src/tsimplehash.c +++ b/source/libs/executor/src/tsimplehash.c @@ -18,12 +18,13 @@ #include "tlog.h" #include "tdef.h" +#define DEFAULT_BUF_PAGE_SIZE 1024 #define SHASH_DEFAULT_LOAD_FACTOR 0.75 #define HASH_MAX_CAPACITY (1024 * 1024 * 16L) #define SHASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * SHASH_DEFAULT_LOAD_FACTOR) -#define GET_SHASH_NODE_KEY(_n, _dl) ((char *)(_n) + sizeof(SHNode) + (_dl)) -#define GET_SHASH_NODE_DATA(_n) ((char *)(_n) + sizeof(SHNode)) +#define GET_SHASH_NODE_DATA(_n) (((SHNode*)_n)->data) +#define GET_SHASH_NODE_KEY(_n, _dl) ((char*)GET_SHASH_NODE_DATA(_n) + (_dl)) #define HASH_INDEX(v, c) ((v) & ((c)-1)) @@ -38,6 +39,8 @@ struct SSHashObj { int64_t size; // number of elements in hash table _hash_fn_t hashFp; // hash function _equal_fn_t equalFp; // equal function + SArray* pHashNodeBuf;// hash node allocation buffer, 1k size of each page by default + int32_t offset; // allocation offset in current page }; static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { @@ -57,18 +60,21 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) { capacity = 4; } - SSHashObj *pHashObj = (SSHashObj *)taosMemoryCalloc(1, sizeof(SSHashObj)); + SSHashObj *pHashObj = (SSHashObj *)taosMemoryMalloc(sizeof(SSHashObj)); if (!pHashObj) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } // the max slots is not defined by user - pHashObj->capacity = taosHashCapacity((int32_t)capacity); - - pHashObj->equalFp = memcmp; pHashObj->hashFp = fn; + pHashObj->capacity = taosHashCapacity((int32_t)capacity); + pHashObj->equalFp = memcmp; + pHashObj->pHashNodeBuf = taosArrayInit(10, sizeof(void*)); + pHashObj->offset = 0; + pHashObj->size = 0; + pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *)); if (!pHashObj->hashList) { taosMemoryFree(pHashObj); @@ -85,16 +91,43 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) { return (int32_t)atomic_load_64((int64_t *)&pHashObj->size); } -static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *data, size_t dataLen, uint32_t hashVal) { - SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dataLen); +static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) { + void** p = taosArrayGetLast(pHashObj->pHashNodeBuf); + if (p == NULL || (pHashObj->offset + size) > DEFAULT_BUF_PAGE_SIZE) { + // let's allocate one new page + if (size > DEFAULT_BUF_PAGE_SIZE) { + // TODO + } + + void* pNewPage = taosMemoryMalloc(DEFAULT_BUF_PAGE_SIZE); + if (pNewPage == NULL) { + return NULL; + } + + pHashObj->offset = size; + taosArrayPush(pHashObj->pHashNodeBuf, &pNewPage); + return pNewPage; + } else { + void* pPos = (*p) + pHashObj->offset; + pHashObj->offset += size; + return pPos; + } +} + +static SHNode *doCreateHashNode(SSHashObj* pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen) { + SHNode *pNewNode = doInternalAlloc(pHashObj, sizeof(SHNode) + keyLen + dataLen); if (!pNewNode) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } + pNewNode->keyLen = keyLen; pNewNode->dataLen = dataLen; pNewNode->next = NULL; - if (data) memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen); + if (data) { + memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen); + } + 
memcpy(GET_SHASH_NODE_KEY(pNewNode, dataLen), key, keyLen); return pNewNode; } @@ -179,7 +212,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons SHNode *pNode = pHashObj->hashList[slot]; if (!pNode) { - SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal); + SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen); if (!pNewNode) { return -1; } @@ -197,7 +230,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons } if (!pNode) { - SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal); + SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen); if (!pNewNode) { return -1; } @@ -320,6 +353,7 @@ void tSimpleHashClear(SSHashObj *pHashObj) { return; } + // TODO recycle the allocated buffer. SHNode *pNode = NULL, *pNext = NULL; for (int32_t i = 0; i < pHashObj->capacity; ++i) { pNode = pHashObj->hashList[i]; @@ -329,7 +363,7 @@ void tSimpleHashClear(SSHashObj *pHashObj) { while (pNode) { pNext = pNode->next; - FREE_HASH_NODE(pNode); +// FREE_HASH_NODE(pNode); pNode = pNext; } pHashObj->hashList[i] = NULL; @@ -337,6 +371,10 @@ void tSimpleHashClear(SSHashObj *pHashObj) { atomic_store_64(&pHashObj->size, 0); } +static void destroyItems(void* pItem) { + taosMemoryFree(*(void**)pItem); +} + void tSimpleHashCleanup(SSHashObj *pHashObj) { if (!pHashObj) { return; @@ -344,6 +382,8 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) { tSimpleHashClear(pHashObj); taosMemoryFreeClear(pHashObj->hashList); + + taosArrayDestroyEx(pHashObj->pHashNodeBuf, destroyItems); taosMemoryFree(pHashObj); } diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 4bd8294423..65a91663a3 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -220,7 +220,13 @@ void* taosArrayGetP(const SArray* pArray, size_t index) { return *(void**)d; } -void* taosArrayGetLast(const SArray* pArray) { return TARRAY_GET_ELEM(pArray, pArray->size - 1); } +void* taosArrayGetLast(const SArray* pArray) { + if (pArray->size == 0) { + return NULL; + } + + return TARRAY_GET_ELEM(pArray, pArray->size - 1); +} size_t taosArrayGetSize(const SArray* pArray) { if (pArray == NULL) { From abc8c0fda135641c7b1092c8c0343c33b8a05922 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 16 Jan 2023 18:40:44 +0800 Subject: [PATCH 061/267] refactor: do some internal refactor. 
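Patch 061, which follows, leans on that page-backed node storage when clearing the table: instead of walking every bucket chain and freeing nodes one by one, it zeroes the bucket heads, releases the pages, and resets the size and offset. A rough self-contained sketch of such a clear path; the field names are illustrative:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct DemoHash {
  void**  buckets;    /* bucket index -> first node of the chain    */
  int32_t capacity;   /* number of buckets                          */
  int64_t size;       /* number of stored entries                   */
  char**  pages;      /* node storage pages from the bump allocator */
  int32_t numPages;
  int32_t offset;     /* write offset inside the newest page        */
} DemoHash;

/* With arena-backed nodes, clearing never touches the chains: drop the
 * pages, zero the bucket heads, and reset the counters. */
static void demoHashClear(DemoHash* h) {
  memset(h->buckets, 0, sizeof(void*) * h->capacity);
  for (int32_t i = 0; i < h->numPages; ++i) {
    free(h->pages[i]);
  }
  h->numPages = 0;
  h->offset   = 0;
  h->size     = 0;
}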
--- source/libs/executor/src/tsimplehash.c | 55 ++++++++++---------------- 1 file changed, 20 insertions(+), 35 deletions(-) diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c index a995c257cf..42938b848d 100644 --- a/source/libs/executor/src/tsimplehash.c +++ b/source/libs/executor/src/tsimplehash.c @@ -88,22 +88,22 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) { if (!pHashObj) { return 0; } - return (int32_t)atomic_load_64((int64_t *)&pHashObj->size); + return (int32_t) pHashObj->size; } static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) { void** p = taosArrayGetLast(pHashObj->pHashNodeBuf); if (p == NULL || (pHashObj->offset + size) > DEFAULT_BUF_PAGE_SIZE) { // let's allocate one new page - if (size > DEFAULT_BUF_PAGE_SIZE) { - // TODO - } - - void* pNewPage = taosMemoryMalloc(DEFAULT_BUF_PAGE_SIZE); + int32_t allocSize = TMAX(size, DEFAULT_BUF_PAGE_SIZE); + void* pNewPage = taosMemoryMalloc(allocSize); if (pNewPage == NULL) { return NULL; } + // if the allocate the buffer page is greater than the DFFAULT_BUF_PAGE_SIZE, + // pHashObj->offset will always be greater than DEFAULT_BUF_PAGE_SIZE, which means that + // current buffer page is full. And a new buffer page needs to be allocated. pHashObj->offset = size; taosArrayPush(pHashObj->pHashNodeBuf, &pNewPage); return pNewPage; @@ -144,7 +144,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) { return; } - int64_t st = taosGetTimestampUs(); +// int64_t st = taosGetTimestampUs(); void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, POINTER_BYTES * newCapacity); if (!pNewEntryList) { uWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity); @@ -189,8 +189,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) { } } - int64_t et = taosGetTimestampUs(); - +// int64_t et = taosGetTimestampUs(); // uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", // (int32_t)pHashObj->capacity, // ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); @@ -218,7 +217,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons } pHashObj->hashList[slot] = pNewNode; - atomic_add_fetch_64(&pHashObj->size, 1); + pHashObj->size += 1; return 0; } @@ -236,7 +235,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons } pNewNode->next = pHashObj->hashList[slot]; pHashObj->hashList[slot] = pNewNode; - atomic_add_fetch_64(&pHashObj->size, 1); + pHashObj->size += 1; } else if (data) { // update data memcpy(GET_SHASH_NODE_DATA(pNode), data, dataLen); } @@ -303,7 +302,7 @@ int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) { pPrev->next = pNode->next; } FREE_HASH_NODE(pNode); - atomic_sub_fetch_64(&pHashObj->size, 1); + pHashObj->size -= 1; code = TSDB_CODE_SUCCESS; break; } @@ -338,7 +337,7 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke } FREE_HASH_NODE(pNode); - atomic_sub_fetch_64(&pHashObj->size, 1); + pHashObj->size -= 1; break; } pPrev = pNode; @@ -348,31 +347,19 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke return TSDB_CODE_SUCCESS; } +static void destroyItems(void* pItem) { + taosMemoryFree(*(void**)pItem); +} + void tSimpleHashClear(SSHashObj *pHashObj) { if (!pHashObj || taosHashTableEmpty(pHashObj)) { return; } - // TODO recycle the allocated buffer. 
- SHNode *pNode = NULL, *pNext = NULL; - for (int32_t i = 0; i < pHashObj->capacity; ++i) { - pNode = pHashObj->hashList[i]; - if (!pNode) { - continue; - } - - while (pNode) { - pNext = pNode->next; -// FREE_HASH_NODE(pNode); - pNode = pNext; - } - pHashObj->hashList[i] = NULL; - } - atomic_store_64(&pHashObj->size, 0); -} - -static void destroyItems(void* pItem) { - taosMemoryFree(*(void**)pItem); + memset(pHashObj->hashList, 0, pHashObj->capacity * sizeof(void*)); + taosArrayClearEx(pHashObj->pHashNodeBuf, destroyItems); + pHashObj->offset = 0; + pHashObj->size = 0; } void tSimpleHashCleanup(SSHashObj *pHashObj) { @@ -382,8 +369,6 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) { tSimpleHashClear(pHashObj); taosMemoryFreeClear(pHashObj->hashList); - - taosArrayDestroyEx(pHashObj->pHashNodeBuf, destroyItems); taosMemoryFree(pHashObj); } From f69e8509c3439bb3ef83cce165176845e39a73a8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 16 Jan 2023 19:17:37 +0800 Subject: [PATCH 062/267] fix(query): disable some warnings. --- source/libs/executor/src/executil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 900f64ba5b..015affd6c7 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -963,7 +963,7 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* return -1; } } else { - qWarn("failed to get tableIds from by table name: %s, reason: %s", name, tstrerror(terrno)); +// qWarn("failed to get tableIds from by table name: %s, reason: %s", name, tstrerror(terrno)); terrno = 0; } } From 010452e5698d8996fbd429e5ab1a3ea80c2f147c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 16 Jan 2023 19:19:49 +0800 Subject: [PATCH 063/267] refactor: do some internal refactor. --- source/util/src/tarray.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 65a91663a3..67c7b4af17 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -33,7 +33,7 @@ SArray* taosArrayInit(size_t size, size_t elemSize) { } pArray->size = 0; - pArray->pData = taosMemoryCalloc(size, elemSize); + pArray->pData = taosMemoryMalloc(size, elemSize); if (pArray->pData == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(pArray); From cea5e9b1c0a6eae5fd3269672f76e0555276ac26 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 16 Jan 2023 19:20:55 +0800 Subject: [PATCH 064/267] refactor: do some internal refactor. --- source/util/src/tarray.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 67c7b4af17..03f68c359d 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -33,7 +33,7 @@ SArray* taosArrayInit(size_t size, size_t elemSize) { } pArray->size = 0; - pArray->pData = taosMemoryMalloc(size, elemSize); + pArray->pData = taosMemoryMalloc(size * elemSize); if (pArray->pData == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(pArray); From a9ed671dc284ac2a8c6fcf06b263399434d97b78 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 16 Jan 2023 19:46:23 +0800 Subject: [PATCH 065/267] refactor: do some internal refactor. 
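One detail worth noting from patches 063 and 064 above: taosArrayInit's backing buffer moves from taosMemoryCalloc to taosMemoryMalloc (the intermediate two-argument call is corrected to size * elemSize in the follow-up commit), which also means the reserved element slots are no longer zero-filled. A tiny generic illustration of that difference, assuming standard calloc/malloc semantics; later hunks in this series compensate by appending explicitly zero-initialized values rather than relying on pre-zeroed slots.

#include <stdlib.h>
#include <string.h>

/* calloc returns zero-filled storage; malloc leaves it indeterminate. */
static void* demoAllocZeroed(size_t n, size_t elemSize) {
  return calloc(n, elemSize);          /* every byte starts at 0     */
}

static void* demoAllocUninit(size_t n, size_t elemSize) {
  return malloc(n * elemSize);         /* contents are indeterminate */
}

/* A caller that used to rely on calloc'd slots must now clear them itself
 * before reading, for example: */
static void* demoAllocAndClear(size_t n, size_t elemSize) {
  void* p = demoAllocUninit(n, elemSize);
  if (p != NULL) memset(p, 0, n * elemSize);
  return p;
}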
--- source/dnode/mgmt/node_util/inc/dmUtil.h | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 92b66230e3..eb83bc9ff8 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -53,12 +53,20 @@ extern "C" { #define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND ", DEBUG_DEBUG, dDebugFlag, __VA_ARGS__); }} #define dTrace(...) { if (dDebugFlag & DEBUG_TRACE) { taosPrintLog("DND ", DEBUG_TRACE, dDebugFlag, __VA_ARGS__); }} -#define dGFatal(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dFatal(param ", gtid:%s", __VA_ARGS__, buf);} -#define dGError(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dError(param ", gtid:%s", __VA_ARGS__, buf);} -#define dGWarn(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dWarn (param ", gtid:%s", __VA_ARGS__, buf);} -#define dGInfo(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dInfo (param ", gtid:%s", __VA_ARGS__, buf);} -#define dGDebug(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dDebug(param ", gtid:%s", __VA_ARGS__, buf);} -#define dGTrace(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dTrace(param ", gtid:%s", __VA_ARGS__, buf);} +//#define dGFatal(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dFatal(param ", gtid:%s", __VA_ARGS__, buf);} +//#define dGError(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dError(param ", gtid:%s", __VA_ARGS__, buf);} +//#define dGWarn(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dWarn (param ", gtid:%s", __VA_ARGS__, buf);} +//#define dGInfo(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dInfo (param ", gtid:%s", __VA_ARGS__, buf);} +//#define dGDebug(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dDebug(param ", gtid:%s", __VA_ARGS__, buf);} +//#define dGTrace(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dTrace(param ", gtid:%s", __VA_ARGS__, buf);} + +// TODO: disable it temporarily +#define dGFatal(param, ...) +#define dGError(param, ...) +#define dGWarn(param, ...) +#define dGInfo(param, ...) +#define dGDebug(param, ...) +#define dGTrace(param, ...) // clang-format on From 7425820c216f18fd4d781af61beb5217f24d8cc5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 16 Jan 2023 23:58:45 +0800 Subject: [PATCH 066/267] refactor: do some internal refactor. --- include/util/ttrace.h | 14 ++++++++++++++ include/util/tutil.h | 2 +- source/client/src/clientImpl.c | 2 +- source/dnode/vnode/src/tsdb/tsdbFile.c | 6 +++--- source/util/src/tutil.c | 2 +- 5 files changed, 20 insertions(+), 6 deletions(-) diff --git a/include/util/ttrace.h b/include/util/ttrace.h index 579768228a..6d40971cc2 100644 --- a/include/util/ttrace.h +++ b/include/util/ttrace.h @@ -52,6 +52,20 @@ typedef struct STraceId { sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \ } while (0) +#define TRACE_TO_STR_(_traceId, _buf) \ + do { \ + int64_t rootId = (_traceId) != NULL ? (_traceId)->rootId : 0; \ + int64_t msgId = (_traceId) != NULL ? 
(_traceId)->msgId : 0; \ + char* _t = _buf; \ + _t[0] = '0'; \ + _t[1] = 'x'; \ + _t += titoa(rootId, 16, &_t[2]); \ + _t[0] = ':'; \ + _t[1] = '0'; \ + _t[2] = 'x'; \ + _t += titoa(msgId, 16, &_t[3]); \ + } while (0) + #ifdef __cplusplus } #endif diff --git a/include/util/tutil.h b/include/util/tutil.h index 513806459d..e0801e5295 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -47,7 +47,7 @@ int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]); int32_t taosHexStrToByteArray(char hexstr[], char bytes[]); int32_t tintToHex(uint64_t val, char hex[]); -int32_t tintToStr(uint64_t val, size_t radix, char str[]); +int32_t titoa(uint64_t val, size_t radix, char str[]); char *taosIpStr(uint32_t ipInt); uint32_t ip2uint(const char *const ip_addr); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 53acafeeaa..7ed95a40e2 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1371,7 +1371,7 @@ int32_t doProcessMsgFromServer(void* param) { STraceId* trace = &pMsg->info.traceId; char tbuf[40] = {0}; - TRACE_TO_STR(trace, tbuf); + TRACE_TO_STR_(trace, tbuf); tscDebug("processMsgFromServer handle %p, message: %s, size:%d, code: %s, gtid: %s", pMsg->info.handle, TMSG_INFO(pMsg->msgType), pMsg->contLen, tstrerror(pMsg->code), tbuf); diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index 5b27497998..faf335a62c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -108,15 +108,15 @@ void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, *(p++) = TD_DIRSEP[0]; *(p++) = 'v'; - p += tintToStr(TD_VID(pTsdb->pVnode), 10, p); + p += titoa(TD_VID(pTsdb->pVnode), 10, p); *(p++) = 'f'; - p += tintToStr(fid, 10, p); + p += titoa(fid, 10, p); memcpy(p, "ver", 3); p += 3; - p += tintToStr(pHeadF->commitID, 10, p); + p += titoa(pHeadF->commitID, 10, p); memcpy(p, ".head", 5); p[5] = 0; } diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index 780dfe9105..8beda55c79 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -336,7 +336,7 @@ int32_t tintToHex(uint64_t val, char hex[]) { return j; } -int32_t tintToStr(uint64_t val, size_t radix, char str[]) { +int32_t titoa(uint64_t val, size_t radix, char str[]) { if (radix < 2 || radix > 16) { return 0; } From e5ee9689aa8b1388fe1d37d33edf29bcc07cdee6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 17 Jan 2023 09:00:44 +0800 Subject: [PATCH 067/267] refactor: disable some logs. --- include/util/ttrace.h | 14 +++++++------- source/client/src/clientImpl.c | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/util/ttrace.h b/include/util/ttrace.h index 6d40971cc2..5cdb1eecaa 100644 --- a/include/util/ttrace.h +++ b/include/util/ttrace.h @@ -45,14 +45,14 @@ typedef struct STraceId { #define TRACE_GET_MSGID(traceId) (traceId)->msgId -#define TRACE_TO_STR(traceId, buf) \ - do { \ - int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \ - int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \ - sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \ - } while (0) +//#define TRACE_TO_STR(traceId, buf) \ +// do { \ +// int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \ +// int64_t msgId = (traceId) != NULL ? 
(traceId)->msgId : 0; \ +// sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \ +// } while (0) -#define TRACE_TO_STR_(_traceId, _buf) \ +#define TRACE_TO_STR(_traceId, _buf) \ do { \ int64_t rootId = (_traceId) != NULL ? (_traceId)->rootId : 0; \ int64_t msgId = (_traceId) != NULL ? (_traceId)->msgId : 0; \ diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 7ed95a40e2..53acafeeaa 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1371,7 +1371,7 @@ int32_t doProcessMsgFromServer(void* param) { STraceId* trace = &pMsg->info.traceId; char tbuf[40] = {0}; - TRACE_TO_STR_(trace, tbuf); + TRACE_TO_STR(trace, tbuf); tscDebug("processMsgFromServer handle %p, message: %s, size:%d, code: %s, gtid: %s", pMsg->info.handle, TMSG_INFO(pMsg->msgType), pMsg->contLen, tstrerror(pMsg->code), tbuf); From 07cf336fa095a58f5e4678f1449604a86a5f5c7b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 17 Jan 2023 09:43:33 +0800 Subject: [PATCH 068/267] refactor: add an array list api. --- include/util/tarray.h | 1 + source/common/src/tdatablock.c | 1 + source/libs/catalog/inc/catalogInt.h | 4 ++-- source/libs/catalog/src/ctgAsync.c | 12 ++++++------ source/libs/catalog/src/ctgCache.c | 16 ++++++++-------- source/libs/function/src/tudf.c | 2 +- source/libs/stream/src/streamData.c | 5 ++--- source/util/src/tarray.c | 20 ++++++++++++++++++++ 8 files changed, 41 insertions(+), 20 deletions(-) diff --git a/include/util/tarray.h b/include/util/tarray.h index 0e78397ecb..f2fe5bc844 100644 --- a/include/util/tarray.h +++ b/include/util/tarray.h @@ -53,6 +53,7 @@ typedef struct SArray { * @return */ SArray* taosArrayInit(size_t size, size_t elemSize); +SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize); /** * diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 43f272d599..52d45f1eda 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2353,6 +2353,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) { if (pBlock->pDataBlock == NULL) { pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); + taosArraySetSize(pBlock->pDataBlock, numOfCols); } diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 3e8300e05d..b25097b837 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -300,7 +300,7 @@ typedef struct SCtgSubRes { ctgSubTaskCbFp fp; } SCtgSubRes; -typedef struct SCtgTask { +struct SCtgTask { CTG_TASK_TYPE type; int32_t taskId; SCtgJob* pJob; @@ -313,7 +313,7 @@ typedef struct SCtgTask { SRWLatch lock; SArray* pParents; SCtgSubRes subRes; -} SCtgTask; +}; typedef struct SCtgTaskReq { SCtgTask* pTask; diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 438128203e..9237d77c47 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -1705,9 +1705,7 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask* pTask) { return TSDB_CODE_SUCCESS; } - pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx)); - taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum); - + pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum); for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); SName* pName = ctgGetFetchName(pCtx->pNames, pFetch); @@ -1842,7 +1840,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) { 
ctgAddFetch(&pCtx->pFetchs, i, -1, &fetchIdx, baseResIdx, 0); baseResIdx += taosArrayGetSize(pReq->pTables); - taosArraySetSize(pCtx->pResList, baseResIdx); + int32_t inc = baseResIdx - taosArrayGetSize(pCtx->pResList); + for(int32_t j = 0; j < inc; ++j) { + taosArrayPush(pCtx->pResList, &(SMetaRes){0}); + } } } @@ -1854,8 +1855,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) { return TSDB_CODE_SUCCESS; } - pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx)); - taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum); + pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum); for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index c266cc1df9..c41a7e5967 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -2480,20 +2480,20 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe ctgDebug("db %s not in cache", dbFName); for (int32_t i = 0; i < tbNum; ++i) { ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaData){0}); } return TSDB_CODE_SUCCESS; } for (int32_t i = 0; i < tbNum; ++i) { - SName *pName = taosArrayGet(pList, i); + pName = taosArrayGet(pList, i); pCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname)); if (NULL == pCache) { ctgDebug("tb %s not in cache, dbFName:%s", pName->tname, dbFName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); continue; } @@ -2503,7 +2503,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe CTG_UNLOCK(CTG_READ, &pCache->metaLock); ctgDebug("tb %s meta not in cache, dbFName:%s", pName->tname, dbFName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); continue; } @@ -2576,7 +2576,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe if (NULL == stName) { ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", pTableMeta->suid, dbFName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); continue; @@ -2588,7 +2588,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe taosHashRelease(dbCache->stbCache, stName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); continue; @@ -2603,7 +2603,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe taosHashRelease(dbCache->tbCache, pCache); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); @@ -2619,7 +2619,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe nctx.tbInfo.suid); 
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index c9fa70ff11..b9e72847a1 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -812,7 +812,7 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) { block->info.hasVarCol = IS_VAR_DATA_TYPE(udfCol->colMeta.type); block->pDataBlock = taosArrayInit(1, sizeof(SColumnInfoData)); - taosArraySetSize(block->pDataBlock, 1); + taosArrayPush(block->pDataBlock, &(SColumnInfoData){0}); SColumnInfoData *col = taosArrayGet(block->pDataBlock, 0); SUdfColumnMeta *meta = &udfCol->colMeta; col->info.precision = meta->precision; diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 6cc684dddf..8b869cc59f 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -17,11 +17,10 @@ int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData) { int32_t blockNum = pReq->blockNum; - SArray* pArray = taosArrayInit(blockNum, sizeof(SSDataBlock)); + SArray* pArray = taosArrayInit_s(blockNum, sizeof(SSDataBlock), blockNum); if (pArray == NULL) { return -1; } - taosArraySetSize(pArray, blockNum); ASSERT(pReq->blockNum == taosArrayGetSize(pReq->data)); ASSERT(pReq->blockNum == taosArrayGetSize(pReq->dataLen)); @@ -49,7 +48,7 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock if (pArray == NULL) { return -1; } - taosArraySetSize(pArray, 1); + taosArrayPush(pArray, &(SSDataBlock){0}); SRetrieveTableRsp* pRetrieve = pReq->pRetrieve; SSDataBlock* pDataBlock = taosArrayGet(pArray, 0); blockDecode(pDataBlock, pRetrieve->data); diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 03f68c359d..e8215dce2b 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -45,6 +45,26 @@ SArray* taosArrayInit(size_t size, size_t elemSize) { return pArray; } +SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize) { + SArray* pArray = taosMemoryMalloc(sizeof(SArray)); + if (pArray == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + pArray->size = 0; + pArray->pData = taosMemoryCalloc(initialSize, elemSize); + if (pArray->pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + taosMemoryFree(pArray); + return NULL; + } + + pArray->capacity = initialSize; + pArray->elemSize = elemSize; + return pArray; +} + static int32_t taosArrayResize(SArray* pArray) { assert(pArray->size >= pArray->capacity); From 49dedc3c3fdb11a3ab3658c50805052c04474224 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 17 Jan 2023 10:24:19 +0800 Subject: [PATCH 069/267] fix(query): opt perf. 
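This patch does two small things, as the diff below shows: tsdbFile.c builds the shared "v<vgId>f<fid>ver<commitID>" prefix once in a helper (getFileNamePrefix) using titoa() instead of formatting each file name with snprintf(), and taosArrayInit_s() from the previous patch now sets pArray->size to initialSize so the pre-allocated slots are immediately addressable. A minimal usage sketch of the intended taosArrayInit_s() semantics follows; it is illustrative only and not part of the diff, and SDemoItem/demoInitS are made-up names used for the example.

    #include "tarray.h"   /* SArray, taosArrayInit_s, taosArrayGet, taosArrayDestroy */

    /* hypothetical element type, only for this sketch */
    typedef struct { int64_t rootId; int64_t msgId; } SDemoItem;

    static int32_t demoInitS(void) {
      int32_t num = 4;
      /* array starts with `num` zero-filled elements, replacing the old
       * taosArrayInit() + taosArraySetSize() pattern */
      SArray* pList = taosArrayInit_s(num, sizeof(SDemoItem), num);
      if (pList == NULL) {
        return -1;   /* terrno set to TSDB_CODE_OUT_OF_MEMORY */
      }

      /* size is already `num`, so each slot can be written in place */
      for (int32_t i = 0; i < num; ++i) {
        SDemoItem* pItem = taosArrayGet(pList, i);
        pItem->msgId = i;
      }

      taosArrayDestroy(pList);
      return 0;
    }
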
--- source/dnode/vnode/src/tsdb/tsdbFile.c | 24 ++++++++++++++++-------- source/util/src/tarray.c | 2 +- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index faf335a62c..42728be657 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -92,7 +92,7 @@ static int32_t tGetSmaFile(uint8_t *p, SSmaFile *pSmaFile) { } // EXPOSED APIS ================================================== -void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]) { +static char* getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t commitId, char fname[]) { const char* p1 = tfsGetDiskPath(pTsdb->pVnode->pTfs, did); int32_t len = strlen(p1); @@ -116,24 +116,32 @@ void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, memcpy(p, "ver", 3); p += 3; - p += titoa(pHeadF->commitID, 10, p); + p += titoa(commitId, 10, p); + return p; +} + +void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]) { + char* p = getFileNamePrefix(pTsdb, did, fid, pHeadF->commitID, fname); memcpy(p, ".head", 5); p[5] = 0; } void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pDataF->commitID, ".data"); + char* p = getFileNamePrefix(pTsdb, did, fid, pDataF->commitID, fname); + memcpy(p, ".data", 5); + p[5] = 0; } void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSttF->commitID, ".stt"); + char* p = getFileNamePrefix(pTsdb, did, fid, pSttF->commitID, fname); + memcpy(p, ".stt", 4); + p[4] = 0; } void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSmaF->commitID, ".sma"); + char* p = getFileNamePrefix(pTsdb, did, fid, pSmaF->commitID, fname); + memcpy(p, ".sma", 4); + p[4] = 0; } bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2) { return pDelFile1->commitID == pDelFile2->commitID; } diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index e8215dce2b..0eec9fd48f 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -52,7 +52,7 @@ SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize) { return NULL; } - pArray->size = 0; + pArray->size = initialSize; pArray->pData = taosMemoryCalloc(initialSize, elemSize); if (pArray->pData == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; From ac09a05cfac7e5750fac20ab15c7e18d8cec9517 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 17 Jan 2023 15:34:58 +0800 Subject: [PATCH 070/267] feat: create stream support delete_mark option --- include/common/tmsg.h | 1 + source/common/src/tmsg.c | 3 + source/libs/parser/inc/sql.y | 1 + source/libs/parser/src/parTranslater.c | 30 +- source/libs/parser/src/sql.c | 2533 ++++++++++++------------ 5 files changed, 1293 insertions(+), 1275 deletions(-) diff --git a/include/common/tmsg.h 
b/include/common/tmsg.h index ad6077db09..a7e87a0e08 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1768,6 +1768,7 @@ typedef struct { SArray* pTags; // array of SField // 3.0.20 int64_t checkpointFreq; // ms + int64_t deleteMark; } SCMCreateStreamReq; typedef struct { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 95625e8d93..841a194be8 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5424,6 +5424,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS if (tEncodeI32(&encoder, pField->bytes) < 0) return -1; if (tEncodeCStr(&encoder, pField->name) < 0) return -1; } + if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1; tEndEncode(&encoder); @@ -5485,6 +5486,8 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea } } + if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1; + tEndDecode(&decoder); tDecoderClear(&decoder); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 7136a555cd..06211658ea 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -544,6 +544,7 @@ stream_options(A) ::= stream_options(B) TRIGGER MAX_DELAY duration_literal(C). stream_options(A) ::= stream_options(B) WATERMARK duration_literal(C). { ((SStreamOptions*)B)->pWatermark = releaseRawExprNode(pCxt, C); A = B; } stream_options(A) ::= stream_options(B) IGNORE EXPIRED NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreExpired = taosStr2Int8(C.z, NULL, 10); A = B; } stream_options(A) ::= stream_options(B) FILL_HISTORY NK_INTEGER(C). { ((SStreamOptions*)B)->fillHistory = taosStr2Int8(C.z, NULL, 10); A = B; } +stream_options(A) ::= stream_options(B) DELETE_MARK duration_literal(C). { ((SStreamOptions*)B)->pDeleteMark = releaseRawExprNode(pCxt, C); A = B; } subtable_opt(A) ::= . { A = NULL; } subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP. 
{ A = releaseRawExprNode(pCxt, B); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 05d49bb027..fd2332fa93 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -666,6 +666,9 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) { if (isSetOperator(pCurrStmt)) { return ((SSetOperator*)pCurrStmt)->precision; } + if (NULL != pCurrStmt && QUERY_NODE_CREATE_STREAM_STMT == nodeType(pCurrStmt)) { + return getPrecisionFromCurrStmt(((SCreateStreamStmt*)pCurrStmt)->pQuery, defaultVal); + } return defaultVal; } @@ -5483,16 +5486,6 @@ static bool crossTableWithUdaf(SSelectStmt* pSelect) { } static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { - if (NULL != pStmt->pOptions->pWatermark && - (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pWatermark))) { - return pCxt->errCode; - } - - if (NULL != pStmt->pOptions->pDelay && - (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pDelay))) { - return pCxt->errCode; - } - if (NULL == pStmt->pQuery) { return TSDB_CODE_SUCCESS; } @@ -5685,6 +5678,17 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt return code; } +static int32_t translateStreamOptions(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { + pCxt->pCurrStmt = (SNode*)pStmt; + SStreamOptions* pOptions = pStmt->pOptions; + if ((NULL != pOptions->pWatermark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pWatermark))) || + (NULL != pOptions->pDeleteMark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pDeleteMark))) || + (NULL != pOptions->pDelay && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pDelay)))) { + return pCxt->errCode; + } + return TSDB_CODE_SUCCESS; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -5706,10 +5710,16 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } } + if (TSDB_CODE_SUCCESS == code) { + code = translateStreamOptions(pCxt, pStmt); + } + if (TSDB_CODE_SUCCESS == code) { pReq->triggerType = pStmt->pOptions->triggerType; pReq->maxDelay = (NULL != pStmt->pOptions->pDelay ? ((SValueNode*)pStmt->pOptions->pDelay)->datum.i : 0); pReq->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0); + pReq->deleteMark = + (NULL != pStmt->pOptions->pDeleteMark ? 
((SValueNode*)pStmt->pOptions->pDeleteMark)->datum.i : 0); pReq->fillHistory = pStmt->pOptions->fillHistory; pReq->igExpired = pStmt->pOptions->ignoreExpired; columnDefNodeToField(pStmt->pTags, &pReq->pTags); diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index d3b9bf069b..6a7fc010d0 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -139,17 +139,17 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 711 -#define YYNRULE 541 +#define YYNSTATE 712 +#define YYNRULE 542 #define YYNTOKEN 322 -#define YY_MAX_SHIFT 710 -#define YY_MIN_SHIFTREDUCE 1054 -#define YY_MAX_SHIFTREDUCE 1594 -#define YY_ERROR_ACTION 1595 -#define YY_ACCEPT_ACTION 1596 -#define YY_NO_ACTION 1597 -#define YY_MIN_REDUCE 1598 -#define YY_MAX_REDUCE 2138 +#define YY_MAX_SHIFT 711 +#define YY_MIN_SHIFTREDUCE 1056 +#define YY_MAX_SHIFTREDUCE 1597 +#define YY_ERROR_ACTION 1598 +#define YY_ACCEPT_ACTION 1599 +#define YY_NO_ACTION 1600 +#define YY_MIN_REDUCE 1601 +#define YY_MAX_REDUCE 2142 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -218,279 +218,279 @@ typedef union { *********** Begin parsing tables **********************************************/ #define YY_ACTTAB_COUNT (2723) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 459, 353, 460, 1634, 566, 600, 578, 2114, 2109, 155, - /* 10 */ 1940, 2109, 43, 41, 1525, 39, 38, 37, 1753, 123, - /* 20 */ 361, 1936, 1375, 565, 173, 599, 498, 2113, 2110, 567, - /* 30 */ 1954, 2110, 2112, 1455, 1400, 1373, 1751, 131, 468, 406, - /* 40 */ 460, 1634, 36, 35, 1940, 586, 42, 40, 39, 38, - /* 50 */ 37, 1932, 1938, 344, 541, 1936, 167, 578, 1450, 465, - /* 60 */ 585, 1972, 610, 16, 366, 461, 1804, 1797, 1799, 581, - /* 70 */ 1381, 599, 1401, 330, 1922, 158, 616, 333, 1851, 319, - /* 80 */ 1705, 1972, 1802, 43, 41, 1932, 1938, 356, 131, 560, - /* 90 */ 477, 361, 475, 1375, 1860, 12, 610, 326, 172, 2049, - /* 100 */ 2050, 1953, 129, 2054, 1455, 1988, 1373, 2113, 100, 1955, - /* 110 */ 620, 1957, 1958, 615, 600, 610, 1106, 707, 1105, 654, - /* 120 */ 170, 458, 2041, 599, 463, 1640, 355, 2037, 123, 1450, - /* 130 */ 559, 210, 1457, 1458, 16, 503, 33, 275, 1484, 566, - /* 140 */ 175, 1381, 157, 2109, 1610, 1751, 46, 1107, 2067, 255, - /* 150 */ 2049, 577, 578, 124, 576, 1621, 1954, 2109, 565, 173, - /* 160 */ 600, 1431, 1440, 2110, 567, 578, 12, 42, 40, 39, - /* 170 */ 38, 37, 565, 173, 52, 1309, 1310, 2110, 567, 97, - /* 180 */ 1376, 586, 1374, 131, 1256, 1257, 467, 1972, 707, 463, - /* 190 */ 1640, 1751, 1402, 132, 1485, 614, 131, 561, 46, 1922, - /* 200 */ 1922, 1743, 616, 1457, 1458, 1379, 1380, 62, 1430, 1433, - /* 210 */ 1434, 1435, 1436, 1437, 1438, 1439, 612, 608, 1448, 1449, - /* 220 */ 1451, 1452, 1453, 1454, 1456, 1459, 2, 1953, 595, 58, - /* 230 */ 1860, 1988, 1431, 1440, 311, 1955, 620, 1957, 1958, 615, - /* 240 */ 613, 610, 601, 2006, 174, 2049, 2050, 1400, 129, 2054, - /* 250 */ 338, 1376, 2056, 1374, 1798, 1799, 580, 171, 2049, 2050, - /* 260 */ 556, 129, 2054, 1620, 58, 32, 359, 1479, 1480, 1481, - /* 270 */ 1482, 1483, 1487, 1488, 1489, 1490, 1379, 1380, 2053, 1430, - /* 280 */ 1433, 1434, 1435, 1436, 1437, 1438, 1439, 612, 608, 1448, - /* 290 */ 1449, 1451, 1452, 1453, 1454, 1456, 1459, 2, 58, 9, - /* 300 */ 43, 41, 1667, 47, 1551, 1596, 1847, 1922, 361, 1529, - /* 310 */ 1375, 339, 1729, 337, 336, 1400, 500, 181, 1954, 1619, - /* 320 */ 502, 1455, 58, 1373, 
1213, 642, 641, 640, 1217, 639, - /* 330 */ 1219, 1220, 638, 1222, 635, 257, 1228, 632, 1230, 1231, - /* 340 */ 629, 626, 501, 562, 557, 258, 1450, 176, 399, 1972, - /* 350 */ 398, 16, 553, 1549, 1550, 1552, 1553, 617, 1381, 1106, - /* 360 */ 1173, 1105, 1922, 1922, 616, 569, 395, 513, 512, 511, - /* 370 */ 376, 43, 41, 1460, 477, 128, 507, 400, 176, 361, - /* 380 */ 506, 1375, 58, 12, 85, 505, 510, 397, 393, 1953, - /* 390 */ 1107, 504, 1455, 1988, 1373, 1175, 100, 1955, 620, 1957, - /* 400 */ 1958, 615, 1954, 610, 80, 707, 134, 1618, 141, 2012, - /* 410 */ 2041, 600, 546, 176, 355, 2037, 2109, 1450, 127, 546, - /* 420 */ 1457, 1458, 169, 2109, 1401, 178, 226, 1746, 573, 1381, - /* 430 */ 1617, 2115, 173, 1972, 1599, 1791, 2110, 567, 2115, 173, - /* 440 */ 1375, 617, 1751, 2110, 567, 167, 1922, 176, 616, 1431, - /* 450 */ 1440, 1922, 176, 1373, 44, 113, 1432, 587, 112, 111, - /* 460 */ 110, 109, 108, 107, 106, 105, 104, 1852, 1376, 352, - /* 470 */ 1374, 176, 1864, 1953, 1922, 227, 707, 1988, 1781, 1804, - /* 480 */ 160, 1955, 620, 1957, 1958, 615, 354, 610, 1381, 1352, - /* 490 */ 1353, 1457, 1458, 1379, 1380, 1802, 1430, 1433, 1434, 1435, - /* 500 */ 1436, 1437, 1438, 1439, 612, 608, 1448, 1449, 1451, 1452, - /* 510 */ 1453, 1454, 1456, 1459, 2, 509, 508, 678, 676, 113, - /* 520 */ 1431, 1440, 112, 111, 110, 109, 108, 107, 106, 105, - /* 530 */ 104, 176, 568, 2130, 364, 707, 513, 512, 511, 1376, - /* 540 */ 367, 1374, 155, 1804, 128, 507, 1465, 1909, 155, 506, - /* 550 */ 365, 1753, 1400, 1403, 505, 510, 1728, 1753, 1954, 1802, - /* 560 */ 504, 266, 267, 1400, 1379, 1380, 265, 1430, 1433, 1434, - /* 570 */ 1435, 1436, 1437, 1438, 1439, 612, 608, 1448, 1449, 1451, - /* 580 */ 1452, 1453, 1454, 1456, 1459, 2, 43, 41, 1598, 1972, - /* 590 */ 1616, 600, 185, 527, 361, 383, 1375, 581, 1376, 1727, - /* 600 */ 1374, 1615, 1922, 1399, 616, 404, 525, 1455, 523, 1373, - /* 610 */ 1742, 600, 122, 121, 120, 119, 118, 117, 116, 115, - /* 620 */ 114, 1936, 1751, 1379, 1380, 405, 11, 10, 77, 1953, - /* 630 */ 1614, 76, 1450, 1988, 1922, 1613, 100, 1955, 620, 1957, - /* 640 */ 1958, 615, 1751, 610, 1381, 1922, 91, 80, 170, 574, - /* 650 */ 2041, 1932, 1938, 133, 355, 2037, 2012, 43, 41, 600, - /* 660 */ 518, 654, 610, 652, 2114, 361, 1954, 1375, 1744, 44, - /* 670 */ 1747, 442, 1612, 414, 1922, 528, 2068, 587, 1455, 1922, - /* 680 */ 1373, 1402, 146, 145, 649, 648, 647, 143, 29, 224, - /* 690 */ 1751, 707, 1865, 1432, 36, 35, 182, 1972, 42, 40, - /* 700 */ 39, 38, 37, 1450, 521, 617, 1457, 1458, 1609, 515, - /* 710 */ 1922, 1608, 616, 2056, 223, 1381, 1922, 1607, 2114, 36, - /* 720 */ 35, 1498, 2109, 42, 40, 39, 38, 37, 1522, 189, - /* 730 */ 188, 1403, 1641, 1486, 2056, 1431, 1440, 1953, 2113, 2052, - /* 740 */ 12, 1988, 2110, 2111, 101, 1955, 620, 1957, 1958, 615, - /* 750 */ 64, 610, 1922, 63, 1376, 1922, 1374, 1584, 2041, 31, - /* 760 */ 2051, 1922, 707, 2038, 1606, 36, 35, 1605, 225, 42, - /* 770 */ 40, 39, 38, 37, 9, 1604, 1087, 1457, 1458, 1379, - /* 780 */ 1380, 702, 1430, 1433, 1434, 1435, 1436, 1437, 1438, 1439, - /* 790 */ 612, 608, 1448, 1449, 1451, 1452, 1453, 1454, 1456, 1459, - /* 800 */ 2, 318, 1384, 1398, 30, 1603, 1431, 1440, 1922, 710, - /* 810 */ 436, 1922, 155, 449, 1491, 1089, 448, 1092, 1093, 1922, - /* 820 */ 602, 1754, 2013, 282, 257, 1376, 1664, 1374, 82, 321, - /* 830 */ 1847, 420, 531, 450, 529, 9, 422, 7, 166, 1834, - /* 840 */ 604, 183, 2013, 6, 700, 696, 692, 688, 280, 1922, - /* 850 */ 1379, 1380, 1726, 1430, 1433, 1434, 1435, 1436, 1437, 1438, - /* 860 */ 1439, 612, 
608, 1448, 1449, 1451, 1452, 1453, 1454, 1456, - /* 870 */ 1459, 2, 1804, 1403, 235, 36, 35, 334, 1400, 42, - /* 880 */ 40, 39, 38, 37, 98, 1847, 607, 273, 1803, 410, - /* 890 */ 645, 684, 683, 682, 681, 371, 187, 680, 679, 135, - /* 900 */ 674, 673, 672, 671, 670, 669, 668, 148, 664, 663, - /* 910 */ 662, 370, 369, 659, 658, 657, 656, 655, 646, 446, - /* 920 */ 596, 1795, 441, 440, 439, 438, 435, 434, 433, 432, - /* 930 */ 431, 427, 426, 425, 424, 335, 417, 416, 415, 1381, - /* 940 */ 412, 411, 332, 156, 1092, 1093, 36, 35, 295, 1387, - /* 950 */ 42, 40, 39, 38, 37, 1602, 1601, 260, 1954, 652, - /* 960 */ 666, 650, 293, 66, 1795, 289, 65, 651, 1781, 1521, - /* 970 */ 1795, 667, 1740, 1721, 1346, 48, 229, 3, 146, 145, - /* 980 */ 649, 648, 647, 143, 193, 455, 453, 36, 35, 1972, - /* 990 */ 423, 42, 40, 39, 38, 37, 502, 617, 1943, 1922, - /* 1000 */ 1922, 600, 1922, 652, 616, 1736, 36, 35, 2061, 1518, - /* 1010 */ 42, 40, 39, 38, 37, 428, 234, 233, 501, 1432, - /* 1020 */ 58, 600, 146, 145, 649, 648, 647, 143, 1476, 1953, - /* 1030 */ 154, 600, 1751, 1988, 1561, 429, 100, 1955, 620, 1957, - /* 1040 */ 1958, 615, 25, 610, 144, 476, 1945, 1518, 2129, 137, - /* 1050 */ 2041, 125, 1751, 407, 355, 2037, 83, 1738, 67, 99, - /* 1060 */ 600, 1954, 1751, 36, 35, 2075, 408, 42, 40, 39, - /* 1070 */ 38, 37, 36, 35, 1748, 1734, 42, 40, 39, 38, - /* 1080 */ 37, 600, 216, 218, 570, 214, 217, 600, 1654, 1383, - /* 1090 */ 220, 1751, 1972, 219, 1647, 139, 51, 74, 73, 403, - /* 1100 */ 617, 542, 180, 600, 60, 1922, 1954, 616, 75, 1645, - /* 1110 */ 514, 222, 1751, 1941, 221, 239, 516, 582, 1751, 45, - /* 1120 */ 317, 1593, 1594, 391, 1936, 389, 385, 381, 378, 375, - /* 1130 */ 263, 519, 1953, 600, 1751, 600, 1988, 1972, 1611, 100, - /* 1140 */ 1955, 620, 1957, 1958, 615, 617, 610, 270, 140, 597, - /* 1150 */ 1922, 2129, 616, 2041, 1932, 1938, 1548, 355, 2037, 1706, - /* 1160 */ 1541, 600, 600, 230, 1751, 610, 1751, 241, 2103, 176, - /* 1170 */ 142, 1322, 611, 50, 144, 598, 276, 1953, 545, 1954, - /* 1180 */ 60, 1988, 268, 252, 100, 1955, 620, 1957, 1958, 615, - /* 1190 */ 660, 610, 1751, 1751, 358, 357, 2129, 600, 2041, 45, - /* 1200 */ 592, 45, 355, 2037, 1389, 11, 10, 661, 644, 96, - /* 1210 */ 1972, 368, 1154, 2060, 2081, 1455, 624, 1382, 617, 93, - /* 1220 */ 211, 584, 272, 1922, 374, 616, 1206, 1591, 1751, 1152, - /* 1230 */ 209, 554, 1492, 246, 1954, 162, 1386, 142, 1973, 372, - /* 1240 */ 1450, 494, 490, 486, 482, 208, 144, 1856, 1635, 126, - /* 1250 */ 1953, 1441, 1381, 288, 1988, 1792, 142, 100, 1955, 620, - /* 1260 */ 1957, 1958, 615, 546, 610, 1972, 546, 2109, 1234, 2016, - /* 1270 */ 2109, 2041, 1135, 617, 579, 355, 2037, 2071, 1922, 251, - /* 1280 */ 616, 81, 2115, 173, 206, 2115, 173, 2110, 567, 1238, - /* 1290 */ 2110, 567, 254, 1, 373, 4, 382, 331, 1245, 606, - /* 1300 */ 377, 1243, 1339, 571, 283, 1953, 186, 1136, 147, 1988, - /* 1310 */ 409, 1403, 100, 1955, 620, 1957, 1958, 615, 1857, 610, - /* 1320 */ 413, 1590, 444, 418, 2014, 1398, 2041, 430, 1849, 437, - /* 1330 */ 355, 2037, 443, 451, 452, 445, 546, 190, 454, 456, - /* 1340 */ 2109, 1404, 457, 466, 1406, 470, 1954, 534, 469, 1405, - /* 1350 */ 196, 205, 199, 198, 204, 2115, 173, 473, 471, 1407, - /* 1360 */ 2110, 567, 1390, 472, 1385, 201, 474, 203, 78, 79, - /* 1370 */ 478, 1109, 207, 197, 495, 496, 1954, 1972, 499, 497, - /* 1380 */ 103, 1741, 1899, 213, 320, 617, 1898, 1393, 1395, 546, - /* 1390 */ 1922, 1737, 616, 2109, 533, 284, 535, 215, 149, 608, - /* 1400 */ 1448, 1449, 1451, 1452, 1453, 1454, 150, 1972, 2115, 173, - /* 1410 
*/ 228, 536, 1739, 2110, 567, 617, 540, 1953, 1735, 151, - /* 1420 */ 1922, 1988, 616, 152, 100, 1955, 620, 1957, 1958, 615, - /* 1430 */ 231, 610, 1954, 543, 550, 537, 603, 2072, 2041, 590, - /* 1440 */ 2082, 555, 355, 2037, 5, 552, 345, 1953, 2087, 558, - /* 1450 */ 237, 1988, 1954, 240, 101, 1955, 620, 1957, 1958, 615, - /* 1460 */ 2063, 610, 2086, 1972, 564, 548, 551, 549, 2041, 572, - /* 1470 */ 250, 617, 2040, 2037, 247, 245, 1922, 346, 616, 575, - /* 1480 */ 130, 248, 1518, 1972, 249, 163, 1402, 2057, 349, 583, - /* 1490 */ 259, 617, 1408, 1861, 285, 593, 1922, 588, 616, 286, - /* 1500 */ 589, 1870, 1869, 1953, 1868, 2108, 351, 1988, 88, 1752, - /* 1510 */ 101, 1955, 620, 1957, 1958, 615, 287, 610, 90, 594, - /* 1520 */ 1954, 253, 57, 618, 2041, 2022, 2132, 1988, 605, 2037, - /* 1530 */ 101, 1955, 620, 1957, 1958, 615, 1954, 610, 92, 622, - /* 1540 */ 279, 703, 1796, 1722, 2041, 704, 290, 706, 325, 2037, - /* 1550 */ 49, 1972, 314, 322, 323, 294, 1916, 1915, 71, 617, - /* 1560 */ 72, 292, 1914, 1913, 1922, 1910, 616, 1972, 379, 299, - /* 1570 */ 1367, 313, 303, 380, 1368, 617, 179, 1908, 384, 386, - /* 1580 */ 1922, 387, 616, 388, 1907, 390, 1906, 392, 1905, 394, - /* 1590 */ 1904, 1953, 1342, 396, 1341, 1988, 1881, 1954, 159, 1955, - /* 1600 */ 620, 1957, 1958, 615, 1880, 610, 401, 1953, 402, 1879, - /* 1610 */ 1878, 1988, 1842, 1300, 159, 1955, 620, 1957, 1958, 615, - /* 1620 */ 1841, 610, 1839, 136, 1838, 1837, 1840, 1836, 1972, 184, - /* 1630 */ 419, 1830, 421, 1829, 1828, 1827, 617, 1835, 1833, 547, - /* 1640 */ 2078, 1922, 1954, 616, 1832, 1831, 1826, 1825, 1824, 1823, - /* 1650 */ 1822, 1821, 1820, 1819, 1818, 1817, 2079, 1816, 1815, 138, - /* 1660 */ 1814, 1813, 1812, 1811, 1810, 1302, 1809, 1808, 1953, 1807, - /* 1670 */ 1806, 447, 1988, 1972, 1181, 305, 1955, 620, 1957, 1958, - /* 1680 */ 615, 617, 610, 1805, 1669, 1668, 1922, 1666, 616, 1630, - /* 1690 */ 191, 1095, 1094, 1629, 192, 1894, 1888, 1877, 194, 202, - /* 1700 */ 1876, 1859, 1730, 1954, 69, 1128, 1942, 1665, 1663, 1661, - /* 1710 */ 1659, 70, 168, 1953, 479, 195, 1657, 1988, 462, 563, - /* 1720 */ 160, 1955, 620, 1957, 1958, 615, 1954, 610, 464, 200, - /* 1730 */ 480, 481, 1644, 483, 1972, 1643, 484, 1626, 485, 350, - /* 1740 */ 487, 491, 617, 489, 1732, 1249, 59, 1922, 1731, 616, - /* 1750 */ 1250, 488, 493, 1172, 1171, 1170, 1169, 1972, 675, 492, - /* 1760 */ 1166, 677, 1164, 1165, 1163, 614, 1655, 340, 1648, 341, - /* 1770 */ 1922, 1646, 616, 2131, 1953, 212, 517, 342, 1988, 1625, - /* 1780 */ 1624, 312, 1955, 620, 1957, 1958, 615, 1954, 610, 520, - /* 1790 */ 1623, 522, 524, 102, 526, 1357, 1356, 1953, 530, 1893, - /* 1800 */ 1348, 1988, 1887, 53, 311, 1955, 620, 1957, 1958, 615, - /* 1810 */ 1875, 610, 1873, 2007, 538, 1874, 2114, 1954, 1972, 17, - /* 1820 */ 1359, 14, 56, 360, 244, 24, 617, 1563, 26, 236, - /* 1830 */ 243, 1922, 238, 616, 1547, 1540, 161, 242, 1943, 28, - /* 1840 */ 1954, 27, 18, 539, 84, 61, 1583, 1584, 1972, 19, - /* 1850 */ 15, 232, 1578, 362, 343, 1577, 617, 347, 1953, 153, - /* 1860 */ 1582, 1922, 1988, 616, 1581, 312, 1955, 620, 1957, 1958, - /* 1870 */ 615, 1972, 610, 348, 256, 55, 1872, 1515, 1514, 617, - /* 1880 */ 164, 544, 1871, 261, 1922, 1954, 616, 1858, 1953, 20, - /* 1890 */ 87, 264, 1988, 262, 1545, 312, 1955, 620, 1957, 1958, - /* 1900 */ 615, 1954, 610, 269, 86, 93, 89, 274, 21, 10, - /* 1910 */ 1391, 532, 1445, 1991, 609, 1988, 1972, 271, 307, 1955, - /* 1920 */ 620, 1957, 1958, 615, 617, 610, 1443, 34, 591, 1922, - /* 1930 */ 1442, 616, 1972, 165, 13, 22, 54, 177, 1423, 
1415, - /* 1940 */ 617, 23, 625, 621, 1235, 1922, 623, 616, 363, 1467, - /* 1950 */ 1232, 628, 627, 630, 1954, 1229, 1953, 1477, 631, 633, - /* 1960 */ 1988, 1223, 634, 296, 1955, 620, 1957, 1958, 615, 1221, - /* 1970 */ 610, 619, 1953, 636, 637, 1212, 1988, 1227, 643, 297, - /* 1980 */ 1955, 620, 1957, 1958, 615, 1972, 610, 8, 1226, 1225, - /* 1990 */ 1466, 1224, 94, 617, 277, 95, 1244, 1240, 1922, 1954, - /* 2000 */ 616, 68, 1160, 1126, 653, 1159, 1158, 1157, 1156, 1155, - /* 2010 */ 1179, 1153, 1151, 1150, 1149, 665, 1147, 1146, 1145, 1144, - /* 2020 */ 1143, 1142, 1141, 1174, 278, 1953, 1176, 1138, 1137, 1988, - /* 2030 */ 1972, 1134, 298, 1955, 620, 1957, 1958, 615, 617, 610, - /* 2040 */ 1133, 1132, 1131, 1922, 1662, 616, 685, 686, 687, 1660, - /* 2050 */ 689, 690, 1954, 691, 1658, 693, 694, 695, 1656, 697, - /* 2060 */ 698, 699, 1642, 701, 1084, 1622, 709, 281, 1954, 705, - /* 2070 */ 1953, 1597, 1377, 291, 1988, 1597, 708, 304, 1955, 620, - /* 2080 */ 1957, 1958, 615, 1972, 610, 1597, 1597, 1597, 1597, 1597, - /* 2090 */ 1597, 617, 1597, 1597, 1597, 1597, 1922, 1597, 616, 1972, - /* 2100 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 617, 1597, 1597, - /* 2110 */ 1597, 1597, 1922, 1954, 616, 1597, 1597, 1597, 1597, 1597, - /* 2120 */ 1597, 1597, 1597, 1953, 1597, 1597, 1597, 1988, 1597, 1597, - /* 2130 */ 308, 1955, 620, 1957, 1958, 615, 1597, 610, 1597, 1953, - /* 2140 */ 1597, 1597, 1597, 1988, 1972, 1597, 300, 1955, 620, 1957, - /* 2150 */ 1958, 615, 617, 610, 1597, 1597, 1597, 1922, 1597, 616, - /* 2160 */ 1597, 1597, 1597, 1597, 1597, 1597, 1954, 1597, 1597, 1597, - /* 2170 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, - /* 2180 */ 1597, 1597, 1954, 1597, 1953, 1597, 1597, 1597, 1988, 1597, - /* 2190 */ 1597, 309, 1955, 620, 1957, 1958, 615, 1972, 610, 1597, - /* 2200 */ 1597, 1597, 1597, 1597, 1597, 617, 1597, 1597, 1597, 1597, - /* 2210 */ 1922, 1597, 616, 1972, 1597, 1597, 1597, 1597, 1597, 1597, - /* 2220 */ 1597, 617, 1597, 1597, 1597, 1597, 1922, 1954, 616, 1597, - /* 2230 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1953, 1597, 1597, - /* 2240 */ 1597, 1988, 1597, 1597, 301, 1955, 620, 1957, 1958, 615, - /* 2250 */ 1954, 610, 1597, 1953, 1597, 1597, 1597, 1988, 1972, 1597, - /* 2260 */ 310, 1955, 620, 1957, 1958, 615, 617, 610, 1597, 1597, - /* 2270 */ 1597, 1922, 1597, 616, 1597, 1597, 1597, 1597, 1597, 1597, - /* 2280 */ 1954, 1972, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 617, - /* 2290 */ 1597, 1597, 1597, 1597, 1922, 1597, 616, 1597, 1953, 1597, - /* 2300 */ 1597, 1597, 1988, 1597, 1597, 302, 1955, 620, 1957, 1958, - /* 2310 */ 615, 1972, 610, 1597, 1597, 1597, 1597, 1597, 1597, 617, - /* 2320 */ 1597, 1953, 1597, 1597, 1922, 1988, 616, 1597, 315, 1955, - /* 2330 */ 620, 1957, 1958, 615, 1597, 610, 1597, 1597, 1597, 1597, - /* 2340 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, - /* 2350 */ 1597, 1953, 1597, 1597, 1597, 1988, 1954, 1597, 316, 1955, - /* 2360 */ 620, 1957, 1958, 615, 1597, 610, 1597, 1597, 1597, 1597, - /* 2370 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1954, 1597, - /* 2380 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1972, 1597, 1597, - /* 2390 */ 1597, 1597, 1597, 1597, 1597, 617, 1597, 1597, 1597, 1597, - /* 2400 */ 1922, 1597, 616, 1597, 1597, 1597, 1597, 1597, 1597, 1972, - /* 2410 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 617, 1597, 1597, - /* 2420 */ 1597, 1597, 1922, 1597, 616, 1597, 1597, 1953, 1597, 1597, - /* 2430 */ 1597, 1988, 1597, 1954, 1966, 1955, 620, 1957, 1958, 615, - /* 2440 */ 1597, 
610, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1953, - /* 2450 */ 1597, 1597, 1597, 1988, 1597, 1597, 1965, 1955, 620, 1957, - /* 2460 */ 1958, 615, 1597, 610, 1972, 1597, 1597, 1597, 1597, 1597, - /* 2470 */ 1597, 1597, 617, 1597, 1597, 1597, 1597, 1922, 1954, 616, - /* 2480 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, - /* 2490 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, - /* 2500 */ 1597, 1597, 1597, 1597, 1953, 1597, 1597, 1597, 1988, 1972, - /* 2510 */ 1597, 1964, 1955, 620, 1957, 1958, 615, 617, 610, 1597, - /* 2520 */ 1597, 1597, 1922, 1954, 616, 1597, 1597, 1597, 1597, 1597, - /* 2530 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1954, - /* 2540 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1953, - /* 2550 */ 1597, 1597, 1597, 1988, 1972, 1597, 327, 1955, 620, 1957, - /* 2560 */ 1958, 615, 617, 610, 1597, 1597, 1597, 1922, 1597, 616, - /* 2570 */ 1972, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 617, 1597, - /* 2580 */ 1597, 1597, 1597, 1922, 1954, 616, 1597, 1597, 1597, 1597, - /* 2590 */ 1597, 1597, 1597, 1597, 1953, 1597, 1597, 1597, 1988, 1597, - /* 2600 */ 1597, 328, 1955, 620, 1957, 1958, 615, 1954, 610, 1597, - /* 2610 */ 1953, 1597, 1597, 1597, 1988, 1972, 1597, 324, 1955, 620, - /* 2620 */ 1957, 1958, 615, 617, 610, 1597, 1597, 1597, 1922, 1597, - /* 2630 */ 616, 1597, 1597, 1597, 1597, 1597, 1597, 1954, 1972, 1597, - /* 2640 */ 1597, 1597, 1597, 1597, 1597, 1597, 617, 1597, 1597, 1597, - /* 2650 */ 1597, 1922, 1597, 616, 1597, 1953, 1597, 1597, 1597, 1988, - /* 2660 */ 1597, 1597, 329, 1955, 620, 1957, 1958, 615, 1972, 610, - /* 2670 */ 1597, 1597, 1597, 1597, 1597, 1597, 617, 1597, 618, 1597, - /* 2680 */ 1597, 1922, 1988, 616, 1597, 307, 1955, 620, 1957, 1958, - /* 2690 */ 615, 1597, 610, 1597, 1597, 1597, 1597, 1597, 1597, 1597, - /* 2700 */ 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1597, 1953, 1597, - /* 2710 */ 1597, 1597, 1988, 1597, 1597, 306, 1955, 620, 1957, 1958, - /* 2720 */ 615, 1597, 610, + /* 0 */ 460, 354, 461, 1637, 567, 601, 579, 2118, 2113, 155, + /* 10 */ 1944, 2113, 43, 41, 1528, 39, 38, 37, 1756, 123, + /* 20 */ 362, 1940, 1378, 566, 173, 600, 499, 2117, 2114, 568, + /* 30 */ 1958, 2114, 2116, 1458, 1403, 1376, 1754, 131, 469, 407, + /* 40 */ 461, 1637, 36, 35, 1944, 587, 42, 40, 39, 38, + /* 50 */ 37, 1936, 1942, 345, 542, 1940, 167, 579, 1453, 466, + /* 60 */ 586, 1976, 611, 16, 367, 462, 1807, 1800, 1802, 582, + /* 70 */ 1384, 600, 1404, 331, 1926, 158, 617, 334, 1854, 320, + /* 80 */ 1708, 1976, 1805, 43, 41, 1936, 1942, 357, 131, 561, + /* 90 */ 478, 362, 476, 1378, 1863, 12, 611, 327, 172, 2053, + /* 100 */ 2054, 1957, 129, 2058, 1458, 1992, 1376, 2117, 100, 1959, + /* 110 */ 621, 1961, 1962, 616, 601, 611, 1108, 708, 1107, 655, + /* 120 */ 170, 459, 2045, 600, 464, 1643, 356, 2041, 123, 1453, + /* 130 */ 560, 210, 1460, 1461, 16, 504, 33, 276, 1487, 567, + /* 140 */ 175, 1384, 157, 2113, 1613, 1754, 46, 1109, 2071, 256, + /* 150 */ 2053, 578, 579, 124, 577, 1624, 1958, 2113, 566, 173, + /* 160 */ 601, 1434, 1443, 2114, 568, 579, 12, 42, 40, 39, + /* 170 */ 38, 37, 566, 173, 52, 1311, 1312, 2114, 568, 97, + /* 180 */ 1379, 587, 1377, 131, 1258, 1259, 468, 1976, 708, 464, + /* 190 */ 1643, 1754, 1405, 132, 1488, 615, 131, 562, 46, 1926, + /* 200 */ 1926, 1746, 617, 1460, 1461, 1382, 1383, 62, 1433, 1436, + /* 210 */ 1437, 1438, 1439, 1440, 1441, 1442, 613, 609, 1451, 1452, + /* 220 */ 1454, 1455, 1456, 1457, 1459, 1462, 2, 1957, 596, 58, + /* 230 */ 1863, 1992, 1434, 1443, 312, 1959, 621, 
1961, 1962, 616, + /* 240 */ 614, 611, 602, 2010, 174, 2053, 2054, 1403, 129, 2058, + /* 250 */ 339, 1379, 2060, 1377, 1801, 1802, 581, 171, 2053, 2054, + /* 260 */ 557, 129, 2058, 1623, 58, 32, 360, 1482, 1483, 1484, + /* 270 */ 1485, 1486, 1490, 1491, 1492, 1493, 1382, 1383, 2057, 1433, + /* 280 */ 1436, 1437, 1438, 1439, 1440, 1441, 1442, 613, 609, 1451, + /* 290 */ 1452, 1454, 1455, 1456, 1457, 1459, 1462, 2, 58, 9, + /* 300 */ 43, 41, 1670, 47, 1554, 1599, 1850, 1926, 362, 1532, + /* 310 */ 1378, 340, 1732, 338, 337, 1403, 501, 181, 1958, 1622, + /* 320 */ 503, 1458, 58, 1376, 1215, 643, 642, 641, 1219, 640, + /* 330 */ 1221, 1222, 639, 1224, 636, 258, 1230, 633, 1232, 1233, + /* 340 */ 630, 627, 502, 563, 558, 259, 1453, 176, 400, 1976, + /* 350 */ 399, 16, 554, 1552, 1553, 1555, 1556, 618, 1384, 1108, + /* 360 */ 1175, 1107, 1926, 1926, 617, 570, 396, 514, 513, 512, + /* 370 */ 377, 43, 41, 1463, 478, 128, 508, 401, 176, 362, + /* 380 */ 507, 1378, 58, 12, 85, 506, 511, 398, 394, 1957, + /* 390 */ 1109, 505, 1458, 1992, 1376, 1177, 100, 1959, 621, 1961, + /* 400 */ 1962, 616, 1958, 611, 80, 708, 134, 1621, 141, 2016, + /* 410 */ 2045, 601, 547, 176, 356, 2041, 2113, 1453, 127, 547, + /* 420 */ 1460, 1461, 169, 2113, 1404, 178, 227, 1749, 574, 1384, + /* 430 */ 1620, 2119, 173, 1976, 1602, 1794, 2114, 568, 2119, 173, + /* 440 */ 1378, 618, 1754, 2114, 568, 167, 1926, 176, 617, 1434, + /* 450 */ 1443, 1926, 176, 1376, 44, 113, 1435, 588, 112, 111, + /* 460 */ 110, 109, 108, 107, 106, 105, 104, 1855, 1379, 353, + /* 470 */ 1377, 176, 1867, 1957, 1926, 228, 708, 1992, 1784, 1807, + /* 480 */ 160, 1959, 621, 1961, 1962, 616, 355, 611, 1384, 1354, + /* 490 */ 1355, 1460, 1461, 1382, 1383, 1805, 1433, 1436, 1437, 1438, + /* 500 */ 1439, 1440, 1441, 1442, 613, 609, 1451, 1452, 1454, 1455, + /* 510 */ 1456, 1457, 1459, 1462, 2, 510, 509, 679, 677, 113, + /* 520 */ 1434, 1443, 112, 111, 110, 109, 108, 107, 106, 105, + /* 530 */ 104, 176, 569, 2134, 365, 708, 514, 513, 512, 1379, + /* 540 */ 368, 1377, 155, 1807, 128, 508, 1468, 1913, 155, 507, + /* 550 */ 366, 1756, 1403, 1406, 506, 511, 1731, 1756, 1958, 1805, + /* 560 */ 505, 267, 268, 1403, 1382, 1383, 266, 1433, 1436, 1437, + /* 570 */ 1438, 1439, 1440, 1441, 1442, 613, 609, 1451, 1452, 1454, + /* 580 */ 1455, 1456, 1457, 1459, 1462, 2, 43, 41, 1601, 1976, + /* 590 */ 1619, 601, 185, 528, 362, 384, 1378, 582, 1379, 1730, + /* 600 */ 1377, 1618, 1926, 646, 617, 405, 526, 1458, 524, 1376, + /* 610 */ 1745, 601, 122, 121, 120, 119, 118, 117, 116, 115, + /* 620 */ 114, 1940, 1754, 1382, 1383, 406, 11, 10, 77, 1957, + /* 630 */ 1617, 76, 1453, 1992, 1926, 1616, 100, 1959, 621, 1961, + /* 640 */ 1962, 616, 1754, 611, 1384, 1926, 91, 80, 170, 575, + /* 650 */ 2045, 1936, 1942, 133, 356, 2041, 2016, 43, 41, 601, + /* 660 */ 519, 655, 611, 653, 2118, 362, 1958, 1378, 1747, 44, + /* 670 */ 1750, 443, 1615, 415, 1926, 529, 2072, 588, 1458, 1926, + /* 680 */ 1376, 1405, 146, 145, 650, 649, 648, 143, 29, 224, + /* 690 */ 1754, 708, 1868, 1435, 36, 35, 182, 1976, 42, 40, + /* 700 */ 39, 38, 37, 1453, 522, 618, 1460, 1461, 1612, 516, + /* 710 */ 1926, 1611, 617, 2060, 223, 1384, 1926, 1610, 2118, 36, + /* 720 */ 35, 1501, 2113, 42, 40, 39, 38, 37, 1402, 189, + /* 730 */ 188, 1406, 1644, 1489, 2060, 1434, 1443, 1957, 2117, 2056, + /* 740 */ 12, 1992, 2114, 2115, 101, 1959, 621, 1961, 1962, 616, + /* 750 */ 64, 611, 1926, 63, 1379, 1926, 1377, 1587, 2045, 31, + /* 760 */ 2055, 1926, 708, 2042, 1609, 36, 35, 1608, 1607, 42, + /* 770 */ 40, 39, 38, 37, 9, 
1606, 1089, 1460, 1461, 1382, + /* 780 */ 1383, 703, 1433, 1436, 1437, 1438, 1439, 1440, 1441, 1442, + /* 790 */ 613, 609, 1451, 1452, 1454, 1455, 1456, 1457, 1459, 1462, + /* 800 */ 2, 319, 234, 1401, 30, 1605, 1434, 1443, 1926, 711, + /* 810 */ 437, 1926, 1926, 450, 1494, 1091, 449, 1094, 1095, 1926, + /* 820 */ 603, 1837, 2017, 283, 258, 1379, 1667, 1377, 1604, 1850, + /* 830 */ 9, 421, 7, 451, 155, 605, 423, 2017, 166, 647, + /* 840 */ 183, 83, 1798, 1757, 701, 697, 693, 689, 281, 1926, + /* 850 */ 1382, 1383, 1729, 1433, 1436, 1437, 1438, 1439, 1440, 1441, + /* 860 */ 1442, 613, 609, 1451, 1452, 1454, 1455, 1456, 1457, 1459, + /* 870 */ 1462, 2, 1926, 1406, 236, 36, 35, 335, 6, 42, + /* 880 */ 40, 39, 38, 37, 98, 1850, 668, 274, 1724, 411, + /* 890 */ 1525, 685, 684, 683, 682, 372, 187, 681, 680, 135, + /* 900 */ 675, 674, 673, 672, 671, 670, 669, 148, 665, 664, + /* 910 */ 663, 371, 370, 660, 659, 658, 657, 656, 651, 447, + /* 920 */ 597, 1798, 442, 441, 440, 439, 436, 435, 434, 433, + /* 930 */ 432, 428, 427, 426, 425, 336, 418, 417, 416, 1807, + /* 940 */ 413, 412, 333, 156, 1094, 1095, 36, 35, 296, 1384, + /* 950 */ 42, 40, 39, 38, 37, 1806, 144, 261, 1958, 653, + /* 960 */ 667, 652, 294, 66, 1798, 290, 65, 1403, 1784, 2065, + /* 970 */ 1521, 48, 424, 3, 1348, 1743, 230, 503, 146, 145, + /* 980 */ 650, 649, 648, 143, 193, 456, 454, 36, 35, 1976, + /* 990 */ 571, 42, 40, 39, 38, 37, 226, 618, 1739, 502, + /* 1000 */ 225, 601, 1926, 653, 617, 408, 36, 35, 51, 1657, + /* 1010 */ 42, 40, 39, 38, 37, 429, 235, 137, 409, 125, + /* 1020 */ 58, 601, 146, 145, 650, 649, 648, 143, 1741, 1957, + /* 1030 */ 154, 515, 1754, 1992, 1564, 430, 100, 1959, 621, 1961, + /* 1040 */ 1962, 616, 25, 611, 216, 1614, 67, 214, 2133, 1737, + /* 1050 */ 2045, 608, 1754, 1650, 356, 2041, 82, 322, 60, 99, + /* 1060 */ 532, 1958, 530, 36, 35, 2079, 231, 42, 40, 39, + /* 1070 */ 38, 37, 36, 35, 612, 517, 42, 40, 39, 38, + /* 1080 */ 37, 601, 1521, 218, 1387, 220, 217, 601, 219, 1648, + /* 1090 */ 1947, 222, 1976, 2085, 221, 477, 75, 74, 73, 404, + /* 1100 */ 618, 1751, 180, 601, 240, 1926, 1958, 617, 1435, 1386, + /* 1110 */ 1551, 520, 1754, 1945, 1596, 1597, 50, 139, 1754, 645, + /* 1120 */ 318, 546, 45, 392, 1940, 390, 386, 382, 379, 376, + /* 1130 */ 264, 1524, 1957, 601, 1754, 601, 1992, 1976, 1949, 100, + /* 1140 */ 1959, 621, 1961, 1962, 616, 618, 611, 543, 140, 583, + /* 1150 */ 1926, 2133, 617, 2045, 1936, 1942, 242, 356, 2041, 1709, + /* 1160 */ 1544, 601, 142, 96, 1754, 611, 1754, 253, 2107, 176, + /* 1170 */ 11, 10, 555, 93, 1324, 271, 209, 1957, 247, 1958, + /* 1180 */ 144, 1992, 269, 1977, 100, 1959, 621, 1961, 1962, 616, + /* 1190 */ 373, 611, 1754, 1479, 359, 358, 2133, 601, 2045, 60, + /* 1200 */ 593, 45, 356, 2041, 1392, 601, 1638, 661, 601, 572, + /* 1210 */ 1976, 598, 1859, 2064, 273, 1458, 601, 1385, 618, 599, + /* 1220 */ 211, 585, 277, 1926, 375, 617, 1795, 1594, 1754, 1156, + /* 1230 */ 369, 1390, 1208, 662, 1958, 162, 1754, 45, 2075, 1754, + /* 1240 */ 1453, 495, 491, 487, 483, 208, 625, 1754, 142, 144, + /* 1250 */ 1957, 1495, 1384, 1444, 1992, 1154, 1389, 100, 1959, 621, + /* 1260 */ 1961, 1962, 616, 547, 611, 1976, 547, 2113, 126, 2020, + /* 1270 */ 2113, 2045, 1137, 618, 580, 356, 2041, 142, 1926, 255, + /* 1280 */ 617, 81, 2119, 173, 206, 2119, 173, 2114, 568, 289, + /* 1290 */ 2114, 568, 4, 378, 374, 1, 252, 383, 1236, 607, + /* 1300 */ 1240, 1247, 332, 1341, 284, 1957, 186, 1138, 410, 1992, + /* 1310 */ 1406, 414, 100, 1959, 621, 1961, 1962, 616, 1860, 611, + /* 1320 */ 1245, 1593, 
445, 419, 2018, 1401, 2045, 431, 1852, 147, + /* 1330 */ 356, 2041, 438, 444, 452, 446, 547, 453, 190, 455, + /* 1340 */ 2113, 457, 1407, 458, 467, 1409, 1958, 535, 470, 196, + /* 1350 */ 198, 205, 199, 1408, 204, 2119, 173, 474, 471, 472, + /* 1360 */ 2114, 568, 1393, 1410, 1388, 473, 201, 475, 203, 78, + /* 1370 */ 79, 479, 1111, 197, 207, 496, 1958, 1976, 497, 498, + /* 1380 */ 500, 103, 1744, 534, 213, 618, 321, 1396, 1398, 547, + /* 1390 */ 1926, 1905, 617, 2113, 536, 285, 1740, 215, 537, 609, + /* 1400 */ 1451, 1452, 1454, 1455, 1456, 1457, 149, 1976, 2119, 173, + /* 1410 */ 150, 538, 1742, 2114, 568, 618, 1902, 1957, 1738, 151, + /* 1420 */ 1926, 1992, 617, 152, 100, 1959, 621, 1961, 1962, 616, + /* 1430 */ 1901, 611, 1958, 229, 232, 544, 604, 541, 2045, 551, + /* 1440 */ 556, 591, 356, 2041, 2076, 2067, 2091, 1957, 2086, 553, + /* 1450 */ 5, 1992, 1958, 346, 101, 1959, 621, 1961, 1962, 616, + /* 1460 */ 238, 611, 2090, 1976, 559, 241, 565, 552, 2045, 550, + /* 1470 */ 248, 618, 2044, 2041, 249, 246, 1926, 549, 617, 347, + /* 1480 */ 250, 251, 576, 1976, 163, 573, 1521, 130, 1405, 2061, + /* 1490 */ 350, 618, 584, 1411, 2136, 260, 1926, 1864, 617, 589, + /* 1500 */ 286, 590, 1873, 1957, 1872, 287, 1871, 1992, 352, 2112, + /* 1510 */ 101, 1959, 621, 1961, 1962, 616, 594, 611, 88, 595, + /* 1520 */ 1958, 90, 254, 619, 2045, 288, 57, 1992, 606, 2041, + /* 1530 */ 101, 1959, 621, 1961, 1962, 616, 1958, 611, 1755, 2026, + /* 1540 */ 92, 1799, 1725, 623, 2045, 280, 704, 291, 326, 2041, + /* 1550 */ 705, 1976, 707, 49, 315, 323, 324, 295, 1920, 618, + /* 1560 */ 71, 1918, 1917, 72, 1926, 300, 617, 1976, 1919, 314, + /* 1570 */ 293, 304, 1914, 380, 381, 618, 1370, 1371, 179, 385, + /* 1580 */ 1926, 1912, 617, 387, 388, 389, 1911, 391, 1910, 393, + /* 1590 */ 1909, 1957, 395, 1908, 397, 1992, 1344, 1958, 159, 1959, + /* 1600 */ 621, 1961, 1962, 616, 1343, 611, 1884, 1957, 1883, 402, + /* 1610 */ 403, 1992, 1882, 1881, 159, 1959, 621, 1961, 1962, 616, + /* 1620 */ 1302, 611, 1845, 1844, 1842, 136, 1841, 1840, 1976, 184, + /* 1630 */ 420, 1833, 422, 1832, 1831, 1830, 618, 1843, 1839, 548, + /* 1640 */ 2082, 1926, 1958, 617, 1838, 1836, 1835, 1834, 1829, 1828, + /* 1650 */ 1827, 1826, 1825, 1824, 1823, 1822, 2083, 1821, 1820, 1819, + /* 1660 */ 1818, 138, 1817, 1816, 1815, 1814, 1813, 1812, 1957, 1811, + /* 1670 */ 1810, 448, 1992, 1976, 1183, 306, 1959, 621, 1961, 1962, + /* 1680 */ 616, 618, 611, 1304, 1809, 1808, 1926, 1672, 617, 1671, + /* 1690 */ 191, 1669, 1633, 1097, 192, 1632, 1897, 1096, 194, 1891, + /* 1700 */ 1880, 202, 1879, 1958, 1862, 1733, 1130, 69, 1668, 1666, + /* 1710 */ 1664, 480, 195, 1957, 70, 168, 200, 1992, 1662, 564, + /* 1720 */ 160, 1959, 621, 1961, 1962, 616, 1958, 611, 1946, 463, + /* 1730 */ 1660, 1647, 482, 465, 1976, 1646, 481, 486, 484, 351, + /* 1740 */ 490, 488, 618, 212, 59, 485, 492, 1926, 1629, 617, + /* 1750 */ 1735, 1252, 494, 1734, 1251, 489, 1174, 1976, 1173, 1172, + /* 1760 */ 493, 1171, 1166, 676, 678, 615, 1658, 341, 1168, 1167, + /* 1770 */ 1926, 1165, 617, 2135, 1957, 1651, 342, 1649, 1992, 518, + /* 1780 */ 343, 313, 1959, 621, 1961, 1962, 616, 1958, 611, 1628, + /* 1790 */ 521, 523, 1627, 1626, 525, 527, 102, 1957, 1359, 1358, + /* 1800 */ 1896, 1992, 531, 1350, 312, 1959, 621, 1961, 1962, 616, + /* 1810 */ 1362, 611, 24, 2011, 1890, 1878, 539, 1958, 1976, 1876, + /* 1820 */ 2118, 1566, 17, 361, 239, 14, 618, 56, 244, 245, + /* 1830 */ 28, 1926, 26, 617, 237, 18, 53, 1550, 1543, 161, + /* 1840 */ 1958, 540, 243, 1947, 27, 233, 84, 61, 1976, 19, + 
/* 1850 */ 15, 1581, 55, 363, 257, 1580, 618, 344, 1957, 348, + /* 1860 */ 1585, 1926, 1992, 617, 1584, 313, 1959, 621, 1961, 1962, + /* 1870 */ 616, 1976, 611, 153, 349, 1586, 1587, 1518, 164, 618, + /* 1880 */ 1877, 1517, 1875, 1874, 1926, 1958, 617, 262, 1957, 1861, + /* 1890 */ 20, 545, 1992, 263, 1548, 313, 1959, 621, 1961, 1962, + /* 1900 */ 616, 1958, 611, 87, 265, 270, 86, 89, 93, 275, + /* 1910 */ 21, 533, 592, 10, 1394, 1992, 1976, 1995, 308, 1959, + /* 1920 */ 621, 1961, 1962, 616, 618, 611, 272, 1426, 1448, 1926, + /* 1930 */ 610, 617, 1976, 1446, 1445, 165, 622, 34, 13, 624, + /* 1940 */ 618, 22, 177, 1480, 1418, 1926, 23, 617, 1237, 364, + /* 1950 */ 626, 628, 1234, 629, 1958, 1231, 1957, 54, 631, 620, + /* 1960 */ 1992, 1225, 632, 297, 1959, 621, 1961, 1962, 616, 634, + /* 1970 */ 611, 635, 1957, 1223, 637, 1214, 1992, 278, 644, 298, + /* 1980 */ 1959, 621, 1961, 1962, 616, 1976, 611, 1470, 638, 1229, + /* 1990 */ 8, 1228, 1469, 618, 1227, 94, 95, 1226, 1926, 1958, + /* 2000 */ 617, 1246, 68, 1242, 1128, 654, 1162, 1161, 1160, 1159, + /* 2010 */ 1158, 1157, 1155, 1153, 1152, 1151, 1181, 1146, 666, 1149, + /* 2020 */ 1148, 1147, 1145, 1144, 1143, 1957, 279, 1178, 1176, 1992, + /* 2030 */ 1976, 1140, 299, 1959, 621, 1961, 1962, 616, 618, 611, + /* 2040 */ 1139, 1136, 1135, 1926, 1134, 617, 1133, 1665, 686, 687, + /* 2050 */ 1663, 688, 1958, 690, 692, 1661, 691, 695, 694, 696, + /* 2060 */ 1659, 698, 699, 700, 1645, 702, 1625, 1086, 1958, 706, + /* 2070 */ 1957, 282, 710, 1380, 1992, 1600, 292, 305, 1959, 621, + /* 2080 */ 1961, 1962, 616, 1976, 611, 1600, 709, 1600, 1600, 1600, + /* 2090 */ 1600, 618, 1600, 1600, 1600, 1600, 1926, 1600, 617, 1976, + /* 2100 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 618, 1600, 1600, + /* 2110 */ 1600, 1600, 1926, 1958, 617, 1600, 1600, 1600, 1600, 1600, + /* 2120 */ 1600, 1600, 1600, 1957, 1600, 1600, 1600, 1992, 1600, 1600, + /* 2130 */ 309, 1959, 621, 1961, 1962, 616, 1600, 611, 1600, 1957, + /* 2140 */ 1600, 1600, 1600, 1992, 1976, 1600, 301, 1959, 621, 1961, + /* 2150 */ 1962, 616, 618, 611, 1600, 1600, 1600, 1926, 1600, 617, + /* 2160 */ 1600, 1600, 1600, 1600, 1600, 1600, 1958, 1600, 1600, 1600, + /* 2170 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, + /* 2180 */ 1600, 1600, 1958, 1600, 1957, 1600, 1600, 1600, 1992, 1600, + /* 2190 */ 1600, 310, 1959, 621, 1961, 1962, 616, 1976, 611, 1600, + /* 2200 */ 1600, 1600, 1600, 1600, 1600, 618, 1600, 1600, 1600, 1600, + /* 2210 */ 1926, 1600, 617, 1976, 1600, 1600, 1600, 1600, 1600, 1600, + /* 2220 */ 1600, 618, 1600, 1600, 1600, 1600, 1926, 1958, 617, 1600, + /* 2230 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1957, 1600, 1600, + /* 2240 */ 1600, 1992, 1600, 1600, 302, 1959, 621, 1961, 1962, 616, + /* 2250 */ 1958, 611, 1600, 1957, 1600, 1600, 1600, 1992, 1976, 1600, + /* 2260 */ 311, 1959, 621, 1961, 1962, 616, 618, 611, 1600, 1600, + /* 2270 */ 1600, 1926, 1600, 617, 1600, 1600, 1600, 1600, 1600, 1600, + /* 2280 */ 1958, 1976, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 618, + /* 2290 */ 1600, 1600, 1600, 1600, 1926, 1600, 617, 1600, 1957, 1600, + /* 2300 */ 1600, 1600, 1992, 1600, 1600, 303, 1959, 621, 1961, 1962, + /* 2310 */ 616, 1976, 611, 1600, 1600, 1600, 1600, 1600, 1600, 618, + /* 2320 */ 1600, 1957, 1600, 1600, 1926, 1992, 617, 1600, 316, 1959, + /* 2330 */ 621, 1961, 1962, 616, 1600, 611, 1600, 1600, 1600, 1600, + /* 2340 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, + /* 2350 */ 1600, 1957, 1600, 1600, 1600, 1992, 1958, 1600, 317, 1959, + /* 2360 
*/ 621, 1961, 1962, 616, 1600, 611, 1600, 1600, 1600, 1600, + /* 2370 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1958, 1600, + /* 2380 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1976, 1600, 1600, + /* 2390 */ 1600, 1600, 1600, 1600, 1600, 618, 1600, 1600, 1600, 1600, + /* 2400 */ 1926, 1600, 617, 1600, 1600, 1600, 1600, 1600, 1600, 1976, + /* 2410 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 618, 1600, 1600, + /* 2420 */ 1600, 1600, 1926, 1600, 617, 1600, 1600, 1957, 1600, 1600, + /* 2430 */ 1600, 1992, 1600, 1958, 1970, 1959, 621, 1961, 1962, 616, + /* 2440 */ 1600, 611, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1957, + /* 2450 */ 1600, 1600, 1600, 1992, 1600, 1600, 1969, 1959, 621, 1961, + /* 2460 */ 1962, 616, 1600, 611, 1976, 1600, 1600, 1600, 1600, 1600, + /* 2470 */ 1600, 1600, 618, 1600, 1600, 1600, 1600, 1926, 1958, 617, + /* 2480 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, + /* 2490 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, + /* 2500 */ 1600, 1600, 1600, 1600, 1957, 1600, 1600, 1600, 1992, 1976, + /* 2510 */ 1600, 1968, 1959, 621, 1961, 1962, 616, 618, 611, 1600, + /* 2520 */ 1600, 1600, 1926, 1958, 617, 1600, 1600, 1600, 1600, 1600, + /* 2530 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1958, + /* 2540 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1957, + /* 2550 */ 1600, 1600, 1600, 1992, 1976, 1600, 328, 1959, 621, 1961, + /* 2560 */ 1962, 616, 618, 611, 1600, 1600, 1600, 1926, 1600, 617, + /* 2570 */ 1976, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 618, 1600, + /* 2580 */ 1600, 1600, 1600, 1926, 1958, 617, 1600, 1600, 1600, 1600, + /* 2590 */ 1600, 1600, 1600, 1600, 1957, 1600, 1600, 1600, 1992, 1600, + /* 2600 */ 1600, 329, 1959, 621, 1961, 1962, 616, 1958, 611, 1600, + /* 2610 */ 1957, 1600, 1600, 1600, 1992, 1976, 1600, 325, 1959, 621, + /* 2620 */ 1961, 1962, 616, 618, 611, 1600, 1600, 1600, 1926, 1600, + /* 2630 */ 617, 1600, 1600, 1600, 1600, 1600, 1600, 1958, 1976, 1600, + /* 2640 */ 1600, 1600, 1600, 1600, 1600, 1600, 618, 1600, 1600, 1600, + /* 2650 */ 1600, 1926, 1600, 617, 1600, 1957, 1600, 1600, 1600, 1992, + /* 2660 */ 1600, 1600, 330, 1959, 621, 1961, 1962, 616, 1976, 611, + /* 2670 */ 1600, 1600, 1600, 1600, 1600, 1600, 618, 1600, 619, 1600, + /* 2680 */ 1600, 1926, 1992, 617, 1600, 308, 1959, 621, 1961, 1962, + /* 2690 */ 616, 1600, 611, 1600, 1600, 1600, 1600, 1600, 1600, 1600, + /* 2700 */ 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1957, 1600, + /* 2710 */ 1600, 1600, 1992, 1600, 1600, 307, 1959, 621, 1961, 1962, + /* 2720 */ 616, 1600, 611, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 329, 348, 331, 332, 429, 333, 333, 429, 433, 356, @@ -553,7 +553,7 @@ static const YYCODETYPE yy_lookahead[] = { /* 570 */ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, /* 580 */ 221, 222, 223, 224, 225, 226, 12, 13, 0, 356, /* 590 */ 325, 333, 58, 21, 20, 48, 22, 364, 180, 0, - /* 600 */ 182, 325, 369, 20, 371, 347, 34, 33, 36, 35, + /* 600 */ 182, 325, 369, 106, 371, 347, 34, 33, 36, 35, /* 610 */ 358, 333, 24, 25, 26, 27, 28, 29, 30, 31, /* 620 */ 32, 369, 364, 205, 206, 347, 1, 2, 94, 396, /* 630 */ 325, 97, 58, 400, 369, 325, 403, 404, 405, 406, @@ -565,143 +565,143 @@ static const YYCODETYPE yy_lookahead[] = { /* 690 */ 364, 117, 386, 161, 8, 9, 163, 356, 12, 13, /* 700 */ 14, 15, 16, 58, 48, 364, 132, 133, 325, 53, /* 710 */ 369, 325, 371, 402, 58, 70, 369, 325, 429, 8, - /* 720 */ 9, 96, 433, 12, 13, 14, 15, 16, 4, 137, + /* 720 */ 9, 96, 433, 12, 13, 14, 15, 16, 20, 137, /* 730 */ 
138, 20, 0, 160, 402, 161, 162, 396, 449, 428, /* 740 */ 95, 400, 453, 454, 403, 404, 405, 406, 407, 408, /* 750 */ 94, 410, 369, 97, 180, 369, 182, 96, 417, 2, - /* 760 */ 428, 369, 117, 422, 325, 8, 9, 325, 127, 12, + /* 760 */ 428, 369, 117, 422, 325, 8, 9, 325, 325, 12, /* 770 */ 13, 14, 15, 16, 228, 325, 4, 132, 133, 205, /* 780 */ 206, 49, 208, 209, 210, 211, 212, 213, 214, 215, /* 790 */ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, - /* 800 */ 226, 18, 35, 20, 231, 325, 161, 162, 369, 19, - /* 810 */ 27, 369, 356, 30, 241, 43, 33, 45, 46, 369, - /* 820 */ 414, 365, 416, 33, 163, 180, 0, 182, 187, 188, - /* 830 */ 364, 48, 191, 50, 193, 228, 53, 230, 48, 0, - /* 840 */ 414, 375, 416, 39, 54, 55, 56, 57, 58, 369, + /* 800 */ 226, 18, 58, 20, 231, 325, 161, 162, 369, 19, + /* 810 */ 27, 369, 369, 30, 241, 43, 33, 45, 46, 369, + /* 820 */ 414, 0, 416, 33, 163, 180, 0, 182, 325, 364, + /* 830 */ 228, 48, 230, 50, 356, 414, 53, 416, 48, 366, + /* 840 */ 375, 97, 369, 365, 54, 55, 56, 57, 58, 369, /* 850 */ 205, 206, 0, 208, 209, 210, 211, 212, 213, 214, /* 860 */ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, - /* 870 */ 225, 226, 356, 20, 163, 8, 9, 94, 20, 12, - /* 880 */ 13, 14, 15, 16, 94, 364, 63, 97, 372, 106, - /* 890 */ 106, 65, 66, 67, 68, 69, 375, 71, 72, 73, + /* 870 */ 225, 226, 369, 20, 163, 8, 9, 94, 39, 12, + /* 880 */ 13, 14, 15, 16, 94, 364, 344, 97, 346, 106, + /* 890 */ 4, 65, 66, 67, 68, 69, 375, 71, 72, 73, /* 900 */ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, /* 910 */ 84, 85, 86, 87, 88, 89, 90, 91, 366, 136, /* 920 */ 130, 369, 139, 140, 141, 142, 143, 144, 145, 146, - /* 930 */ 147, 148, 149, 150, 151, 152, 153, 154, 155, 70, - /* 940 */ 157, 158, 159, 18, 45, 46, 8, 9, 23, 182, - /* 950 */ 12, 13, 14, 15, 16, 325, 325, 167, 325, 107, - /* 960 */ 70, 366, 37, 38, 369, 349, 41, 366, 352, 245, - /* 970 */ 369, 344, 357, 346, 184, 42, 186, 44, 126, 127, + /* 930 */ 147, 148, 149, 150, 151, 152, 153, 154, 155, 356, + /* 940 */ 157, 158, 159, 18, 45, 46, 8, 9, 23, 70, + /* 950 */ 12, 13, 14, 15, 16, 372, 44, 167, 325, 107, + /* 960 */ 70, 366, 37, 38, 369, 349, 41, 20, 352, 242, + /* 970 */ 243, 42, 151, 44, 184, 357, 186, 107, 126, 127, /* 980 */ 128, 129, 130, 131, 59, 60, 61, 8, 9, 356, - /* 990 */ 151, 12, 13, 14, 15, 16, 107, 364, 47, 369, - /* 1000 */ 369, 333, 369, 107, 371, 357, 8, 9, 242, 243, - /* 1010 */ 12, 13, 14, 15, 16, 347, 163, 58, 129, 161, - /* 1020 */ 95, 333, 126, 127, 128, 129, 130, 131, 205, 396, - /* 1030 */ 163, 333, 364, 400, 96, 347, 403, 404, 405, 406, - /* 1040 */ 407, 408, 44, 410, 44, 347, 95, 243, 415, 42, - /* 1050 */ 417, 44, 364, 22, 421, 422, 97, 357, 106, 134, - /* 1060 */ 333, 325, 364, 8, 9, 432, 35, 12, 13, 14, - /* 1070 */ 15, 16, 8, 9, 347, 357, 12, 13, 14, 15, - /* 1080 */ 16, 333, 99, 99, 44, 102, 102, 333, 0, 35, - /* 1090 */ 99, 364, 356, 102, 0, 347, 96, 172, 173, 174, - /* 1100 */ 364, 347, 177, 333, 44, 369, 325, 371, 156, 0, - /* 1110 */ 22, 99, 364, 358, 102, 44, 22, 347, 364, 44, - /* 1120 */ 195, 132, 133, 198, 369, 200, 201, 202, 203, 204, - /* 1130 */ 44, 22, 396, 333, 364, 333, 400, 356, 326, 403, + /* 990 */ 44, 12, 13, 14, 15, 16, 127, 364, 357, 129, + /* 1000 */ 131, 333, 369, 107, 371, 22, 8, 9, 96, 0, + /* 1010 */ 12, 13, 14, 15, 16, 347, 163, 42, 35, 44, + /* 1020 */ 95, 333, 126, 127, 128, 129, 130, 131, 357, 396, + /* 1030 */ 163, 22, 364, 400, 96, 347, 403, 404, 405, 406, + /* 1040 */ 407, 408, 44, 410, 99, 326, 106, 102, 415, 357, + /* 1050 */ 417, 63, 364, 0, 421, 422, 187, 188, 44, 134, + /* 1060 
*/ 191, 325, 193, 8, 9, 432, 357, 12, 13, 14, + /* 1070 */ 15, 16, 8, 9, 357, 22, 12, 13, 14, 15, + /* 1080 */ 16, 333, 243, 99, 35, 99, 102, 333, 102, 0, + /* 1090 */ 47, 99, 356, 379, 102, 347, 156, 172, 173, 174, + /* 1100 */ 364, 347, 177, 333, 44, 369, 325, 371, 161, 35, + /* 1110 */ 96, 22, 364, 358, 132, 133, 163, 347, 364, 357, + /* 1120 */ 195, 168, 44, 198, 369, 200, 201, 202, 203, 204, + /* 1130 */ 44, 245, 396, 333, 364, 333, 400, 356, 95, 403, /* 1140 */ 404, 405, 406, 407, 408, 364, 410, 347, 44, 347, /* 1150 */ 369, 415, 371, 417, 399, 400, 96, 421, 422, 345, - /* 1160 */ 96, 333, 333, 357, 364, 410, 364, 96, 432, 244, - /* 1170 */ 44, 96, 357, 163, 44, 347, 347, 396, 168, 325, - /* 1180 */ 44, 400, 96, 457, 403, 404, 405, 406, 407, 408, - /* 1190 */ 13, 410, 364, 364, 12, 13, 415, 333, 417, 44, - /* 1200 */ 96, 44, 421, 422, 22, 1, 2, 13, 357, 95, - /* 1210 */ 356, 347, 35, 432, 379, 33, 44, 35, 364, 105, - /* 1220 */ 33, 387, 96, 369, 387, 371, 96, 172, 364, 35, - /* 1230 */ 335, 446, 96, 440, 325, 48, 182, 44, 356, 335, - /* 1240 */ 58, 54, 55, 56, 57, 58, 44, 379, 332, 44, - /* 1250 */ 396, 96, 70, 96, 400, 368, 44, 403, 404, 405, - /* 1260 */ 406, 407, 408, 429, 410, 356, 429, 433, 96, 415, - /* 1270 */ 433, 417, 35, 364, 431, 421, 422, 379, 369, 423, + /* 1160 */ 96, 333, 44, 95, 364, 410, 364, 457, 432, 244, + /* 1170 */ 1, 2, 446, 105, 96, 347, 335, 396, 440, 325, + /* 1180 */ 44, 400, 96, 356, 403, 404, 405, 406, 407, 408, + /* 1190 */ 335, 410, 364, 205, 12, 13, 415, 333, 417, 44, + /* 1200 */ 96, 44, 421, 422, 22, 333, 332, 13, 333, 263, + /* 1210 */ 356, 347, 379, 432, 96, 33, 333, 35, 364, 347, + /* 1220 */ 33, 387, 347, 369, 387, 371, 368, 172, 364, 35, + /* 1230 */ 347, 182, 96, 13, 325, 48, 364, 44, 379, 364, + /* 1240 */ 58, 54, 55, 56, 57, 58, 44, 364, 44, 44, + /* 1250 */ 396, 96, 70, 96, 400, 35, 182, 403, 404, 405, + /* 1260 */ 406, 407, 408, 429, 410, 356, 429, 433, 44, 415, + /* 1270 */ 433, 417, 35, 364, 431, 421, 422, 44, 369, 450, /* 1280 */ 371, 94, 448, 449, 97, 448, 449, 453, 454, 96, - /* 1290 */ 453, 454, 450, 434, 387, 246, 48, 397, 96, 117, - /* 1300 */ 398, 96, 178, 263, 389, 396, 42, 70, 96, 400, - /* 1310 */ 376, 20, 403, 404, 405, 406, 407, 408, 379, 410, - /* 1320 */ 376, 266, 160, 374, 415, 20, 417, 333, 333, 376, - /* 1330 */ 421, 422, 374, 93, 341, 374, 429, 333, 333, 333, - /* 1340 */ 433, 20, 327, 327, 20, 371, 325, 387, 393, 20, - /* 1350 */ 339, 164, 165, 339, 167, 448, 449, 170, 334, 20, - /* 1360 */ 453, 454, 180, 388, 182, 339, 334, 339, 339, 339, - /* 1370 */ 333, 52, 339, 186, 336, 336, 325, 356, 356, 327, - /* 1380 */ 333, 356, 369, 356, 327, 364, 369, 205, 206, 429, - /* 1390 */ 369, 356, 371, 433, 194, 393, 395, 356, 356, 217, + /* 1290 */ 453, 454, 246, 398, 387, 434, 423, 48, 96, 117, + /* 1300 */ 96, 96, 397, 178, 389, 396, 42, 70, 376, 400, + /* 1310 */ 20, 376, 403, 404, 405, 406, 407, 408, 379, 410, + /* 1320 */ 96, 266, 160, 374, 415, 20, 417, 333, 333, 96, + /* 1330 */ 421, 422, 376, 374, 93, 374, 429, 341, 333, 333, + /* 1340 */ 433, 333, 20, 327, 327, 20, 325, 387, 393, 339, + /* 1350 */ 339, 164, 165, 20, 167, 448, 449, 170, 371, 334, + /* 1360 */ 453, 454, 180, 20, 182, 388, 339, 334, 339, 339, + /* 1370 */ 339, 333, 52, 186, 339, 336, 325, 356, 336, 327, + /* 1380 */ 356, 333, 356, 194, 356, 364, 327, 205, 206, 429, + /* 1390 */ 369, 369, 371, 433, 395, 393, 356, 356, 185, 217, /* 1400 */ 218, 219, 220, 221, 222, 223, 356, 356, 448, 449, - /* 1410 */ 337, 185, 356, 453, 454, 364, 371, 396, 356, 356, + /* 1410 
*/ 356, 392, 356, 453, 454, 364, 369, 396, 356, 356, /* 1420 */ 369, 400, 371, 356, 403, 404, 405, 406, 407, 408, - /* 1430 */ 337, 410, 325, 333, 369, 392, 415, 379, 417, 251, - /* 1440 */ 379, 252, 421, 422, 258, 369, 369, 396, 439, 369, - /* 1450 */ 384, 400, 325, 384, 403, 404, 405, 406, 407, 408, - /* 1460 */ 442, 410, 439, 356, 171, 247, 260, 259, 417, 262, - /* 1470 */ 398, 364, 421, 422, 438, 441, 369, 267, 371, 264, - /* 1480 */ 364, 437, 243, 356, 436, 439, 20, 402, 334, 333, - /* 1490 */ 337, 364, 20, 382, 384, 165, 369, 369, 371, 384, - /* 1500 */ 369, 369, 369, 396, 369, 452, 369, 400, 337, 364, - /* 1510 */ 403, 404, 405, 406, 407, 408, 352, 410, 337, 381, - /* 1520 */ 325, 451, 95, 396, 417, 420, 458, 400, 421, 422, - /* 1530 */ 403, 404, 405, 406, 407, 408, 325, 410, 95, 360, - /* 1540 */ 337, 36, 369, 346, 417, 328, 333, 327, 421, 422, - /* 1550 */ 390, 356, 394, 385, 385, 323, 0, 0, 187, 364, - /* 1560 */ 42, 338, 0, 0, 369, 0, 371, 356, 35, 350, - /* 1570 */ 35, 350, 350, 199, 35, 364, 35, 0, 199, 35, - /* 1580 */ 369, 35, 371, 199, 0, 199, 0, 35, 0, 22, - /* 1590 */ 0, 396, 182, 35, 180, 400, 0, 325, 403, 404, - /* 1600 */ 405, 406, 407, 408, 0, 410, 176, 396, 175, 0, - /* 1610 */ 0, 400, 0, 47, 403, 404, 405, 406, 407, 408, - /* 1620 */ 0, 410, 0, 42, 0, 0, 0, 0, 356, 151, + /* 1430 */ 369, 410, 325, 337, 337, 333, 415, 371, 417, 369, + /* 1440 */ 252, 251, 421, 422, 379, 442, 439, 396, 379, 369, + /* 1450 */ 258, 400, 325, 369, 403, 404, 405, 406, 407, 408, + /* 1460 */ 384, 410, 439, 356, 369, 384, 171, 260, 417, 259, + /* 1470 */ 438, 364, 421, 422, 437, 441, 369, 247, 371, 267, + /* 1480 */ 436, 398, 264, 356, 439, 262, 243, 364, 20, 402, + /* 1490 */ 334, 364, 333, 20, 458, 337, 369, 382, 371, 369, + /* 1500 */ 384, 369, 369, 396, 369, 384, 369, 400, 369, 452, + /* 1510 */ 403, 404, 405, 406, 407, 408, 165, 410, 337, 381, + /* 1520 */ 325, 337, 451, 396, 417, 352, 95, 400, 421, 422, + /* 1530 */ 403, 404, 405, 406, 407, 408, 325, 410, 364, 420, + /* 1540 */ 95, 369, 346, 360, 417, 337, 36, 333, 421, 422, + /* 1550 */ 328, 356, 327, 390, 394, 385, 385, 323, 0, 364, + /* 1560 */ 187, 0, 0, 42, 369, 350, 371, 356, 0, 350, + /* 1570 */ 338, 350, 0, 35, 199, 364, 35, 35, 35, 199, + /* 1580 */ 369, 0, 371, 35, 35, 199, 0, 199, 0, 35, + /* 1590 */ 0, 396, 22, 0, 35, 400, 182, 325, 403, 404, + /* 1600 */ 405, 406, 407, 408, 180, 410, 0, 396, 0, 176, + /* 1610 */ 175, 400, 0, 0, 403, 404, 405, 406, 407, 408, + /* 1620 */ 47, 410, 0, 0, 0, 42, 0, 0, 356, 151, /* 1630 */ 35, 0, 151, 0, 0, 0, 364, 0, 0, 444, /* 1640 */ 445, 369, 325, 371, 0, 0, 0, 0, 0, 0, - /* 1650 */ 0, 0, 0, 0, 0, 0, 445, 0, 0, 42, - /* 1660 */ 0, 0, 0, 0, 0, 22, 0, 0, 396, 0, + /* 1650 */ 0, 0, 0, 0, 0, 0, 445, 0, 0, 0, + /* 1660 */ 0, 42, 0, 0, 0, 0, 0, 0, 396, 0, /* 1670 */ 0, 135, 400, 356, 35, 403, 404, 405, 406, 407, - /* 1680 */ 408, 364, 410, 0, 0, 0, 369, 0, 371, 0, - /* 1690 */ 58, 14, 14, 0, 58, 0, 0, 0, 42, 171, - /* 1700 */ 0, 0, 0, 325, 39, 64, 47, 0, 0, 0, - /* 1710 */ 0, 39, 44, 396, 35, 40, 0, 400, 47, 447, - /* 1720 */ 403, 404, 405, 406, 407, 408, 325, 410, 47, 39, - /* 1730 */ 48, 39, 0, 35, 356, 0, 48, 0, 39, 361, - /* 1740 */ 35, 35, 364, 39, 0, 22, 104, 369, 0, 371, - /* 1750 */ 35, 48, 39, 35, 35, 35, 35, 356, 44, 48, - /* 1760 */ 35, 44, 22, 35, 35, 364, 0, 22, 0, 22, - /* 1770 */ 369, 0, 371, 456, 396, 102, 50, 22, 400, 0, - /* 1780 */ 0, 403, 404, 405, 406, 407, 408, 325, 410, 35, - /* 1790 */ 0, 35, 35, 20, 22, 35, 35, 396, 192, 0, - /* 1800 */ 35, 400, 0, 163, 403, 404, 405, 
406, 407, 408, - /* 1810 */ 0, 410, 0, 412, 22, 0, 3, 325, 356, 44, - /* 1820 */ 96, 248, 44, 361, 47, 95, 364, 96, 95, 95, - /* 1830 */ 44, 369, 96, 371, 96, 96, 95, 95, 47, 44, - /* 1840 */ 325, 95, 248, 163, 95, 3, 96, 96, 356, 44, - /* 1850 */ 248, 165, 35, 361, 163, 35, 364, 35, 396, 183, + /* 1680 */ 408, 364, 410, 22, 0, 0, 369, 0, 371, 0, + /* 1690 */ 58, 0, 0, 14, 58, 0, 0, 14, 42, 0, + /* 1700 */ 0, 171, 0, 325, 0, 0, 64, 39, 0, 0, + /* 1710 */ 0, 35, 40, 396, 39, 44, 39, 400, 0, 447, + /* 1720 */ 403, 404, 405, 406, 407, 408, 325, 410, 47, 47, + /* 1730 */ 0, 0, 39, 47, 356, 0, 48, 39, 35, 361, + /* 1740 */ 39, 35, 364, 102, 104, 48, 35, 369, 0, 371, + /* 1750 */ 0, 35, 39, 0, 22, 48, 35, 356, 35, 35, + /* 1760 */ 48, 35, 22, 44, 44, 364, 0, 22, 35, 35, + /* 1770 */ 369, 35, 371, 456, 396, 0, 22, 0, 400, 50, + /* 1780 */ 22, 403, 404, 405, 406, 407, 408, 325, 410, 0, + /* 1790 */ 35, 35, 0, 0, 35, 22, 20, 396, 35, 35, + /* 1800 */ 0, 400, 192, 35, 403, 404, 405, 406, 407, 408, + /* 1810 */ 96, 410, 95, 412, 0, 0, 22, 325, 356, 0, + /* 1820 */ 3, 96, 44, 361, 96, 248, 364, 44, 44, 47, + /* 1830 */ 44, 369, 95, 371, 95, 248, 163, 96, 96, 95, + /* 1840 */ 325, 163, 95, 47, 95, 165, 95, 3, 356, 44, + /* 1850 */ 248, 35, 44, 361, 47, 35, 364, 163, 396, 35, /* 1860 */ 35, 369, 400, 371, 35, 403, 404, 405, 406, 407, - /* 1870 */ 408, 356, 410, 35, 47, 44, 0, 96, 96, 364, - /* 1880 */ 47, 169, 0, 47, 369, 325, 371, 0, 396, 95, - /* 1890 */ 39, 95, 400, 96, 96, 403, 404, 405, 406, 407, - /* 1900 */ 408, 325, 410, 95, 95, 105, 95, 47, 44, 2, - /* 1910 */ 22, 396, 96, 95, 95, 400, 356, 164, 403, 404, - /* 1920 */ 405, 406, 407, 408, 364, 410, 96, 95, 166, 369, - /* 1930 */ 96, 371, 356, 47, 95, 95, 242, 47, 22, 96, - /* 1940 */ 364, 95, 95, 106, 96, 369, 35, 371, 35, 227, - /* 1950 */ 96, 95, 35, 35, 325, 96, 396, 205, 95, 35, - /* 1960 */ 400, 96, 95, 403, 404, 405, 406, 407, 408, 96, - /* 1970 */ 410, 207, 396, 35, 95, 22, 400, 119, 107, 403, - /* 1980 */ 404, 405, 406, 407, 408, 356, 410, 229, 119, 119, - /* 1990 */ 227, 119, 95, 364, 44, 95, 35, 22, 369, 325, - /* 2000 */ 371, 95, 35, 64, 63, 35, 35, 35, 35, 35, - /* 2010 */ 70, 35, 35, 35, 35, 92, 35, 35, 35, 22, - /* 2020 */ 35, 35, 35, 35, 44, 396, 70, 35, 35, 400, + /* 1870 */ 408, 356, 410, 183, 35, 96, 96, 96, 47, 364, + /* 1880 */ 0, 96, 0, 0, 369, 325, 371, 47, 396, 0, + /* 1890 */ 95, 169, 400, 96, 96, 403, 404, 405, 406, 407, + /* 1900 */ 408, 325, 410, 39, 95, 95, 95, 95, 105, 47, + /* 1910 */ 44, 396, 166, 2, 22, 400, 356, 95, 403, 404, + /* 1920 */ 405, 406, 407, 408, 364, 410, 164, 22, 96, 369, + /* 1930 */ 95, 371, 356, 96, 96, 47, 106, 95, 95, 35, + /* 1940 */ 364, 95, 47, 205, 96, 369, 95, 371, 96, 35, + /* 1950 */ 95, 35, 96, 95, 325, 96, 396, 242, 35, 207, + /* 1960 */ 400, 96, 95, 403, 404, 405, 406, 407, 408, 35, + /* 1970 */ 410, 95, 396, 96, 35, 22, 400, 44, 107, 403, + /* 1980 */ 404, 405, 406, 407, 408, 356, 410, 227, 95, 119, + /* 1990 */ 229, 119, 227, 364, 119, 95, 95, 119, 369, 325, + /* 2000 */ 371, 35, 95, 22, 64, 63, 35, 35, 35, 35, + /* 2010 */ 35, 35, 35, 35, 35, 35, 70, 22, 92, 35, + /* 2020 */ 35, 35, 35, 35, 35, 396, 44, 70, 35, 400, /* 2030 */ 356, 35, 403, 404, 405, 406, 407, 408, 364, 410, - /* 2040 */ 35, 22, 35, 369, 0, 371, 35, 48, 39, 0, - /* 2050 */ 35, 48, 325, 39, 0, 35, 48, 39, 0, 35, - /* 2060 */ 48, 39, 0, 35, 35, 0, 20, 22, 325, 21, - /* 2070 */ 396, 459, 22, 22, 400, 459, 21, 403, 404, 405, - /* 2080 */ 406, 407, 408, 356, 410, 459, 459, 459, 459, 459, + /* 2040 */ 35, 35, 35, 369, 
22, 371, 35, 0, 35, 48, + /* 2050 */ 0, 39, 325, 35, 39, 0, 48, 48, 35, 39, + /* 2060 */ 0, 35, 48, 39, 0, 35, 0, 35, 325, 21, + /* 2070 */ 396, 22, 20, 22, 400, 459, 22, 403, 404, 405, + /* 2080 */ 406, 407, 408, 356, 410, 459, 21, 459, 459, 459, /* 2090 */ 459, 364, 459, 459, 459, 459, 369, 459, 371, 356, /* 2100 */ 459, 459, 459, 459, 459, 459, 459, 364, 459, 459, /* 2110 */ 459, 459, 369, 325, 371, 459, 459, 459, 459, 459, @@ -767,9 +767,9 @@ static const YYCODETYPE yy_lookahead[] = { /* 2710 */ 459, 459, 400, 459, 459, 403, 404, 405, 406, 407, /* 2720 */ 408, 459, 410, }; -#define YY_SHIFT_COUNT (710) +#define YY_SHIFT_COUNT (711) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2065) +#define YY_SHIFT_MAX (2066) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 925, 0, 71, 0, 288, 288, 288, 288, 288, 288, /* 10 */ 288, 288, 288, 359, 574, 574, 645, 574, 574, 574, @@ -784,67 +784,67 @@ static const unsigned short int yy_shift_ofst[] = { /* 100 */ 34, 34, 498, 471, 418, 418, 418, 418, 418, 418, /* 110 */ 418, 418, 418, 418, 418, 418, 418, 418, 418, 418, /* 120 */ 418, 418, 418, 213, 661, 14, 52, 312, 325, 172, - /* 130 */ 172, 172, 599, 607, 607, 325, 583, 583, 583, 784, - /* 140 */ 404, 546, 543, 869, 543, 869, 869, 784, 890, 216, + /* 130 */ 172, 172, 599, 602, 602, 325, 708, 708, 708, 497, + /* 140 */ 404, 546, 543, 879, 543, 879, 879, 497, 890, 216, /* 150 */ 216, 216, 216, 216, 216, 216, 790, 434, 302, 711, - /* 160 */ 1055, 99, 96, 94, 295, 532, 339, 533, 899, 889, - /* 170 */ 853, 766, 804, 104, 766, 933, 724, 858, 1049, 1248, - /* 180 */ 1124, 1264, 1291, 1264, 1162, 1305, 1305, 1264, 1162, 1162, - /* 190 */ 1240, 1305, 1305, 1305, 1321, 1321, 1324, 28, 404, 28, - /* 200 */ 1329, 1339, 28, 1329, 28, 28, 28, 1305, 28, 1319, - /* 210 */ 1319, 1321, 543, 543, 543, 543, 543, 543, 543, 543, - /* 220 */ 543, 543, 543, 1305, 1321, 869, 869, 1200, 1324, 57, - /* 230 */ 1226, 404, 57, 1305, 1291, 1291, 869, 1189, 1188, 869, - /* 240 */ 1189, 1188, 869, 869, 543, 1186, 1293, 1189, 1206, 1208, - /* 250 */ 1218, 1049, 1210, 1215, 1207, 1239, 583, 1466, 1305, 1329, - /* 260 */ 57, 1472, 1188, 869, 869, 869, 869, 869, 1188, 869, - /* 270 */ 1330, 57, 784, 57, 583, 1427, 1443, 869, 890, 1305, - /* 280 */ 57, 1505, 1321, 2723, 2723, 2723, 2723, 2723, 2723, 2723, - /* 290 */ 2723, 2723, 826, 1187, 588, 656, 938, 998, 1064, 556, - /* 300 */ 686, 757, 867, 852, 979, 979, 979, 979, 979, 979, - /* 310 */ 979, 979, 979, 896, 641, 155, 155, 191, 534, 592, - /* 320 */ 572, 300, 435, 435, 1, 625, 573, 1, 1, 1, - /* 330 */ 1000, 547, 1031, 1007, 952, 839, 983, 984, 991, 1012, - /* 340 */ 1088, 1094, 1109, 959, 1060, 1071, 989, 1040, 384, 1010, - /* 350 */ 1075, 1086, 1104, 1126, 1130, 1204, 1136, 767, 1054, 823, - /* 360 */ 1155, 951, 1157, 1172, 1193, 1202, 1205, 1212, 1114, 1177, - /* 370 */ 1194, 1237, 732, 1556, 1557, 1371, 1562, 1563, 1518, 1565, - /* 380 */ 1533, 1374, 1535, 1539, 1541, 1379, 1577, 1544, 1546, 1384, - /* 390 */ 1584, 1386, 1586, 1552, 1588, 1567, 1590, 1558, 1410, 1414, - /* 400 */ 1596, 1604, 1430, 1433, 1609, 1610, 1566, 1612, 1620, 1622, - /* 410 */ 1581, 1624, 1625, 1626, 1627, 1637, 1638, 1644, 1645, 1478, - /* 420 */ 1595, 1631, 1481, 1633, 1634, 1635, 1646, 1647, 1648, 1649, - /* 430 */ 1650, 1651, 1652, 1653, 1654, 1655, 1657, 1658, 1617, 1660, - /* 440 */ 1661, 1662, 1663, 1664, 1643, 1666, 1667, 1669, 1536, 1670, - /* 450 */ 1683, 1639, 1684, 1632, 1685, 1636, 1687, 1689, 1656, 1665, - /* 460 */ 1668, 1659, 1677, 1671, 1678, 1681, 1693, 1675, 1672, 1695, - /* 
470 */ 1696, 1697, 1690, 1528, 1700, 1701, 1702, 1641, 1707, 1708, - /* 480 */ 1679, 1682, 1692, 1709, 1698, 1688, 1699, 1710, 1705, 1703, - /* 490 */ 1704, 1716, 1706, 1711, 1713, 1732, 1735, 1737, 1744, 1642, - /* 500 */ 1673, 1715, 1723, 1748, 1718, 1719, 1720, 1721, 1714, 1717, - /* 510 */ 1725, 1728, 1740, 1729, 1766, 1745, 1768, 1747, 1726, 1771, - /* 520 */ 1755, 1754, 1779, 1756, 1780, 1757, 1790, 1772, 1773, 1760, - /* 530 */ 1761, 1606, 1724, 1730, 1799, 1640, 1765, 1802, 1676, 1792, - /* 540 */ 1680, 1686, 1810, 1812, 1691, 1712, 1813, 1775, 1573, 1733, - /* 550 */ 1731, 1734, 1736, 1778, 1738, 1741, 1742, 1746, 1739, 1786, - /* 560 */ 1777, 1791, 1749, 1795, 1594, 1750, 1751, 1842, 1805, 1602, - /* 570 */ 1817, 1820, 1822, 1825, 1829, 1838, 1781, 1782, 1827, 1694, - /* 580 */ 1831, 1833, 1815, 1876, 1882, 1659, 1836, 1794, 1797, 1798, - /* 590 */ 1796, 1808, 1762, 1809, 1887, 1851, 1753, 1811, 1800, 1659, - /* 600 */ 1860, 1864, 1722, 1758, 1763, 1907, 1888, 1752, 1818, 1816, - /* 610 */ 1819, 1830, 1832, 1834, 1886, 1839, 1840, 1890, 1843, 1916, - /* 620 */ 1764, 1846, 1837, 1848, 1911, 1913, 1847, 1854, 1917, 1856, - /* 630 */ 1859, 1918, 1863, 1865, 1924, 1867, 1873, 1938, 1879, 1858, - /* 640 */ 1869, 1870, 1872, 1953, 1871, 1897, 1950, 1900, 1961, 1906, - /* 650 */ 1950, 1950, 1975, 1939, 1941, 1967, 1970, 1971, 1972, 1973, - /* 660 */ 1974, 1976, 1977, 1978, 1979, 1940, 1923, 1980, 1981, 1982, - /* 670 */ 1983, 1997, 1985, 1986, 1987, 1956, 1714, 1988, 1717, 1992, - /* 680 */ 1993, 1996, 2005, 2019, 2007, 2044, 2011, 1999, 2009, 2049, - /* 690 */ 2015, 2003, 2014, 2054, 2020, 2008, 2018, 2058, 2024, 2012, - /* 700 */ 2022, 2062, 2028, 2029, 2065, 2045, 2048, 2050, 2051, 2055, - /* 710 */ 2046, + /* 160 */ 1055, 99, 96, 94, 295, 532, 339, 533, 899, 870, + /* 170 */ 853, 727, 839, 104, 727, 929, 886, 947, 1046, 1249, + /* 180 */ 1125, 1264, 1290, 1264, 1162, 1305, 1305, 1264, 1162, 1162, + /* 190 */ 1241, 1305, 1305, 1305, 1322, 1322, 1325, 28, 404, 28, + /* 200 */ 1333, 1343, 28, 1333, 28, 28, 28, 1305, 28, 1320, + /* 210 */ 1320, 1322, 543, 543, 543, 543, 543, 543, 543, 543, + /* 220 */ 543, 543, 543, 1305, 1322, 879, 879, 879, 1189, 1325, + /* 230 */ 57, 1213, 404, 57, 1305, 1290, 1290, 879, 1188, 1190, + /* 240 */ 879, 1188, 1190, 879, 879, 543, 1192, 1295, 1188, 1207, + /* 250 */ 1210, 1230, 1046, 1212, 1218, 1223, 1243, 708, 1468, 1305, + /* 260 */ 1333, 57, 1473, 1190, 879, 879, 879, 879, 879, 1190, + /* 270 */ 879, 1351, 57, 497, 57, 708, 1431, 1445, 879, 890, + /* 280 */ 1305, 57, 1510, 1322, 2723, 2723, 2723, 2723, 2723, 2723, + /* 290 */ 2723, 2723, 2723, 826, 1187, 588, 656, 938, 998, 1064, + /* 300 */ 556, 686, 757, 867, 852, 979, 979, 979, 979, 979, + /* 310 */ 979, 979, 979, 979, 896, 869, 155, 155, 191, 534, + /* 320 */ 592, 572, 300, 435, 435, 1, 625, 573, 1, 1, + /* 330 */ 1, 912, 547, 983, 975, 940, 821, 945, 984, 986, + /* 340 */ 992, 1009, 1053, 1089, 744, 1014, 1060, 982, 946, 384, + /* 350 */ 953, 1078, 1086, 1104, 1118, 1136, 1169, 1155, 1049, 1074, + /* 360 */ 988, 1157, 1043, 1193, 1202, 1204, 1205, 1224, 1233, 1068, + /* 370 */ 1194, 1220, 1237, 732, 1558, 1568, 1373, 1561, 1562, 1521, + /* 380 */ 1572, 1538, 1375, 1541, 1542, 1543, 1380, 1581, 1548, 1549, + /* 390 */ 1386, 1586, 1388, 1588, 1554, 1590, 1570, 1593, 1559, 1414, + /* 400 */ 1424, 1606, 1608, 1433, 1435, 1612, 1613, 1573, 1622, 1623, + /* 410 */ 1624, 1583, 1626, 1627, 1637, 1638, 1644, 1645, 1646, 1647, + /* 420 */ 1478, 1595, 1631, 1481, 1633, 1634, 1635, 1648, 1649, 1650, + /* 430 
*/ 1651, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1660, 1619, + /* 440 */ 1662, 1663, 1664, 1665, 1666, 1661, 1667, 1669, 1670, 1536, + /* 450 */ 1684, 1685, 1639, 1687, 1632, 1689, 1636, 1691, 1692, 1656, + /* 460 */ 1668, 1671, 1681, 1679, 1682, 1683, 1686, 1695, 1672, 1675, + /* 470 */ 1696, 1699, 1700, 1677, 1530, 1702, 1704, 1705, 1642, 1708, + /* 480 */ 1709, 1676, 1688, 1693, 1710, 1703, 1697, 1698, 1718, 1706, + /* 490 */ 1707, 1701, 1730, 1711, 1712, 1713, 1731, 1735, 1748, 1750, + /* 500 */ 1640, 1641, 1716, 1732, 1753, 1721, 1723, 1724, 1726, 1719, + /* 510 */ 1720, 1733, 1734, 1740, 1736, 1766, 1745, 1775, 1754, 1729, + /* 520 */ 1777, 1758, 1755, 1789, 1756, 1792, 1759, 1793, 1773, 1776, + /* 530 */ 1763, 1764, 1610, 1714, 1717, 1800, 1673, 1768, 1814, 1690, + /* 540 */ 1794, 1678, 1680, 1815, 1819, 1694, 1722, 1817, 1778, 1577, + /* 550 */ 1737, 1725, 1739, 1728, 1783, 1741, 1744, 1747, 1749, 1742, + /* 560 */ 1784, 1782, 1796, 1751, 1786, 1587, 1779, 1780, 1844, 1805, + /* 570 */ 1602, 1816, 1820, 1824, 1825, 1829, 1839, 1781, 1785, 1807, + /* 580 */ 1715, 1808, 1831, 1880, 1882, 1883, 1681, 1840, 1795, 1797, + /* 590 */ 1798, 1809, 1810, 1746, 1811, 1889, 1864, 1762, 1812, 1803, + /* 600 */ 1681, 1862, 1866, 1760, 1761, 1765, 1911, 1892, 1738, 1822, + /* 610 */ 1832, 1835, 1837, 1842, 1838, 1888, 1843, 1846, 1895, 1848, + /* 620 */ 1905, 1752, 1851, 1830, 1852, 1904, 1914, 1855, 1856, 1916, + /* 630 */ 1858, 1859, 1923, 1867, 1865, 1934, 1876, 1877, 1939, 1893, + /* 640 */ 1870, 1872, 1875, 1878, 1953, 1871, 1900, 1933, 1901, 1966, + /* 650 */ 1907, 1933, 1933, 1981, 1940, 1942, 1971, 1972, 1973, 1974, + /* 660 */ 1975, 1976, 1977, 1978, 1979, 1980, 1946, 1926, 1982, 1984, + /* 670 */ 1985, 1986, 1995, 1987, 1988, 1989, 1957, 1719, 1993, 1720, + /* 680 */ 1996, 2005, 2006, 2007, 2022, 2011, 2047, 2013, 2001, 2012, + /* 690 */ 2050, 2018, 2008, 2015, 2055, 2023, 2009, 2020, 2060, 2026, + /* 700 */ 2014, 2024, 2064, 2030, 2032, 2066, 2049, 2048, 2051, 2054, + /* 710 */ 2065, 2052, }; -#define YY_REDUCE_COUNT (291) +#define YY_REDUCE_COUNT (292) #define YY_REDUCE_MIN (-425) #define YY_REDUCE_MAX (2312) static const short yy_reduce_ofst[] = { @@ -855,103 +855,103 @@ static const short yy_reduce_ofst[] = { /* 40 */ 2153, 2198, 2214, 2259, 2282, 2312, -276, -290, -168, -10, /* 50 */ 834, 837, 907, 960, -327, -181, -348, -314, -425, 252, /* 60 */ 755, -422, 289, -328, -219, -329, -291, -300, -303, -209, - /* 70 */ -144, -173, 78, 258, 278, 326, 668, 688, -288, 698, - /* 80 */ 727, 65, 748, 754, -275, 770, 86, 800, -152, -347, - /* 90 */ 802, 828, 123, 829, 186, 187, 192, 864, -158, -294, + /* 70 */ -144, -173, 78, 258, 278, 326, 668, 688, -288, 748, + /* 80 */ 754, 65, 770, 800, -275, 802, 86, 828, -152, -347, + /* 90 */ 864, 872, 123, 875, 186, 187, 192, 883, -158, -294, /* 100 */ -282, -282, -182, -265, -170, -62, -6, 82, 105, 265, - /* 110 */ 276, 305, 310, 347, 383, 386, 392, 439, 442, 450, - /* 120 */ 480, 630, 631, 67, -150, 89, -116, 308, 173, -150, - /* 130 */ 311, 332, 309, 406, 426, 175, -58, 466, 521, 126, - /* 140 */ 306, 240, 456, 552, 516, 595, 601, 616, 627, 615, - /* 150 */ 648, 700, 718, 806, 815, 851, -337, 812, 814, 835, - /* 160 */ 726, 785, 895, 793, 882, 882, 904, 868, 916, 887, - /* 170 */ 898, 843, 843, 842, 843, 856, 859, 882, 902, 900, - /* 180 */ 915, 934, 939, 944, 949, 994, 995, 953, 958, 961, - /* 190 */ 993, 1004, 1005, 1006, 1015, 1016, 955, 1011, 974, 1014, - /* 200 */ 1024, 975, 1026, 1032, 1028, 1029, 1030, 1037, 1033, 1038, - /* 210 */ 1039, 
1052, 1022, 1025, 1027, 1035, 1041, 1042, 1050, 1056, - /* 220 */ 1062, 1063, 1067, 1047, 1057, 1013, 1017, 1001, 1002, 1073, - /* 230 */ 1043, 1045, 1093, 1100, 1058, 1061, 1065, 1009, 1066, 1076, - /* 240 */ 1023, 1069, 1077, 1080, 882, 1018, 1034, 1046, 1036, 1044, - /* 250 */ 1048, 1072, 1068, 1053, 1070, 843, 1116, 1085, 1156, 1154, - /* 260 */ 1153, 1111, 1110, 1128, 1131, 1132, 1133, 1135, 1115, 1137, - /* 270 */ 1138, 1171, 1164, 1181, 1145, 1105, 1179, 1173, 1197, 1213, - /* 280 */ 1203, 1217, 1220, 1160, 1158, 1168, 1169, 1219, 1221, 1222, - /* 290 */ 1223, 1232, + /* 110 */ 276, 305, 310, 347, 383, 386, 392, 439, 442, 443, + /* 120 */ 450, 480, 503, 67, -150, 89, -116, 308, 173, -150, + /* 130 */ 311, 332, 309, 406, 421, 175, -58, 465, 521, 126, + /* 140 */ 306, 240, 478, 473, 583, 552, 595, 616, 542, 618, + /* 150 */ 641, 671, 692, 709, 717, 762, -337, 719, 814, 714, + /* 160 */ 710, 726, 841, 738, 827, 827, 855, 833, 874, 858, + /* 170 */ 859, 843, 843, 829, 843, 873, 861, 827, 895, 905, + /* 180 */ 915, 932, 939, 935, 949, 994, 995, 956, 959, 961, + /* 190 */ 996, 1005, 1006, 1008, 1016, 1017, 955, 1010, 987, 1011, + /* 200 */ 1025, 977, 1027, 1033, 1029, 1030, 1031, 1038, 1035, 1039, + /* 210 */ 1042, 1052, 1024, 1026, 1028, 1040, 1041, 1050, 1054, 1056, + /* 220 */ 1062, 1063, 1067, 1048, 1059, 1022, 1047, 1061, 999, 1002, + /* 230 */ 1096, 1019, 1066, 1097, 1102, 1065, 1069, 1070, 1007, 1076, + /* 240 */ 1080, 1023, 1081, 1084, 1095, 827, 1003, 1034, 1045, 1032, + /* 250 */ 1037, 1044, 1083, 1036, 1057, 1071, 843, 1123, 1087, 1159, + /* 260 */ 1156, 1158, 1115, 1116, 1130, 1132, 1133, 1135, 1137, 1121, + /* 270 */ 1139, 1138, 1181, 1173, 1184, 1174, 1119, 1183, 1172, 1196, + /* 280 */ 1214, 1208, 1222, 1225, 1163, 1160, 1170, 1171, 1215, 1219, + /* 290 */ 1221, 1232, 1234, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 10 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 20 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 30 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 40 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 50 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 60 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1850, 1595, 1595, - /* 70 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 80 */ 1595, 1673, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 90 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1671, 1843, - /* 100 */ 2043, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 110 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 120 */ 1595, 1595, 1595, 1595, 2055, 1595, 1595, 1673, 1595, 2055, - /* 130 */ 2055, 2055, 1671, 2015, 2015, 1595, 1595, 1595, 1595, 1780, - /* 140 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1780, 1595, 1595, - /* 150 */ 1595, 1595, 1595, 1595, 1595, 1595, 1889, 1595, 1595, 2080, - /* 160 */ 2133, 1595, 1595, 2083, 1595, 1595, 1595, 1855, 1595, 1733, - /* 170 */ 2070, 2047, 2061, 2117, 2048, 2045, 2064, 1595, 2074, 1595, - /* 180 */ 1882, 1848, 1595, 1848, 1845, 1595, 1595, 1848, 1845, 1845, - /* 190 */ 1724, 1595, 1595, 1595, 1595, 1595, 1595, 1673, 1595, 1673, - /* 200 */ 1595, 1595, 1673, 1595, 1673, 1673, 1673, 1595, 1673, 1652, - /* 210 */ 1652, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 220 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1902, 1595, 1671, - /* 230 */ 1891, 1595, 1671, 1595, 1595, 
1595, 1595, 2090, 2088, 1595, - /* 240 */ 2090, 2088, 1595, 1595, 1595, 2102, 2098, 2090, 2106, 2104, - /* 250 */ 2076, 2074, 2136, 2123, 2119, 2061, 1595, 1595, 1595, 1595, - /* 260 */ 1671, 1595, 2088, 1595, 1595, 1595, 1595, 1595, 2088, 1595, - /* 270 */ 1595, 1671, 1595, 1671, 1595, 1595, 1749, 1595, 1595, 1595, - /* 280 */ 1671, 1627, 1595, 1884, 1895, 1867, 1867, 1783, 1783, 1783, - /* 290 */ 1674, 1600, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 300 */ 1595, 1595, 1595, 1595, 2101, 2100, 1971, 1595, 2019, 2018, - /* 310 */ 2017, 2008, 1970, 1745, 1595, 1969, 1968, 1595, 1595, 1595, - /* 320 */ 1595, 1595, 1863, 1862, 1962, 1595, 1595, 1963, 1961, 1960, - /* 330 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 340 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 2120, 2124, 1595, - /* 350 */ 1595, 1595, 1595, 1595, 1595, 2044, 1595, 1595, 1595, 1595, - /* 360 */ 1595, 1944, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 370 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 380 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 390 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 400 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 410 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 420 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 430 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 440 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 450 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 460 */ 1632, 1949, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 470 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 480 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 490 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 500 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1712, 1711, - /* 510 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 520 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 530 */ 1595, 1595, 1953, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 540 */ 1595, 1595, 1595, 1595, 1595, 1595, 2116, 2077, 1595, 1595, - /* 550 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 560 */ 1595, 1944, 1595, 2099, 1595, 1595, 2114, 1595, 2118, 1595, - /* 570 */ 1595, 1595, 1595, 1595, 1595, 1595, 2054, 2050, 1595, 1595, - /* 580 */ 2046, 1595, 1595, 1595, 1595, 1952, 1595, 1595, 1595, 1595, - /* 590 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1943, - /* 600 */ 1595, 2005, 1595, 1595, 1595, 2039, 1595, 1595, 1990, 1595, - /* 610 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1953, 1595, - /* 620 */ 1956, 1595, 1595, 1595, 1595, 1595, 1777, 1595, 1595, 1595, - /* 630 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1762, - /* 640 */ 1760, 1759, 1758, 1595, 1755, 1595, 1790, 1595, 1595, 1595, - /* 650 */ 1786, 1785, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 660 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1692, 1595, 1595, - /* 670 */ 1595, 1595, 1595, 1595, 1595, 1595, 1684, 1595, 1683, 1595, - /* 680 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 690 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 700 */ 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, 1595, - /* 710 */ 1595, + /* 0 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 10 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 
1598, 1598, 1598, + /* 20 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 30 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 40 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 50 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 60 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1853, 1598, 1598, + /* 70 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 80 */ 1598, 1676, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 90 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1674, 1846, + /* 100 */ 2047, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 110 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 120 */ 1598, 1598, 1598, 1598, 2059, 1598, 1598, 1676, 1598, 2059, + /* 130 */ 2059, 2059, 1674, 2019, 2019, 1598, 1598, 1598, 1598, 1783, + /* 140 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1783, 1598, 1598, + /* 150 */ 1598, 1598, 1598, 1598, 1598, 1598, 1892, 1598, 1598, 2084, + /* 160 */ 2137, 1598, 1598, 2087, 1598, 1598, 1598, 1858, 1598, 1736, + /* 170 */ 2074, 2051, 2065, 2121, 2052, 2049, 2068, 1598, 2078, 1598, + /* 180 */ 1885, 1851, 1598, 1851, 1848, 1598, 1598, 1851, 1848, 1848, + /* 190 */ 1727, 1598, 1598, 1598, 1598, 1598, 1598, 1676, 1598, 1676, + /* 200 */ 1598, 1598, 1676, 1598, 1676, 1676, 1676, 1598, 1676, 1655, + /* 210 */ 1655, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 220 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1906, 1598, + /* 230 */ 1674, 1894, 1598, 1674, 1598, 1598, 1598, 1598, 2094, 2092, + /* 240 */ 1598, 2094, 2092, 1598, 1598, 1598, 2106, 2102, 2094, 2110, + /* 250 */ 2108, 2080, 2078, 2140, 2127, 2123, 2065, 1598, 1598, 1598, + /* 260 */ 1598, 1674, 1598, 2092, 1598, 1598, 1598, 1598, 1598, 2092, + /* 270 */ 1598, 1598, 1674, 1598, 1674, 1598, 1598, 1752, 1598, 1598, + /* 280 */ 1598, 1674, 1630, 1598, 1887, 1898, 1870, 1870, 1786, 1786, + /* 290 */ 1786, 1677, 1603, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 300 */ 1598, 1598, 1598, 1598, 1598, 2105, 2104, 1975, 1598, 2023, + /* 310 */ 2022, 2021, 2012, 1974, 1748, 1598, 1973, 1972, 1598, 1598, + /* 320 */ 1598, 1598, 1598, 1866, 1865, 1966, 1598, 1598, 1967, 1965, + /* 330 */ 1964, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 340 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 2124, 2128, + /* 350 */ 1598, 1598, 1598, 1598, 1598, 1598, 2048, 1598, 1598, 1598, + /* 360 */ 1598, 1598, 1948, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 370 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 380 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 390 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 400 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 410 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 420 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 430 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 440 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 450 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 460 */ 1598, 1635, 1953, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 470 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 480 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 490 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 500 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1715, + /* 510 */ 
1714, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 520 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 530 */ 1598, 1598, 1598, 1957, 1598, 1598, 1598, 1598, 1598, 1598, + /* 540 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 2120, 2081, 1598, + /* 550 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 560 */ 1598, 1598, 1948, 1598, 2103, 1598, 1598, 2118, 1598, 2122, + /* 570 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 2058, 2054, 1598, + /* 580 */ 1598, 2050, 1598, 1598, 1598, 1598, 1956, 1598, 1598, 1598, + /* 590 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 600 */ 1947, 1598, 2009, 1598, 1598, 1598, 2043, 1598, 1598, 1994, + /* 610 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1957, + /* 620 */ 1598, 1960, 1598, 1598, 1598, 1598, 1598, 1780, 1598, 1598, + /* 630 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 640 */ 1765, 1763, 1762, 1761, 1598, 1758, 1598, 1793, 1598, 1598, + /* 650 */ 1598, 1789, 1788, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 660 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1695, 1598, + /* 670 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1687, 1598, 1686, + /* 680 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 690 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 700 */ 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, 1598, + /* 710 */ 1598, 1598, }; /********** End of lemon-generated parsing tables *****************************/ @@ -2150,243 +2150,244 @@ static const char *const yyRuleName[] = { /* 301 */ "stream_options ::= stream_options WATERMARK duration_literal", /* 302 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER", /* 303 */ "stream_options ::= stream_options FILL_HISTORY NK_INTEGER", - /* 304 */ "subtable_opt ::=", - /* 305 */ "subtable_opt ::= SUBTABLE NK_LP expression NK_RP", - /* 306 */ "cmd ::= KILL CONNECTION NK_INTEGER", - /* 307 */ "cmd ::= KILL QUERY NK_STRING", - /* 308 */ "cmd ::= KILL TRANSACTION NK_INTEGER", - /* 309 */ "cmd ::= BALANCE VGROUP", - /* 310 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", - /* 311 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", - /* 312 */ "cmd ::= SPLIT VGROUP NK_INTEGER", - /* 313 */ "dnode_list ::= DNODE NK_INTEGER", - /* 314 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", - /* 315 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", - /* 316 */ "cmd ::= query_or_subquery", - /* 317 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery", - /* 318 */ "cmd ::= INSERT INTO full_table_name query_or_subquery", - /* 319 */ "literal ::= NK_INTEGER", - /* 320 */ "literal ::= NK_FLOAT", - /* 321 */ "literal ::= NK_STRING", - /* 322 */ "literal ::= NK_BOOL", - /* 323 */ "literal ::= TIMESTAMP NK_STRING", - /* 324 */ "literal ::= duration_literal", - /* 325 */ "literal ::= NULL", - /* 326 */ "literal ::= NK_QUESTION", - /* 327 */ "duration_literal ::= NK_VARIABLE", - /* 328 */ "signed ::= NK_INTEGER", - /* 329 */ "signed ::= NK_PLUS NK_INTEGER", - /* 330 */ "signed ::= NK_MINUS NK_INTEGER", - /* 331 */ "signed ::= NK_FLOAT", - /* 332 */ "signed ::= NK_PLUS NK_FLOAT", - /* 333 */ "signed ::= NK_MINUS NK_FLOAT", - /* 334 */ "signed_literal ::= signed", - /* 335 */ "signed_literal ::= NK_STRING", - /* 336 */ "signed_literal ::= NK_BOOL", - /* 337 */ "signed_literal ::= TIMESTAMP NK_STRING", - /* 338 */ "signed_literal ::= duration_literal", - /* 339 */ "signed_literal ::= NULL", - /* 340 */ "signed_literal 
::= literal_func", - /* 341 */ "signed_literal ::= NK_QUESTION", - /* 342 */ "literal_list ::= signed_literal", - /* 343 */ "literal_list ::= literal_list NK_COMMA signed_literal", - /* 344 */ "db_name ::= NK_ID", - /* 345 */ "table_name ::= NK_ID", - /* 346 */ "column_name ::= NK_ID", - /* 347 */ "function_name ::= NK_ID", - /* 348 */ "table_alias ::= NK_ID", - /* 349 */ "column_alias ::= NK_ID", - /* 350 */ "user_name ::= NK_ID", - /* 351 */ "topic_name ::= NK_ID", - /* 352 */ "stream_name ::= NK_ID", - /* 353 */ "cgroup_name ::= NK_ID", - /* 354 */ "index_name ::= NK_ID", - /* 355 */ "expr_or_subquery ::= expression", - /* 356 */ "expression ::= literal", - /* 357 */ "expression ::= pseudo_column", - /* 358 */ "expression ::= column_reference", - /* 359 */ "expression ::= function_expression", - /* 360 */ "expression ::= case_when_expression", - /* 361 */ "expression ::= NK_LP expression NK_RP", - /* 362 */ "expression ::= NK_PLUS expr_or_subquery", - /* 363 */ "expression ::= NK_MINUS expr_or_subquery", - /* 364 */ "expression ::= expr_or_subquery NK_PLUS expr_or_subquery", - /* 365 */ "expression ::= expr_or_subquery NK_MINUS expr_or_subquery", - /* 366 */ "expression ::= expr_or_subquery NK_STAR expr_or_subquery", - /* 367 */ "expression ::= expr_or_subquery NK_SLASH expr_or_subquery", - /* 368 */ "expression ::= expr_or_subquery NK_REM expr_or_subquery", - /* 369 */ "expression ::= column_reference NK_ARROW NK_STRING", - /* 370 */ "expression ::= expr_or_subquery NK_BITAND expr_or_subquery", - /* 371 */ "expression ::= expr_or_subquery NK_BITOR expr_or_subquery", - /* 372 */ "expression_list ::= expr_or_subquery", - /* 373 */ "expression_list ::= expression_list NK_COMMA expr_or_subquery", - /* 374 */ "column_reference ::= column_name", - /* 375 */ "column_reference ::= table_name NK_DOT column_name", - /* 376 */ "pseudo_column ::= ROWTS", - /* 377 */ "pseudo_column ::= TBNAME", - /* 378 */ "pseudo_column ::= table_name NK_DOT TBNAME", - /* 379 */ "pseudo_column ::= QSTART", - /* 380 */ "pseudo_column ::= QEND", - /* 381 */ "pseudo_column ::= QDURATION", - /* 382 */ "pseudo_column ::= WSTART", - /* 383 */ "pseudo_column ::= WEND", - /* 384 */ "pseudo_column ::= WDURATION", - /* 385 */ "pseudo_column ::= IROWTS", - /* 386 */ "pseudo_column ::= QTAGS", - /* 387 */ "function_expression ::= function_name NK_LP expression_list NK_RP", - /* 388 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", - /* 389 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP", - /* 390 */ "function_expression ::= literal_func", - /* 391 */ "literal_func ::= noarg_func NK_LP NK_RP", - /* 392 */ "literal_func ::= NOW", - /* 393 */ "noarg_func ::= NOW", - /* 394 */ "noarg_func ::= TODAY", - /* 395 */ "noarg_func ::= TIMEZONE", - /* 396 */ "noarg_func ::= DATABASE", - /* 397 */ "noarg_func ::= CLIENT_VERSION", - /* 398 */ "noarg_func ::= SERVER_VERSION", - /* 399 */ "noarg_func ::= SERVER_STATUS", - /* 400 */ "noarg_func ::= CURRENT_USER", - /* 401 */ "noarg_func ::= USER", - /* 402 */ "star_func ::= COUNT", - /* 403 */ "star_func ::= FIRST", - /* 404 */ "star_func ::= LAST", - /* 405 */ "star_func ::= LAST_ROW", - /* 406 */ "star_func_para_list ::= NK_STAR", - /* 407 */ "star_func_para_list ::= other_para_list", - /* 408 */ "other_para_list ::= star_func_para", - /* 409 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", - /* 410 */ "star_func_para ::= expr_or_subquery", - /* 411 */ "star_func_para ::= table_name NK_DOT NK_STAR", - /* 412 */ 
"case_when_expression ::= CASE when_then_list case_when_else_opt END", - /* 413 */ "case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END", - /* 414 */ "when_then_list ::= when_then_expr", - /* 415 */ "when_then_list ::= when_then_list when_then_expr", - /* 416 */ "when_then_expr ::= WHEN common_expression THEN common_expression", - /* 417 */ "case_when_else_opt ::=", - /* 418 */ "case_when_else_opt ::= ELSE common_expression", - /* 419 */ "predicate ::= expr_or_subquery compare_op expr_or_subquery", - /* 420 */ "predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery", - /* 421 */ "predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery", - /* 422 */ "predicate ::= expr_or_subquery IS NULL", - /* 423 */ "predicate ::= expr_or_subquery IS NOT NULL", - /* 424 */ "predicate ::= expr_or_subquery in_op in_predicate_value", - /* 425 */ "compare_op ::= NK_LT", - /* 426 */ "compare_op ::= NK_GT", - /* 427 */ "compare_op ::= NK_LE", - /* 428 */ "compare_op ::= NK_GE", - /* 429 */ "compare_op ::= NK_NE", - /* 430 */ "compare_op ::= NK_EQ", - /* 431 */ "compare_op ::= LIKE", - /* 432 */ "compare_op ::= NOT LIKE", - /* 433 */ "compare_op ::= MATCH", - /* 434 */ "compare_op ::= NMATCH", - /* 435 */ "compare_op ::= CONTAINS", - /* 436 */ "in_op ::= IN", - /* 437 */ "in_op ::= NOT IN", - /* 438 */ "in_predicate_value ::= NK_LP literal_list NK_RP", - /* 439 */ "boolean_value_expression ::= boolean_primary", - /* 440 */ "boolean_value_expression ::= NOT boolean_primary", - /* 441 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", - /* 442 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", - /* 443 */ "boolean_primary ::= predicate", - /* 444 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", - /* 445 */ "common_expression ::= expr_or_subquery", - /* 446 */ "common_expression ::= boolean_value_expression", - /* 447 */ "from_clause_opt ::=", - /* 448 */ "from_clause_opt ::= FROM table_reference_list", - /* 449 */ "table_reference_list ::= table_reference", - /* 450 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", - /* 451 */ "table_reference ::= table_primary", - /* 452 */ "table_reference ::= joined_table", - /* 453 */ "table_primary ::= table_name alias_opt", - /* 454 */ "table_primary ::= db_name NK_DOT table_name alias_opt", - /* 455 */ "table_primary ::= subquery alias_opt", - /* 456 */ "table_primary ::= parenthesized_joined_table", - /* 457 */ "alias_opt ::=", - /* 458 */ "alias_opt ::= table_alias", - /* 459 */ "alias_opt ::= AS table_alias", - /* 460 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", - /* 461 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", - /* 462 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", - /* 463 */ "join_type ::=", - /* 464 */ "join_type ::= INNER", - /* 465 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", - /* 466 */ "set_quantifier_opt ::=", - /* 467 */ "set_quantifier_opt ::= DISTINCT", - /* 468 */ "set_quantifier_opt ::= ALL", - /* 469 */ "select_list ::= select_item", - /* 470 */ "select_list ::= select_list NK_COMMA select_item", - /* 471 */ "select_item ::= NK_STAR", - /* 472 */ "select_item ::= common_expression", - /* 473 */ 
"select_item ::= common_expression column_alias", - /* 474 */ "select_item ::= common_expression AS column_alias", - /* 475 */ "select_item ::= table_name NK_DOT NK_STAR", - /* 476 */ "where_clause_opt ::=", - /* 477 */ "where_clause_opt ::= WHERE search_condition", - /* 478 */ "partition_by_clause_opt ::=", - /* 479 */ "partition_by_clause_opt ::= PARTITION BY partition_list", - /* 480 */ "partition_list ::= partition_item", - /* 481 */ "partition_list ::= partition_list NK_COMMA partition_item", - /* 482 */ "partition_item ::= expr_or_subquery", - /* 483 */ "partition_item ::= expr_or_subquery column_alias", - /* 484 */ "partition_item ::= expr_or_subquery AS column_alias", - /* 485 */ "twindow_clause_opt ::=", - /* 486 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", - /* 487 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP", - /* 488 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", - /* 489 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", - /* 490 */ "sliding_opt ::=", - /* 491 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", - /* 492 */ "fill_opt ::=", - /* 493 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", - /* 494 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", - /* 495 */ "fill_mode ::= NONE", - /* 496 */ "fill_mode ::= PREV", - /* 497 */ "fill_mode ::= NULL", - /* 498 */ "fill_mode ::= LINEAR", - /* 499 */ "fill_mode ::= NEXT", - /* 500 */ "group_by_clause_opt ::=", - /* 501 */ "group_by_clause_opt ::= GROUP BY group_by_list", - /* 502 */ "group_by_list ::= expr_or_subquery", - /* 503 */ "group_by_list ::= group_by_list NK_COMMA expr_or_subquery", - /* 504 */ "having_clause_opt ::=", - /* 505 */ "having_clause_opt ::= HAVING search_condition", - /* 506 */ "range_opt ::=", - /* 507 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP", - /* 508 */ "every_opt ::=", - /* 509 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", - /* 510 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt", - /* 511 */ "query_simple ::= query_specification", - /* 512 */ "query_simple ::= union_query_expression", - /* 513 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery", - /* 514 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery", - /* 515 */ "query_simple_or_subquery ::= query_simple", - /* 516 */ "query_simple_or_subquery ::= subquery", - /* 517 */ "query_or_subquery ::= query_expression", - /* 518 */ "query_or_subquery ::= subquery", - /* 519 */ "order_by_clause_opt ::=", - /* 520 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", - /* 521 */ "slimit_clause_opt ::=", - /* 522 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", - /* 523 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", - /* 524 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 525 */ "limit_clause_opt ::=", - /* 526 */ "limit_clause_opt ::= LIMIT NK_INTEGER", - /* 527 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", - /* 528 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 529 */ "subquery ::= NK_LP query_expression NK_RP", - /* 530 */ "subquery ::= NK_LP subquery NK_RP", - /* 531 */ "search_condition ::= common_expression", - /* 532 */ "sort_specification_list ::= sort_specification", - /* 533 */ "sort_specification_list ::= 
sort_specification_list NK_COMMA sort_specification", - /* 534 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt", - /* 535 */ "ordering_specification_opt ::=", - /* 536 */ "ordering_specification_opt ::= ASC", - /* 537 */ "ordering_specification_opt ::= DESC", - /* 538 */ "null_ordering_opt ::=", - /* 539 */ "null_ordering_opt ::= NULLS FIRST", - /* 540 */ "null_ordering_opt ::= NULLS LAST", + /* 304 */ "stream_options ::= stream_options DELETE_MARK duration_literal", + /* 305 */ "subtable_opt ::=", + /* 306 */ "subtable_opt ::= SUBTABLE NK_LP expression NK_RP", + /* 307 */ "cmd ::= KILL CONNECTION NK_INTEGER", + /* 308 */ "cmd ::= KILL QUERY NK_STRING", + /* 309 */ "cmd ::= KILL TRANSACTION NK_INTEGER", + /* 310 */ "cmd ::= BALANCE VGROUP", + /* 311 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", + /* 312 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", + /* 313 */ "cmd ::= SPLIT VGROUP NK_INTEGER", + /* 314 */ "dnode_list ::= DNODE NK_INTEGER", + /* 315 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", + /* 316 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", + /* 317 */ "cmd ::= query_or_subquery", + /* 318 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery", + /* 319 */ "cmd ::= INSERT INTO full_table_name query_or_subquery", + /* 320 */ "literal ::= NK_INTEGER", + /* 321 */ "literal ::= NK_FLOAT", + /* 322 */ "literal ::= NK_STRING", + /* 323 */ "literal ::= NK_BOOL", + /* 324 */ "literal ::= TIMESTAMP NK_STRING", + /* 325 */ "literal ::= duration_literal", + /* 326 */ "literal ::= NULL", + /* 327 */ "literal ::= NK_QUESTION", + /* 328 */ "duration_literal ::= NK_VARIABLE", + /* 329 */ "signed ::= NK_INTEGER", + /* 330 */ "signed ::= NK_PLUS NK_INTEGER", + /* 331 */ "signed ::= NK_MINUS NK_INTEGER", + /* 332 */ "signed ::= NK_FLOAT", + /* 333 */ "signed ::= NK_PLUS NK_FLOAT", + /* 334 */ "signed ::= NK_MINUS NK_FLOAT", + /* 335 */ "signed_literal ::= signed", + /* 336 */ "signed_literal ::= NK_STRING", + /* 337 */ "signed_literal ::= NK_BOOL", + /* 338 */ "signed_literal ::= TIMESTAMP NK_STRING", + /* 339 */ "signed_literal ::= duration_literal", + /* 340 */ "signed_literal ::= NULL", + /* 341 */ "signed_literal ::= literal_func", + /* 342 */ "signed_literal ::= NK_QUESTION", + /* 343 */ "literal_list ::= signed_literal", + /* 344 */ "literal_list ::= literal_list NK_COMMA signed_literal", + /* 345 */ "db_name ::= NK_ID", + /* 346 */ "table_name ::= NK_ID", + /* 347 */ "column_name ::= NK_ID", + /* 348 */ "function_name ::= NK_ID", + /* 349 */ "table_alias ::= NK_ID", + /* 350 */ "column_alias ::= NK_ID", + /* 351 */ "user_name ::= NK_ID", + /* 352 */ "topic_name ::= NK_ID", + /* 353 */ "stream_name ::= NK_ID", + /* 354 */ "cgroup_name ::= NK_ID", + /* 355 */ "index_name ::= NK_ID", + /* 356 */ "expr_or_subquery ::= expression", + /* 357 */ "expression ::= literal", + /* 358 */ "expression ::= pseudo_column", + /* 359 */ "expression ::= column_reference", + /* 360 */ "expression ::= function_expression", + /* 361 */ "expression ::= case_when_expression", + /* 362 */ "expression ::= NK_LP expression NK_RP", + /* 363 */ "expression ::= NK_PLUS expr_or_subquery", + /* 364 */ "expression ::= NK_MINUS expr_or_subquery", + /* 365 */ "expression ::= expr_or_subquery NK_PLUS expr_or_subquery", + /* 366 */ "expression ::= expr_or_subquery NK_MINUS expr_or_subquery", + /* 367 */ "expression ::= expr_or_subquery NK_STAR expr_or_subquery", + /* 368 */ "expression ::= expr_or_subquery NK_SLASH 
expr_or_subquery", + /* 369 */ "expression ::= expr_or_subquery NK_REM expr_or_subquery", + /* 370 */ "expression ::= column_reference NK_ARROW NK_STRING", + /* 371 */ "expression ::= expr_or_subquery NK_BITAND expr_or_subquery", + /* 372 */ "expression ::= expr_or_subquery NK_BITOR expr_or_subquery", + /* 373 */ "expression_list ::= expr_or_subquery", + /* 374 */ "expression_list ::= expression_list NK_COMMA expr_or_subquery", + /* 375 */ "column_reference ::= column_name", + /* 376 */ "column_reference ::= table_name NK_DOT column_name", + /* 377 */ "pseudo_column ::= ROWTS", + /* 378 */ "pseudo_column ::= TBNAME", + /* 379 */ "pseudo_column ::= table_name NK_DOT TBNAME", + /* 380 */ "pseudo_column ::= QSTART", + /* 381 */ "pseudo_column ::= QEND", + /* 382 */ "pseudo_column ::= QDURATION", + /* 383 */ "pseudo_column ::= WSTART", + /* 384 */ "pseudo_column ::= WEND", + /* 385 */ "pseudo_column ::= WDURATION", + /* 386 */ "pseudo_column ::= IROWTS", + /* 387 */ "pseudo_column ::= QTAGS", + /* 388 */ "function_expression ::= function_name NK_LP expression_list NK_RP", + /* 389 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", + /* 390 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP", + /* 391 */ "function_expression ::= literal_func", + /* 392 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 393 */ "literal_func ::= NOW", + /* 394 */ "noarg_func ::= NOW", + /* 395 */ "noarg_func ::= TODAY", + /* 396 */ "noarg_func ::= TIMEZONE", + /* 397 */ "noarg_func ::= DATABASE", + /* 398 */ "noarg_func ::= CLIENT_VERSION", + /* 399 */ "noarg_func ::= SERVER_VERSION", + /* 400 */ "noarg_func ::= SERVER_STATUS", + /* 401 */ "noarg_func ::= CURRENT_USER", + /* 402 */ "noarg_func ::= USER", + /* 403 */ "star_func ::= COUNT", + /* 404 */ "star_func ::= FIRST", + /* 405 */ "star_func ::= LAST", + /* 406 */ "star_func ::= LAST_ROW", + /* 407 */ "star_func_para_list ::= NK_STAR", + /* 408 */ "star_func_para_list ::= other_para_list", + /* 409 */ "other_para_list ::= star_func_para", + /* 410 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", + /* 411 */ "star_func_para ::= expr_or_subquery", + /* 412 */ "star_func_para ::= table_name NK_DOT NK_STAR", + /* 413 */ "case_when_expression ::= CASE when_then_list case_when_else_opt END", + /* 414 */ "case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END", + /* 415 */ "when_then_list ::= when_then_expr", + /* 416 */ "when_then_list ::= when_then_list when_then_expr", + /* 417 */ "when_then_expr ::= WHEN common_expression THEN common_expression", + /* 418 */ "case_when_else_opt ::=", + /* 419 */ "case_when_else_opt ::= ELSE common_expression", + /* 420 */ "predicate ::= expr_or_subquery compare_op expr_or_subquery", + /* 421 */ "predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery", + /* 422 */ "predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery", + /* 423 */ "predicate ::= expr_or_subquery IS NULL", + /* 424 */ "predicate ::= expr_or_subquery IS NOT NULL", + /* 425 */ "predicate ::= expr_or_subquery in_op in_predicate_value", + /* 426 */ "compare_op ::= NK_LT", + /* 427 */ "compare_op ::= NK_GT", + /* 428 */ "compare_op ::= NK_LE", + /* 429 */ "compare_op ::= NK_GE", + /* 430 */ "compare_op ::= NK_NE", + /* 431 */ "compare_op ::= NK_EQ", + /* 432 */ "compare_op ::= LIKE", + /* 433 */ "compare_op ::= NOT LIKE", + /* 434 */ "compare_op ::= MATCH", + /* 435 */ "compare_op ::= NMATCH", + /* 436 */ "compare_op ::= 
CONTAINS", + /* 437 */ "in_op ::= IN", + /* 438 */ "in_op ::= NOT IN", + /* 439 */ "in_predicate_value ::= NK_LP literal_list NK_RP", + /* 440 */ "boolean_value_expression ::= boolean_primary", + /* 441 */ "boolean_value_expression ::= NOT boolean_primary", + /* 442 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", + /* 443 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", + /* 444 */ "boolean_primary ::= predicate", + /* 445 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", + /* 446 */ "common_expression ::= expr_or_subquery", + /* 447 */ "common_expression ::= boolean_value_expression", + /* 448 */ "from_clause_opt ::=", + /* 449 */ "from_clause_opt ::= FROM table_reference_list", + /* 450 */ "table_reference_list ::= table_reference", + /* 451 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", + /* 452 */ "table_reference ::= table_primary", + /* 453 */ "table_reference ::= joined_table", + /* 454 */ "table_primary ::= table_name alias_opt", + /* 455 */ "table_primary ::= db_name NK_DOT table_name alias_opt", + /* 456 */ "table_primary ::= subquery alias_opt", + /* 457 */ "table_primary ::= parenthesized_joined_table", + /* 458 */ "alias_opt ::=", + /* 459 */ "alias_opt ::= table_alias", + /* 460 */ "alias_opt ::= AS table_alias", + /* 461 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", + /* 462 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", + /* 463 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", + /* 464 */ "join_type ::=", + /* 465 */ "join_type ::= INNER", + /* 466 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", + /* 467 */ "set_quantifier_opt ::=", + /* 468 */ "set_quantifier_opt ::= DISTINCT", + /* 469 */ "set_quantifier_opt ::= ALL", + /* 470 */ "select_list ::= select_item", + /* 471 */ "select_list ::= select_list NK_COMMA select_item", + /* 472 */ "select_item ::= NK_STAR", + /* 473 */ "select_item ::= common_expression", + /* 474 */ "select_item ::= common_expression column_alias", + /* 475 */ "select_item ::= common_expression AS column_alias", + /* 476 */ "select_item ::= table_name NK_DOT NK_STAR", + /* 477 */ "where_clause_opt ::=", + /* 478 */ "where_clause_opt ::= WHERE search_condition", + /* 479 */ "partition_by_clause_opt ::=", + /* 480 */ "partition_by_clause_opt ::= PARTITION BY partition_list", + /* 481 */ "partition_list ::= partition_item", + /* 482 */ "partition_list ::= partition_list NK_COMMA partition_item", + /* 483 */ "partition_item ::= expr_or_subquery", + /* 484 */ "partition_item ::= expr_or_subquery column_alias", + /* 485 */ "partition_item ::= expr_or_subquery AS column_alias", + /* 486 */ "twindow_clause_opt ::=", + /* 487 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", + /* 488 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP", + /* 489 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", + /* 490 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", + /* 491 */ "sliding_opt ::=", + /* 492 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", + /* 493 */ "fill_opt ::=", + /* 494 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", + /* 
495 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", + /* 496 */ "fill_mode ::= NONE", + /* 497 */ "fill_mode ::= PREV", + /* 498 */ "fill_mode ::= NULL", + /* 499 */ "fill_mode ::= LINEAR", + /* 500 */ "fill_mode ::= NEXT", + /* 501 */ "group_by_clause_opt ::=", + /* 502 */ "group_by_clause_opt ::= GROUP BY group_by_list", + /* 503 */ "group_by_list ::= expr_or_subquery", + /* 504 */ "group_by_list ::= group_by_list NK_COMMA expr_or_subquery", + /* 505 */ "having_clause_opt ::=", + /* 506 */ "having_clause_opt ::= HAVING search_condition", + /* 507 */ "range_opt ::=", + /* 508 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP", + /* 509 */ "every_opt ::=", + /* 510 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", + /* 511 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt", + /* 512 */ "query_simple ::= query_specification", + /* 513 */ "query_simple ::= union_query_expression", + /* 514 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery", + /* 515 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery", + /* 516 */ "query_simple_or_subquery ::= query_simple", + /* 517 */ "query_simple_or_subquery ::= subquery", + /* 518 */ "query_or_subquery ::= query_expression", + /* 519 */ "query_or_subquery ::= subquery", + /* 520 */ "order_by_clause_opt ::=", + /* 521 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", + /* 522 */ "slimit_clause_opt ::=", + /* 523 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", + /* 524 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", + /* 525 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 526 */ "limit_clause_opt ::=", + /* 527 */ "limit_clause_opt ::= LIMIT NK_INTEGER", + /* 528 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", + /* 529 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 530 */ "subquery ::= NK_LP query_expression NK_RP", + /* 531 */ "subquery ::= NK_LP subquery NK_RP", + /* 532 */ "search_condition ::= common_expression", + /* 533 */ "sort_specification_list ::= sort_specification", + /* 534 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", + /* 535 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt", + /* 536 */ "ordering_specification_opt ::=", + /* 537 */ "ordering_specification_opt ::= ASC", + /* 538 */ "ordering_specification_opt ::= DESC", + /* 539 */ "null_ordering_opt ::=", + /* 540 */ "null_ordering_opt ::= NULLS FIRST", + /* 541 */ "null_ordering_opt ::= NULLS LAST", }; #endif /* NDEBUG */ @@ -3304,243 +3305,244 @@ static const struct { { 394, -3 }, /* (301) stream_options ::= stream_options WATERMARK duration_literal */ { 394, -4 }, /* (302) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ { 394, -3 }, /* (303) stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ - { 395, 0 }, /* (304) subtable_opt ::= */ - { 395, -4 }, /* (305) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ - { 322, -3 }, /* (306) cmd ::= KILL CONNECTION NK_INTEGER */ - { 322, -3 }, /* (307) cmd ::= KILL QUERY NK_STRING */ - { 322, -3 }, /* (308) cmd ::= KILL TRANSACTION NK_INTEGER */ - { 322, -2 }, /* (309) cmd ::= BALANCE VGROUP */ - { 322, -4 }, /* (310) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ - { 322, -4 }, /* (311) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ - { 322, -3 }, /* (312) cmd ::= SPLIT VGROUP 
NK_INTEGER */ - { 397, -2 }, /* (313) dnode_list ::= DNODE NK_INTEGER */ - { 397, -3 }, /* (314) dnode_list ::= dnode_list DNODE NK_INTEGER */ - { 322, -4 }, /* (315) cmd ::= DELETE FROM full_table_name where_clause_opt */ - { 322, -1 }, /* (316) cmd ::= query_or_subquery */ - { 322, -7 }, /* (317) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ - { 322, -4 }, /* (318) cmd ::= INSERT INTO full_table_name query_or_subquery */ - { 325, -1 }, /* (319) literal ::= NK_INTEGER */ - { 325, -1 }, /* (320) literal ::= NK_FLOAT */ - { 325, -1 }, /* (321) literal ::= NK_STRING */ - { 325, -1 }, /* (322) literal ::= NK_BOOL */ - { 325, -2 }, /* (323) literal ::= TIMESTAMP NK_STRING */ - { 325, -1 }, /* (324) literal ::= duration_literal */ - { 325, -1 }, /* (325) literal ::= NULL */ - { 325, -1 }, /* (326) literal ::= NK_QUESTION */ - { 369, -1 }, /* (327) duration_literal ::= NK_VARIABLE */ - { 399, -1 }, /* (328) signed ::= NK_INTEGER */ - { 399, -2 }, /* (329) signed ::= NK_PLUS NK_INTEGER */ - { 399, -2 }, /* (330) signed ::= NK_MINUS NK_INTEGER */ - { 399, -1 }, /* (331) signed ::= NK_FLOAT */ - { 399, -2 }, /* (332) signed ::= NK_PLUS NK_FLOAT */ - { 399, -2 }, /* (333) signed ::= NK_MINUS NK_FLOAT */ - { 358, -1 }, /* (334) signed_literal ::= signed */ - { 358, -1 }, /* (335) signed_literal ::= NK_STRING */ - { 358, -1 }, /* (336) signed_literal ::= NK_BOOL */ - { 358, -2 }, /* (337) signed_literal ::= TIMESTAMP NK_STRING */ - { 358, -1 }, /* (338) signed_literal ::= duration_literal */ - { 358, -1 }, /* (339) signed_literal ::= NULL */ - { 358, -1 }, /* (340) signed_literal ::= literal_func */ - { 358, -1 }, /* (341) signed_literal ::= NK_QUESTION */ - { 401, -1 }, /* (342) literal_list ::= signed_literal */ - { 401, -3 }, /* (343) literal_list ::= literal_list NK_COMMA signed_literal */ - { 333, -1 }, /* (344) db_name ::= NK_ID */ - { 364, -1 }, /* (345) table_name ::= NK_ID */ - { 356, -1 }, /* (346) column_name ::= NK_ID */ - { 371, -1 }, /* (347) function_name ::= NK_ID */ - { 402, -1 }, /* (348) table_alias ::= NK_ID */ - { 379, -1 }, /* (349) column_alias ::= NK_ID */ - { 327, -1 }, /* (350) user_name ::= NK_ID */ - { 334, -1 }, /* (351) topic_name ::= NK_ID */ - { 393, -1 }, /* (352) stream_name ::= NK_ID */ - { 388, -1 }, /* (353) cgroup_name ::= NK_ID */ - { 382, -1 }, /* (354) index_name ::= NK_ID */ - { 403, -1 }, /* (355) expr_or_subquery ::= expression */ - { 396, -1 }, /* (356) expression ::= literal */ - { 396, -1 }, /* (357) expression ::= pseudo_column */ - { 396, -1 }, /* (358) expression ::= column_reference */ - { 396, -1 }, /* (359) expression ::= function_expression */ - { 396, -1 }, /* (360) expression ::= case_when_expression */ - { 396, -3 }, /* (361) expression ::= NK_LP expression NK_RP */ - { 396, -2 }, /* (362) expression ::= NK_PLUS expr_or_subquery */ - { 396, -2 }, /* (363) expression ::= NK_MINUS expr_or_subquery */ - { 396, -3 }, /* (364) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ - { 396, -3 }, /* (365) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ - { 396, -3 }, /* (366) expression ::= expr_or_subquery NK_STAR expr_or_subquery */ - { 396, -3 }, /* (367) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ - { 396, -3 }, /* (368) expression ::= expr_or_subquery NK_REM expr_or_subquery */ - { 396, -3 }, /* (369) expression ::= column_reference NK_ARROW NK_STRING */ - { 396, -3 }, /* (370) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ - { 396, -3 }, /* (371) expression 
::= expr_or_subquery NK_BITOR expr_or_subquery */ - { 361, -1 }, /* (372) expression_list ::= expr_or_subquery */ - { 361, -3 }, /* (373) expression_list ::= expression_list NK_COMMA expr_or_subquery */ - { 405, -1 }, /* (374) column_reference ::= column_name */ - { 405, -3 }, /* (375) column_reference ::= table_name NK_DOT column_name */ - { 404, -1 }, /* (376) pseudo_column ::= ROWTS */ - { 404, -1 }, /* (377) pseudo_column ::= TBNAME */ - { 404, -3 }, /* (378) pseudo_column ::= table_name NK_DOT TBNAME */ - { 404, -1 }, /* (379) pseudo_column ::= QSTART */ - { 404, -1 }, /* (380) pseudo_column ::= QEND */ - { 404, -1 }, /* (381) pseudo_column ::= QDURATION */ - { 404, -1 }, /* (382) pseudo_column ::= WSTART */ - { 404, -1 }, /* (383) pseudo_column ::= WEND */ - { 404, -1 }, /* (384) pseudo_column ::= WDURATION */ - { 404, -1 }, /* (385) pseudo_column ::= IROWTS */ - { 404, -1 }, /* (386) pseudo_column ::= QTAGS */ - { 406, -4 }, /* (387) function_expression ::= function_name NK_LP expression_list NK_RP */ - { 406, -4 }, /* (388) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ - { 406, -6 }, /* (389) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ - { 406, -1 }, /* (390) function_expression ::= literal_func */ - { 400, -3 }, /* (391) literal_func ::= noarg_func NK_LP NK_RP */ - { 400, -1 }, /* (392) literal_func ::= NOW */ - { 410, -1 }, /* (393) noarg_func ::= NOW */ - { 410, -1 }, /* (394) noarg_func ::= TODAY */ - { 410, -1 }, /* (395) noarg_func ::= TIMEZONE */ - { 410, -1 }, /* (396) noarg_func ::= DATABASE */ - { 410, -1 }, /* (397) noarg_func ::= CLIENT_VERSION */ - { 410, -1 }, /* (398) noarg_func ::= SERVER_VERSION */ - { 410, -1 }, /* (399) noarg_func ::= SERVER_STATUS */ - { 410, -1 }, /* (400) noarg_func ::= CURRENT_USER */ - { 410, -1 }, /* (401) noarg_func ::= USER */ - { 408, -1 }, /* (402) star_func ::= COUNT */ - { 408, -1 }, /* (403) star_func ::= FIRST */ - { 408, -1 }, /* (404) star_func ::= LAST */ - { 408, -1 }, /* (405) star_func ::= LAST_ROW */ - { 409, -1 }, /* (406) star_func_para_list ::= NK_STAR */ - { 409, -1 }, /* (407) star_func_para_list ::= other_para_list */ - { 411, -1 }, /* (408) other_para_list ::= star_func_para */ - { 411, -3 }, /* (409) other_para_list ::= other_para_list NK_COMMA star_func_para */ - { 412, -1 }, /* (410) star_func_para ::= expr_or_subquery */ - { 412, -3 }, /* (411) star_func_para ::= table_name NK_DOT NK_STAR */ - { 407, -4 }, /* (412) case_when_expression ::= CASE when_then_list case_when_else_opt END */ - { 407, -5 }, /* (413) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ - { 413, -1 }, /* (414) when_then_list ::= when_then_expr */ - { 413, -2 }, /* (415) when_then_list ::= when_then_list when_then_expr */ - { 416, -4 }, /* (416) when_then_expr ::= WHEN common_expression THEN common_expression */ - { 414, 0 }, /* (417) case_when_else_opt ::= */ - { 414, -2 }, /* (418) case_when_else_opt ::= ELSE common_expression */ - { 417, -3 }, /* (419) predicate ::= expr_or_subquery compare_op expr_or_subquery */ - { 417, -5 }, /* (420) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ - { 417, -6 }, /* (421) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ - { 417, -3 }, /* (422) predicate ::= expr_or_subquery IS NULL */ - { 417, -4 }, /* (423) predicate ::= expr_or_subquery IS NOT NULL */ - { 417, -3 }, /* (424) predicate ::= expr_or_subquery in_op in_predicate_value */ - { 418, -1 }, 
/* (425) compare_op ::= NK_LT */ - { 418, -1 }, /* (426) compare_op ::= NK_GT */ - { 418, -1 }, /* (427) compare_op ::= NK_LE */ - { 418, -1 }, /* (428) compare_op ::= NK_GE */ - { 418, -1 }, /* (429) compare_op ::= NK_NE */ - { 418, -1 }, /* (430) compare_op ::= NK_EQ */ - { 418, -1 }, /* (431) compare_op ::= LIKE */ - { 418, -2 }, /* (432) compare_op ::= NOT LIKE */ - { 418, -1 }, /* (433) compare_op ::= MATCH */ - { 418, -1 }, /* (434) compare_op ::= NMATCH */ - { 418, -1 }, /* (435) compare_op ::= CONTAINS */ - { 419, -1 }, /* (436) in_op ::= IN */ - { 419, -2 }, /* (437) in_op ::= NOT IN */ - { 420, -3 }, /* (438) in_predicate_value ::= NK_LP literal_list NK_RP */ - { 421, -1 }, /* (439) boolean_value_expression ::= boolean_primary */ - { 421, -2 }, /* (440) boolean_value_expression ::= NOT boolean_primary */ - { 421, -3 }, /* (441) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ - { 421, -3 }, /* (442) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ - { 422, -1 }, /* (443) boolean_primary ::= predicate */ - { 422, -3 }, /* (444) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ - { 415, -1 }, /* (445) common_expression ::= expr_or_subquery */ - { 415, -1 }, /* (446) common_expression ::= boolean_value_expression */ - { 423, 0 }, /* (447) from_clause_opt ::= */ - { 423, -2 }, /* (448) from_clause_opt ::= FROM table_reference_list */ - { 424, -1 }, /* (449) table_reference_list ::= table_reference */ - { 424, -3 }, /* (450) table_reference_list ::= table_reference_list NK_COMMA table_reference */ - { 425, -1 }, /* (451) table_reference ::= table_primary */ - { 425, -1 }, /* (452) table_reference ::= joined_table */ - { 426, -2 }, /* (453) table_primary ::= table_name alias_opt */ - { 426, -4 }, /* (454) table_primary ::= db_name NK_DOT table_name alias_opt */ - { 426, -2 }, /* (455) table_primary ::= subquery alias_opt */ - { 426, -1 }, /* (456) table_primary ::= parenthesized_joined_table */ - { 428, 0 }, /* (457) alias_opt ::= */ - { 428, -1 }, /* (458) alias_opt ::= table_alias */ - { 428, -2 }, /* (459) alias_opt ::= AS table_alias */ - { 430, -3 }, /* (460) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - { 430, -3 }, /* (461) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ - { 427, -6 }, /* (462) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ - { 431, 0 }, /* (463) join_type ::= */ - { 431, -1 }, /* (464) join_type ::= INNER */ - { 433, -12 }, /* (465) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ - { 434, 0 }, /* (466) set_quantifier_opt ::= */ - { 434, -1 }, /* (467) set_quantifier_opt ::= DISTINCT */ - { 434, -1 }, /* (468) set_quantifier_opt ::= ALL */ - { 435, -1 }, /* (469) select_list ::= select_item */ - { 435, -3 }, /* (470) select_list ::= select_list NK_COMMA select_item */ - { 443, -1 }, /* (471) select_item ::= NK_STAR */ - { 443, -1 }, /* (472) select_item ::= common_expression */ - { 443, -2 }, /* (473) select_item ::= common_expression column_alias */ - { 443, -3 }, /* (474) select_item ::= common_expression AS column_alias */ - { 443, -3 }, /* (475) select_item ::= table_name NK_DOT NK_STAR */ - { 398, 0 }, /* (476) where_clause_opt ::= */ - { 398, -2 }, /* (477) where_clause_opt ::= WHERE search_condition */ - { 436, 0 }, /* (478) 
partition_by_clause_opt ::= */ - { 436, -3 }, /* (479) partition_by_clause_opt ::= PARTITION BY partition_list */ - { 444, -1 }, /* (480) partition_list ::= partition_item */ - { 444, -3 }, /* (481) partition_list ::= partition_list NK_COMMA partition_item */ - { 445, -1 }, /* (482) partition_item ::= expr_or_subquery */ - { 445, -2 }, /* (483) partition_item ::= expr_or_subquery column_alias */ - { 445, -3 }, /* (484) partition_item ::= expr_or_subquery AS column_alias */ - { 440, 0 }, /* (485) twindow_clause_opt ::= */ - { 440, -6 }, /* (486) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ - { 440, -4 }, /* (487) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ - { 440, -6 }, /* (488) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ - { 440, -8 }, /* (489) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ - { 384, 0 }, /* (490) sliding_opt ::= */ - { 384, -4 }, /* (491) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ - { 439, 0 }, /* (492) fill_opt ::= */ - { 439, -4 }, /* (493) fill_opt ::= FILL NK_LP fill_mode NK_RP */ - { 439, -6 }, /* (494) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ - { 446, -1 }, /* (495) fill_mode ::= NONE */ - { 446, -1 }, /* (496) fill_mode ::= PREV */ - { 446, -1 }, /* (497) fill_mode ::= NULL */ - { 446, -1 }, /* (498) fill_mode ::= LINEAR */ - { 446, -1 }, /* (499) fill_mode ::= NEXT */ - { 441, 0 }, /* (500) group_by_clause_opt ::= */ - { 441, -3 }, /* (501) group_by_clause_opt ::= GROUP BY group_by_list */ - { 447, -1 }, /* (502) group_by_list ::= expr_or_subquery */ - { 447, -3 }, /* (503) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ - { 442, 0 }, /* (504) having_clause_opt ::= */ - { 442, -2 }, /* (505) having_clause_opt ::= HAVING search_condition */ - { 437, 0 }, /* (506) range_opt ::= */ - { 437, -6 }, /* (507) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ - { 438, 0 }, /* (508) every_opt ::= */ - { 438, -4 }, /* (509) every_opt ::= EVERY NK_LP duration_literal NK_RP */ - { 448, -4 }, /* (510) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ - { 449, -1 }, /* (511) query_simple ::= query_specification */ - { 449, -1 }, /* (512) query_simple ::= union_query_expression */ - { 453, -4 }, /* (513) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ - { 453, -3 }, /* (514) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ - { 454, -1 }, /* (515) query_simple_or_subquery ::= query_simple */ - { 454, -1 }, /* (516) query_simple_or_subquery ::= subquery */ - { 387, -1 }, /* (517) query_or_subquery ::= query_expression */ - { 387, -1 }, /* (518) query_or_subquery ::= subquery */ - { 450, 0 }, /* (519) order_by_clause_opt ::= */ - { 450, -3 }, /* (520) order_by_clause_opt ::= ORDER BY sort_specification_list */ - { 451, 0 }, /* (521) slimit_clause_opt ::= */ - { 451, -2 }, /* (522) slimit_clause_opt ::= SLIMIT NK_INTEGER */ - { 451, -4 }, /* (523) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - { 451, -4 }, /* (524) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 452, 0 }, /* (525) limit_clause_opt ::= */ - { 452, -2 }, /* (526) limit_clause_opt ::= LIMIT NK_INTEGER */ - { 452, -4 }, /* (527) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ - { 452, -4 }, /* (528) 
limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 429, -3 }, /* (529) subquery ::= NK_LP query_expression NK_RP */ - { 429, -3 }, /* (530) subquery ::= NK_LP subquery NK_RP */ - { 432, -1 }, /* (531) search_condition ::= common_expression */ - { 455, -1 }, /* (532) sort_specification_list ::= sort_specification */ - { 455, -3 }, /* (533) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ - { 456, -3 }, /* (534) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ - { 457, 0 }, /* (535) ordering_specification_opt ::= */ - { 457, -1 }, /* (536) ordering_specification_opt ::= ASC */ - { 457, -1 }, /* (537) ordering_specification_opt ::= DESC */ - { 458, 0 }, /* (538) null_ordering_opt ::= */ - { 458, -2 }, /* (539) null_ordering_opt ::= NULLS FIRST */ - { 458, -2 }, /* (540) null_ordering_opt ::= NULLS LAST */ + { 394, -3 }, /* (304) stream_options ::= stream_options DELETE_MARK duration_literal */ + { 395, 0 }, /* (305) subtable_opt ::= */ + { 395, -4 }, /* (306) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ + { 322, -3 }, /* (307) cmd ::= KILL CONNECTION NK_INTEGER */ + { 322, -3 }, /* (308) cmd ::= KILL QUERY NK_STRING */ + { 322, -3 }, /* (309) cmd ::= KILL TRANSACTION NK_INTEGER */ + { 322, -2 }, /* (310) cmd ::= BALANCE VGROUP */ + { 322, -4 }, /* (311) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + { 322, -4 }, /* (312) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + { 322, -3 }, /* (313) cmd ::= SPLIT VGROUP NK_INTEGER */ + { 397, -2 }, /* (314) dnode_list ::= DNODE NK_INTEGER */ + { 397, -3 }, /* (315) dnode_list ::= dnode_list DNODE NK_INTEGER */ + { 322, -4 }, /* (316) cmd ::= DELETE FROM full_table_name where_clause_opt */ + { 322, -1 }, /* (317) cmd ::= query_or_subquery */ + { 322, -7 }, /* (318) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ + { 322, -4 }, /* (319) cmd ::= INSERT INTO full_table_name query_or_subquery */ + { 325, -1 }, /* (320) literal ::= NK_INTEGER */ + { 325, -1 }, /* (321) literal ::= NK_FLOAT */ + { 325, -1 }, /* (322) literal ::= NK_STRING */ + { 325, -1 }, /* (323) literal ::= NK_BOOL */ + { 325, -2 }, /* (324) literal ::= TIMESTAMP NK_STRING */ + { 325, -1 }, /* (325) literal ::= duration_literal */ + { 325, -1 }, /* (326) literal ::= NULL */ + { 325, -1 }, /* (327) literal ::= NK_QUESTION */ + { 369, -1 }, /* (328) duration_literal ::= NK_VARIABLE */ + { 399, -1 }, /* (329) signed ::= NK_INTEGER */ + { 399, -2 }, /* (330) signed ::= NK_PLUS NK_INTEGER */ + { 399, -2 }, /* (331) signed ::= NK_MINUS NK_INTEGER */ + { 399, -1 }, /* (332) signed ::= NK_FLOAT */ + { 399, -2 }, /* (333) signed ::= NK_PLUS NK_FLOAT */ + { 399, -2 }, /* (334) signed ::= NK_MINUS NK_FLOAT */ + { 358, -1 }, /* (335) signed_literal ::= signed */ + { 358, -1 }, /* (336) signed_literal ::= NK_STRING */ + { 358, -1 }, /* (337) signed_literal ::= NK_BOOL */ + { 358, -2 }, /* (338) signed_literal ::= TIMESTAMP NK_STRING */ + { 358, -1 }, /* (339) signed_literal ::= duration_literal */ + { 358, -1 }, /* (340) signed_literal ::= NULL */ + { 358, -1 }, /* (341) signed_literal ::= literal_func */ + { 358, -1 }, /* (342) signed_literal ::= NK_QUESTION */ + { 401, -1 }, /* (343) literal_list ::= signed_literal */ + { 401, -3 }, /* (344) literal_list ::= literal_list NK_COMMA signed_literal */ + { 333, -1 }, /* (345) db_name ::= NK_ID */ + { 364, -1 }, /* (346) table_name ::= NK_ID */ + { 356, -1 }, /* (347) column_name ::= NK_ID */ + { 371, -1 }, /* (348) 
function_name ::= NK_ID */ + { 402, -1 }, /* (349) table_alias ::= NK_ID */ + { 379, -1 }, /* (350) column_alias ::= NK_ID */ + { 327, -1 }, /* (351) user_name ::= NK_ID */ + { 334, -1 }, /* (352) topic_name ::= NK_ID */ + { 393, -1 }, /* (353) stream_name ::= NK_ID */ + { 388, -1 }, /* (354) cgroup_name ::= NK_ID */ + { 382, -1 }, /* (355) index_name ::= NK_ID */ + { 403, -1 }, /* (356) expr_or_subquery ::= expression */ + { 396, -1 }, /* (357) expression ::= literal */ + { 396, -1 }, /* (358) expression ::= pseudo_column */ + { 396, -1 }, /* (359) expression ::= column_reference */ + { 396, -1 }, /* (360) expression ::= function_expression */ + { 396, -1 }, /* (361) expression ::= case_when_expression */ + { 396, -3 }, /* (362) expression ::= NK_LP expression NK_RP */ + { 396, -2 }, /* (363) expression ::= NK_PLUS expr_or_subquery */ + { 396, -2 }, /* (364) expression ::= NK_MINUS expr_or_subquery */ + { 396, -3 }, /* (365) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ + { 396, -3 }, /* (366) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ + { 396, -3 }, /* (367) expression ::= expr_or_subquery NK_STAR expr_or_subquery */ + { 396, -3 }, /* (368) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ + { 396, -3 }, /* (369) expression ::= expr_or_subquery NK_REM expr_or_subquery */ + { 396, -3 }, /* (370) expression ::= column_reference NK_ARROW NK_STRING */ + { 396, -3 }, /* (371) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ + { 396, -3 }, /* (372) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ + { 361, -1 }, /* (373) expression_list ::= expr_or_subquery */ + { 361, -3 }, /* (374) expression_list ::= expression_list NK_COMMA expr_or_subquery */ + { 405, -1 }, /* (375) column_reference ::= column_name */ + { 405, -3 }, /* (376) column_reference ::= table_name NK_DOT column_name */ + { 404, -1 }, /* (377) pseudo_column ::= ROWTS */ + { 404, -1 }, /* (378) pseudo_column ::= TBNAME */ + { 404, -3 }, /* (379) pseudo_column ::= table_name NK_DOT TBNAME */ + { 404, -1 }, /* (380) pseudo_column ::= QSTART */ + { 404, -1 }, /* (381) pseudo_column ::= QEND */ + { 404, -1 }, /* (382) pseudo_column ::= QDURATION */ + { 404, -1 }, /* (383) pseudo_column ::= WSTART */ + { 404, -1 }, /* (384) pseudo_column ::= WEND */ + { 404, -1 }, /* (385) pseudo_column ::= WDURATION */ + { 404, -1 }, /* (386) pseudo_column ::= IROWTS */ + { 404, -1 }, /* (387) pseudo_column ::= QTAGS */ + { 406, -4 }, /* (388) function_expression ::= function_name NK_LP expression_list NK_RP */ + { 406, -4 }, /* (389) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ + { 406, -6 }, /* (390) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ + { 406, -1 }, /* (391) function_expression ::= literal_func */ + { 400, -3 }, /* (392) literal_func ::= noarg_func NK_LP NK_RP */ + { 400, -1 }, /* (393) literal_func ::= NOW */ + { 410, -1 }, /* (394) noarg_func ::= NOW */ + { 410, -1 }, /* (395) noarg_func ::= TODAY */ + { 410, -1 }, /* (396) noarg_func ::= TIMEZONE */ + { 410, -1 }, /* (397) noarg_func ::= DATABASE */ + { 410, -1 }, /* (398) noarg_func ::= CLIENT_VERSION */ + { 410, -1 }, /* (399) noarg_func ::= SERVER_VERSION */ + { 410, -1 }, /* (400) noarg_func ::= SERVER_STATUS */ + { 410, -1 }, /* (401) noarg_func ::= CURRENT_USER */ + { 410, -1 }, /* (402) noarg_func ::= USER */ + { 408, -1 }, /* (403) star_func ::= COUNT */ + { 408, -1 }, /* (404) star_func ::= FIRST */ + { 408, -1 }, /* (405) star_func ::= LAST */ + { 408, -1 }, 
/* (406) star_func ::= LAST_ROW */ + { 409, -1 }, /* (407) star_func_para_list ::= NK_STAR */ + { 409, -1 }, /* (408) star_func_para_list ::= other_para_list */ + { 411, -1 }, /* (409) other_para_list ::= star_func_para */ + { 411, -3 }, /* (410) other_para_list ::= other_para_list NK_COMMA star_func_para */ + { 412, -1 }, /* (411) star_func_para ::= expr_or_subquery */ + { 412, -3 }, /* (412) star_func_para ::= table_name NK_DOT NK_STAR */ + { 407, -4 }, /* (413) case_when_expression ::= CASE when_then_list case_when_else_opt END */ + { 407, -5 }, /* (414) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ + { 413, -1 }, /* (415) when_then_list ::= when_then_expr */ + { 413, -2 }, /* (416) when_then_list ::= when_then_list when_then_expr */ + { 416, -4 }, /* (417) when_then_expr ::= WHEN common_expression THEN common_expression */ + { 414, 0 }, /* (418) case_when_else_opt ::= */ + { 414, -2 }, /* (419) case_when_else_opt ::= ELSE common_expression */ + { 417, -3 }, /* (420) predicate ::= expr_or_subquery compare_op expr_or_subquery */ + { 417, -5 }, /* (421) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ + { 417, -6 }, /* (422) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ + { 417, -3 }, /* (423) predicate ::= expr_or_subquery IS NULL */ + { 417, -4 }, /* (424) predicate ::= expr_or_subquery IS NOT NULL */ + { 417, -3 }, /* (425) predicate ::= expr_or_subquery in_op in_predicate_value */ + { 418, -1 }, /* (426) compare_op ::= NK_LT */ + { 418, -1 }, /* (427) compare_op ::= NK_GT */ + { 418, -1 }, /* (428) compare_op ::= NK_LE */ + { 418, -1 }, /* (429) compare_op ::= NK_GE */ + { 418, -1 }, /* (430) compare_op ::= NK_NE */ + { 418, -1 }, /* (431) compare_op ::= NK_EQ */ + { 418, -1 }, /* (432) compare_op ::= LIKE */ + { 418, -2 }, /* (433) compare_op ::= NOT LIKE */ + { 418, -1 }, /* (434) compare_op ::= MATCH */ + { 418, -1 }, /* (435) compare_op ::= NMATCH */ + { 418, -1 }, /* (436) compare_op ::= CONTAINS */ + { 419, -1 }, /* (437) in_op ::= IN */ + { 419, -2 }, /* (438) in_op ::= NOT IN */ + { 420, -3 }, /* (439) in_predicate_value ::= NK_LP literal_list NK_RP */ + { 421, -1 }, /* (440) boolean_value_expression ::= boolean_primary */ + { 421, -2 }, /* (441) boolean_value_expression ::= NOT boolean_primary */ + { 421, -3 }, /* (442) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + { 421, -3 }, /* (443) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + { 422, -1 }, /* (444) boolean_primary ::= predicate */ + { 422, -3 }, /* (445) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ + { 415, -1 }, /* (446) common_expression ::= expr_or_subquery */ + { 415, -1 }, /* (447) common_expression ::= boolean_value_expression */ + { 423, 0 }, /* (448) from_clause_opt ::= */ + { 423, -2 }, /* (449) from_clause_opt ::= FROM table_reference_list */ + { 424, -1 }, /* (450) table_reference_list ::= table_reference */ + { 424, -3 }, /* (451) table_reference_list ::= table_reference_list NK_COMMA table_reference */ + { 425, -1 }, /* (452) table_reference ::= table_primary */ + { 425, -1 }, /* (453) table_reference ::= joined_table */ + { 426, -2 }, /* (454) table_primary ::= table_name alias_opt */ + { 426, -4 }, /* (455) table_primary ::= db_name NK_DOT table_name alias_opt */ + { 426, -2 }, /* (456) table_primary ::= subquery alias_opt */ + { 426, -1 }, /* (457) table_primary ::= parenthesized_joined_table */ + { 
428, 0 }, /* (458) alias_opt ::= */ + { 428, -1 }, /* (459) alias_opt ::= table_alias */ + { 428, -2 }, /* (460) alias_opt ::= AS table_alias */ + { 430, -3 }, /* (461) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + { 430, -3 }, /* (462) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ + { 427, -6 }, /* (463) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + { 431, 0 }, /* (464) join_type ::= */ + { 431, -1 }, /* (465) join_type ::= INNER */ + { 433, -12 }, /* (466) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + { 434, 0 }, /* (467) set_quantifier_opt ::= */ + { 434, -1 }, /* (468) set_quantifier_opt ::= DISTINCT */ + { 434, -1 }, /* (469) set_quantifier_opt ::= ALL */ + { 435, -1 }, /* (470) select_list ::= select_item */ + { 435, -3 }, /* (471) select_list ::= select_list NK_COMMA select_item */ + { 443, -1 }, /* (472) select_item ::= NK_STAR */ + { 443, -1 }, /* (473) select_item ::= common_expression */ + { 443, -2 }, /* (474) select_item ::= common_expression column_alias */ + { 443, -3 }, /* (475) select_item ::= common_expression AS column_alias */ + { 443, -3 }, /* (476) select_item ::= table_name NK_DOT NK_STAR */ + { 398, 0 }, /* (477) where_clause_opt ::= */ + { 398, -2 }, /* (478) where_clause_opt ::= WHERE search_condition */ + { 436, 0 }, /* (479) partition_by_clause_opt ::= */ + { 436, -3 }, /* (480) partition_by_clause_opt ::= PARTITION BY partition_list */ + { 444, -1 }, /* (481) partition_list ::= partition_item */ + { 444, -3 }, /* (482) partition_list ::= partition_list NK_COMMA partition_item */ + { 445, -1 }, /* (483) partition_item ::= expr_or_subquery */ + { 445, -2 }, /* (484) partition_item ::= expr_or_subquery column_alias */ + { 445, -3 }, /* (485) partition_item ::= expr_or_subquery AS column_alias */ + { 440, 0 }, /* (486) twindow_clause_opt ::= */ + { 440, -6 }, /* (487) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + { 440, -4 }, /* (488) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ + { 440, -6 }, /* (489) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + { 440, -8 }, /* (490) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + { 384, 0 }, /* (491) sliding_opt ::= */ + { 384, -4 }, /* (492) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ + { 439, 0 }, /* (493) fill_opt ::= */ + { 439, -4 }, /* (494) fill_opt ::= FILL NK_LP fill_mode NK_RP */ + { 439, -6 }, /* (495) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + { 446, -1 }, /* (496) fill_mode ::= NONE */ + { 446, -1 }, /* (497) fill_mode ::= PREV */ + { 446, -1 }, /* (498) fill_mode ::= NULL */ + { 446, -1 }, /* (499) fill_mode ::= LINEAR */ + { 446, -1 }, /* (500) fill_mode ::= NEXT */ + { 441, 0 }, /* (501) group_by_clause_opt ::= */ + { 441, -3 }, /* (502) group_by_clause_opt ::= GROUP BY group_by_list */ + { 447, -1 }, /* (503) group_by_list ::= expr_or_subquery */ + { 447, -3 }, /* (504) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ + { 442, 0 }, /* (505) having_clause_opt ::= */ + { 442, -2 }, /* (506) having_clause_opt ::= HAVING search_condition */ + { 437, 0 }, /* (507) range_opt ::= */ + { 437, -6 }, /* (508) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA 
expr_or_subquery NK_RP */ + { 438, 0 }, /* (509) every_opt ::= */ + { 438, -4 }, /* (510) every_opt ::= EVERY NK_LP duration_literal NK_RP */ + { 448, -4 }, /* (511) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ + { 449, -1 }, /* (512) query_simple ::= query_specification */ + { 449, -1 }, /* (513) query_simple ::= union_query_expression */ + { 453, -4 }, /* (514) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ + { 453, -3 }, /* (515) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ + { 454, -1 }, /* (516) query_simple_or_subquery ::= query_simple */ + { 454, -1 }, /* (517) query_simple_or_subquery ::= subquery */ + { 387, -1 }, /* (518) query_or_subquery ::= query_expression */ + { 387, -1 }, /* (519) query_or_subquery ::= subquery */ + { 450, 0 }, /* (520) order_by_clause_opt ::= */ + { 450, -3 }, /* (521) order_by_clause_opt ::= ORDER BY sort_specification_list */ + { 451, 0 }, /* (522) slimit_clause_opt ::= */ + { 451, -2 }, /* (523) slimit_clause_opt ::= SLIMIT NK_INTEGER */ + { 451, -4 }, /* (524) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + { 451, -4 }, /* (525) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 452, 0 }, /* (526) limit_clause_opt ::= */ + { 452, -2 }, /* (527) limit_clause_opt ::= LIMIT NK_INTEGER */ + { 452, -4 }, /* (528) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ + { 452, -4 }, /* (529) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 429, -3 }, /* (530) subquery ::= NK_LP query_expression NK_RP */ + { 429, -3 }, /* (531) subquery ::= NK_LP subquery NK_RP */ + { 432, -1 }, /* (532) search_condition ::= common_expression */ + { 455, -1 }, /* (533) sort_specification_list ::= sort_specification */ + { 455, -3 }, /* (534) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ + { 456, -3 }, /* (535) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ + { 457, 0 }, /* (536) ordering_specification_opt ::= */ + { 457, -1 }, /* (537) ordering_specification_opt ::= ASC */ + { 457, -1 }, /* (538) ordering_specification_opt ::= DESC */ + { 458, 0 }, /* (539) null_ordering_opt ::= */ + { 458, -2 }, /* (540) null_ordering_opt ::= NULLS FIRST */ + { 458, -2 }, /* (541) null_ordering_opt ::= NULLS LAST */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -3733,7 +3735,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-2].minor.yy317 = yylhsminor.yy317; break; case 42: /* priv_level ::= topic_name */ - case 458: /* alias_opt ::= table_alias */ yytestcase(yyruleno==458); + case 459: /* alias_opt ::= table_alias */ yytestcase(yyruleno==459); { yylhsminor.yy317 = yymsp[0].minor.yy317; } yymsp[0].minor.yy317 = yylhsminor.yy317; break; @@ -3764,30 +3766,30 @@ static YYACTIONTYPE yy_reduce( case 51: /* dnode_endpoint ::= NK_STRING */ case 52: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==52); case 53: /* dnode_endpoint ::= NK_IPTOKEN */ yytestcase(yyruleno==53); - case 344: /* db_name ::= NK_ID */ yytestcase(yyruleno==344); - case 345: /* table_name ::= NK_ID */ yytestcase(yyruleno==345); - case 346: /* column_name ::= NK_ID */ yytestcase(yyruleno==346); - case 347: /* function_name ::= NK_ID */ yytestcase(yyruleno==347); - case 348: /* table_alias ::= NK_ID */ yytestcase(yyruleno==348); - case 349: /* column_alias ::= NK_ID */ yytestcase(yyruleno==349); - case 350: /* user_name ::= NK_ID */ yytestcase(yyruleno==350); 
- case 351: /* topic_name ::= NK_ID */ yytestcase(yyruleno==351); - case 352: /* stream_name ::= NK_ID */ yytestcase(yyruleno==352); - case 353: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==353); - case 354: /* index_name ::= NK_ID */ yytestcase(yyruleno==354); - case 393: /* noarg_func ::= NOW */ yytestcase(yyruleno==393); - case 394: /* noarg_func ::= TODAY */ yytestcase(yyruleno==394); - case 395: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==395); - case 396: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==396); - case 397: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==397); - case 398: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==398); - case 399: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==399); - case 400: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==400); - case 401: /* noarg_func ::= USER */ yytestcase(yyruleno==401); - case 402: /* star_func ::= COUNT */ yytestcase(yyruleno==402); - case 403: /* star_func ::= FIRST */ yytestcase(yyruleno==403); - case 404: /* star_func ::= LAST */ yytestcase(yyruleno==404); - case 405: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==405); + case 345: /* db_name ::= NK_ID */ yytestcase(yyruleno==345); + case 346: /* table_name ::= NK_ID */ yytestcase(yyruleno==346); + case 347: /* column_name ::= NK_ID */ yytestcase(yyruleno==347); + case 348: /* function_name ::= NK_ID */ yytestcase(yyruleno==348); + case 349: /* table_alias ::= NK_ID */ yytestcase(yyruleno==349); + case 350: /* column_alias ::= NK_ID */ yytestcase(yyruleno==350); + case 351: /* user_name ::= NK_ID */ yytestcase(yyruleno==351); + case 352: /* topic_name ::= NK_ID */ yytestcase(yyruleno==352); + case 353: /* stream_name ::= NK_ID */ yytestcase(yyruleno==353); + case 354: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==354); + case 355: /* index_name ::= NK_ID */ yytestcase(yyruleno==355); + case 394: /* noarg_func ::= NOW */ yytestcase(yyruleno==394); + case 395: /* noarg_func ::= TODAY */ yytestcase(yyruleno==395); + case 396: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==396); + case 397: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==397); + case 398: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==398); + case 399: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==399); + case 400: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==400); + case 401: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==401); + case 402: /* noarg_func ::= USER */ yytestcase(yyruleno==402); + case 403: /* star_func ::= COUNT */ yytestcase(yyruleno==403); + case 404: /* star_func ::= FIRST */ yytestcase(yyruleno==404); + case 405: /* star_func ::= LAST */ yytestcase(yyruleno==405); + case 406: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==406); { yylhsminor.yy317 = yymsp[0].minor.yy0; } yymsp[0].minor.yy317 = yylhsminor.yy317; break; @@ -3796,13 +3798,13 @@ static YYACTIONTYPE yy_reduce( case 75: /* exists_opt ::= */ yytestcase(yyruleno==75); case 284: /* analyze_opt ::= */ yytestcase(yyruleno==284); case 291: /* agg_func_opt ::= */ yytestcase(yyruleno==291); - case 466: /* set_quantifier_opt ::= */ yytestcase(yyruleno==466); + case 467: /* set_quantifier_opt ::= */ yytestcase(yyruleno==467); { yymsp[1].minor.yy335 = false; } break; case 55: /* force_opt ::= FORCE */ case 285: /* analyze_opt ::= ANALYZE */ yytestcase(yyruleno==285); case 292: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==292); - case 467: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==467); + case 468: /* 
set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==468); { yymsp[0].minor.yy335 = true; } break; case 56: /* cmd ::= ALTER LOCAL NK_STRING */ @@ -4025,7 +4027,7 @@ static YYACTIONTYPE yy_reduce( yymsp[0].minor.yy874 = yylhsminor.yy874; break; case 120: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ - case 314: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==314); + case 315: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==315); { yylhsminor.yy874 = addNodeToList(pCxt, yymsp[-2].minor.yy874, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } yymsp[-2].minor.yy874 = yylhsminor.yy874; break; @@ -4045,12 +4047,12 @@ static YYACTIONTYPE yy_reduce( case 204: /* col_name_list ::= col_name */ yytestcase(yyruleno==204); case 253: /* tag_list_opt ::= tag_item */ yytestcase(yyruleno==253); case 266: /* func_list ::= func */ yytestcase(yyruleno==266); - case 342: /* literal_list ::= signed_literal */ yytestcase(yyruleno==342); - case 408: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==408); - case 414: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==414); - case 469: /* select_list ::= select_item */ yytestcase(yyruleno==469); - case 480: /* partition_list ::= partition_item */ yytestcase(yyruleno==480); - case 532: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==532); + case 343: /* literal_list ::= signed_literal */ yytestcase(yyruleno==343); + case 409: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==409); + case 415: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==415); + case 470: /* select_list ::= select_item */ yytestcase(yyruleno==470); + case 481: /* partition_list ::= partition_item */ yytestcase(yyruleno==481); + case 533: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==533); { yylhsminor.yy874 = createNodeList(pCxt, yymsp[0].minor.yy74); } yymsp[0].minor.yy874 = yylhsminor.yy874; break; @@ -4060,11 +4062,11 @@ static YYACTIONTYPE yy_reduce( case 205: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==205); case 254: /* tag_list_opt ::= tag_list_opt NK_COMMA tag_item */ yytestcase(yyruleno==254); case 267: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==267); - case 343: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==343); - case 409: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==409); - case 470: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==470); - case 481: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==481); - case 533: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==533); + case 344: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==344); + case 410: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==410); + case 471: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==471); + case 482: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==482); + case 534: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==534); { yylhsminor.yy874 = addNodeToList(pCxt, yymsp[-2].minor.yy874, yymsp[0].minor.yy74); } yymsp[-2].minor.yy874 = yylhsminor.yy874; break; @@ -4094,7 +4096,7 @@ static YYACTIONTYPE 
yy_reduce( { pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy335, yymsp[0].minor.yy74); } break; case 133: /* cmd ::= ALTER TABLE alter_table_clause */ - case 316: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==316); + case 317: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==317); { pCxt->pRootNode = yymsp[0].minor.yy74; } break; case 134: /* cmd ::= ALTER STABLE alter_table_clause */ @@ -4142,7 +4144,7 @@ static YYACTIONTYPE yy_reduce( break; case 146: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ case 149: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==149); - case 415: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==415); + case 416: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==416); { yylhsminor.yy874 = addNodeToList(pCxt, yymsp[-1].minor.yy874, yymsp[0].minor.yy74); } yymsp[-1].minor.yy874 = yylhsminor.yy874; break; @@ -4157,9 +4159,9 @@ static YYACTIONTYPE yy_reduce( case 151: /* specific_cols_opt ::= */ case 182: /* tags_def_opt ::= */ yytestcase(yyruleno==182); case 252: /* tag_list_opt ::= */ yytestcase(yyruleno==252); - case 478: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==478); - case 500: /* group_by_clause_opt ::= */ yytestcase(yyruleno==500); - case 519: /* order_by_clause_opt ::= */ yytestcase(yyruleno==519); + case 479: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==479); + case 501: /* group_by_clause_opt ::= */ yytestcase(yyruleno==501); + case 520: /* order_by_clause_opt ::= */ yytestcase(yyruleno==520); { yymsp[1].minor.yy874 = NULL; } break; case 152: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */ @@ -4249,7 +4251,7 @@ static YYACTIONTYPE yy_reduce( { yymsp[-5].minor.yy898 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; case 183: /* tags_def_opt ::= tags_def */ - case 407: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==407); + case 408: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==408); { yylhsminor.yy874 = yymsp[0].minor.yy874; } yymsp[0].minor.yy874 = yylhsminor.yy874; break; @@ -4302,12 +4304,12 @@ static YYACTIONTYPE yy_reduce( { yymsp[-1].minor.yy767.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy767.val = yymsp[0].minor.yy0; } break; case 197: /* duration_list ::= duration_literal */ - case 372: /* expression_list ::= expr_or_subquery */ yytestcase(yyruleno==372); + case 373: /* expression_list ::= expr_or_subquery */ yytestcase(yyruleno==373); { yylhsminor.yy874 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy74)); } yymsp[0].minor.yy874 = yylhsminor.yy874; break; case 198: /* duration_list ::= duration_list NK_COMMA duration_literal */ - case 373: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==373); + case 374: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==374); { yylhsminor.yy874 = addNodeToList(pCxt, yymsp[-2].minor.yy874, releaseRawExprNode(pCxt, yymsp[0].minor.yy74)); } yymsp[-2].minor.yy874 = yylhsminor.yy874; break; @@ -4445,18 +4447,18 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy74 = yylhsminor.yy74; break; case 247: /* like_pattern_opt ::= */ - case 304: /* subtable_opt ::= */ yytestcase(yyruleno==304); - case 417: /* case_when_else_opt ::= */ yytestcase(yyruleno==417); - case 447: /* from_clause_opt ::= */ yytestcase(yyruleno==447); - case 476: /* where_clause_opt ::= */ yytestcase(yyruleno==476); - case 485: /* 
twindow_clause_opt ::= */ yytestcase(yyruleno==485); - case 490: /* sliding_opt ::= */ yytestcase(yyruleno==490); - case 492: /* fill_opt ::= */ yytestcase(yyruleno==492); - case 504: /* having_clause_opt ::= */ yytestcase(yyruleno==504); - case 506: /* range_opt ::= */ yytestcase(yyruleno==506); - case 508: /* every_opt ::= */ yytestcase(yyruleno==508); - case 521: /* slimit_clause_opt ::= */ yytestcase(yyruleno==521); - case 525: /* limit_clause_opt ::= */ yytestcase(yyruleno==525); + case 305: /* subtable_opt ::= */ yytestcase(yyruleno==305); + case 418: /* case_when_else_opt ::= */ yytestcase(yyruleno==418); + case 448: /* from_clause_opt ::= */ yytestcase(yyruleno==448); + case 477: /* where_clause_opt ::= */ yytestcase(yyruleno==477); + case 486: /* twindow_clause_opt ::= */ yytestcase(yyruleno==486); + case 491: /* sliding_opt ::= */ yytestcase(yyruleno==491); + case 493: /* fill_opt ::= */ yytestcase(yyruleno==493); + case 505: /* having_clause_opt ::= */ yytestcase(yyruleno==505); + case 507: /* range_opt ::= */ yytestcase(yyruleno==507); + case 509: /* every_opt ::= */ yytestcase(yyruleno==509); + case 522: /* slimit_clause_opt ::= */ yytestcase(yyruleno==522); + case 526: /* limit_clause_opt ::= */ yytestcase(yyruleno==526); { yymsp[1].minor.yy74 = NULL; } break; case 248: /* like_pattern_opt ::= LIKE NK_STRING */ @@ -4519,6 +4521,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-2].minor.yy74 = yylhsminor.yy74; break; case 272: /* sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal */ + case 304: /* stream_options ::= stream_options DELETE_MARK duration_literal */ yytestcase(yyruleno==304); { ((SStreamOptions*)yymsp[-2].minor.yy74)->pDeleteMark = releaseRawExprNode(pCxt, yymsp[0].minor.yy74); yylhsminor.yy74 = yymsp[-2].minor.yy74; } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; @@ -4596,108 +4599,108 @@ static YYACTIONTYPE yy_reduce( { ((SStreamOptions*)yymsp[-2].minor.yy74)->fillHistory = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy74 = yymsp[-2].minor.yy74; } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 305: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ - case 491: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ yytestcase(yyruleno==491); - case 509: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==509); + case 306: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ + case 492: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ yytestcase(yyruleno==492); + case 510: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==510); { yymsp[-3].minor.yy74 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy74); } break; - case 306: /* cmd ::= KILL CONNECTION NK_INTEGER */ + case 307: /* cmd ::= KILL CONNECTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } break; - case 307: /* cmd ::= KILL QUERY NK_STRING */ + case 308: /* cmd ::= KILL QUERY NK_STRING */ { pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 308: /* cmd ::= KILL TRANSACTION NK_INTEGER */ + case 309: /* cmd ::= KILL TRANSACTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } break; - case 309: /* cmd ::= BALANCE VGROUP */ + case 310: /* cmd ::= BALANCE VGROUP */ { pCxt->pRootNode = createBalanceVgroupStmt(pCxt); } break; - case 310: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + case 311: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ { pCxt->pRootNode 
= createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 311: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + case 312: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ { pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy874); } break; - case 312: /* cmd ::= SPLIT VGROUP NK_INTEGER */ + case 313: /* cmd ::= SPLIT VGROUP NK_INTEGER */ { pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 313: /* dnode_list ::= DNODE NK_INTEGER */ + case 314: /* dnode_list ::= DNODE NK_INTEGER */ { yymsp[-1].minor.yy874 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } break; - case 315: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ + case 316: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ { pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy74, yymsp[0].minor.yy74); } break; - case 317: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ + case 318: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ { pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy74, yymsp[-2].minor.yy874, yymsp[0].minor.yy74); } break; - case 318: /* cmd ::= INSERT INTO full_table_name query_or_subquery */ + case 319: /* cmd ::= INSERT INTO full_table_name query_or_subquery */ { pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy74, NULL, yymsp[0].minor.yy74); } break; - case 319: /* literal ::= NK_INTEGER */ + case 320: /* literal ::= NK_INTEGER */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 320: /* literal ::= NK_FLOAT */ + case 321: /* literal ::= NK_FLOAT */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 321: /* literal ::= NK_STRING */ + case 322: /* literal ::= NK_STRING */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 322: /* literal ::= NK_BOOL */ + case 323: /* literal ::= NK_BOOL */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 323: /* literal ::= TIMESTAMP NK_STRING */ + case 324: /* literal ::= TIMESTAMP NK_STRING */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 324: /* literal ::= duration_literal */ - case 334: /* signed_literal ::= signed */ yytestcase(yyruleno==334); - case 355: /* expr_or_subquery ::= expression */ yytestcase(yyruleno==355); - case 356: /* expression ::= literal */ yytestcase(yyruleno==356); - case 357: /* expression ::= pseudo_column */ yytestcase(yyruleno==357); - case 358: /* expression ::= column_reference */ yytestcase(yyruleno==358); - case 359: /* expression ::= function_expression */ yytestcase(yyruleno==359); - case 360: /* expression ::= case_when_expression */ yytestcase(yyruleno==360); - case 390: /* function_expression ::= literal_func */ yytestcase(yyruleno==390); - case 439: /* 
boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==439); - case 443: /* boolean_primary ::= predicate */ yytestcase(yyruleno==443); - case 445: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==445); - case 446: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==446); - case 449: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==449); - case 451: /* table_reference ::= table_primary */ yytestcase(yyruleno==451); - case 452: /* table_reference ::= joined_table */ yytestcase(yyruleno==452); - case 456: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==456); - case 511: /* query_simple ::= query_specification */ yytestcase(yyruleno==511); - case 512: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==512); - case 515: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==515); - case 517: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==517); + case 325: /* literal ::= duration_literal */ + case 335: /* signed_literal ::= signed */ yytestcase(yyruleno==335); + case 356: /* expr_or_subquery ::= expression */ yytestcase(yyruleno==356); + case 357: /* expression ::= literal */ yytestcase(yyruleno==357); + case 358: /* expression ::= pseudo_column */ yytestcase(yyruleno==358); + case 359: /* expression ::= column_reference */ yytestcase(yyruleno==359); + case 360: /* expression ::= function_expression */ yytestcase(yyruleno==360); + case 361: /* expression ::= case_when_expression */ yytestcase(yyruleno==361); + case 391: /* function_expression ::= literal_func */ yytestcase(yyruleno==391); + case 440: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==440); + case 444: /* boolean_primary ::= predicate */ yytestcase(yyruleno==444); + case 446: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==446); + case 447: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==447); + case 450: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==450); + case 452: /* table_reference ::= table_primary */ yytestcase(yyruleno==452); + case 453: /* table_reference ::= joined_table */ yytestcase(yyruleno==453); + case 457: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==457); + case 512: /* query_simple ::= query_specification */ yytestcase(yyruleno==512); + case 513: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==513); + case 516: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==516); + case 518: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==518); { yylhsminor.yy74 = yymsp[0].minor.yy74; } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 325: /* literal ::= NULL */ + case 326: /* literal ::= NULL */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 326: /* literal ::= NK_QUESTION */ + case 327: /* literal ::= NK_QUESTION */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 327: /* duration_literal ::= NK_VARIABLE */ + case 328: /* duration_literal ::= NK_VARIABLE */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 328: /* signed ::= 
NK_INTEGER */ + case 329: /* signed ::= NK_INTEGER */ { yylhsminor.yy74 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 329: /* signed ::= NK_PLUS NK_INTEGER */ + case 330: /* signed ::= NK_PLUS NK_INTEGER */ { yymsp[-1].minor.yy74 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } break; - case 330: /* signed ::= NK_MINUS NK_INTEGER */ + case 331: /* signed ::= NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; @@ -4705,14 +4708,14 @@ static YYACTIONTYPE yy_reduce( } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 331: /* signed ::= NK_FLOAT */ + case 332: /* signed ::= NK_FLOAT */ { yylhsminor.yy74 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 332: /* signed ::= NK_PLUS NK_FLOAT */ + case 333: /* signed ::= NK_PLUS NK_FLOAT */ { yymsp[-1].minor.yy74 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } break; - case 333: /* signed ::= NK_MINUS NK_FLOAT */ + case 334: /* signed ::= NK_MINUS NK_FLOAT */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; @@ -4720,57 +4723,57 @@ static YYACTIONTYPE yy_reduce( } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 335: /* signed_literal ::= NK_STRING */ + case 336: /* signed_literal ::= NK_STRING */ { yylhsminor.yy74 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 336: /* signed_literal ::= NK_BOOL */ + case 337: /* signed_literal ::= NK_BOOL */ { yylhsminor.yy74 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 337: /* signed_literal ::= TIMESTAMP NK_STRING */ + case 338: /* signed_literal ::= TIMESTAMP NK_STRING */ { yymsp[-1].minor.yy74 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } break; - case 338: /* signed_literal ::= duration_literal */ - case 340: /* signed_literal ::= literal_func */ yytestcase(yyruleno==340); - case 410: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==410); - case 472: /* select_item ::= common_expression */ yytestcase(yyruleno==472); - case 482: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==482); - case 516: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==516); - case 518: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==518); - case 531: /* search_condition ::= common_expression */ yytestcase(yyruleno==531); + case 339: /* signed_literal ::= duration_literal */ + case 341: /* signed_literal ::= literal_func */ yytestcase(yyruleno==341); + case 411: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==411); + case 473: /* select_item ::= common_expression */ yytestcase(yyruleno==473); + case 483: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==483); + case 517: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==517); + case 519: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==519); + case 532: /* search_condition ::= common_expression */ yytestcase(yyruleno==532); { yylhsminor.yy74 = releaseRawExprNode(pCxt, yymsp[0].minor.yy74); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 339: /* signed_literal ::= NULL */ + case 340: /* signed_literal ::= NULL */ { yylhsminor.yy74 = createValueNode(pCxt, 
TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 341: /* signed_literal ::= NK_QUESTION */ + case 342: /* signed_literal ::= NK_QUESTION */ { yylhsminor.yy74 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 361: /* expression ::= NK_LP expression NK_RP */ - case 444: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==444); - case 530: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==530); + case 362: /* expression ::= NK_LP expression NK_RP */ + case 445: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==445); + case 531: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==531); { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy74)); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 362: /* expression ::= NK_PLUS expr_or_subquery */ + case 363: /* expression ::= NK_PLUS expr_or_subquery */ { SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy74)); } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 363: /* expression ::= NK_MINUS expr_or_subquery */ + case 364: /* expression ::= NK_MINUS expr_or_subquery */ { SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy74), NULL)); } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 364: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ + case 365: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4778,7 +4781,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 365: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ + case 366: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4786,7 +4789,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 366: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ + case 367: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4794,7 +4797,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 367: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ + case 368: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4802,7 +4805,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 368: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ + case 369: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4810,14 +4813,14 @@ 
static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 369: /* expression ::= column_reference NK_ARROW NK_STRING */ + case 370: /* expression ::= column_reference NK_ARROW NK_STRING */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); yylhsminor.yy74 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy74), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 370: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ + case 371: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4825,7 +4828,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 371: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ + case 372: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4833,70 +4836,70 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 374: /* column_reference ::= column_name */ + case 375: /* column_reference ::= column_name */ { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy317, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy317)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 375: /* column_reference ::= table_name NK_DOT column_name */ + case 376: /* column_reference ::= table_name NK_DOT column_name */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy317, &yymsp[0].minor.yy317, createColumnNode(pCxt, &yymsp[-2].minor.yy317, &yymsp[0].minor.yy317)); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 376: /* pseudo_column ::= ROWTS */ - case 377: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==377); - case 379: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==379); - case 380: /* pseudo_column ::= QEND */ yytestcase(yyruleno==380); - case 381: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==381); - case 382: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==382); - case 383: /* pseudo_column ::= WEND */ yytestcase(yyruleno==383); - case 384: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==384); - case 385: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==385); - case 386: /* pseudo_column ::= QTAGS */ yytestcase(yyruleno==386); - case 392: /* literal_func ::= NOW */ yytestcase(yyruleno==392); + case 377: /* pseudo_column ::= ROWTS */ + case 378: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==378); + case 380: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==380); + case 381: /* pseudo_column ::= QEND */ yytestcase(yyruleno==381); + case 382: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==382); + case 383: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==383); + case 384: /* pseudo_column ::= WEND */ yytestcase(yyruleno==384); + case 385: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==385); + case 386: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==386); + case 387: /* pseudo_column ::= QTAGS */ yytestcase(yyruleno==387); + case 393: /* literal_func ::= NOW */ yytestcase(yyruleno==393); { yylhsminor.yy74 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, 
NULL)); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 378: /* pseudo_column ::= table_name NK_DOT TBNAME */ + case 379: /* pseudo_column ::= table_name NK_DOT TBNAME */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy317, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy317)))); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 387: /* function_expression ::= function_name NK_LP expression_list NK_RP */ - case 388: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==388); + case 388: /* function_expression ::= function_name NK_LP expression_list NK_RP */ + case 389: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==389); { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy317, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy317, yymsp[-1].minor.yy874)); } yymsp[-3].minor.yy74 = yylhsminor.yy74; break; - case 389: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ + case 390: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy74), yymsp[-1].minor.yy898)); } yymsp[-5].minor.yy74 = yylhsminor.yy74; break; - case 391: /* literal_func ::= noarg_func NK_LP NK_RP */ + case 392: /* literal_func ::= noarg_func NK_LP NK_RP */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy317, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy317, NULL)); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 406: /* star_func_para_list ::= NK_STAR */ + case 407: /* star_func_para_list ::= NK_STAR */ { yylhsminor.yy874 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy874 = yylhsminor.yy874; break; - case 411: /* star_func_para ::= table_name NK_DOT NK_STAR */ - case 475: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==475); + case 412: /* star_func_para ::= table_name NK_DOT NK_STAR */ + case 476: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==476); { yylhsminor.yy74 = createColumnNode(pCxt, &yymsp[-2].minor.yy317, &yymsp[0].minor.yy0); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 412: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */ + case 413: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, NULL, yymsp[-2].minor.yy874, yymsp[-1].minor.yy74)); } yymsp[-3].minor.yy74 = yylhsminor.yy74; break; - case 413: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ + case 414: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy74), yymsp[-2].minor.yy874, yymsp[-1].minor.yy74)); } yymsp[-4].minor.yy74 = yylhsminor.yy74; break; - case 416: /* when_then_expr ::= WHEN common_expression THEN common_expression */ + case 417: /* when_then_expr ::= WHEN common_expression THEN common_expression */ { yymsp[-3].minor.yy74 = createWhenThenNode(pCxt, 
releaseRawExprNode(pCxt, yymsp[-2].minor.yy74), releaseRawExprNode(pCxt, yymsp[0].minor.yy74)); } break; - case 418: /* case_when_else_opt ::= ELSE common_expression */ + case 419: /* case_when_else_opt ::= ELSE common_expression */ { yymsp[-1].minor.yy74 = releaseRawExprNode(pCxt, yymsp[0].minor.yy74); } break; - case 419: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ - case 424: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==424); + case 420: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ + case 425: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==425); { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4904,7 +4907,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 420: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ + case 421: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4912,7 +4915,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-4].minor.yy74 = yylhsminor.yy74; break; - case 421: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ + case 422: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4920,71 +4923,71 @@ static YYACTIONTYPE yy_reduce( } yymsp[-5].minor.yy74 = yylhsminor.yy74; break; - case 422: /* predicate ::= expr_or_subquery IS NULL */ + case 423: /* predicate ::= expr_or_subquery IS NULL */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); yylhsminor.yy74 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy74), NULL)); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 423: /* predicate ::= expr_or_subquery IS NOT NULL */ + case 424: /* predicate ::= expr_or_subquery IS NOT NULL */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy74); yylhsminor.yy74 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy74), NULL)); } yymsp[-3].minor.yy74 = yylhsminor.yy74; break; - case 425: /* compare_op ::= NK_LT */ + case 426: /* compare_op ::= NK_LT */ { yymsp[0].minor.yy20 = OP_TYPE_LOWER_THAN; } break; - case 426: /* compare_op ::= NK_GT */ + case 427: /* compare_op ::= NK_GT */ { yymsp[0].minor.yy20 = OP_TYPE_GREATER_THAN; } break; - case 427: /* compare_op ::= NK_LE */ + case 428: /* compare_op ::= NK_LE */ { yymsp[0].minor.yy20 = OP_TYPE_LOWER_EQUAL; } break; - case 428: /* compare_op ::= NK_GE */ + case 429: /* compare_op ::= NK_GE */ { yymsp[0].minor.yy20 = OP_TYPE_GREATER_EQUAL; } break; - case 429: /* compare_op ::= NK_NE */ + case 430: /* compare_op ::= NK_NE */ { yymsp[0].minor.yy20 = OP_TYPE_NOT_EQUAL; } break; - case 430: /* compare_op ::= NK_EQ */ + case 431: /* compare_op ::= NK_EQ */ { yymsp[0].minor.yy20 = OP_TYPE_EQUAL; } break; - case 431: /* compare_op ::= LIKE */ + case 432: /* compare_op ::= LIKE */ { yymsp[0].minor.yy20 = OP_TYPE_LIKE; } break; - case 432: /* compare_op ::= NOT LIKE */ + case 433: /* compare_op ::= NOT LIKE */ { 
yymsp[-1].minor.yy20 = OP_TYPE_NOT_LIKE; } break; - case 433: /* compare_op ::= MATCH */ + case 434: /* compare_op ::= MATCH */ { yymsp[0].minor.yy20 = OP_TYPE_MATCH; } break; - case 434: /* compare_op ::= NMATCH */ + case 435: /* compare_op ::= NMATCH */ { yymsp[0].minor.yy20 = OP_TYPE_NMATCH; } break; - case 435: /* compare_op ::= CONTAINS */ + case 436: /* compare_op ::= CONTAINS */ { yymsp[0].minor.yy20 = OP_TYPE_JSON_CONTAINS; } break; - case 436: /* in_op ::= IN */ + case 437: /* in_op ::= IN */ { yymsp[0].minor.yy20 = OP_TYPE_IN; } break; - case 437: /* in_op ::= NOT IN */ + case 438: /* in_op ::= NOT IN */ { yymsp[-1].minor.yy20 = OP_TYPE_NOT_IN; } break; - case 438: /* in_predicate_value ::= NK_LP literal_list NK_RP */ + case 439: /* in_predicate_value ::= NK_LP literal_list NK_RP */ { yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy874)); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 440: /* boolean_value_expression ::= NOT boolean_primary */ + case 441: /* boolean_value_expression ::= NOT boolean_primary */ { SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy74), NULL)); } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 441: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + case 442: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -4992,7 +4995,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 442: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + case 443: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy74); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy74); @@ -5000,48 +5003,48 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 448: /* from_clause_opt ::= FROM table_reference_list */ - case 477: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==477); - case 505: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==505); + case 449: /* from_clause_opt ::= FROM table_reference_list */ + case 478: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==478); + case 506: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==506); { yymsp[-1].minor.yy74 = yymsp[0].minor.yy74; } break; - case 450: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ + case 451: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ { yylhsminor.yy74 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy74, yymsp[0].minor.yy74, NULL); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 453: /* table_primary ::= table_name alias_opt */ + case 454: /* table_primary ::= table_name alias_opt */ { yylhsminor.yy74 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy317, &yymsp[0].minor.yy317); } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 454: /* table_primary ::= db_name NK_DOT table_name alias_opt */ + case 455: /* table_primary ::= db_name NK_DOT 
table_name alias_opt */ { yylhsminor.yy74 = createRealTableNode(pCxt, &yymsp[-3].minor.yy317, &yymsp[-1].minor.yy317, &yymsp[0].minor.yy317); } yymsp[-3].minor.yy74 = yylhsminor.yy74; break; - case 455: /* table_primary ::= subquery alias_opt */ + case 456: /* table_primary ::= subquery alias_opt */ { yylhsminor.yy74 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy74), &yymsp[0].minor.yy317); } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 457: /* alias_opt ::= */ + case 458: /* alias_opt ::= */ { yymsp[1].minor.yy317 = nil_token; } break; - case 459: /* alias_opt ::= AS table_alias */ + case 460: /* alias_opt ::= AS table_alias */ { yymsp[-1].minor.yy317 = yymsp[0].minor.yy317; } break; - case 460: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - case 461: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==461); + case 461: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + case 462: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==462); { yymsp[-2].minor.yy74 = yymsp[-1].minor.yy74; } break; - case 462: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + case 463: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ { yylhsminor.yy74 = createJoinTableNode(pCxt, yymsp[-4].minor.yy630, yymsp[-5].minor.yy74, yymsp[-2].minor.yy74, yymsp[0].minor.yy74); } yymsp[-5].minor.yy74 = yylhsminor.yy74; break; - case 463: /* join_type ::= */ + case 464: /* join_type ::= */ { yymsp[1].minor.yy630 = JOIN_TYPE_INNER; } break; - case 464: /* join_type ::= INNER */ + case 465: /* join_type ::= INNER */ { yymsp[0].minor.yy630 = JOIN_TYPE_INNER; } break; - case 465: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + case 466: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ { yymsp[-11].minor.yy74 = createSelectStmt(pCxt, yymsp[-10].minor.yy335, yymsp[-9].minor.yy874, yymsp[-8].minor.yy74); yymsp[-11].minor.yy74 = addWhereClause(pCxt, yymsp[-11].minor.yy74, yymsp[-7].minor.yy74); @@ -5054,73 +5057,73 @@ static YYACTIONTYPE yy_reduce( yymsp[-11].minor.yy74 = addFillClause(pCxt, yymsp[-11].minor.yy74, yymsp[-3].minor.yy74); } break; - case 468: /* set_quantifier_opt ::= ALL */ + case 469: /* set_quantifier_opt ::= ALL */ { yymsp[0].minor.yy335 = false; } break; - case 471: /* select_item ::= NK_STAR */ + case 472: /* select_item ::= NK_STAR */ { yylhsminor.yy74 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); } yymsp[0].minor.yy74 = yylhsminor.yy74; break; - case 473: /* select_item ::= common_expression column_alias */ - case 483: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==483); + case 474: /* select_item ::= common_expression column_alias */ + case 484: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==484); { yylhsminor.yy74 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy74), &yymsp[0].minor.yy317); } yymsp[-1].minor.yy74 = yylhsminor.yy74; break; - case 474: /* select_item ::= common_expression AS column_alias */ - case 484: /* partition_item ::= expr_or_subquery AS column_alias 
*/ yytestcase(yyruleno==484); + case 475: /* select_item ::= common_expression AS column_alias */ + case 485: /* partition_item ::= expr_or_subquery AS column_alias */ yytestcase(yyruleno==485); { yylhsminor.yy74 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy74), &yymsp[0].minor.yy317); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 479: /* partition_by_clause_opt ::= PARTITION BY partition_list */ - case 501: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==501); - case 520: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==520); + case 480: /* partition_by_clause_opt ::= PARTITION BY partition_list */ + case 502: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==502); + case 521: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==521); { yymsp[-2].minor.yy874 = yymsp[0].minor.yy874; } break; - case 486: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + case 487: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ { yymsp[-5].minor.yy74 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy74), releaseRawExprNode(pCxt, yymsp[-1].minor.yy74)); } break; - case 487: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ + case 488: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ { yymsp[-3].minor.yy74 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy74)); } break; - case 488: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + case 489: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ { yymsp[-5].minor.yy74 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy74), NULL, yymsp[-1].minor.yy74, yymsp[0].minor.yy74); } break; - case 489: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + case 490: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ { yymsp[-7].minor.yy74 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy74), releaseRawExprNode(pCxt, yymsp[-3].minor.yy74), yymsp[-1].minor.yy74, yymsp[0].minor.yy74); } break; - case 493: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ + case 494: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ { yymsp[-3].minor.yy74 = createFillNode(pCxt, yymsp[-1].minor.yy828, NULL); } break; - case 494: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + case 495: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ { yymsp[-5].minor.yy74 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy874)); } break; - case 495: /* fill_mode ::= NONE */ + case 496: /* fill_mode ::= NONE */ { yymsp[0].minor.yy828 = FILL_MODE_NONE; } break; - case 496: /* fill_mode ::= PREV */ + case 497: /* fill_mode ::= PREV */ { yymsp[0].minor.yy828 = FILL_MODE_PREV; } break; - case 497: /* fill_mode ::= NULL */ + case 498: /* fill_mode ::= NULL */ { yymsp[0].minor.yy828 = FILL_MODE_NULL; } break; - case 498: /* fill_mode ::= LINEAR */ + case 499: /* fill_mode ::= LINEAR */ { yymsp[0].minor.yy828 = FILL_MODE_LINEAR; } break; - case 499: /* fill_mode ::= NEXT */ + case 500: /* fill_mode ::= NEXT */ { yymsp[0].minor.yy828 = FILL_MODE_NEXT; } break; - case 502: /* group_by_list ::= 
expr_or_subquery */ + case 503: /* group_by_list ::= expr_or_subquery */ { yylhsminor.yy874 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy74))); } yymsp[0].minor.yy874 = yylhsminor.yy874; break; - case 503: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ + case 504: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ { yylhsminor.yy874 = addNodeToList(pCxt, yymsp[-2].minor.yy874, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy74))); } yymsp[-2].minor.yy874 = yylhsminor.yy874; break; - case 507: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ + case 508: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ { yymsp[-5].minor.yy74 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy74), releaseRawExprNode(pCxt, yymsp[-1].minor.yy74)); } break; - case 510: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ + case 511: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ { yylhsminor.yy74 = addOrderByClause(pCxt, yymsp[-3].minor.yy74, yymsp[-2].minor.yy874); yylhsminor.yy74 = addSlimitClause(pCxt, yylhsminor.yy74, yymsp[-1].minor.yy74); @@ -5128,50 +5131,50 @@ static YYACTIONTYPE yy_reduce( } yymsp[-3].minor.yy74 = yylhsminor.yy74; break; - case 513: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ + case 514: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ { yylhsminor.yy74 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy74, yymsp[0].minor.yy74); } yymsp[-3].minor.yy74 = yylhsminor.yy74; break; - case 514: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ + case 515: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ { yylhsminor.yy74 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy74, yymsp[0].minor.yy74); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 522: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ - case 526: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==526); + case 523: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ + case 527: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==527); { yymsp[-1].minor.yy74 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } break; - case 523: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - case 527: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==527); + case 524: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + case 528: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==528); { yymsp[-3].minor.yy74 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 524: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - case 528: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==528); + case 525: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + case 529: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==529); { yymsp[-3].minor.yy74 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 529: /* subquery ::= NK_LP query_expression NK_RP */ + case 530: /* subquery ::= NK_LP query_expression NK_RP */ { 
yylhsminor.yy74 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy74); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 534: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ + case 535: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ { yylhsminor.yy74 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy74), yymsp[-1].minor.yy326, yymsp[0].minor.yy109); } yymsp[-2].minor.yy74 = yylhsminor.yy74; break; - case 535: /* ordering_specification_opt ::= */ + case 536: /* ordering_specification_opt ::= */ { yymsp[1].minor.yy326 = ORDER_ASC; } break; - case 536: /* ordering_specification_opt ::= ASC */ + case 537: /* ordering_specification_opt ::= ASC */ { yymsp[0].minor.yy326 = ORDER_ASC; } break; - case 537: /* ordering_specification_opt ::= DESC */ + case 538: /* ordering_specification_opt ::= DESC */ { yymsp[0].minor.yy326 = ORDER_DESC; } break; - case 538: /* null_ordering_opt ::= */ + case 539: /* null_ordering_opt ::= */ { yymsp[1].minor.yy109 = NULL_ORDER_DEFAULT; } break; - case 539: /* null_ordering_opt ::= NULLS FIRST */ + case 540: /* null_ordering_opt ::= NULLS FIRST */ { yymsp[-1].minor.yy109 = NULL_ORDER_FIRST; } break; - case 540: /* null_ordering_opt ::= NULLS LAST */ + case 541: /* null_ordering_opt ::= NULLS LAST */ { yymsp[-1].minor.yy109 = NULL_ORDER_LAST; } break; default: From e03deeaed80edcacb458be8d783e9c6937932233 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 17 Jan 2023 16:21:51 +0800 Subject: [PATCH 071/267] feat:add stream delete mark --- include/libs/stream/streamState.h | 1 + source/dnode/mnode/impl/src/mndStream.c | 2 + source/libs/executor/src/timewindowoperator.c | 6 +++ source/libs/stream/src/streamState.c | 43 +++++++++++++++++++ 4 files changed, 52 insertions(+) diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h index 8fdac0da7f..912c09a0fb 100644 --- a/include/libs/stream/streamState.h +++ b/include/libs/stream/streamState.h @@ -110,6 +110,7 @@ int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal #if 0 char* streamStateSessionDump(SStreamState* pState); +char* streamStateIntervalDump(SStreamState* pState); #endif #ifdef __cplusplus diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 83ed6eea78..38d4e5e1f7 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -295,6 +295,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, pObj->triggerParam = pCreate->maxDelay; pObj->watermark = pCreate->watermark; pObj->fillHistory = pCreate->fillHistory; + pObj->deleteMark = pCreate->deleteMark; memcpy(pObj->sourceDb, pCreate->sourceDB, TSDB_DB_FNAME_LEN); SDbObj *pSourceDb = mndAcquireDb(pMnode, pCreate->sourceDB); @@ -343,6 +344,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, .triggerType = pObj->trigger == STREAM_TRIGGER_MAX_DELAY ? 
STREAM_TRIGGER_WINDOW_CLOSE : pObj->trigger, .watermark = pObj->watermark, .igExpired = pObj->igExpired, + .deleteMark = pObj->deleteMark, }; // using ast and param to build physical plan diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 449c52d77f..c5cd64b493 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4803,6 +4803,12 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); taosHashCleanup(pUpdatedMap); +#if 0 + char* pBuf = streamStateIntervalDump(pInfo->pState); + qDebug("===stream===interval state%s", pBuf); + taosMemoryFree(pBuf); +#endif + doBuildDeleteResult(pInfo, pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); if (pInfo->pDelRes->info.rows > 0) { printDataBlock(pInfo->pDelRes, "single interval delete"); diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 1952d9ab52..b890d145e5 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -879,4 +879,47 @@ char* streamStateSessionDump(SStreamState* pState) { streamStateFreeCur(pCur); return dumpBuf; } + +char* streamStateIntervalDump(SStreamState* pState) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + pCur->number = pState->number; + if (tdbTbcOpen(pState->pTdbState->pStateDb, &pCur->pCur, NULL) < 0) { + streamStateFreeCur(pCur); + return NULL; + } + tdbTbcMoveToFirst(pCur->pCur); + + SWinKey key = {0}; + void* buf = NULL; + int32_t bufSize = 0; + int32_t code = streamStateGetKVByCur(pCur, &key, (const void **)&buf, &bufSize); + if (code != 0) { + streamStateFreeCur(pCur); + return NULL; + } + + int32_t size = 2048; + char* dumpBuf = taosMemoryCalloc(size, 1); + int64_t len = 0; + len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.ts); + // len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey); + len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId); + while (1) { + tdbTbcMoveToNext(pCur->pCur); + key = (SWinKey){0}; + code = streamStateGetKVByCur(pCur, &key, NULL, 0); + if (code != 0) { + streamStateFreeCur(pCur); + return dumpBuf; + } + len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.ts); + // len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey); + len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId); + } + streamStateFreeCur(pCur); + return dumpBuf; +} #endif From 98906bb272eac3a2ffeadf812238e209c4e2e313 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sun, 29 Jan 2023 15:24:31 +0800 Subject: [PATCH 072/267] remove assert --- source/dnode/vnode/src/tq/tq.c | 8 ++++---- source/dnode/vnode/src/tq/tqOffset.c | 3 ++- source/libs/stream/src/streamMeta.c | 1 - 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 1d5fae33eb..9b515efbb6 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -93,21 +93,21 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { taosHashSetFreeFp(pTq->pCheckInfo, (FDelete)tDeleteSTqCheckInfo); if (tqMetaOpen(pTq) < 0) { - ASSERT(0); + return NULL; } pTq->pOffsetStore = tqOffsetOpen(pTq); if (pTq->pOffsetStore == NULL) { - ASSERT(0); + return NULL; } pTq->pStreamMeta = streamMetaOpen(path, pTq, 
(FTaskExpand*)tqExpandTask, pTq->pVnode->config.vgId); if (pTq->pStreamMeta == NULL) { - ASSERT(0); + return NULL; } if (streamLoadTasks(pTq->pStreamMeta) < 0) { - ASSERT(0); + return NULL; } return pTq; diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index 9e04b70f41..338f9d6c24 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -101,7 +101,8 @@ STqOffsetStore* tqOffsetOpen(STQ* pTq) { } char* fname = tqOffsetBuildFName(pStore->pTq->path, 0); if (tqOffsetRestoreFromFile(pStore, fname) < 0) { - ASSERT(0); + taosMemoryFree(fname); + return NULL; } taosMemoryFree(fname); return pStore; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 2f991288ff..3c80059e9a 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -258,7 +258,6 @@ int32_t streamMetaAbort(SStreamMeta* pMeta) { int32_t streamLoadTasks(SStreamMeta* pMeta) { TBC* pCur = NULL; if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) { - ASSERT(0); return -1; } From 579f65115debc544654d2bfc59b1814a8d03e499 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 31 Jan 2023 15:23:21 +0800 Subject: [PATCH 073/267] add debug info --- source/dnode/vnode/src/vnd/vnodeCommit.c | 26 ++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index b94d44eb7d..cefa9e6755 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -21,12 +21,34 @@ static int vnodeEncodeInfo(const SVnodeInfo *pInfo, char **ppData); static int vnodeCommitImpl(SCommitInfo *pInfo); +#define WAIT_TIME_MILI_SEC 50 + int vnodeBegin(SVnode *pVnode) { // alloc buffer pool + int32_t nTry = 0; + taosThreadMutexLock(&pVnode->mutex); while (pVnode->pPool == NULL) { - taosThreadCondWait(&pVnode->poolNotEmpty, &pVnode->mutex); + vInfo("vgId:%d no free buffer pool on %d try, wait %d ms...", TD_VID(pVnode), ++nTry, WAIT_TIME_MILI_SEC); + + struct timeval tv; + struct timespec ts; + taosGetTimeOfDay(&tv); + ts.tv_nsec = tv.tv_usec * 1000 + WAIT_TIME_MILI_SEC * 1000000; + if (ts.tv_nsec > 999999999l) { + ts.tv_sec = tv.tv_sec + 1; + ts.tv_nsec -= 1000000000l; + } else { + ts.tv_sec = tv.tv_sec; + } + + int32_t rc = taosThreadCondTimedWait(&pVnode->poolNotEmpty, &pVnode->mutex, &ts); + if (rc && rc != ETIMEDOUT) { + terrno = TAOS_SYSTEM_ERROR(rc); + taosThreadMutexUnlock(&pVnode->mutex); + return -1; + } } pVnode->inUse = pVnode->pPool; @@ -70,7 +92,7 @@ int vnodeShouldCommit(SVnode *pVnode) { } SVCommitSched *pSched = &pVnode->commitSched; - int64_t nowMs = taosGetMonoTimestampMs(); + int64_t nowMs = taosGetMonoTimestampMs(); return (((pVnode->inUse->size > pVnode->inUse->node.size) && (pSched->commitMs + SYNC_VND_COMMIT_MIN_MS < nowMs)) || (pVnode->inUse->size > 0 && pSched->commitMs + pSched->maxWaitMs < nowMs)); From e13499b5424b39f75bbe4d44b8811b595ea5eb2d Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 1 Feb 2023 18:42:39 +0800 Subject: [PATCH 074/267] add some log --- source/dnode/vnode/src/vnd/vnodeSvr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 4cf6c4e55c..c34f5b7202 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -883,6 +883,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t 
version, void *pReq bool tbCreated = false; terrno = TSDB_CODE_SUCCESS; + vDebug("vgId:%d %s start, version:%" PRId64 ", len:%d", TD_VID(pVnode), __func__, version); + pRsp->code = 0; pSubmitReq->version = version; statis.nBatchInsert = 1; @@ -1019,7 +1021,7 @@ _exit: atomic_add_fetch_64(&pVnode->statis.nBatchInsert, statis.nBatchInsert); atomic_add_fetch_64(&pVnode->statis.nBatchInsertSuccess, statis.nBatchInsertSuccess); - vDebug("vgId:%d, submit success, index:%" PRId64, pVnode->config.vgId, version); + vDebug("vgId:%d %s done, index:%" PRId64, TD_VID(pVnode), __func__, version); return 0; } From 6a5ce7d663149c773e1a719cac33b2c4b0a9686a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 10:11:36 +0800 Subject: [PATCH 075/267] refactor: opt tag filter perf. --- include/common/tcommon.h | 6 + source/dnode/vnode/src/meta/metaQuery.c | 15 ++- source/libs/executor/src/executil.c | 163 ++++++++++++++---------- 3 files changed, 113 insertions(+), 71 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index f74795a250..ea9bf1fcfd 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -367,6 +367,12 @@ typedef struct SSortExecInfo { int32_t readBytes; // read io bytes } SSortExecInfo; +typedef struct SFilterTableInfo { + char* name; + uint64_t uid; + void* pTagVal; +} SFilterTableInfo; + // stream special block column #define START_TS_COLUMN_INDEX 0 diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 8e932e0c73..aa2c3a4b46 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1346,13 +1346,14 @@ static int32_t metaGetTableTagByUid(SMeta *pMeta, int64_t suid, int64_t uid, voi return ret; } + int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags) { const int32_t LIMIT = 128; int32_t isLock = false; int32_t sz = uidList ? 
taosArrayGetSize(uidList) : 0; for (int i = 0; i < sz; i++) { - tb_uid_t *id = taosArrayGet(uidList, i); + SFilterTableInfo *p = taosArrayGet(uidList, i); if (i % LIMIT == 0) { if (isLock) metaULock(pMeta); @@ -1361,20 +1362,20 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHas isLock = true; } - if (taosHashGet(tags, id, sizeof(tb_uid_t)) == NULL) { +// if (taosHashGet(tags, &p->uid, sizeof(tb_uid_t)) == NULL) { void *val = NULL; int32_t len = 0; - if (metaGetTableTagByUid(pMeta, suid, *id, &val, &len, false) == 0) { - taosHashPut(tags, id, sizeof(tb_uid_t), val, len); + if (metaGetTableTagByUid(pMeta, suid, p->uid, &val, &len, false) == 0) { + p->pTagVal = taosMemoryMalloc(len); + memcpy(p->pTagVal, val, len); tdbFree(val); } else { metaError("vgId:%d, failed to table tags, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid, - *id); + p->uid); } } - } +// } if (isLock) metaULock(pMeta); - return 0; } diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 015affd6c7..244fb7127c 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -44,8 +44,8 @@ typedef struct tagFilterAssist { } tagFilterAssist; static int32_t removeInvalidUid(SArray* uids, SHashObj* tags); -static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SHashObj* tags); -static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* pExistedUidList, SNode* pTagCond); +static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* pTagCond, SHashObj* tags); +static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond); static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo); @@ -416,6 +416,48 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; + + // int64_t stt = taosGetTimestampUs(); +// tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + + SArray* pRes = taosArrayInit(10, sizeof(SFilterTableInfo)); + int32_t filter = optimizeTbnameInCond(metaHandle, suid, pRes, pTagCond, tags); + if (filter == 0) { // tbname in filter is activated, do nothing and return + int32_t numOfRows = taosArrayGetSize(pRes); + code = createResultData(&type, numOfRows, &output); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + qError("failed to create result, reason:%s", tstrerror(code)); + goto end; + } + + bool* b = (bool*)output.columnData->pData; + taosArrayEnsureCap(uidList, numOfRows); + + for(int32_t i = 0; i < numOfRows; ++i) { + b[i] = true; + SFilterTableInfo* pInfo = taosArrayGet(pRes, i); + taosArrayPush(uidList, &pInfo->uid); + } + + terrno = 0; + goto end; + } else { +// if (filter == -1) { + // here we retrieve all tags from the vnode table-meta store + code = metaGetTableTags(metaHandle, suid, uidList, tags); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); + terrno = code; + goto end; + } + } + + if (suid != 0) { +// removeInvalidUid(uidList, tags); + } + pResBlock = createDataBlock(); if (pResBlock == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -428,25 +470,7 @@ static SColumnInfoData* 
getColInfoResult(void* metaHandle, int64_t suid, SArray* blockDataAppendColInfo(pResBlock, &colInfo); } - // int64_t stt = taosGetTimestampUs(); - tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - - int32_t filter = optimizeTbnameInCond(metaHandle, suid, uidList, pTagCond, tags); - if (filter == -1) { - // here we retrieve all tags from the vnode table-meta store - code = metaGetTableTags(metaHandle, suid, uidList, tags); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); - terrno = code; - goto end; - } - } - - if (suid != 0) { - removeInvalidUid(uidList, tags); - } - - int32_t size = taosArrayGetSize(uidList); + int32_t size = taosArrayGetSize(pRes); if (size == 0) { goto end; } @@ -457,27 +481,32 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* goto end; } + int32_t numOfCols = taosArrayGetSize(pResBlock->pDataBlock); + for (int32_t i = 0; i < size; i++) { - int64_t* uid = taosArrayGet(uidList, i); - for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { + SFilterTableInfo* p1 = taosArrayGet(pRes, i); + + for (int32_t j = 0; j < numOfCols; j++) { SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); if (pColInfo->info.colId == -1) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - metaGetTableNameByUid(metaHandle, *uid, str); + STR_TO_VARSTR(str, p1->name); + +// metaGetTableNameByUid(metaHandle, *uid, str); colDataAppend(pColInfo, i, str, false); #if TAG_FILTER_DEBUG qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); #endif } else { - void* pTagsVal = taosHashGet(tags, uid, sizeof(uint64_t)); - if (pTagsVal == NULL) { - continue; - } +// void* pTagsVal = taosHashGet(tags, uid, sizeof(uint64_t)); +// if (pTagsVal == NULL) { +// continue; +// } STagVal tagVal = {0}; tagVal.cid = pColInfo->info.colId; - const char* p = metaGetTableTagVal(pTagsVal, pColInfo->info.type, &tagVal); + const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { colDataAppend(pColInfo, i, p, true); @@ -485,14 +514,12 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* colDataAppend(pColInfo, i, p, false); } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { char* tmp = alloca(tagVal.nData + VARSTR_HEADER_SIZE + 1); -// char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); varDataSetLen(tmp, tagVal.nData); memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); colDataAppend(pColInfo, i, tmp, false); #if TAG_FILTER_DEBUG qDebug("tagfilter varch:%s", tmp + 2); #endif -// taosMemoryFree(tmp); } else { colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); #if TAG_FILTER_DEBUG @@ -515,14 +542,6 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* pBlockList = taosArrayInit(2, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); - SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; - code = createResultData(&type, size, &output); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - qError("failed to create result, reason:%s", tstrerror(code)); - goto end; - } - code = scalarCalculate(pTagCond, pBlockList, &output); if (code != TSDB_CODE_SUCCESS) { qError("failed to calculate scalar, reason:%s", tstrerror(code)); @@ -847,13 +866,26 @@ static int 
tableUidCompare(const void* a, const void* b) { return u1 < u2 ? -1 : 1; } -static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* cond, SHashObj* tags) { +static int32_t filterTableInfoCompare(const void* a, const void* b) { + SFilterTableInfo* p1 = (SFilterTableInfo*) a; + SFilterTableInfo* p2 = (SFilterTableInfo*) b; + + if (p1->uid == p2->uid) { + return 0; + } + + return p1->uid < p2->uid? -1:1; +} + +static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* cond, SHashObj* tags) { int32_t ret = -1; if (nodeType(cond) == QUERY_NODE_OPERATOR) { - ret = optimizeTbnameInCondImpl(metaHandle, suid, list, cond); - if (ret != -1) { - metaGetTableTagsByUids(metaHandle, suid, list, tags); - removeInvalidUid(list, tags); + ret = optimizeTbnameInCondImpl(metaHandle, pRes, cond); + if (ret == 0) { +// metaGetTableTagsByUids(metaHandle, suid, pRes, tags); +// removeInvalidUid(pRes, tags); + } else { // ret == -1 + // do nothing } } @@ -873,19 +905,19 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list SListCell* cell = pList->pHead; for (int i = 0; i < len; i++) { if (cell == NULL) break; - if (optimizeTbnameInCondImpl(metaHandle, suid, list, cell->pNode) == 0) { + if (optimizeTbnameInCondImpl(metaHandle, pRes, cell->pNode) == 0) { hasTbnameCond = true; break; } cell = cell->pNext; } - taosArraySort(list, tableUidCompare); - taosArrayRemoveDuplicate(list, tableUidCompare, NULL); + taosArraySort(pRes, filterTableInfoCompare); + taosArrayRemoveDuplicate(pRes, filterTableInfoCompare, NULL); if (hasTbnameCond) { - ret = metaGetTableTagsByUids(metaHandle, suid, list, tags); - removeInvalidUid(list, tags); + ret = metaGetTableTagsByUids(metaHandle, suid, pRes, tags); + removeInvalidUid(pRes, tags); } return ret; @@ -900,12 +932,12 @@ static int32_t removeInvalidUid(SArray* uids, SHashObj* tags) { return 0; } - SArray* validUid = taosArrayInit(size, sizeof(int64_t)); + SArray* validUid = taosArrayInit(size, sizeof(SFilterTableInfo)); for (int32_t i = 0; i < size; i++) { - int64_t* uid = taosArrayGet(uids, i); - if (taosHashGet(tags, uid, sizeof(int64_t)) != NULL) { - taosArrayPush(validUid, uid); + SFilterTableInfo* p = taosArrayGet(uids, i); + if (taosHashGet(tags, &p->uid, sizeof(int64_t)) != NULL) { + taosArrayPush(validUid, p); } } @@ -915,7 +947,7 @@ static int32_t removeInvalidUid(SArray* uids, SHashObj* tags) { } // only return uid that does not contained in pExistedUidList -static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* pExistedUidList, SNode* pTagCond) { +static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond) { if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) { return -1; } @@ -938,12 +970,13 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* SArray* pTbList = getTableNameList(pList); int32_t numOfTables = taosArrayGetSize(pTbList); SHashObj* uHash = NULL; + size_t numOfExisted = taosArrayGetSize(pExistedUidList); // len > 0 means there already have uids if (numOfExisted > 0) { uHash = taosHashInit(numOfExisted / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); for (int i = 0; i < numOfExisted; i++) { - int64_t* uid = taosArrayGet(pExistedUidList, i); - taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i)); + SFilterTableInfo* pTInfo = taosArrayGet(pExistedUidList, i); + taosHashPut(uHash, &pTInfo->uid, sizeof(uint64_t), &i, sizeof(i)); } } @@ -955,7 +988,8 @@ 
static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* ETableType tbType = TSDB_TABLE_MAX; if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) { if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) { - taosArrayPush(pExistedUidList, &uid); + SFilterTableInfo s = {.uid = uid, .name = name, .pTagVal = NULL}; + taosArrayPush(pExistedUidList, &s); } } else { taosArrayDestroy(pTbList); @@ -992,39 +1026,40 @@ static void genTagFilterDigest(const SNode* pTagCond, T_MD5_CTX* pContext) { taosMemoryFree(payload); } -static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* res, SNode* pTagCond, void* metaHandle) { +static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pRes, SNode* pTagCond, void* metaHandle) { if (pTagCond == NULL) { return TSDB_CODE_SUCCESS; } terrno = TDB_CODE_SUCCESS; - SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond); + SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, pRes, pTagCond); if (terrno != TDB_CODE_SUCCESS) { colDataDestroy(pColInfoData); taosMemoryFreeClear(pColInfoData); - taosArrayDestroy(res); + taosArrayDestroy(pRes); qError("failed to getColInfoResult, code: %s", tstrerror(terrno)); return terrno; } int32_t i = 0; - int32_t len = taosArrayGetSize(res); + int32_t len = taosArrayGetSize(pRes); if (pColInfoData != NULL) { bool* pResult = (bool*)pColInfoData->pData; - SArray* p = taosArrayInit(taosArrayGetSize(res), sizeof(uint64_t)); + SArray* p = taosArrayInit(taosArrayGetSize(pRes), sizeof(uint64_t)); while (i < len && pColInfoData) { - int64_t* uid = taosArrayGet(res, i); + int64_t* uid = taosArrayGet(pRes, i); qDebug("tagfilter get uid:%" PRId64 ", res:%d", *uid, pResult[i]); if (pResult[i]) { taosArrayPush(p, uid); } + i += 1; } - taosArraySwap(res, p); + taosArraySwap(pRes, p); taosArrayDestroy(p); } From 3897a91a952ec1e3e17bd69c7658718e69c8b360 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 10:17:39 +0800 Subject: [PATCH 076/267] refactor: do some internal refactor. 
--- source/dnode/vnode/inc/vnode.h | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 2 +- source/libs/executor/src/executil.c | 24 +++++------------------- 3 files changed, 7 insertions(+), 21 deletions(-) diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index e5e7fea1cf..ebdc49d967 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -105,7 +105,7 @@ int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid); int32_t metaGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid); int metaGetTableEntryByName(SMetaReader *pReader, const char *name); int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags); -int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags); +int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList); int32_t metaReadNext(SMetaReader *pReader); const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal); int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index aa2c3a4b46..58d0711389 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1347,7 +1347,7 @@ static int32_t metaGetTableTagByUid(SMeta *pMeta, int64_t suid, int64_t uid, voi return ret; } -int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags) { +int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) { const int32_t LIMIT = 128; int32_t isLock = false; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 244fb7127c..c55ae08439 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -419,8 +419,6 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; // int64_t stt = taosGetTimestampUs(); -// tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - SArray* pRes = taosArrayInit(10, sizeof(SFilterTableInfo)); int32_t filter = optimizeTbnameInCond(metaHandle, suid, pRes, pTagCond, tags); if (filter == 0) { // tbname in filter is activated, do nothing and return @@ -444,7 +442,6 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* terrno = 0; goto end; } else { -// if (filter == -1) { // here we retrieve all tags from the vnode table-meta store code = metaGetTableTags(metaHandle, suid, uidList, tags); if (code != TSDB_CODE_SUCCESS) { @@ -492,18 +489,11 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* if (pColInfo->info.colId == -1) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; STR_TO_VARSTR(str, p1->name); - -// metaGetTableNameByUid(metaHandle, *uid, str); colDataAppend(pColInfo, i, str, false); #if TAG_FILTER_DEBUG qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); #endif } else { -// void* pTagsVal = taosHashGet(tags, uid, sizeof(uint64_t)); -// if (pTagsVal == NULL) { -// continue; -// } - STagVal tagVal = {0}; tagVal.cid = pColInfo->info.colId; const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); @@ -879,17 +869,13 @@ static int32_t filterTableInfoCompare(const void* a, const void* b) { static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* cond, SHashObj* tags) { 
int32_t ret = -1; - if (nodeType(cond) == QUERY_NODE_OPERATOR) { + int32_t ntype = nodeType(cond); + + if (ntype == QUERY_NODE_OPERATOR) { ret = optimizeTbnameInCondImpl(metaHandle, pRes, cond); - if (ret == 0) { -// metaGetTableTagsByUids(metaHandle, suid, pRes, tags); -// removeInvalidUid(pRes, tags); - } else { // ret == -1 - // do nothing - } } - if (nodeType(cond) != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) { + if (ntype != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) { return ret; } @@ -916,7 +902,7 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes taosArrayRemoveDuplicate(pRes, filterTableInfoCompare, NULL); if (hasTbnameCond) { - ret = metaGetTableTagsByUids(metaHandle, suid, pRes, tags); + ret = metaGetTableTagsByUids(metaHandle, suid, pRes); removeInvalidUid(pRes, tags); } From 924e5bfe6caf8039cb3d3c17ffe9bdec85645ea4 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 2 Feb 2023 13:34:11 +0800 Subject: [PATCH 077/267] add debug log --- source/dnode/vnode/src/vnd/vnodeSvr.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c34f5b7202..c282afae70 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -312,10 +312,13 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp walApplyVer(pVnode->pWal, version); + vInfo("vgId:%d, push msg begin", pVnode->config.vgId); if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) { vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno)); + vInfo("vgId:%d, push msg end", pVnode->config.vgId); return -1; } + vInfo("vgId:%d, push msg end", pVnode->config.vgId); // commit if need if (needCommit) { From fd6ea6ba2fa55c6f196784de6c9d16a5625350ea Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 14:12:24 +0800 Subject: [PATCH 078/267] other: merge main. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 88 +++++++++++++++++--------- source/util/src/talgo.c | 8 +-- 2 files changed, 62 insertions(+), 34 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 9c61ccf6b4..4e4ba6c0d7 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -215,6 +215,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader); static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader); static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo); static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter); +static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order); static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); } @@ -1118,9 +1119,9 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn int32_t unDumpedRows = asc ? 
pBlock->nRow - pDumpInfo->rowIndex : pDumpInfo->rowIndex + 1; tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 - ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", + ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", uid:%"PRIu64" elapsed time:%.2f ms, %s", pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, dumpedRows, - unDumpedRows, pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr); + unDumpedRows, pBlock->minVer, pBlock->maxVer, pBlockInfo->uid, elapsedTime, pReader->idStr); return TSDB_CODE_SUCCESS; } @@ -1764,11 +1765,14 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* } if (minKey == k.ts) { + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + if (pSchema == NULL) { + return terrno; + } if (init) { - tRowMerge(&merge, pRow); + tRowMergerAdd(&merge, pRow, pSchema); } else { init = true; - STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); int32_t code = tRowMergerInit(&merge, pRow, pSchema); if (code != TSDB_CODE_SUCCESS) { return code; @@ -2189,17 +2193,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea if (code == TSDB_CODE_SUCCESS) { pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL); - tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + tsdbDebug("%p uid:%" PRIu64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 " %s", pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr); } else { - tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid, + tsdbError("%p uid:%" PRIu64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid, tstrerror(code), pReader->idStr); return code; } } } else { - tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); } STbData* di = NULL; @@ -2210,17 +2214,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea if (code == TSDB_CODE_SUCCESS) { pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL); - tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + tsdbDebug("%p uid:%" PRIu64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 " %s", pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr); } else { - tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid, + tsdbError("%p uid:%" PRIu64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid, tstrerror(code), pReader->idStr); return code; } } } else { - tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); } initDelSkylineIterator(pBlockScanInfo, pReader, d, di); @@ -2529,6 +2533,14 @@ _end: void setComposedBlockFlag(STsdbReader* pReader, bool composed) { 
pReader->status.composedDataBlock = composed; } +int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order) { + if (pDelSkyline == NULL) { + return 0; + } + + return ASCENDING_TRAVERSE(order) ? 0 : taosArrayGetSize(pDelSkyline) - 1; +} + int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData, STbData* piMemTbData) { if (pBlockScanInfo->delSkyline != NULL) { @@ -2546,7 +2558,6 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* if (pIdx != NULL) { code = tsdbReadDelData(pReader->pDelFReader, pIdx, pDelData); } - if (code != TSDB_CODE_SUCCESS) { goto _err; } @@ -2575,11 +2586,13 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* } taosArrayDestroy(pDelData); - pBlockScanInfo->iter.index = - ASCENDING_TRAVERSE(pReader->order) ? 0 : taosArrayGetSize(pBlockScanInfo->delSkyline) - 1; - pBlockScanInfo->iiter.index = pBlockScanInfo->iter.index; - pBlockScanInfo->fileDelIndex = pBlockScanInfo->iter.index; - pBlockScanInfo->lastBlockDelIndex = pBlockScanInfo->iter.index; + int32_t index = getInitialDelIndex(pBlockScanInfo->delSkyline, pReader->order); + + pBlockScanInfo->iter.index = index; + pBlockScanInfo->iiter.index = index; + pBlockScanInfo->fileDelIndex = index; + pBlockScanInfo->lastBlockDelIndex = index; + return code; _err: @@ -2679,7 +2692,7 @@ static int32_t uidComparFunc(const void* p1, const void* p2) { } } -static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) { +static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) { int32_t index = 0; int32_t total = taosHashGetSize(pStatus->pTableMap); @@ -2693,7 +2706,21 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc); } -static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) { +// reset the last del file index +static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t order) { + void* p = taosHashIterate(pStatus->pTableMap, NULL); + while (p != NULL) { + STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; + + // reset the last del file index + pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order); + p = taosHashIterate(pStatus->pTableMap, p); + } +} + +static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) { + SReaderStatus* pStatus = &pReader->status; + int32_t total = taosHashGetSize(pStatus->pTableMap); if (total == 0) { return TSDB_CODE_SUCCESS; @@ -2706,7 +2733,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt return TSDB_CODE_OUT_OF_MEMORY; } - extractOrderedTableUidList(pOrderCheckInfo, pStatus); + extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); uint64_t uid = pOrderCheckInfo->tableUidList[0]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); } else { @@ -2723,7 +2750,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt } pOrderCheckInfo->tableUidList = p; - extractOrderedTableUidList(pOrderCheckInfo, pStatus); + extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); uid = pOrderCheckInfo->tableUidList[0]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); @@ -2743,11 +2770,7 @@ static bool 
moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - if (pStatus->pTableIter == NULL) { - return false; - } - - return true; + return (pStatus->pTableIter != NULL); } static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { @@ -2755,7 +2778,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader; SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo; - int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pStatus); + int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pReader); if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) { return code; } @@ -2820,6 +2843,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader; + ASSERT(pBlockInfo != NULL); + if (pBlockInfo != NULL) { pScanInfo = *(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); @@ -2840,7 +2865,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { initLastBlockReader(pLastBlockReader, pScanInfo, pReader); TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader); - if (pBlockInfo == NULL) { // build data block from last data file + /*if (pBlockInfo == NULL) { // build data block from last data file SBlockData* pBData = &pReader->status.fileBlockData; tBlockDataReset(pBData); @@ -2872,7 +2897,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows, el, pReader->idStr); } - } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) { + } else*/ if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) { code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid); if (code != TSDB_CODE_SUCCESS) { return code; @@ -3043,6 +3068,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have data files, let's start check the last block file if exists if (pBlockIter->numOfBlocks == 0) { + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } } @@ -3074,6 +3100,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // data blocks in current file are exhausted, let's try the next file now tBlockDataReset(&pReader->status.fileBlockData); resetDataBlockIterator(pBlockIter, pReader->order); + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } else { code = initForFirstBlockInFile(pReader, pBlockIter); @@ -3085,6 +3112,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have blocks, let's start check the last block file if (pBlockIter->numOfBlocks == 0) { + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } } @@ -3890,7 +3918,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL if (pReader->type == TIMEWINDOW_RANGE_CONTAINED) { code = doOpenReaderImpl(pReader); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _err; } } else { STsdbReader* pPrevReader = pReader->innerReader[0]; @@ -3911,7 +3939,7 @@ int32_t 
tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL code = doOpenReaderImpl(pPrevReader); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _err; } } } diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c index a06aac6afe..e373850b3c 100644 --- a/source/util/src/talgo.c +++ b/source/util/src/talgo.c @@ -28,14 +28,14 @@ static void median(void *src, int64_t size, int64_t s, int64_t e, const void *pa void *buf) { int32_t mid = ((int32_t)(e - s) >> 1u) + (int32_t)s; - if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) == 1) { + if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) > 0) { doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf); } - if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) == 1) { + if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) > 0) { doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf); doswap(elePtrAt(src, size, mid), elePtrAt(src, size, e), size, buf); - } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) == 1) { + } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) > 0) { doswap(elePtrAt(src, size, s), elePtrAt(src, size, e), size, buf); } @@ -47,7 +47,7 @@ static void tInsertSort(void *src, int64_t size, int32_t s, int32_t e, const voi void *buf) { for (int32_t i = s + 1; i <= e; ++i) { for (int32_t j = i; j > s; --j) { - if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) == -1) { + if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) < 0) { doswap(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), size, buf); } else { break; From edca89b73158e556a21467696773fd679bc61ba5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 15:15:46 +0800 Subject: [PATCH 079/267] refactor: opt query perf. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 137 ++++++++++++++++--------- 1 file changed, 88 insertions(+), 49 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 4e4ba6c0d7..1dc36f1304 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -130,17 +130,17 @@ typedef struct SFileBlockDumpInfo { bool allDumped; } SFileBlockDumpInfo; -typedef struct SUidOrderCheckInfo { +typedef struct SUidOrderedList { uint64_t* tableUidList; // access table uid list in uid ascending order list int32_t currentIndex; // index in table uid list -} SUidOrderCheckInfo; +} SUidOrderedList; typedef struct SReaderStatus { bool loadFromFile; // check file stage bool composedDataBlock; // the returned data block is a composed block or not SHashObj* pTableMap; // SHash STableBlockScanInfo** pTableIter; // table iterator used in building in-memory buffer data blocks. - SUidOrderCheckInfo uidCheckInfo; // check all table in uid order + SUidOrderedList uidCheckInfo; // check all table in uid order SFileBlockDumpInfo fBlockDumpInfo; SDFileSet* pCurrentFileset; // current opened file set SBlockData fileBlockData; @@ -311,6 +311,16 @@ static void* getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index) { return (*pBucket) + (index % pBuf->numPerBucket) * sizeof(STableBlockScanInfo); } +static int32_t uidComparFunc(const void* p1, const void* p2) { + uint64_t pu1 = *(uint64_t*)p1; + uint64_t pu2 = *(uint64_t*)p2; + if (pu1 == pu2) { + return 0; + } else { + return (pu1 < pu2) ? 
-1 : 1; + } +} + // NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList, int32_t numOfTables) { // allocate buffer in order to load data blocks from file @@ -324,9 +334,20 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf int64_t st = taosGetTimestampUs(); initBlockScanInfoBuf(pBuf, numOfTables); + SUidOrderedList* pOrderedCheckInfo = &pTsdbReader->status.uidCheckInfo; + + pOrderedCheckInfo->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t)); + if (pOrderedCheckInfo->tableUidList == NULL) { + return NULL; + } + pOrderedCheckInfo->currentIndex = 0; + for (int32_t j = 0; j < numOfTables; ++j) { STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(pBuf, j); + pScanInfo->uid = idList[j].uid; + pOrderedCheckInfo->tableUidList[j] = idList[j].uid; + if (ASCENDING_TRAVERSE(pTsdbReader->order)) { int64_t skey = pTsdbReader->window.skey; pScanInfo->lastKey = (skey > INT64_MIN) ? (skey - 1) : skey; @@ -340,6 +361,8 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf pScanInfo->lastKey, pTsdbReader->idStr); } + taosSort(pOrderedCheckInfo->tableUidList, numOfTables, sizeof(uint64_t), uidComparFunc); + pTsdbReader->cost.createScanInfoList = (taosGetTimestampUs() - st) / 1000.0; tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, elapsed time:%.2f ms, %s", pTsdbReader, numOfTables, (sizeof(STableBlockScanInfo) * numOfTables) / 1024.0, pTsdbReader->cost.createScanInfoList, @@ -663,28 +686,42 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, int64_t et1 = taosGetTimestampUs(); SBlockIdx* pBlockIdx = NULL; - for (int32_t i = 0; i < num; ++i) { + SUidOrderedList* pList = &pReader->status.uidCheckInfo; + + int32_t i = 0, j = 0; + while(i < num && j < numOfTables) { pBlockIdx = (SBlockIdx*)taosArrayGet(aBlockIdx, i); - - // uid check if (pBlockIdx->suid != pReader->suid) { + i += 1; continue; } - // this block belongs to a table that is not queried. - void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t)); - if (p == NULL) { + if (pBlockIdx->uid < pList->tableUidList[j]) { + i += 1; continue; } - STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; - if (pScanInfo->pBlockList == NULL) { - pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex)); + if (pBlockIdx->uid == pList->tableUidList[j]) { + i += 1; + j += 1; + + // this block belongs to a table that is not queried. 
+ void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t)); + if (p == NULL) { + tsdbError("failed to locate the tableBlockScan Info in hashmap, uid:%"PRIu64", %s", pBlockIdx->uid, pReader->idStr); + return TSDB_CODE_APP_ERROR; + } + + STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; + if (pScanInfo->pBlockList == NULL) { + pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex)); + } + + taosArrayPush(pIndexList, pBlockIdx); } - taosArrayPush(pIndexList, pBlockIdx); - if (taosArrayGetSize(pIndexList) == numOfTables) { - break; + if (pBlockIdx->uid > pList->tableUidList[j]) { + j += 1; } } @@ -2682,17 +2719,7 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) { return TSDB_CODE_SUCCESS; } -static int32_t uidComparFunc(const void* p1, const void* p2) { - uint64_t pu1 = *(uint64_t*)p1; - uint64_t pu2 = *(uint64_t*)p2; - if (pu1 == pu2) { - return 0; - } else { - return (pu1 < pu2) ? -1 : 1; - } -} - -static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) { +static void extractOrderedTableUidList(SUidOrderedList* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) { int32_t index = 0; int32_t total = taosHashGetSize(pStatus->pTableMap); @@ -2718,7 +2745,7 @@ static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t orde } } -static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) { +static int32_t initOrderCheckInfo(SUidOrderedList* pOrderCheckInfo, STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; int32_t total = taosHashGetSize(pStatus->pTableMap); @@ -2742,7 +2769,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbRead uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - // the tableMap has already updated + // the tableMap has already updated, let's also update the order list if (pStatus->pTableIter == NULL) { void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t)); if (p == NULL) { @@ -2761,7 +2788,15 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbRead return TSDB_CODE_SUCCESS; } -static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus* pStatus) { +static void resetTableListIndex(SReaderStatus *pStatus) { + SUidOrderedList* pList = &pStatus->uidCheckInfo; + + pList->currentIndex = 0; + uint64_t uid = pList->tableUidList[0]; + pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); +} + +static bool moveToNextTable(SUidOrderedList* pOrderedCheckInfo, SReaderStatus* pStatus) { pOrderedCheckInfo->currentIndex += 1; if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) { pStatus->pTableIter = NULL; @@ -2777,10 +2812,9 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader; - SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo; - int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pReader); - if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) { - return code; + SUidOrderedList* pOrderedCheckInfo = &pStatus->uidCheckInfo; + if (taosHashGetSize(pStatus->pTableMap) == 0) { + return TSDB_CODE_SUCCESS; } SSDataBlock* pResBlock = pReader->pResBlock; @@ -3028,6 +3062,7 @@ static 
int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl } else { // no block data, only last block exists tBlockDataReset(&pReader->status.fileBlockData); resetDataBlockIterator(pBlockIter, pReader->order); + resetTableListIndex(&pReader->status); } // set the correct start position according to the query time window @@ -3069,6 +3104,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have data files, let's start check the last block file if exists if (pBlockIter->numOfBlocks == 0) { resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); + resetTableListIndex(&pReader->status); goto _begin; } } @@ -3101,6 +3137,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { tBlockDataReset(&pReader->status.fileBlockData); resetDataBlockIterator(pBlockIter, pReader->order); resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); + resetTableListIndex(&pReader->status); goto _begin; } else { code = initForFirstBlockInFile(pReader, pBlockIter); @@ -3113,6 +3150,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have blocks, let's start check the last block file if (pBlockIter->numOfBlocks == 0) { resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); + resetTableListIndex(&pReader->status); goto _begin; } } @@ -3774,11 +3812,15 @@ int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t n ASSERT(size >= num); taosHashClear(pReader->status.pTableMap); + SUidOrderedList* pUidList = &pReader->status.uidCheckInfo; + pUidList->currentIndex = 0; STableKeyInfo* pList = (STableKeyInfo*)pTableList; for (int32_t i = 0; i < num; ++i) { STableBlockScanInfo* pInfo = getPosInBlockInfoBuf(&pReader->blockInfoBuf, i); pInfo->uid = pList[i].uid; + pUidList->tableUidList[i] = pList[i].uid; + taosHashPut(pReader->status.pTableMap, &pInfo->uid, sizeof(uint64_t), &pInfo, POINTER_BYTES); } @@ -3825,13 +3867,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL pCond->twindows.ekey -= 1; } - int32_t capacity = 0; - if (pResBlock == NULL) { - capacity = 4096; - } else { - capacity = pResBlock->info.capacity; - } - + int32_t capacity = (pResBlock == NULL)? 
4096:pResBlock->info.capacity; int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr); if (code != TSDB_CODE_SUCCESS) { goto _err; @@ -4304,12 +4340,14 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { return TSDB_CODE_SUCCESS; } - SDataBlockIter* pBlockIter = &pReader->status.blockIter; + SReaderStatus* pStatus = &pReader->status; + + SDataBlockIter* pBlockIter = &pStatus->blockIter; pReader->order = pCond->order; pReader->type = TIMEWINDOW_RANGE_CONTAINED; - pReader->status.loadFromFile = true; - pReader->status.pTableIter = NULL; + pStatus->loadFromFile = true; + pStatus->pTableIter = NULL; pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows); // allocate buffer in order to load data blocks from file @@ -4318,19 +4356,20 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID; tsdbDataFReaderClose(&pReader->pFileReader); - int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap); + int32_t numOfTables = taosHashGetSize(pStatus->pTableMap); - initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); + initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); resetDataBlockIterator(pBlockIter, pReader->order); + resetTableListIndex(&pReader->status); int64_t ts = ASCENDING_TRAVERSE(pReader->order) ? pReader->window.skey - 1 : pReader->window.ekey + 1; - resetAllDataBlockScanInfo(pReader->status.pTableMap, ts); + resetAllDataBlockScanInfo(pStatus->pTableMap, ts); int32_t code = 0; // no data in files, let's try buffer in memory - if (pReader->status.fileIter.numOfFiles == 0) { - pReader->status.loadFromFile = false; + if (pStatus->fileIter.numOfFiles == 0) { + pStatus->loadFromFile = false; } else { code = initForFirstBlockInFile(pReader, pBlockIter); if (code != TSDB_CODE_SUCCESS) { @@ -4408,7 +4447,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa hasNext = blockIteratorNext(&pStatus->blockIter, pReader->idStr); } else { code = initForFirstBlockInFile(pReader, pBlockIter); - if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) { + if ((code != TSDB_CODE_SUCCESS) || (pStatus->loadFromFile == false)) { break; } From 8cd362c8d0fc73e37315a4c9739e247ba9bcfa25 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 2 Feb 2023 16:15:12 +0800 Subject: [PATCH 080/267] fix log --- source/dnode/vnode/src/vnd/vnodeSvr.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c282afae70..09224a883e 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -312,10 +312,8 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp walApplyVer(pVnode->pWal, version); - vInfo("vgId:%d, push msg begin", pVnode->config.vgId); if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) { vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno)); - vInfo("vgId:%d, push msg end", pVnode->config.vgId); return -1; } vInfo("vgId:%d, push msg end", pVnode->config.vgId); @@ -886,8 +884,6 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq bool tbCreated = false; terrno = TSDB_CODE_SUCCESS; - vDebug("vgId:%d %s start, version:%" PRId64 ", len:%d", TD_VID(pVnode), __func__, version); - pRsp->code = 0; 
pSubmitReq->version = version; statis.nBatchInsert = 1; From e9490a323563db01b7c9a1f0af6876d7fd05afa3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 17:38:21 +0800 Subject: [PATCH 081/267] refactor(query): disable tag filter cache by default. --- include/os/osEnv.h | 1 + source/common/src/tglobal.c | 3 + source/dnode/vnode/src/tsdb/tsdbRead.c | 84 +++++++++++++------------- source/libs/executor/src/executil.c | 24 +++++--- 4 files changed, 61 insertions(+), 51 deletions(-) diff --git a/include/os/osEnv.h b/include/os/osEnv.h index 533d989ffc..bc65da47a9 100644 --- a/include/os/osEnv.h +++ b/include/os/osEnv.h @@ -41,6 +41,7 @@ extern char tsSSE42Enable; extern char tsAVXEnable; extern char tsAVX2Enable; extern char tsFMAEnable; +extern char tsTagFilterCache; extern char configDir[]; extern char tsDataDir[]; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 037c8a4541..d4849650e6 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -140,6 +140,7 @@ int32_t tsMaxMemUsedByInsert = 1024; float tsSelectivityRatio = 1.0; int32_t tsTagFilterResCacheSize = 1024 * 10; +char tsTagFilterCache = 0; // the maximum allowed query buffer size during query processing for each data node. // -1 no limit (default) @@ -351,6 +352,7 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "AVX2", tsAVX2Enable, 0) != 0) return -1; if (cfgAddBool(pCfg, "FMA", tsFMAEnable, 0) != 0) return -1; if (cfgAddBool(pCfg, "SIMD-builtins", tsSIMDBuiltins, 0) != 0) return -1; + if (cfgAddBool(pCfg, "tagFilterCache", tsTagFilterCache, 0) != 0) return -1; if (cfgAddInt64(pCfg, "openMax", tsOpenMax, 0, INT64_MAX, 1) != 0) return -1; if (cfgAddInt64(pCfg, "streamMax", tsStreamMax, 0, INT64_MAX, 1) != 0) return -1; @@ -731,6 +733,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64; tsSIMDBuiltins = (bool)cfgGetItem(pCfg, "SIMD-builtins")->bval; + tsTagFilterCache = (bool)cfgGetItem(pCfg, "tagFilterCache")->bval; tsEnableMonitor = cfgGetItem(pCfg, "monitor")->bval; tsMonitorInterval = cfgGetItem(pCfg, "monitorInterval")->i32; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 1dc36f1304..d0ab2e2806 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2745,48 +2745,48 @@ static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t orde } } -static int32_t initOrderCheckInfo(SUidOrderedList* pOrderCheckInfo, STsdbReader* pReader) { - SReaderStatus* pStatus = &pReader->status; - - int32_t total = taosHashGetSize(pStatus->pTableMap); - if (total == 0) { - return TSDB_CODE_SUCCESS; - } - - if (pOrderCheckInfo->tableUidList == NULL) { - pOrderCheckInfo->currentIndex = 0; - pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t)); - if (pOrderCheckInfo->tableUidList == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); - uint64_t uid = pOrderCheckInfo->tableUidList[0]; - pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - } else { - if (pStatus->pTableIter == NULL) { // it is the last block of a new file - pOrderCheckInfo->currentIndex = 0; - uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex]; - pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - - // the tableMap has already updated, let's also update the order 
list - if (pStatus->pTableIter == NULL) { - void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t)); - if (p == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - pOrderCheckInfo->tableUidList = p; - extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); - - uid = pOrderCheckInfo->tableUidList[0]; - pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - } - } - } - - return TSDB_CODE_SUCCESS; -} +//static int32_t initOrderCheckInfo(SUidOrderedList* pOrderCheckInfo, STsdbReader* pReader) { +// SReaderStatus* pStatus = &pReader->status; +// +// int32_t total = taosHashGetSize(pStatus->pTableMap); +// if (total == 0) { +// return TSDB_CODE_SUCCESS; +// } +// +// if (pOrderCheckInfo->tableUidList == NULL) { +// pOrderCheckInfo->currentIndex = 0; +// pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t)); +// if (pOrderCheckInfo->tableUidList == NULL) { +// return TSDB_CODE_OUT_OF_MEMORY; +// } +// +// extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); +// uint64_t uid = pOrderCheckInfo->tableUidList[0]; +// pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); +// } else { +// if (pStatus->pTableIter == NULL) { // it is the last block of a new file +// pOrderCheckInfo->currentIndex = 0; +// uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex]; +// pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); +// +// // the tableMap has already updated, let's also update the order list +// if (pStatus->pTableIter == NULL) { +// void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t)); +// if (p == NULL) { +// return TSDB_CODE_OUT_OF_MEMORY; +// } +// +// pOrderCheckInfo->tableUidList = p; +// extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); +// +// uid = pOrderCheckInfo->tableUidList[0]; +// pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); +// } +// } +// } +// +// return TSDB_CODE_SUCCESS; +//} static void resetTableListIndex(SReaderStatus *pStatus) { SUidOrderedList* pList = &pStatus->uidCheckInfo; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index e65708326e..d685ba2062 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1074,15 +1074,19 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, return code; } } else { - // try to retrieve the result from meta cache - T_MD5_CTX context = {0}; - genTagFilterDigest(pTagCond, &context); - bool acquired = false; - metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), res, &acquired); - if (acquired) { - qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(res)); - goto _end; + T_MD5_CTX context = {0}; + + if (tsTagFilterCache) { + // try to retrieve the result from meta cache + genTagFilterDigest(pTagCond, &context); + + bool acquired = false; + metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), res, &acquired); + if (acquired) { + qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(res)); + goto _end; + } } if (!pTagCond) { // no tag filter condition exists, let's fetch all tables of this super table @@ -1118,7 +1122,9 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, memcpy(pPayload + sizeof(int32_t), taosArrayGet(res, 
0), numOfTables * sizeof(uint64_t)); } - metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); + if (tsTagFilterCache) { + metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); + } } _end: From 40b771807801b7b40e00b70d150c4e5172f02dfd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 17:45:21 +0800 Subject: [PATCH 082/267] refactor(query): disable file size check when reading data files. --- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index cd8454ade0..1addb60997 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -47,17 +47,17 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd taosMemoryFree(pFD); goto _exit; } - - if (taosStatFile(path, &pFD->szFile, NULL) < 0) { - code = TAOS_SYSTEM_ERROR(errno); - taosMemoryFree(pFD->pBuf); - taosCloseFile(&pFD->pFD); - taosMemoryFree(pFD); - goto _exit; + if (flag == TD_FILE_READ) { + if (taosStatFile(path, &pFD->szFile, NULL) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD->pBuf); + taosCloseFile(&pFD->pFD); + taosMemoryFree(pFD); + goto _exit; + } + ASSERT(pFD->szFile % szPage == 0); + pFD->szFile = pFD->szFile / szPage; } - - ASSERT(pFD->szFile % szPage == 0); - pFD->szFile = pFD->szFile / szPage; *ppFD = pFD; _exit: @@ -105,7 +105,7 @@ _exit: static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno) { int32_t code = 0; - ASSERT(pgno <= pFD->szFile); + // ASSERT(pgno <= pFD->szFile); // seek int64_t offset = PAGE_OFFSET(pgno, pFD->szPage); @@ -177,7 +177,7 @@ static int32_t tsdbReadFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t int32_t szPgCont = PAGE_CONTENT_SIZE(pFD->szPage); int64_t bOffset = fOffset % pFD->szPage; - ASSERT(pgno && pgno <= pFD->szFile); + // ASSERT(pgno && pgno <= pFD->szFile); ASSERT(bOffset < szPgCont); while (n < size) { From 7168f90c51c808e83901c5a91cae899b5a97c2e6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 17:49:07 +0800 Subject: [PATCH 083/267] refactor(query): disable file size check when reading data files. --- source/dnode/vnode/src/tsdb/tsdbReaderWriter.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 1addb60997..50fd9d7aa7 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -47,7 +47,9 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd taosMemoryFree(pFD); goto _exit; } - if (flag == TD_FILE_READ) { + + // not check file size when reading data files. + if (flag != TD_FILE_READ) { if (taosStatFile(path, &pFD->szFile, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); @@ -55,9 +57,11 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd taosMemoryFree(pFD); goto _exit; } + ASSERT(pFD->szFile % szPage == 0); pFD->szFile = pFD->szFile / szPage; } + *ppFD = pFD; _exit: From baaa57567eeac08d0f15d2c233655e4894956f99 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 3 Feb 2023 09:13:13 +0800 Subject: [PATCH 084/267] refactor: do some internal refactor. 
--- source/dnode/vnode/src/tsdb/tsdbRead.c | 57 -------------------------- 1 file changed, 57 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index d0ab2e2806..432f306a93 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2719,20 +2719,6 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) { return TSDB_CODE_SUCCESS; } -static void extractOrderedTableUidList(SUidOrderedList* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) { - int32_t index = 0; - int32_t total = taosHashGetSize(pStatus->pTableMap); - - void* p = taosHashIterate(pStatus->pTableMap, NULL); - while (p != NULL) { - STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; - pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid; - p = taosHashIterate(pStatus->pTableMap, p); - } - - taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc); -} - // reset the last del file index static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t order) { void* p = taosHashIterate(pStatus->pTableMap, NULL); @@ -2745,49 +2731,6 @@ static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t orde } } -//static int32_t initOrderCheckInfo(SUidOrderedList* pOrderCheckInfo, STsdbReader* pReader) { -// SReaderStatus* pStatus = &pReader->status; -// -// int32_t total = taosHashGetSize(pStatus->pTableMap); -// if (total == 0) { -// return TSDB_CODE_SUCCESS; -// } -// -// if (pOrderCheckInfo->tableUidList == NULL) { -// pOrderCheckInfo->currentIndex = 0; -// pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t)); -// if (pOrderCheckInfo->tableUidList == NULL) { -// return TSDB_CODE_OUT_OF_MEMORY; -// } -// -// extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); -// uint64_t uid = pOrderCheckInfo->tableUidList[0]; -// pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); -// } else { -// if (pStatus->pTableIter == NULL) { // it is the last block of a new file -// pOrderCheckInfo->currentIndex = 0; -// uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex]; -// pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); -// -// // the tableMap has already updated, let's also update the order list -// if (pStatus->pTableIter == NULL) { -// void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t)); -// if (p == NULL) { -// return TSDB_CODE_OUT_OF_MEMORY; -// } -// -// pOrderCheckInfo->tableUidList = p; -// extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); -// -// uid = pOrderCheckInfo->tableUidList[0]; -// pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); -// } -// } -// } -// -// return TSDB_CODE_SUCCESS; -//} - static void resetTableListIndex(SReaderStatus *pStatus) { SUidOrderedList* pList = &pStatus->uidCheckInfo; From 6248804b28ceff6e72c4c5d4f58d73b95d047ddf Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 2 Feb 2023 17:26:49 +0800 Subject: [PATCH 085/267] fix log --- source/dnode/vnode/src/vnd/vnodeSvr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 09224a883e..b1a84c572a 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -312,11 +312,13 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp 
walApplyVer(pVnode->pWal, version); + /*vInfo("vgId:%d, push msg begin", pVnode->config.vgId);*/ if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) { + /*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/ vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } - vInfo("vgId:%d, push msg end", pVnode->config.vgId); + /*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/ // commit if need if (needCommit) { From e29ca4f5d80689c202b0697fcc045e6dc4700a79 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 3 Feb 2023 10:04:15 +0800 Subject: [PATCH 086/267] refactor(query): use real number of stt instead of default value. --- source/dnode/vnode/src/inc/tsdb.h | 3 ++- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 3 ++- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 14 ++++++++------ source/dnode/vnode/src/tsdb/tsdbRead.c | 7 +++++-- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index efa3af5cbd..199529da32 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -645,6 +645,7 @@ typedef struct SSttBlockLoadInfo { int16_t *colIds; int32_t numOfCols; bool sttBlockLoaded; + int32_t numOfStt; // keep the last access position, this position may be used to reduce the binary times for // starting last block data for a new table @@ -710,7 +711,7 @@ bool tMergeTreeNext(SMergeTree *pMTree); TSDBROW tMergeTreeGetRow(SMergeTree *pMTree); void tMergeTreeClose(SMergeTree *pMTree); -SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols); +SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfStt); void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo); void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el); void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo); diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index a837543e62..5b8393e811 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -138,7 +138,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, } } - p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0); + int32_t numOfStt = ((SVnode*)pVnode)->config.sttTrigger; + p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0, numOfStt); if (p->pLoadInfo == NULL) { tsdbCacherowsReaderClose(p); return TSDB_CODE_OUT_OF_MEMORY; diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index af1a42d018..bf330396a2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -31,14 +31,16 @@ struct SLDataIter { SSttBlockLoadInfo *pBlockLoadInfo; }; -SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols) { - SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(TSDB_MAX_STT_TRIGGER, sizeof(SSttBlockLoadInfo)); +SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfSttTrigger) { + SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(numOfSttTrigger, sizeof(SSttBlockLoadInfo)); if (pLoadInfo == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + pLoadInfo->numOfStt = 
numOfSttTrigger; + + for (int32_t i = 0; i < numOfSttTrigger; ++i) { pLoadInfo[i].blockIndex[0] = -1; pLoadInfo[i].blockIndex[1] = -1; pLoadInfo[i].currentLoadBlockIndex = 1; @@ -63,7 +65,7 @@ SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, } void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) { - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) { pLoadInfo[i].currentLoadBlockIndex = 1; pLoadInfo[i].blockIndex[0] = -1; pLoadInfo[i].blockIndex[1] = -1; @@ -77,14 +79,14 @@ void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) { } void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el) { - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) { *el += pLoadInfo[i].elapsedTime; *blocks += pLoadInfo[i].loadBlocks; } } void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) { - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) { pLoadInfo[i].currentLoadBlockIndex = 1; pLoadInfo[i].blockIndex[0] = -1; pLoadInfo[i].blockIndex[1] = -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 432f306a93..dbeeb13c6b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -482,8 +482,11 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb if (pLReader->pInfo == NULL) { // here we ignore the first column, which is always be the primary timestamp column + SBlockLoadSuppInfo* pInfo = &pReader->suppInfo; + + int32_t numOfStt = pReader->pTsdb->pVnode->config.sttTrigger; pLReader->pInfo = - tCreateLastBlockLoadInfo(pReader->pSchema, &pReader->suppInfo.colId[1], pReader->suppInfo.numOfCols - 1); + tCreateLastBlockLoadInfo(pReader->pSchema, &pInfo->colId[1], pInfo->numOfCols - 1, numOfStt); if (pLReader->pInfo == NULL) { tsdbDebug("init fileset iterator failed, code:%s %s", tstrerror(terrno), pReader->idStr); return terrno; @@ -655,7 +658,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd goto _end; } - setColumnIdSlotList(&pReader->suppInfo, pCond->colList, pCond->pSlotList, pCond->numOfCols); + setColumnIdSlotList(pSup, pCond->colList, pCond->pSlotList, pCond->numOfCols); *ppReader = pReader; return code; From 645c45a274b37e5a2239ba6a2da2fe4c8068e73f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 3 Feb 2023 10:28:12 +0800 Subject: [PATCH 087/267] refactor(query): opt perf by remove some functions. 
--- include/libs/function/function.h | 1 + source/libs/executor/src/executil.c | 1 + source/libs/executor/src/executorimpl.c | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 16d270118c..1d12cce353 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -132,6 +132,7 @@ typedef struct SqlFunctionCtx { SInputColumnInfoData input; SResultDataInfo resDataInfo; uint32_t order; // data block scanner order: asc|desc + uint8_t isPseudoFunc;// denote current function is pseudo function or not [added for perf reason] uint8_t scanFlag; // record current running step, default: 0 int16_t functionId; // function id char *pOutput; // final result output buffer, point to sdata->data diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index d685ba2062..890b71e583 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1542,6 +1542,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) { SFuncExecEnv env = {0}; pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId; + pCtx->isPseudoFunc = fmIsWindowPseudoColumnFunc(pCtx->functionId); if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) { bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index f37a31d5a9..fe019aaa47 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -365,7 +365,7 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC pCtx[k].input.colDataSMAIsSet = false; } - if (fmIsWindowPseudoColumnFunc(pCtx[k].functionId)) { + if (pCtx[k].isPseudoFunc) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[k]); char* p = GET_ROWCELL_INTERBUF(pEntryInfo); @@ -819,7 +819,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO continue; } - if (fmIsWindowPseudoColumnFunc(pCtx[i].functionId)) { + if (pCtx[i].isPseudoFunc) { continue; } From 0030c4b5ee85aa7c9176f179612539670d58332f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 3 Feb 2023 14:24:00 +0800 Subject: [PATCH 088/267] refactor(query): opt perf by remove some functions. --- include/libs/function/function.h | 1 + .../executor/inc => include/util}/tsimplehash.h | 0 source/libs/executor/src/executil.c | 1 + source/libs/executor/src/executorimpl.c | 2 +- source/libs/function/src/builtinsimpl.c | 13 ++++++++----- source/util/src/tpagedbuf.c | 14 +++++++------- source/{libs/executor => util}/src/tsimplehash.c | 0 7 files changed, 18 insertions(+), 13 deletions(-) rename {source/libs/executor/inc => include/util}/tsimplehash.h (100%) rename source/{libs/executor => util}/src/tsimplehash.c (100%) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 1d12cce353..c44ad12759 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -133,6 +133,7 @@ typedef struct SqlFunctionCtx { SResultDataInfo resDataInfo; uint32_t order; // data block scanner order: asc|desc uint8_t isPseudoFunc;// denote current function is pseudo function or not [added for perf reason] + uint8_t isNotNullFunc;// not return null value. 
uint8_t scanFlag; // record current running step, default: 0 int16_t functionId; // function id char *pOutput; // final result output buffer, point to sdata->data diff --git a/source/libs/executor/inc/tsimplehash.h b/include/util/tsimplehash.h similarity index 100% rename from source/libs/executor/inc/tsimplehash.h rename to include/util/tsimplehash.h diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 890b71e583..9e573acde2 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1543,6 +1543,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, SFuncExecEnv env = {0}; pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId; pCtx->isPseudoFunc = fmIsWindowPseudoColumnFunc(pCtx->functionId); + pCtx->isNotNullFunc = fmIsNotNullOutputFunc(pCtx->functionId); if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) { bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index fe019aaa47..ef76402d34 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1065,7 +1065,7 @@ static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t nu pRow->numOfRows = pResInfo->numOfRes; } - if (fmIsNotNullOutputFunc(pCtx[j].functionId)) { + if (pCtx[j].isNotNullFunc) { returnNotNull = true; } } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index c831c3183b..50ceda4605 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -794,7 +794,8 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { switch (pCol->info.type) { case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_BIGINT: - colDataAppendInt64(pCol, currentRow, &pRes->v); + ((int64_t*)pCol->pData)[currentRow] = pRes->v; +// colDataAppendInt64(pCol, currentRow, &pRes->v); break; case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: @@ -822,10 +823,12 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { colDataAppendNULL(pCol, currentRow); } - if (pEntryInfo->numOfRes > 0) { - code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow); - } else { - code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); + if (pCtx->subsidiaries.num > 0) { + if (pEntryInfo->numOfRes > 0) { + code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow); + } else { + code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); + } } return code; diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 76431d7836..c08ae1e364 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -2,7 +2,7 @@ #include "tpagedbuf.h" #include "taoserror.h" #include "tcompression.h" -#include "thash.h" +#include "tsimplehash.h" #include "tlog.h" #define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES) @@ -38,7 +38,7 @@ struct SDiskbasedBuf { int32_t inMemPages; // numOfPages that are allocated in memory SList* freePgList; // free page list SArray* pIdList; // page id list - SHashObj* all; + SSHashObj*all; SList* lruList; void* emptyDummyIdList; // dummy id list void* assistBuf; // assistant buffer for compress/decompress data @@ -377,7 +377,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem goto _error; } - pPBuf->all = 
taosHashInit(10, fn, true, false); + pPBuf->all = tSimpleHashInit(20, fn); if (pPBuf->all == NULL) { goto _error; } @@ -438,7 +438,7 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { } // add to hash map - taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES); + tSimpleHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES); pBuf->totalBufSize += pBuf->pageSize; } @@ -463,7 +463,7 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) { pBuf->statis.getPages += 1; - SPageInfo** pi = taosHashGet(pBuf->all, &id, sizeof(int32_t)); + SPageInfo** pi = tSimpleHashGet(pBuf->all, &id, sizeof(int32_t)); if (pi == NULL || *pi == NULL) { uError("failed to locate the buffer page:%d, %s", id, pBuf->id); terrno = TSDB_CODE_INVALID_PARA; @@ -615,7 +615,7 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) { taosArrayDestroy(pBuf->emptyDummyIdList); taosArrayDestroy(pBuf->pFree); - taosHashCleanup(pBuf->all); + tSimpleHashCleanup(pBuf->all); taosMemoryFreeClear(pBuf->id); taosMemoryFreeClear(pBuf->assistBuf); @@ -711,7 +711,7 @@ void clearDiskbasedBuf(SDiskbasedBuf* pBuf) { taosArrayClear(pBuf->emptyDummyIdList); taosArrayClear(pBuf->pFree); - taosHashClear(pBuf->all); + tSimpleHashClear(pBuf->all); pBuf->numOfPages = 0; // all pages are in buffer in the first place pBuf->totalBufSize = 0; diff --git a/source/libs/executor/src/tsimplehash.c b/source/util/src/tsimplehash.c similarity index 100% rename from source/libs/executor/src/tsimplehash.c rename to source/util/src/tsimplehash.c From d4e3a9cf2b66fac2029265c423cba087eff3db7d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 3 Feb 2023 14:42:47 +0800 Subject: [PATCH 089/267] refactor(query): opt perf by remove some functions. --- source/util/src/tpagedbuf.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index c08ae1e364..9684c9dea1 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -372,12 +372,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem goto _error; } - pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES - if (pPBuf->assistBuf == NULL) { - goto _error; - } - - pPBuf->all = tSimpleHashInit(20, fn); + pPBuf->all = tSimpleHashInit(64, fn); if (pPBuf->all == NULL) { goto _error; } From 299afd98d977ee50d2d5df4608242fe40b188cbf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 3 Feb 2023 14:58:55 +0800 Subject: [PATCH 090/267] refactor(query): opt perf by remove some functions. 
--- include/util/tsimplehash.h | 1 + source/util/src/tsimplehash.c | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/include/util/tsimplehash.h b/include/util/tsimplehash.h index 7344c34261..c9df911476 100644 --- a/include/util/tsimplehash.h +++ b/include/util/tsimplehash.h @@ -116,6 +116,7 @@ typedef struct SHNode { struct SHNode *next; uint32_t keyLen : 20; uint32_t dataLen : 12; + uint32_t hashVal; char data[]; } SHNode; #pragma pack(pop) diff --git a/source/util/src/tsimplehash.c b/source/util/src/tsimplehash.c index 42938b848d..8f2078d6ef 100644 --- a/source/util/src/tsimplehash.c +++ b/source/util/src/tsimplehash.c @@ -114,7 +114,8 @@ static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) { } } -static SHNode *doCreateHashNode(SSHashObj* pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen) { +static SHNode *doCreateHashNode(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen, + uint32_t hashVal) { SHNode *pNewNode = doInternalAlloc(pHashObj, sizeof(SHNode) + keyLen + dataLen); if (!pNewNode) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -124,6 +125,8 @@ static SHNode *doCreateHashNode(SSHashObj* pHashObj, const void *key, size_t key pNewNode->keyLen = keyLen; pNewNode->dataLen = dataLen; pNewNode->next = NULL; + pNewNode->hashVal = hashVal; + if (data) { memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen); } @@ -167,10 +170,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) { SHNode *pPrev = NULL; while (pNode != NULL) { - void *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen); - uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pNode->keyLen); - - int32_t newIdx = HASH_INDEX(hashVal, pHashObj->capacity); + int32_t newIdx = HASH_INDEX(pNode->hashVal, pHashObj->capacity); pNext = pNode->next; if (newIdx != idx) { if (!pPrev) { @@ -211,7 +211,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons SHNode *pNode = pHashObj->hashList[slot]; if (!pNode) { - SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen); + SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen, hashVal); if (!pNewNode) { return -1; } @@ -229,7 +229,7 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons } if (!pNode) { - SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen); + SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen, hashVal); if (!pNewNode) { return -1; } From ebbf3af3b14464240c14aa8ea134e1970efba490 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 6 Feb 2023 13:46:47 +0800 Subject: [PATCH 091/267] fix: invalid msg order issue --- source/libs/qworker/src/qworker.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index fedaa96ed9..102a34517c 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -263,6 +263,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, SOutputData output = {0}; if (NULL == ctx->sinkHandle) { + pOutput->queryEnd = true; return TSDB_CODE_SUCCESS; } @@ -758,7 +759,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { } QW_LOCK(QW_WRITE, &ctx->lock); - if (qComplete || (queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code) { + if (atomic_load_8((int8_t*)&ctx->queryEnd) || (queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code) { // Note: 
query is not running anymore QW_SET_PHASE(ctx, QW_PHASE_POST_CQUERY); QW_UNLOCK(QW_WRITE, &ctx->lock); From 61be1fdc585c92534e2d8ba73d51e275f574f000 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 7 Feb 2023 11:37:56 +0800 Subject: [PATCH 092/267] fix(query):fix syntax error. --- source/libs/function/src/detail/tminmax.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index e867682396..90385dfcc1 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -799,18 +799,6 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) } } - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } - } - } - numOfElems = 1; pBuf->assign = true; goto _over; @@ -939,5 +927,5 @@ _over: } *nElems = numOfElems; - return TSDB_CODE_SUCCESS; + return code; } From cc2fb6671251637f817173e901df28afff2f1489 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 8 Feb 2023 22:30:00 +0800 Subject: [PATCH 093/267] fix(query): disable AVX2 when cpu instructions do not support it. --- source/util/src/tcompression.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index d3605cd02c..9d38253101 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -228,7 +228,7 @@ int32_t tsCompressINTImp(const char *const input, const int32_t nelements, char } int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, char *const output, const char type) { -#if 1 +#if __AVX2__ int32_t word_length = 0; switch (type) { case TSDB_DATA_TYPE_BIGINT: From a70f8cea91a69414f57af85fe65b298cdfe14eda Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 8 Feb 2023 22:34:32 +0800 Subject: [PATCH 094/267] refactor: do some internal refactor. --- source/util/src/tcompression.c | 38 ++-------------------------------- 1 file changed, 2 insertions(+), 36 deletions(-) diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 9d38253101..695a83abb1 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -228,7 +228,7 @@ int32_t tsCompressINTImp(const char *const input, const int32_t nelements, char } int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, char *const output, const char type) { -#if __AVX2__ + int32_t word_length = 0; switch (type) { case TSDB_DATA_TYPE_BIGINT: @@ -264,6 +264,7 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int32_t _pos = 0; int64_t prev_value = 0; +#if __AVX2__ while (1) { if (_pos == nelements) break; @@ -434,41 +435,6 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha return nelements * word_length; #else - int32_t word_length = 0; - switch (type) { - case TSDB_DATA_TYPE_BIGINT: - word_length = LONG_BYTES; - break; - case TSDB_DATA_TYPE_INT: - word_length = INT_BYTES; - break; - case TSDB_DATA_TYPE_SMALLINT: - word_length = SHORT_BYTES; - break; - case TSDB_DATA_TYPE_TINYINT: - word_length = CHAR_BYTES; - break; - default: - uError("Invalid decompress integer type:%d", type); - return -1; - } - - // If not compressed. 
- if (input[0] == 1) { - memcpy(output, input + 1, nelements * word_length); - return nelements * word_length; - } - - // Selector value: 0 1 2 3 4 5 6 7 8 9 10 11 - // 12 13 14 15 - char bit_per_integer[] = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60}; - int32_t selector_to_elems[] = {240, 120, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1}; - - const char *ip = input + 1; - int32_t count = 0; - int32_t _pos = 0; - int64_t prev_value = 0; - while (1) { if (count == nelements) break; From 24c83dc83a182ede130c0471a869794e43a66b66 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 9 Feb 2023 00:14:00 +0800 Subject: [PATCH 095/267] fix(query): fix memory leak. --- source/util/src/tsimplehash.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/util/src/tsimplehash.c b/source/util/src/tsimplehash.c index 8f2078d6ef..32474d0880 100644 --- a/source/util/src/tsimplehash.c +++ b/source/util/src/tsimplehash.c @@ -367,6 +367,7 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) { return; } + taosArrayDestroyEx(pHashObj->pHashNodeBuf, destroyItems); tSimpleHashClear(pHashObj); taosMemoryFreeClear(pHashObj->hashList); taosMemoryFree(pHashObj); From ad04f7afffc8ac063dc9dcb599fe4f92a1b5dd89 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 9 Feb 2023 00:43:55 +0800 Subject: [PATCH 096/267] fix(query): fix memory leak. --- source/util/src/tsimplehash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/tsimplehash.c b/source/util/src/tsimplehash.c index 32474d0880..db73dac929 100644 --- a/source/util/src/tsimplehash.c +++ b/source/util/src/tsimplehash.c @@ -367,8 +367,8 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) { return; } - taosArrayDestroyEx(pHashObj->pHashNodeBuf, destroyItems); tSimpleHashClear(pHashObj); + taosArrayDestroy(pHashObj->pHashNodeBuf); taosMemoryFreeClear(pHashObj->hashList); taosMemoryFree(pHashObj); } From e46657ada27af19bdcd8b78ae79cd20a606cb9b7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 00:30:56 +0800 Subject: [PATCH 097/267] fix(query): fix memory leak. 
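/*
 * A minimal sketch of the teardown order the two tsimplehash.c fixes above
 * converge on: clear the table first, then release the block buffer that
 * backs the nodes, then the bucket array, and finally the handle itself.
 * SimpleHash, its fields and simpleHashCleanup are simplified, hypothetical
 * stand-ins for SSHashObj, pHashNodeBuf and hashList, kept only to show the
 * ordering.
 */
#include <stdlib.h>

typedef struct SimpleHash {
  void   **buckets;   /* plays the role of hashList                    */
  size_t   nBuckets;
  void   **chunks;    /* plays the role of pHashNodeBuf (node storage) */
  size_t   nChunks;
} SimpleHash;

static void simpleHashClear(SimpleHash *h) {
  /* drop all entries; the nodes themselves live inside h->chunks */
  for (size_t i = 0; i < h->nBuckets; ++i) h->buckets[i] = NULL;
}

static void simpleHashCleanup(SimpleHash *h) {
  if (h == NULL) return;
  simpleHashClear(h);                                   /* 1. entries       */
  for (size_t i = 0; i < h->nChunks; ++i) free(h->chunks[i]);
  free(h->chunks);                                      /* 2. node buffer   */
  free(h->buckets);                                     /* 3. bucket array  */
  free(h);                                              /* 4. the handle    */
}

int main(void) {
  SimpleHash *h = calloc(1, sizeof(*h));
  h->nBuckets  = 4;
  h->buckets   = calloc(h->nBuckets, sizeof(void *));
  h->nChunks   = 1;
  h->chunks    = calloc(h->nChunks, sizeof(void *));
  h->chunks[0] = malloc(64);                            /* one node block   */
  simpleHashCleanup(h);                                 /* no leaks expected */
  return 0;
}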
--- source/libs/executor/src/executil.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 554ee73c7f..190ab2a7d2 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1125,6 +1125,8 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, if (tsTagFilterCache) { metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); } + + taosMemoryFree(pPayload); } _end: From 3d3d15b7656736ebd0d361549a07c5b09cef68bb Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 10 Feb 2023 10:45:52 +0800 Subject: [PATCH 098/267] fix(query): fix max/min result inconsistent using with first or last plus interval time window query --- source/libs/function/src/detail/tminmax.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index a511ca97f1..b24172acc0 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -372,7 +372,7 @@ static void handleInt8Col(const void* data, int32_t start, int32_t numOfRows, SM pBuf->v = i8VectorCmpAVX2(data, numOfRows, isMinFunc, signVal); } else { if (!pBuf->assign) { - pBuf->v = ((int8_t*)data)[0]; + pBuf->v = ((int8_t*)data)[start]; } if (signVal) { @@ -406,7 +406,7 @@ static void handleInt16Col(const void* data, int32_t start, int32_t numOfRows, S pBuf->v = i16VectorCmpAVX2(data, numOfRows, isMinFunc, signVal); } else { if (!pBuf->assign) { - pBuf->v = ((int16_t*)data)[0]; + pBuf->v = ((int16_t*)data)[start]; } if (signVal) { @@ -440,7 +440,7 @@ static void handleInt32Col(const void* data, int32_t start, int32_t numOfRows, S pBuf->v = i32VectorCmpAVX2(data, numOfRows, isMinFunc, signVal); } else { if (!pBuf->assign) { - pBuf->v = ((int32_t*)data)[0]; + pBuf->v = ((int32_t*)data)[start]; } if (signVal) { @@ -470,7 +470,7 @@ static void handleInt32Col(const void* data, int32_t start, int32_t numOfRows, S static void handleInt64Col(const void* data, int32_t start, int32_t numOfRows, SMinmaxResInfo* pBuf, bool isMinFunc, bool signVal) { if (!pBuf->assign) { - pBuf->v = ((int64_t*)data)[0]; + pBuf->v = ((int64_t*)data)[start]; } if (signVal) { @@ -504,7 +504,7 @@ static void handleFloatCol(SColumnInfoData* pCol, int32_t start, int32_t numOfRo *val = floatVectorCmpAVX(pData, numOfRows, isMinFunc); } else { if (!pBuf->assign) { - *val = pData[0]; + *val = pData[start]; } if (isMinFunc) { // min @@ -535,7 +535,7 @@ static void handleDoubleCol(SColumnInfoData* pCol, int32_t start, int32_t numOfR *val = (double)doubleVectorCmpAVX(pData, numOfRows, isMinFunc); } else { if (!pBuf->assign) { - *val = pData[0]; + *val = pData[start]; } if (isMinFunc) { // min From c8acfc4d54530cc05736696454aa386f312cadbe Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Fri, 10 Feb 2023 10:48:26 +0800 Subject: [PATCH 099/267] fix: stream meta --- source/libs/stream/src/streamMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 2f991288ff..184c1c8abf 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -44,7 +44,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF goto _err; } - pMeta->pTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + 
pMeta->pTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (pMeta->pTasks == NULL) { goto _err; } From 5d526d926978f851c60c52120058527f451ea2d7 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 10 Feb 2023 10:57:50 +0800 Subject: [PATCH 100/267] fix: taosd crash issue --- source/libs/transport/src/thttp.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index cd508f6fe9..8e5f79137f 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -420,7 +420,13 @@ static void transHttpEnvInit() { uv_loop_init(http->loop); http->asyncPool = transAsyncPoolCreate(http->loop, 1, http, httpAsyncCb); - + if (NULL == http->asyncPool) { + taosMemoryFree(http->loop); + taosMemoryFree(http); + http = NULL; + return; + } + int err = taosThreadCreate(&http->thread, NULL, httpThread, (void*)http); if (err != 0) { taosMemoryFree(http->loop); From 4908c563ccd3fd9347c217f97db06df6f2312c02 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 14:35:18 +0800 Subject: [PATCH 101/267] ehn(query): dynamic set the initial buffer size for data block from disk. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 6400e7534b..af3da87b17 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -3837,7 +3837,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL int32_t capacity = 0; if (pResBlock == NULL) { - capacity = 4096; + capacity = pVnode->config.tsdbCfg.maxRows; } else { capacity = pResBlock->info.capacity; } From 17f05c0020c2b285dd5d928ed4260081f36cccb9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 14:40:37 +0800 Subject: [PATCH 102/267] refactor: disable the limitation of maximum buffer size. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index af3da87b17..b8d0ea28da 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -423,7 +423,9 @@ static STimeWindow updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow) { return win; } +// note: currently not need this limitation static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* capacity) { +#if 0 int32_t rowLen = 0; for (int32_t i = 0; i < pCond->numOfCols; ++i) { rowLen += pCond->colList[i].bytes; @@ -434,6 +436,7 @@ static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* cap if ((*capacity) * rowLen > TWOMB) { (*capacity) = TWOMB / rowLen; } +#endif } // init file iterator @@ -618,7 +621,6 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd goto _end; } - // todo refactor. 
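/*
 * A minimal sketch, with simplified stand-in types, of how the reader's block
 * capacity is now picked in tsdbReaderOpen/tsdbReaderCreate: a caller-supplied
 * result block keeps its own capacity, otherwise the vnode's configured
 * maxRows replaces the old hard-coded 4096, and the former 2MB output-buffer
 * cap is no longer applied on top. ResBlock, TsdbCfg and chooseCapacity are
 * illustrative names, not the real tsdbRead.c structures.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t capacity; } ResBlock;
typedef struct { int32_t maxRows;  } TsdbCfg;

static int32_t chooseCapacity(const ResBlock *pResBlock, const TsdbCfg *pCfg) {
  if (pResBlock != NULL) {
    return pResBlock->capacity;   /* caller-owned block dictates the size    */
  }
  return pCfg->maxRows;           /* otherwise follow rows-per-block config  */
}

int main(void) {
  TsdbCfg  cfg = { .maxRows = 4096 };
  ResBlock blk = { .capacity = 1024 };
  printf("%d %d\n", (int)chooseCapacity(&blk, &cfg), (int)chooseCapacity(NULL, &cfg));
  return 0;
}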
limitOutputBufferSize(pCond, &pReader->capacity); // allocate buffer in order to load data blocks from file From f0c400e968cf3d7452d4ef3f550db31486528f6b Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 10 Feb 2023 15:08:34 +0800 Subject: [PATCH 103/267] add test cases --- .../2-query/max_min_last_interval.py | 1711 +++++++++++++++++ 1 file changed, 1711 insertions(+) create mode 100644 tests/system-test/2-query/max_min_last_interval.py diff --git a/tests/system-test/2-query/max_min_last_interval.py b/tests/system-test/2-query/max_min_last_interval.py new file mode 100644 index 0000000000..553060fd4c --- /dev/null +++ b/tests/system-test/2-query/max_min_last_interval.py @@ -0,0 +1,1711 @@ +# author : wenzhouwww +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + + def prepare_data(self): + tdSql.execute(f" CREATE TABLE `tb` (`ts` TIMESTAMP, `open` DOUBLE, `close` DOUBLE, `high` DOUBLE, `low` DOUBLE, `vol` DOUBLE, `amount` DOUBLE, `preclose` DOUBLE) ") + + tdSql.execute(f"insert into tb values ('2020-01-02 09:31:00',11.2,11.08,11.24,11.06,907000.0,10149188.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:32:00',11.08,10.99,11.08,10.96,301000.0,3323031.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:33:00',10.99,11.05,11.08,10.95,269300.0,2966591.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:34:00',11.08,11.0,11.08,10.99,239100.0,2635055.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:35:00',10.99,10.94,10.99,10.93,267200.0,2926850.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:36:00',10.95,10.97,10.98,10.93,202300.0,2216126.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:37:00',10.98,10.96,10.99,10.96,206400.0,2263399.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:38:00',10.96,10.96,10.97,10.95,197900.0,2168095.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:39:00',10.96,10.94,10.98,10.93,133100.0,1457263.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:40:00',10.93,10.95,10.97,10.93,102400.0,1120751.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:41:00',10.93,10.93,10.95,10.91,175000.0,1912470.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:42:00',10.92,10.92,10.96,10.92,128000.0,1399009.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:43:00',10.95,10.92,10.97,10.92,201500.0,2202114.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:44:00',10.91,10.9,10.92,10.9,198300.0,2162914.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:45:00',10.9,10.86,10.9,10.84,319100.0,3468665.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:46:00',10.87,10.9,10.9,10.86,222700.0,2420930.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:47:00',10.9,10.89,10.93,10.88,126500.0,1379156.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:48:00',10.91,10.95,10.96,10.91,44600.0,487778.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:49:00',10.96,10.96,10.98,10.95,95300.0,1045077.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:50:00',10.94,10.97,10.99,10.93,161900.0,1775641.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 
09:51:00',10.97,11.05,11.05,10.97,156300.0,1722227.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:52:00',11.05,11.06,11.08,11.04,194200.0,2146643.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:53:00',11.06,11.03,11.06,11.02,187000.0,2062967.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:54:00',11.02,11.01,11.03,11.0,60800.0,670041.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:55:00',11.01,11.07,11.07,11.01,186400.0,2056238.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:56:00',11.08,11.03,11.08,11.03,107100.0,1185077.5799999982,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:57:00',11.03,11.04,11.06,11.02,118700.0,1308724.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:58:00',11.03,11.05,11.06,11.03,23600.0,260707.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:59:00',11.05,11.03,11.05,11.03,38200.0,421723.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:00:00',11.03,11.04,11.05,11.03,77600.0,856134.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:01:00',11.04,11.03,11.04,11.03,34000.0,375159.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:02:00',11.04,11.04,11.05,11.03,67900.0,749782.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:03:00',11.05,11.04,11.06,11.04,67822.0,749311.8800000027,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:04:00',11.04,11.04,11.05,11.03,39878.0,440388.8999999985,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:05:00',11.04,11.04,11.04,11.03,10000.0,110358.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:06:00',11.04,11.02,11.04,11.02,71200.0,785535.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:07:00',11.02,11.04,11.04,11.02,41500.0,457611.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:08:00',11.03,11.04,11.05,11.03,11700.0,129195.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:09:00',11.04,11.03,11.05,11.02,173300.0,1913275.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:10:00',11.03,11.04,11.04,11.02,70000.0,771276.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:11:00',11.03,11.06,11.06,11.02,169200.0,1869131.1400000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:12:00',11.06,11.07,11.07,11.05,64800.0,716812.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:13:00',11.06,11.06,11.07,11.06,16400.0,181433.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:14:00',11.06,11.11,11.11,11.06,298500.0,3307379.5,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:15:00',11.11,11.11,11.14,11.11,95000.0,1056803.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:16:00',11.14,11.12,11.15,11.12,196300.0,2186954.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:17:00',11.12,11.12,11.15,11.12,169000.0,1881848.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:18:00',11.13,11.15,11.15,11.13,58700.0,653823.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:19:00',11.15,11.13,11.15,11.13,183100.0,2040752.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:20:00',11.13,11.13,11.14,11.12,57600.0,641102.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:21:00',11.13,11.14,11.14,11.13,89700.0,998969.0,11.02)") + tdSql.execute(f"insert into tb values 
('2020-01-02 10:22:00',11.14,11.11,11.14,11.11,50500.0,561017.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:23:00',11.1,11.12,11.12,11.1,83300.0,925194.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:24:00',11.12,11.09,11.12,11.09,15100.0,167627.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:25:00',11.1,11.1,11.11,11.1,8600.0,95462.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:26:00',11.09,11.12,11.17,11.09,270300.0,3011716.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:27:00',11.12,11.18,11.18,11.12,69600.0,777739.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:28:00',11.17,11.19,11.2,11.17,237400.0,2655762.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:29:00',11.19,11.21,11.21,11.18,103400.0,1157957.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:30:00',11.21,11.18,11.21,11.16,58000.0,649227.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:31:00',11.17,11.2,11.2,11.17,187000.0,2093130.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:32:00',11.21,11.23,11.25,11.2,212000.0,2378486.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:33:00',11.24,11.34,11.34,11.23,501637.0,5661866.099999994,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:34:00',11.33,11.41,11.42,11.31,612000.0,6956284.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:35:00',11.41,11.38,11.45,11.38,575600.0,6575579.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:36:00',11.38,11.39,11.39,11.35,328800.0,3738689.4900000095,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:37:00',11.35,11.41,11.41,11.35,230516.0,2626204.4399999976,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:38:00',11.4,11.36,11.4,11.35,137497.0,1562526.9200000018,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:39:00',11.36,11.37,11.39,11.33,249100.0,2828453.7099999934,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:40:00',11.35,11.35,11.37,11.34,205987.0,2340070.950000003,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:41:00',11.35,11.4,11.4,11.35,149496.0,1702321.1899999976,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:42:00',11.38,11.41,11.42,11.38,342971.0,3909339.100000009,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:43:00',11.41,11.4,11.42,11.39,291326.0,3322693.9799999893,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:44:00',11.4,11.48,11.48,11.4,443425.0,5075111.1400000155,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:45:00',11.46,11.44,11.46,11.44,125734.0,1439362.2999999821,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:46:00',11.44,11.44,11.45,11.44,152887.0,1750929.900000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:47:00',11.45,11.44,11.46,11.44,81013.0,927533.7199999988,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:48:00',11.43,11.4,11.44,11.4,79475.0,907338.5,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:49:00',11.4,11.4,11.43,11.36,167652.0,1911327.0600000024,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:50:00',11.41,11.39,11.41,11.38,23800.0,271169.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:51:00',11.38,11.37,11.39,11.37,36761.0,418027.56999999285,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 
10:52:00',11.38,11.38,11.39,11.37,109039.0,1240554.9900000095,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:53:00',11.37,11.37,11.4,11.37,189396.0,2156768.5200000107,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:54:00',11.39,11.37,11.39,11.37,104404.0,1187164.4799999893,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:55:00',11.37,11.35,11.37,11.35,86980.0,988150.8000000119,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:56:00',11.35,11.37,11.37,11.34,96736.0,1097808.599999994,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:57:00',11.36,11.36,11.38,11.36,62323.0,708217.2800000012,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:58:00',11.37,11.34,11.37,11.34,135782.0,1541601.1499999762,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:59:00',11.35,11.34,11.35,11.34,38379.0,435287.8600000143,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:00:00',11.34,11.36,11.36,11.34,56176.0,637639.8400000036,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:01:00',11.36,11.36,11.37,11.35,56000.0,636169.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:02:00',11.35,11.38,11.38,11.35,120151.0,1365796.3599999845,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:03:00',11.38,11.4,11.4,11.37,91258.0,1039128.0400000215,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:04:00',11.4,11.39,11.4,11.38,86994.0,991647.6599999964,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:05:00',11.39,11.38,11.4,11.38,75903.0,864927.2199999988,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:06:00',11.4,11.4,11.4,11.39,37100.0,422815.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:07:00',11.4,11.39,11.4,11.39,64900.0,739346.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:08:00',11.39,11.38,11.39,11.38,58600.0,666998.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:09:00',11.38,11.4,11.4,11.36,144417.0,1644390.650000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:10:00',11.37,11.4,11.4,11.35,59152.0,673655.7599999905,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:11:00',11.41,11.4,11.41,11.39,48700.0,555351.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:12:00',11.4,11.42,11.42,11.39,79800.0,910197.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:13:00',11.41,11.43,11.44,11.41,49000.0,559999.3400000036,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:14:00',11.43,11.44,11.44,11.43,65900.0,753786.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:15:00',11.44,11.43,11.44,11.43,21200.0,242402.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:16:00',11.43,11.43,11.44,11.43,77900.0,890539.0499999821,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:17:00',11.42,11.42,11.43,11.41,72600.0,828962.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:18:00',11.42,11.38,11.42,11.38,127200.0,1451263.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:19:00',11.38,11.41,11.41,11.38,35700.0,406927.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:20:00',11.4,11.41,11.41,11.4,17900.0,204224.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:21:00',11.41,11.41,11.41,11.4,44400.0,506283.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 
11:22:00',11.41,11.4,11.41,11.4,101700.0,1159426.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:23:00',11.4,11.38,11.4,11.36,61548.0,700285.2400000095,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:24:00',11.36,11.38,11.38,11.35,20852.0,237018.75999999046,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:25:00',11.37,11.36,11.37,11.36,36548.0,415406.24000000954,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:26:00',11.35,11.36,11.37,11.35,52800.0,599478.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:27:00',11.36,11.35,11.36,11.35,51200.0,581300.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:28:00',11.35,11.34,11.36,11.34,45700.0,518665.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:29:00',11.34,11.32,11.34,11.32,47883.0,542748.2199999988,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:30:00',11.32,11.33,11.33,11.32,37000.0,419126.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:01:00',11.33,11.33,11.35,11.33,82917.0,939614.2800000012,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:02:00',11.33,11.31,11.34,11.31,58750.0,665362.5,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:03:00',11.31,11.31,11.32,11.31,90400.0,1022443.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:04:00',11.32,11.31,11.32,11.3,120900.0,1366511.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:05:00',11.31,11.32,11.32,11.3,106400.0,1203291.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:06:00',11.32,11.36,11.36,11.32,67000.0,759965.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:07:00',11.36,11.36,11.37,11.34,95400.0,1083468.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:08:00',11.36,11.33,11.37,11.33,93900.0,1066666.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:09:00',11.33,11.34,11.34,11.32,37900.0,429351.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:10:00',11.33,11.33,11.33,11.32,26300.0,297964.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:11:00',11.33,11.28,11.33,11.28,90600.0,1024723.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:12:00',11.29,11.28,11.3,11.27,114839.0,1295492.9200000167,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:13:00',11.28,11.28,11.29,11.28,48300.0,544981.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:14:00',11.28,11.28,11.29,11.27,68561.0,773163.0799999833,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:15:00',11.28,11.27,11.29,11.27,84300.0,950882.6100000143,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:16:00',11.27,11.27,11.28,11.27,32000.0,360755.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:17:00',11.27,11.26,11.27,11.26,42220.0,475777.1999999881,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:18:00',11.26,11.25,11.26,11.25,37480.0,421951.8000000119,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:19:00',11.25,11.26,11.26,11.25,8800.0,99066.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:20:00',11.26,11.26,11.26,11.25,36400.0,409669.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:21:00',11.26,11.24,11.26,11.24,43500.0,489525.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:22:00',11.24,11.24,11.25,11.24,42500.0,477967.0,11.02)") + 
tdSql.execute(f"insert into tb values ('2020-01-02 13:23:00',11.25,11.25,11.25,11.24,27500.0,309368.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:24:00',11.25,11.25,11.26,11.25,14800.0,166644.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:25:00',11.25,11.25,11.26,11.25,6300.0,70906.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:26:00',11.25,11.26,11.26,11.25,31500.0,354602.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:27:00',11.25,11.27,11.27,11.25,34500.0,388590.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:28:00',11.26,11.26,11.27,11.26,42700.0,481092.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:29:00',11.27,11.27,11.27,11.25,86020.0,968959.3999999762,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:30:00',11.27,11.26,11.28,11.26,28180.0,317608.60000002384,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:31:00',11.26,11.26,11.27,11.26,10620.0,119642.19999998808,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:32:00',11.26,11.26,11.27,11.26,72200.0,813073.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:33:00',11.26,11.26,11.27,11.26,29400.0,331219.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:34:00',11.26,11.27,11.27,11.26,14400.0,162189.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:35:00',11.27,11.25,11.27,11.25,22480.0,253073.80000001192,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:36:00',11.25,11.25,11.26,11.25,26600.0,299455.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:37:00',11.25,11.26,11.26,11.25,80520.0,906359.1999999881,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:38:00',11.27,11.25,11.27,11.25,16180.0,182079.80000001192,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:39:00',11.25,11.23,11.26,11.23,106000.0,1191411.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:40:00',11.23,11.23,11.24,11.23,91100.0,1023446.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:41:00',11.23,11.22,11.24,11.22,35400.0,397556.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:42:00',11.23,11.2,11.23,11.2,172700.0,1936596.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:43:00',11.21,11.19,11.21,11.19,89700.0,1004799.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:44:00',11.2,11.19,11.2,11.18,57100.0,639239.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:45:00',11.19,11.2,11.2,11.19,67595.0,756911.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:46:00',11.2,11.2,11.21,11.2,115000.0,1288434.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:47:00',11.19,11.2,11.2,11.19,27200.0,304467.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:48:00',11.19,11.19,11.19,11.18,43700.0,488802.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:49:00',11.19,11.19,11.19,11.18,48700.0,544675.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:50:00',11.19,11.19,11.22,11.18,98500.0,1103443.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:51:00',11.19,11.2,11.21,11.19,67500.0,756052.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:52:00',11.2,11.2,11.22,11.19,43600.0,488300.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:53:00',11.2,11.2,11.2,11.19,52600.0,589105.0,11.02)") + 
tdSql.execute(f"insert into tb values ('2020-01-02 13:54:00',11.2,11.2,11.2,11.19,60300.0,675155.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:55:00',11.2,11.19,11.2,11.18,59100.0,661067.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:56:00',11.19,11.2,11.21,11.18,68700.0,768996.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:57:00',11.2,11.21,11.22,11.19,74900.0,839752.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:58:00',11.22,11.21,11.22,11.21,9600.0,107664.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:59:00',11.21,11.21,11.22,11.21,20900.0,234373.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:00:00',11.22,11.23,11.24,11.21,68500.0,768957.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:01:00',11.23,11.24,11.24,11.23,30200.0,339429.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:02:00',11.25,11.22,11.25,11.22,38700.0,434942.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:03:00',11.22,11.24,11.25,11.22,58720.0,660538.3999999762,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:04:00',11.24,11.24,11.25,11.24,19400.0,218099.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:05:00',11.25,11.24,11.25,11.23,31100.0,349744.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:06:00',11.24,11.24,11.24,11.23,6500.0,73064.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:07:00',11.24,11.23,11.24,11.22,21200.0,238054.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:08:00',11.22,11.22,11.24,11.22,6300.0,70731.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:09:00',11.22,11.22,11.24,11.22,5600.0,62878.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:10:00',11.22,11.23,11.23,11.22,5600.0,62866.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:11:00',11.23,11.24,11.24,11.22,36000.0,404370.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:12:00',11.24,11.24,11.25,11.22,55300.0,622032.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:13:00',11.25,11.23,11.25,11.23,32600.0,366576.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:14:00',11.23,11.24,11.25,11.23,21000.0,236082.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:15:00',11.24,11.26,11.26,11.24,64500.0,725876.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:16:00',11.26,11.27,11.28,11.26,82200.0,926583.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:17:00',11.27,11.26,11.28,11.26,45600.0,514013.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:18:00',11.27,11.26,11.27,11.26,17400.0,195986.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:19:00',11.27,11.27,11.29,11.27,273200.0,3082769.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:20:00',11.27,11.3,11.3,11.27,175039.0,1977112.3100000024,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:21:00',11.31,11.33,11.34,11.31,128300.0,1453462.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:22:00',11.33,11.35,11.35,11.32,66700.0,756208.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:23:00',11.34,11.35,11.36,11.34,121700.0,1381218.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:24:00',11.36,11.35,11.37,11.35,66600.0,756737.0,11.02)") + tdSql.execute(f"insert into tb values 
('2020-01-02 14:25:00',11.35,11.36,11.38,11.35,132600.0,1507997.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:26:00',11.38,11.39,11.4,11.37,149180.0,1699236.400000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:27:00',11.39,11.37,11.39,11.36,35820.0,407303.59999999404,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:28:00',11.37,11.35,11.38,11.35,47900.0,544627.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:29:00',11.35,11.37,11.37,11.35,17400.0,197656.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:30:00',11.36,11.35,11.36,11.35,87300.0,991027.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:31:00',11.35,11.39,11.4,11.35,138680.0,1578201.400000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:32:00',11.39,11.4,11.4,11.39,180200.0,2054110.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:33:00',11.4,11.41,11.41,11.39,114300.0,1303708.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:34:00',11.42,11.4,11.42,11.4,160600.0,1833388.1299999952,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:35:00',11.41,11.41,11.42,11.4,97538.0,1112477.580000013,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:36:00',11.41,11.43,11.43,11.41,192487.0,2198982.5399999917,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:37:00',11.43,11.44,11.45,11.43,389100.0,4451469.3900000155,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:38:00',11.44,11.44,11.44,11.43,62400.0,713725.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:39:00',11.44,11.43,11.44,11.43,41100.0,469907.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:40:00',11.43,11.43,11.44,11.43,180100.0,2058619.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:41:00',11.43,11.41,11.44,11.41,83900.0,959046.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:42:00',11.41,11.39,11.41,11.39,53100.0,605290.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:43:00',11.39,11.38,11.39,11.38,97700.0,1112686.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:44:00',11.38,11.37,11.39,11.37,57500.0,654477.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:45:00',11.37,11.4,11.4,11.37,312900.0,3565077.99999997,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:46:00',11.4,11.41,11.41,11.4,98500.0,1123762.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:47:00',11.41,11.42,11.43,11.41,159600.0,1822511.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:48:00',11.42,11.42,11.42,11.41,123400.0,1409109.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:49:00',11.42,11.41,11.43,11.41,154100.0,1760238.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:50:00',11.41,11.42,11.44,11.41,361300.0,4130568.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:51:00',11.42,11.45,11.45,11.42,231257.0,2645775.370000005,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:52:00',11.44,11.44,11.45,11.43,295700.0,3384133.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:53:00',11.41,11.41,11.44,11.41,97500.0,1114794.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:54:00',11.42,11.43,11.44,11.41,145700.0,1665211.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 
14:55:00',11.41,11.39,11.42,11.36,110743.0,1260564.6299999952,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:56:00',11.36,11.37,11.38,11.35,110700.0,1257857.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:57:00',11.37,11.39,11.39,11.37,91600.0,1042258.4300000072,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 15:00:00',11.35,11.35,11.35,11.35,648000.0,7354800.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:31:00',11.27,11.35,11.35,11.27,194597.0,2194477.45,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:32:00',11.31,11.31,11.36,11.3,97600.0,1104836.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:33:00',11.31,11.25,11.33,11.25,142700.0,1610769.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:34:00',11.28,11.28,11.3,11.26,89100.0,1004739.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:35:00',11.28,11.27,11.28,11.26,65300.0,735698.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:36:00',11.27,11.25,11.27,11.23,198700.0,2235096.999999999,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:37:00',11.23,11.26,11.26,11.23,153800.0,1728714.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:38:00',11.26,11.32,11.32,11.26,59500.0,672484.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:39:00',11.33,11.39,11.4,11.33,250800.0,2849183.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:40:00',11.41,11.38,11.41,11.37,108200.0,1231972.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:41:00',11.39,11.33,11.41,11.33,146000.0,1662606.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:42:00',11.34,11.36,11.37,11.34,132600.0,1506245.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:43:00',11.35,11.44,11.44,11.35,422058.0,4811030.879999999,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:44:00',11.42,11.42,11.44,11.42,106200.0,1213679.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:45:00',11.42,11.39,11.42,11.36,102300.0,1166319.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:46:00',11.39,11.39,11.41,11.38,88100.0,1003877.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:47:00',11.39,11.35,11.4,11.35,132700.0,1508598.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:48:00',11.35,11.33,11.36,11.31,120300.0,1363681.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:49:00',11.32,11.32,11.33,11.3,118200.0,1337038.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:50:00',11.32,11.34,11.34,11.31,154300.0,1747209.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:51:00',11.34,11.34,11.35,11.31,192000.0,2176691.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:52:00',11.33,11.34,11.35,11.33,159100.0,1804359.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:53:00',11.34,11.35,11.35,11.31,245200.0,2779151.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:54:00',11.36,11.38,11.39,11.36,119800.0,1361758.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:55:00',11.37,11.37,11.38,11.36,41000.0,466342.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:56:00',11.36,11.35,11.37,11.35,35000.0,397418.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:57:00',11.36,11.34,11.36,11.3,160300.0,1814162.0,11.35)") + tdSql.execute(f"insert 
into tb values ('2020-01-03 09:58:00',11.34,11.34,11.35,11.32,22300.0,252845.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:59:00',11.34,11.35,11.35,11.34,112600.0,1277749.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:00:00',11.34,11.34,11.35,11.34,65800.0,746178.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:01:00',11.34,11.32,11.34,11.3,226400.0,2561501.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:02:00',11.32,11.34,11.35,11.28,138200.0,1563277.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:03:00',11.34,11.34,11.34,11.3,63800.0,722991.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:04:00',11.34,11.34,11.35,11.34,39100.0,443617.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:05:00',11.35,11.35,11.35,11.33,81200.0,921373.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:06:00',11.35,11.35,11.36,11.35,85200.0,967050.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:07:00',11.36,11.37,11.37,11.35,135300.0,1537200.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:08:00',11.35,11.36,11.38,11.35,69700.0,791922.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:09:00',11.36,11.41,11.41,11.36,296500.0,3376310.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:10:00',11.41,11.41,11.42,11.4,163800.0,1869800.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:11:00',11.42,11.42,11.43,11.41,61900.0,706871.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:12:00',11.42,11.4,11.42,11.4,21600.0,246423.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:13:00',11.4,11.38,11.4,11.37,43000.0,489339.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:14:00',11.38,11.39,11.39,11.37,31200.0,355056.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:15:00',11.38,11.38,11.39,11.37,71400.0,812399.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:16:00',11.38,11.37,11.38,11.37,18300.0,208100.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:17:00',11.37,11.31,11.37,11.31,211200.0,2394958.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:18:00',11.32,11.35,11.36,11.3,146900.0,1661582.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:19:00',11.35,11.34,11.35,11.32,28600.0,324130.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:20:00',11.34,11.35,11.35,11.34,61900.0,701699.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:21:00',11.35,11.34,11.35,11.34,46900.0,531869.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:22:00',11.34,11.35,11.35,11.34,23600.0,267713.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:23:00',11.35,11.35,11.35,11.34,32200.0,365336.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:24:00',11.34,11.34,11.35,11.3,285800.0,3234866.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:25:00',11.34,11.31,11.35,11.3,45600.0,515771.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:26:00',11.34,11.34,11.34,11.32,72300.0,820048.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:27:00',11.34,11.34,11.35,11.34,41900.0,475051.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:28:00',11.34,11.35,11.35,11.34,46800.0,530896.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 
10:29:00',11.35,11.34,11.35,11.33,118000.0,1338109.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:30:00',11.34,11.33,11.35,11.33,61300.0,695369.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:31:00',11.33,11.34,11.35,11.33,24500.0,277860.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:32:00',11.34,11.34,11.35,11.33,27800.0,315082.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:33:00',11.34,11.34,11.35,11.33,48500.0,549676.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:34:00',11.34,11.35,11.35,11.34,10400.0,117943.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:35:00',11.35,11.34,11.35,11.34,13600.0,154225.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:36:00',11.34,11.34,11.35,11.34,13700.0,155372.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:37:00',11.34,11.34,11.34,11.34,83600.0,947807.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:38:00',11.32,11.34,11.35,11.32,130300.0,1475481.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:39:00',11.34,11.33,11.35,11.32,41700.0,473008.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:40:00',11.33,11.33,11.35,11.33,60200.0,682238.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:41:00',11.34,11.35,11.35,11.33,36200.0,410228.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:42:00',11.34,11.33,11.35,11.32,122600.0,1389382.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:43:00',11.33,11.32,11.34,11.3,107600.0,1217485.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:44:00',11.32,11.33,11.34,11.32,52700.0,597355.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:45:00',11.34,11.31,11.34,11.31,30100.0,340572.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:46:00',11.31,11.3,11.32,11.29,57600.0,651033.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:47:00',11.3,11.29,11.3,11.29,13200.0,149081.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:48:00',11.29,11.29,11.3,11.29,25600.0,289111.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:49:00',11.29,11.29,11.3,11.29,25600.0,289165.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:50:00',11.29,11.27,11.3,11.27,115900.0,1308000.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:51:00',11.25,11.27,11.27,11.25,153300.0,1725374.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:52:00',11.28,11.27,11.28,11.27,32400.0,365219.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:53:00',11.27,11.25,11.28,11.25,81000.0,912142.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:54:00',11.25,11.27,11.27,11.24,99400.0,1118078.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:55:00',11.27,11.27,11.27,11.26,27800.0,313174.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:56:00',11.27,11.28,11.28,11.27,12000.0,135354.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:57:00',11.27,11.27,11.28,11.27,15600.0,175857.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:58:00',11.27,11.26,11.28,11.26,75400.0,849417.799999997,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:59:00',11.26,11.29,11.29,11.25,114481.0,1289693.8700000048,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 
11:00:00',11.29,11.28,11.29,11.28,23200.0,261798.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:01:00',11.29,11.31,11.31,11.28,52900.0,597503.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:02:00',11.31,11.33,11.33,11.3,52800.0,597613.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:03:00',11.32,11.33,11.33,11.3,57400.0,648994.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:04:00',11.31,11.3,11.32,11.29,55500.0,627200.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:05:00',11.29,11.3,11.3,11.29,4600.0,51942.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:06:00',11.3,11.29,11.3,11.26,92600.0,1043860.1899999976,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:07:00',11.29,11.27,11.29,11.26,51100.0,575911.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:08:00',11.29,11.27,11.29,11.26,46100.0,519247.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:09:00',11.27,11.28,11.29,11.27,17700.0,199690.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:10:00',11.28,11.28,11.29,11.28,28400.0,320413.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:11:00',11.28,11.26,11.28,11.26,31400.0,353981.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:12:00',11.26,11.21,11.26,11.21,177800.0,1996944.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:13:00',11.22,11.23,11.24,11.22,119200.0,1337770.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:14:00',11.23,11.21,11.23,11.19,375600.0,4207714.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:15:00',11.21,11.21,11.22,11.21,88300.0,990017.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:16:00',11.21,11.2,11.22,11.2,125300.0,1403274.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:17:00',11.19,11.19,11.21,11.19,101800.0,1139480.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:18:00',11.2,11.2,11.21,11.19,99900.0,1118379.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:19:00',11.21,11.2,11.21,11.2,33800.0,378602.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:20:00',11.2,11.18,11.2,11.18,136400.0,1525650.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:21:00',11.18,11.22,11.22,11.18,114100.0,1278375.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:22:00',11.22,11.29,11.3,11.22,155400.0,1750586.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:23:00',11.3,11.26,11.3,11.26,32500.0,366525.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:24:00',11.27,11.32,11.32,11.27,105400.0,1190886.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:25:00',11.32,11.33,11.34,11.32,94200.0,1067788.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:26:00',11.34,11.34,11.35,11.33,97700.0,1107909.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:27:00',11.34,11.32,11.35,11.3,45900.0,519877.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:28:00',11.31,11.34,11.34,11.3,67100.0,760368.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:29:00',11.34,11.35,11.36,11.33,107300.0,1217725.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:30:00',11.34,11.33,11.35,11.33,31900.0,361717.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 
13:01:00',11.33,11.3,11.33,11.3,163800.0,1854874.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:02:00',11.32,11.28,11.32,11.28,18800.0,212606.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:03:00',11.3,11.3,11.32,11.29,15100.0,170797.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:04:00',11.32,11.33,11.33,11.31,32500.0,367922.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:05:00',11.33,11.32,11.33,11.32,45900.0,520010.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:06:00',11.33,11.32,11.34,11.32,47500.0,538081.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:07:00',11.33,11.32,11.33,11.32,11000.0,124532.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:08:00',11.32,11.33,11.33,11.32,6900.0,78146.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:09:00',11.33,11.29,11.33,11.29,48500.0,548417.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:10:00',11.28,11.3,11.3,11.28,57300.0,647167.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:11:00',11.29,11.31,11.32,11.29,105600.0,1193513.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:12:00',11.31,11.3,11.31,11.3,37300.0,421730.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:13:00',11.3,11.29,11.3,11.29,15300.0,172751.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:14:00',11.29,11.3,11.3,11.29,11900.0,134382.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:15:00',11.3,11.25,11.3,11.25,86300.0,972690.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:16:00',11.25,11.26,11.27,11.25,40600.0,457406.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:17:00',11.26,11.26,11.28,11.26,33900.0,381866.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:18:00',11.26,11.26,11.26,11.24,77400.0,870776.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:19:00',11.26,11.27,11.27,11.26,45800.0,516044.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:20:00',11.27,11.25,11.27,11.25,48500.0,545868.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:21:00',11.25,11.28,11.28,11.25,36000.0,405597.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:22:00',11.27,11.26,11.28,11.26,39300.0,442846.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:23:00',11.26,11.28,11.29,11.26,53800.0,606769.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:24:00',11.29,11.29,11.3,11.28,40600.0,458601.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:25:00',11.29,11.3,11.3,11.29,47200.0,533344.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:26:00',11.3,11.29,11.3,11.29,83200.0,940090.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:27:00',11.3,11.32,11.32,11.29,46000.0,520109.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:28:00',11.32,11.29,11.32,11.29,31300.0,353497.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:29:00',11.29,11.28,11.3,11.27,83600.0,943134.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:30:00',11.28,11.3,11.31,11.27,114800.0,1296517.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:31:00',11.3,11.28,11.3,11.28,36500.0,411963.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:32:00',11.28,11.3,11.3,11.27,86600.0,976856.0,11.35)") + 
tdSql.execute(f"insert into tb values ('2020-01-03 13:33:00',11.3,11.31,11.32,11.3,84400.0,954039.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:34:00',11.32,11.33,11.33,11.32,65600.0,742678.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:35:00',11.33,11.31,11.33,11.3,69500.0,786158.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:36:00',11.32,11.35,11.35,11.31,113900.0,1290451.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:37:00',11.34,11.33,11.34,11.32,28800.0,326397.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:38:00',11.33,11.34,11.34,11.32,57900.0,656048.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:39:00',11.34,11.35,11.35,11.34,56300.0,638545.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:40:00',11.35,11.35,11.35,11.31,167700.0,1902036.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:41:00',11.35,11.32,11.35,11.31,30300.0,343412.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:42:00',11.32,11.33,11.33,11.32,37600.0,425811.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:43:00',11.33,11.33,11.33,11.32,30500.0,345472.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:44:00',11.33,11.34,11.34,11.32,101700.0,1152481.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:45:00',11.34,11.34,11.35,11.33,95600.0,1084045.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:46:00',11.34,11.35,11.35,11.33,136400.0,1547277.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:47:00',11.34,11.34,11.35,11.33,78900.0,894651.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:48:00',11.34,11.35,11.35,11.34,121000.0,1372921.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:49:00',11.35,11.35,11.35,11.34,155300.0,1762140.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:50:00',11.35,11.34,11.35,11.33,87200.0,988809.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:51:00',11.33,11.34,11.34,11.32,67500.0,764699.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:52:00',11.34,11.34,11.34,11.33,31000.0,351343.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:53:00',11.34,11.33,11.34,11.32,76700.0,869290.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:54:00',11.34,11.34,11.34,11.33,72000.0,816172.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:55:00',11.34,11.33,11.34,11.32,42700.0,484008.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:56:00',11.33,11.35,11.35,11.33,97800.0,1109167.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:57:00',11.35,11.34,11.35,11.34,86000.0,975752.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:58:00',11.34,11.35,11.35,11.34,79900.0,906431.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:59:00',11.35,11.35,11.36,11.33,136400.0,1548251.6100000143,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:00:00',11.36,11.35,11.36,11.35,102100.0,1159249.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:01:00',11.35,11.36,11.36,11.35,89500.0,1016304.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:02:00',11.36,11.34,11.36,11.33,74400.0,844382.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:03:00',11.34,11.36,11.36,11.33,230300.0,2613617.0,11.35)") + tdSql.execute(f"insert 
into tb values ('2020-01-03 14:04:00',11.35,11.36,11.36,11.35,164200.0,1864718.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:05:00',11.36,11.35,11.36,11.35,38500.0,437274.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:06:00',11.36,11.35,11.36,11.35,169800.0,1928068.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:07:00',11.36,11.4,11.4,11.36,536539.0,6100661.039999992,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:08:00',11.4,11.41,11.42,11.39,389800.0,4444539.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:09:00',11.4,11.41,11.41,11.4,204300.0,2331075.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:10:00',11.4,11.41,11.41,11.4,129400.0,1475937.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:11:00',11.41,11.43,11.43,11.41,218700.0,2497043.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:12:00',11.42,11.44,11.44,11.42,136100.0,1555541.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:13:00',11.44,11.54,11.54,11.43,1018100.0,11692577.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:14:00',11.52,11.51,11.53,11.5,422400.0,4866632.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:15:00',11.51,11.48,11.52,11.47,227100.0,2612523.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:16:00',11.49,11.49,11.5,11.48,198900.0,2285029.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:17:00',11.49,11.5,11.51,11.49,334700.0,3849974.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:18:00',11.5,11.5,11.5,11.48,253100.0,2910151.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:19:00',11.5,11.5,11.51,11.5,115400.0,1327508.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:20:00',11.5,11.54,11.54,11.5,832200.0,9587661.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:21:00',11.53,11.54,11.54,11.52,247600.0,2856077.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:22:00',11.54,11.53,11.54,11.52,69100.0,796877.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:23:00',11.52,11.52,11.53,11.51,106400.0,1226086.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:24:00',11.52,11.52,11.52,11.51,37100.0,427228.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:25:00',11.52,11.52,11.53,11.51,86500.0,996272.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:26:00',11.52,11.52,11.52,11.51,39400.0,453704.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:27:00',11.52,11.52,11.53,11.51,116597.0,1343284.4399999976,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:28:00',11.52,11.51,11.53,11.51,57400.0,660893.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:29:00',11.51,11.51,11.52,11.5,100600.0,1157549.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:30:00',11.51,11.5,11.51,11.5,32600.0,374987.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:31:00',11.5,11.45,11.51,11.45,92900.0,1067509.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:32:00',11.45,11.47,11.47,11.45,62700.0,719896.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:33:00',11.45,11.5,11.51,11.45,65100.0,747497.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:34:00',11.5,11.5,11.5,11.48,116900.0,1344482.0,11.35)") + tdSql.execute(f"insert into tb values 
('2020-01-03 14:35:00',11.5,11.5,11.5,11.49,67500.0,776201.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:36:00',11.5,11.49,11.51,11.46,112000.0,1287819.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:37:00',11.49,11.45,11.49,11.45,51000.0,584774.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:38:00',11.45,11.45,11.45,11.43,48300.0,552505.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:39:00',11.44,11.45,11.46,11.43,99100.0,1133866.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:40:00',11.42,11.43,11.45,11.4,188100.0,2148202.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:41:00',11.43,11.45,11.45,11.42,203400.0,2325646.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:42:00',11.45,11.44,11.45,11.43,232900.0,2664436.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:43:00',11.44,11.45,11.45,11.44,207700.0,2377790.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:44:00',11.44,11.47,11.47,11.44,313400.0,3590603.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:45:00',11.47,11.46,11.47,11.46,256700.0,2944280.99999997,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:46:00',11.45,11.48,11.48,11.45,220754.0,2531230.380000055,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:47:00',11.48,11.48,11.48,11.47,262500.0,3013403.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:48:00',11.48,11.47,11.48,11.46,55146.0,632740.6199999452,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:49:00',11.46,11.43,11.46,11.43,44500.0,509405.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:50:00',11.44,11.4,11.44,11.4,138000.0,1575039.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:51:00',11.4,11.42,11.43,11.4,27700.0,316131.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:52:00',11.42,11.43,11.44,11.41,84700.0,968231.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:53:00',11.43,11.44,11.45,11.43,30000.0,343186.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:54:00',11.43,11.43,11.44,11.43,64517.0,737698.3100000024,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:55:00',11.43,11.42,11.44,11.42,84600.0,967217.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:56:00',11.42,11.43,11.43,11.41,92300.0,1053894.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:57:00',11.42,11.42,11.42,11.41,39200.0,447526.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 15:00:00',11.42,11.42,11.42,11.42,172054.0,1964856.6800000072,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:31:00',11.55,11.6,11.65,11.55,907400.0,10511377.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:32:00',11.6,11.63,11.64,11.59,551900.0,6414024.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:33:00',11.64,11.7,11.71,11.64,591300.0,6900202.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:34:00',11.7,11.76,11.76,11.7,626000.0,7350773.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:35:00',11.77,11.73,11.78,11.73,551600.0,6482611.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:36:00',11.73,11.73,11.74,11.71,376500.0,4414561.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:37:00',11.72,11.68,11.73,11.68,307100.0,3594829.0,11.42)") + tdSql.execute(f"insert 
into tb values ('2020-01-06 09:38:00',11.66,11.62,11.7,11.61,441200.0,5139836.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:39:00',11.61,11.62,11.64,11.6,335900.0,3902614.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:40:00',11.63,11.58,11.64,11.55,530800.0,6161605.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:41:00',11.55,11.54,11.58,11.53,223000.0,2577635.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:42:00',11.55,11.56,11.57,11.52,244400.0,2821957.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:43:00',11.55,11.58,11.58,11.55,346300.0,4006971.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:44:00',11.57,11.56,11.57,11.55,199700.0,2307770.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:45:00',11.56,11.56,11.58,11.56,188200.0,2177986.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:46:00',11.57,11.54,11.57,11.54,188500.0,2179350.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:47:00',11.54,11.53,11.56,11.53,140500.0,1622212.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:48:00',11.54,11.49,11.54,11.49,274100.0,3157452.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:49:00',11.49,11.52,11.52,11.48,249800.0,2872306.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:50:00',11.52,11.53,11.53,11.52,354000.0,4081214.620000005,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:51:00',11.53,11.53,11.54,11.52,143158.0,1650281.7399999946,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:52:00',11.54,11.53,11.54,11.51,308400.0,3556988.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:53:00',11.55,11.59,11.6,11.54,418100.0,4838869.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:54:00',11.59,11.58,11.6,11.58,134600.0,1559841.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:55:00',11.58,11.54,11.58,11.54,86000.0,993791.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:56:00',11.54,11.55,11.55,11.52,31900.0,367830.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:57:00',11.54,11.54,11.55,11.54,27500.0,317423.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:58:00',11.54,11.53,11.55,11.53,45100.0,520180.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:59:00',11.53,11.52,11.54,11.52,52000.0,599342.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:00:00',11.52,11.53,11.53,11.51,31100.0,358222.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:01:00',11.52,11.5,11.52,11.5,116100.0,1336027.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:02:00',11.5,11.5,11.5,11.49,67600.0,776937.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:03:00',11.49,11.48,11.5,11.48,94900.0,1090210.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:04:00',11.49,11.46,11.49,11.45,159660.0,1830713.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:05:00',11.45,11.44,11.47,11.44,447140.0,5118904.799999997,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:06:00',11.44,11.5,11.5,11.44,329660.0,3781580.600000009,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:07:00',11.5,11.5,11.5,11.49,140100.0,1610940.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:08:00',11.5,11.5,11.5,11.49,59900.0,688429.0,11.42)") + 
tdSql.execute(f"insert into tb values ('2020-01-06 10:09:00',11.5,11.5,11.5,11.48,73200.0,841193.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:10:00',11.5,11.49,11.5,11.48,112700.0,1295175.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:11:00',11.48,11.48,11.49,11.47,104400.0,1198557.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:12:00',11.48,11.49,11.49,11.47,46200.0,530456.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:13:00',11.49,11.47,11.49,11.47,51300.0,588862.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:14:00',11.47,11.48,11.48,11.47,39200.0,449688.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:15:00',11.48,11.46,11.48,11.46,40800.0,468080.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:16:00',11.47,11.46,11.47,11.46,97300.0,1115636.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:17:00',11.46,11.47,11.47,11.46,45300.0,519462.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:18:00',11.47,11.46,11.47,11.46,30600.0,350741.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:19:00',11.46,11.46,11.47,11.46,41900.0,480414.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:20:00',11.47,11.46,11.47,11.46,82800.0,949611.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:21:00',11.46,11.47,11.47,11.46,89300.0,1024060.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:22:00',11.47,11.47,11.48,11.47,14800.0,169762.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:23:00',11.47,11.47,11.48,11.47,49700.0,570458.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:24:00',11.47,11.48,11.48,11.47,140600.0,1613876.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:25:00',11.48,11.49,11.49,11.47,117100.0,1344288.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:26:00',11.48,11.48,11.49,11.47,45000.0,516889.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:27:00',11.48,11.47,11.48,11.47,24900.0,285679.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:28:00',11.47,11.46,11.47,11.46,14200.0,162770.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:29:00',11.46,11.46,11.47,11.46,62300.0,714038.9999999851,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:30:00',11.46,11.45,11.46,11.45,45600.0,522439.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:31:00',11.45,11.44,11.46,11.44,78100.0,894275.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:32:00',11.44,11.44,11.45,11.43,117000.0,1338227.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:33:00',11.43,11.43,11.44,11.42,133125.0,1521295.5,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:34:00',11.43,11.41,11.43,11.4,159775.0,1823829.25,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:35:00',11.41,11.42,11.42,11.4,35500.0,405026.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:36:00',11.41,11.41,11.42,11.41,50300.0,574030.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:37:00',11.41,11.41,11.42,11.41,96300.0,1099034.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:38:00',11.41,11.41,11.41,11.41,74300.0,848177.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:39:00',11.41,11.42,11.42,11.41,39800.0,454243.0,11.42)") + tdSql.execute(f"insert into tb 
values ('2020-01-06 10:40:00',11.41,11.42,11.42,11.41,45800.0,522904.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:41:00',11.42,11.42,11.42,11.41,38700.0,441801.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:42:00',11.42,11.41,11.42,11.41,193100.0,2203593.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:43:00',11.41,11.42,11.42,11.41,90200.0,1030064.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:44:00',11.42,11.42,11.43,11.42,51000.0,582499.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:45:00',11.42,11.42,11.43,11.42,46800.0,534543.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:46:00',11.42,11.42,11.43,11.41,70000.0,799187.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:47:00',11.41,11.41,11.42,11.41,17800.0,203125.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:48:00',11.41,11.41,11.42,11.41,108200.0,1235494.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:49:00',11.41,11.41,11.42,11.41,12700.0,144949.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:50:00',11.41,11.41,11.42,11.41,13400.0,152967.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:51:00',11.42,11.42,11.42,11.41,23200.0,264790.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:52:00',11.42,11.42,11.42,11.41,20000.0,228277.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:53:00',11.42,11.41,11.42,11.41,45600.0,520600.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:54:00',11.41,11.41,11.42,11.41,57700.0,658434.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:55:00',11.41,11.43,11.43,11.41,95100.0,1086338.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:56:00',11.42,11.43,11.44,11.42,194200.0,2219316.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:57:00',11.44,11.44,11.44,11.43,10600.0,121253.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:58:00',11.44,11.44,11.44,11.43,21900.0,250530.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:59:00',11.44,11.43,11.44,11.43,20400.0,233338.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:00:00',11.43,11.43,11.44,11.43,21900.0,250432.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:01:00',11.43,11.44,11.45,11.43,84600.0,967784.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:02:00',11.44,11.44,11.45,11.44,84400.0,966311.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:03:00',11.45,11.45,11.46,11.44,20100.0,230191.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:04:00',11.45,11.45,11.46,11.45,9400.0,107641.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:05:00',11.45,11.45,11.46,11.45,11000.0,126014.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:06:00',11.45,11.46,11.46,11.45,9400.0,107655.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:07:00',11.46,11.45,11.46,11.45,16900.0,193589.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:08:00',11.45,11.45,11.46,11.45,15400.0,176385.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:09:00',11.46,11.45,11.46,11.45,11600.0,132837.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:10:00',11.45,11.45,11.46,11.45,9600.0,109943.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 
11:11:00',11.45,11.45,11.46,11.45,7500.0,85935.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:12:00',11.45,11.45,11.46,11.45,36500.0,418061.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:13:00',11.45,11.46,11.46,11.45,23100.0,264636.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:14:00',11.45,11.45,11.46,11.45,31100.0,356124.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:15:00',11.45,11.46,11.46,11.44,43000.0,492335.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:16:00',11.44,11.45,11.45,11.44,13500.0,154609.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:17:00',11.44,11.44,11.45,11.44,14900.0,170494.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:18:00',11.44,11.43,11.44,11.43,25200.0,288287.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:19:00',11.44,11.44,11.45,11.43,25800.0,295068.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:20:00',11.44,11.44,11.45,11.43,15000.0,171581.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:21:00',11.44,11.45,11.47,11.44,135600.0,1553718.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:22:00',11.47,11.49,11.49,11.44,211800.0,2430527.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:23:00',11.5,11.47,11.52,11.47,467537.0,5376870.129999995,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:24:00',11.47,11.49,11.5,11.47,70700.0,812462.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:25:00',11.49,11.49,11.51,11.49,119800.0,1377527.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:26:00',11.49,11.51,11.52,11.49,101800.0,1171902.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:27:00',11.52,11.51,11.52,11.51,23800.0,274100.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:28:00',11.52,11.52,11.52,11.51,109000.0,1255624.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:29:00',11.52,11.53,11.53,11.52,94100.0,1084933.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:30:00',11.53,11.53,11.54,11.53,46900.0,540799.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:01:00',11.53,11.56,11.56,11.53,225500.0,2604097.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:02:00',11.56,11.6,11.6,11.56,282100.0,3268939.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:03:00',11.6,11.63,11.64,11.59,247500.0,2874935.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:04:00',11.63,11.64,11.66,11.62,214100.0,2493356.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:05:00',11.66,11.67,11.69,11.63,243700.0,2842458.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:06:00',11.66,11.62,11.66,11.6,155300.0,1803920.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:07:00',11.62,11.63,11.63,11.6,35666.0,414469.9200000167,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:08:00',11.63,11.62,11.63,11.58,73800.0,856028.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:09:00',11.62,11.64,11.65,11.62,182200.0,2120971.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:10:00',11.64,11.64,11.65,11.6,186800.0,2172908.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:11:00',11.61,11.61,11.65,11.6,99200.0,1152563.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 
13:12:00',11.62,11.6,11.63,11.59,223000.0,2587443.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:13:00',11.6,11.62,11.64,11.6,80000.0,929742.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:14:00',11.61,11.64,11.64,11.61,27922.0,324605.4199999869,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:15:00',11.64,11.63,11.64,11.62,66900.0,778431.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:16:00',11.63,11.62,11.65,11.62,122700.0,1428421.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:17:00',11.62,11.61,11.63,11.61,41600.0,483220.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:18:00',11.61,11.59,11.61,11.58,27678.0,321095.5800000131,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:19:00',11.59,11.58,11.59,11.56,47800.0,553382.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:20:00',11.58,11.59,11.6,11.56,42500.0,492765.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:21:00',11.6,11.58,11.6,11.58,43000.0,498714.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:22:00',11.59,11.59,11.59,11.58,31800.0,368312.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:23:00',11.59,11.58,11.59,11.57,21400.0,247778.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:24:00',11.58,11.57,11.58,11.56,32900.0,380588.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:25:00',11.56,11.59,11.59,11.56,42500.0,492084.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:26:00',11.58,11.6,11.6,11.58,83800.0,971768.5600000024,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:27:00',11.61,11.6,11.61,11.59,8300.0,96299.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:28:00',11.59,11.59,11.6,11.58,28900.0,334873.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:29:00',11.59,11.56,11.59,11.56,39500.0,457397.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:30:00',11.56,11.56,11.57,11.56,74200.0,858157.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:31:00',11.56,11.57,11.58,11.55,54800.0,633943.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:32:00',11.58,11.62,11.62,11.57,108600.0,1259501.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:33:00',11.62,11.6,11.62,11.6,126200.0,1466016.7800000012,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:34:00',11.6,11.6,11.63,11.6,218481.0,2536886.599999994,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:35:00',11.59,11.59,11.6,11.59,104619.0,1212764.400000006,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:36:00',11.6,11.58,11.6,11.56,97200.0,1126019.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:37:00',11.58,11.6,11.61,11.58,44700.0,518282.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:38:00',11.6,11.62,11.63,11.6,79400.0,922199.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:39:00',11.64,11.65,11.66,11.62,123692.0,1440583.6599999964,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:40:00',11.65,11.66,11.67,11.65,118500.0,1381623.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:41:00',11.65,11.65,11.66,11.64,51600.0,601051.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:42:00',11.65,11.66,11.67,11.65,85032.0,991095.4799999893,11.42)") + tdSql.execute(f"insert into tb values 
('2020-01-06 13:43:00',11.66,11.65,11.67,11.65,53900.0,628537.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:44:00',11.66,11.65,11.67,11.64,75600.0,881482.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:45:00',11.65,11.74,11.74,11.65,556691.0,6512344.060000002,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:46:00',11.74,11.72,11.74,11.71,158700.0,1861696.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:47:00',11.71,11.68,11.72,11.68,106100.0,1241840.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:48:00',11.69,11.68,11.69,11.67,128300.0,1498924.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:49:00',11.67,11.65,11.67,11.65,97709.0,1139307.9399999976,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:50:00',11.65,11.66,11.67,11.64,112791.0,1314383.0600000024,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:51:00',11.67,11.66,11.67,11.65,28600.0,333553.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:52:00',11.65,11.65,11.67,11.65,102700.0,1198039.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:53:00',11.66,11.65,11.67,11.65,92900.0,1082923.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:54:00',11.65,11.67,11.67,11.65,93217.0,1087300.3900000155,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:55:00',11.67,11.67,11.68,11.66,42483.0,495909.6099999845,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:56:00',11.66,11.69,11.69,11.66,72317.0,844650.2199999988,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:57:00',11.69,11.69,11.7,11.67,47000.0,549292.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:58:00',11.68,11.68,11.69,11.67,52500.0,613395.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:59:00',11.67,11.67,11.68,11.66,32900.0,384062.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:00:00',11.66,11.68,11.68,11.66,42400.0,494808.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:01:00',11.67,11.66,11.68,11.65,39983.0,466337.7800000012,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:02:00',11.66,11.65,11.66,11.65,59200.0,690117.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:03:00',11.65,11.65,11.66,11.64,41500.0,483686.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:04:00',11.65,11.67,11.67,11.65,55000.0,641485.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:05:00',11.65,11.66,11.67,11.65,52900.0,616666.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:06:00',11.65,11.64,11.66,11.64,73200.0,852847.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:07:00',11.64,11.63,11.64,11.62,84500.0,983285.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:08:00',11.63,11.64,11.64,11.62,67000.0,779341.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:09:00',11.63,11.59,11.63,11.59,90900.0,1055733.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:10:00',11.59,11.58,11.6,11.57,119300.0,1382200.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:11:00',11.57,11.6,11.6,11.56,66800.0,773697.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:12:00',11.58,11.57,11.6,11.56,106500.0,1233529.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:13:00',11.56,11.57,11.57,11.55,179100.0,2070229.0,11.42)") + 
tdSql.execute(f"insert into tb values ('2020-01-06 14:14:00',11.57,11.58,11.58,11.56,122573.0,1418386.8799999952,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:15:00',11.58,11.6,11.6,11.57,71800.0,832365.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:16:00',11.59,11.59,11.6,11.57,84100.0,975112.5400000215,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:17:00',11.6,11.54,11.6,11.54,131527.0,1520418.3899999857,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:18:00',11.54,11.54,11.54,11.53,252300.0,2911447.00000003,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:19:00',11.55,11.55,11.55,11.53,106700.0,1232067.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:20:00',11.54,11.57,11.57,11.54,97473.0,1126292.1499999762,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:21:00',11.55,11.57,11.57,11.55,35800.0,414117.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:22:00',11.57,11.58,11.58,11.56,70900.0,820610.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:23:00',11.57,11.56,11.58,11.56,69300.0,801902.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:24:00',11.57,11.59,11.59,11.55,72700.0,841462.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:25:00',11.59,11.6,11.61,11.58,68500.0,794432.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:26:00',11.6,11.64,11.65,11.6,271400.0,3158497.0799999833,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:27:00',11.64,11.63,11.64,11.6,76700.0,891695.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:28:00',11.63,11.66,11.66,11.61,179698.0,2092381.699999988,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:29:00',11.66,11.69,11.7,11.66,287400.0,3357436.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:30:00',11.66,11.69,11.7,11.66,76200.0,890817.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:31:00',11.69,11.68,11.69,11.67,73300.0,856869.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:32:00',11.68,11.69,11.7,11.68,159200.0,1861908.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:33:00',11.71,11.7,11.71,11.69,107600.0,1258925.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:34:00',11.71,11.73,11.73,11.7,123200.0,1443006.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:35:00',11.73,11.71,11.73,11.71,72400.0,848655.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:36:00',11.71,11.71,11.72,11.71,89600.0,1049816.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:37:00',11.71,11.7,11.72,11.7,49500.0,579364.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:38:00',11.7,11.7,11.71,11.7,49890.0,583867.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:39:00',11.7,11.7,11.71,11.7,69100.0,808827.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:40:00',11.71,11.7,11.71,11.69,273000.0,3194146.100000024,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:41:00',11.7,11.71,11.71,11.69,335100.0,3921236.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:42:00',11.71,11.7,11.71,11.69,225400.0,2637167.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:43:00',11.7,11.7,11.71,11.69,45200.0,528885.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 
14:44:00',11.71,11.7,11.71,11.69,48276.0,564936.1999999881,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:45:00',11.7,11.68,11.7,11.67,24500.0,286204.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:46:00',11.67,11.7,11.71,11.67,109654.0,1282759.2599999905,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:47:00',11.71,11.72,11.72,11.71,165489.0,1938800.1899999976,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:48:00',11.72,11.73,11.73,11.72,75900.0,889822.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:49:00',11.73,11.72,11.73,11.72,80500.0,943968.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:50:00',11.72,11.73,11.73,11.72,124200.0,1456271.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:51:00',11.73,11.75,11.75,11.73,390600.0,4586597.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:52:00',11.74,11.78,11.78,11.74,563135.0,6625838.950000048,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:53:00',11.78,11.78,11.78,11.76,270700.0,3188454.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:54:00',11.79,11.81,11.82,11.78,688760.0,8125004.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:55:00',11.81,11.8,11.81,11.78,284500.0,3357734.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:56:00',11.8,11.77,11.8,11.73,431600.0,5073451.199999988,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:57:00',11.76,11.78,11.78,11.76,108700.0,1279095.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 15:00:00',11.78,11.78,11.78,11.78,229300.0,2701154.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:31:00',11.56,11.59,11.65,11.56,357700.0,4145450.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:32:00',11.6,11.61,11.63,11.59,169200.0,1964288.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:33:00',11.6,11.62,11.62,11.6,159000.0,1846577.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:34:00',11.62,11.6,11.63,11.6,252705.0,2934441.9000000004,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:35:00',11.61,11.59,11.61,11.53,461495.0,5341009.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:36:00',11.58,11.64,11.64,11.58,161200.0,1871636.9999999981,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:37:00',11.64,11.7,11.7,11.63,364400.0,4250718.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:38:00',11.7,11.72,11.72,11.66,282400.0,3301811.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:39:00',11.69,11.72,11.74,11.69,239200.0,2804372.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:40:00',11.73,11.78,11.8,11.73,247800.0,2915690.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:41:00',11.78,11.81,11.82,11.78,283837.0,3347461.490000002,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:42:00',11.8,11.77,11.8,11.76,119463.0,1407704.7699999958,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:43:00',11.75,11.77,11.78,11.74,170600.0,2005333.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:44:00',11.77,11.77,11.78,11.71,238900.0,2811370.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:45:00',11.77,11.75,11.77,11.73,57500.0,675187.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
09:46:00',11.72,11.7,11.72,11.7,53500.0,626781.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:47:00',11.7,11.72,11.72,11.68,100000.0,1168537.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:48:00',11.72,11.7,11.72,11.7,50800.0,594895.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:49:00',11.69,11.69,11.7,11.67,173900.0,2032259.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:50:00',11.66,11.68,11.69,11.66,92000.0,1074240.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:51:00',11.68,11.69,11.69,11.68,51300.0,599440.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:52:00',11.69,11.72,11.72,11.69,86600.0,1013644.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:53:00',11.72,11.74,11.75,11.71,86292.0,1012255.1600000039,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:54:00',11.74,11.71,11.74,11.69,86508.0,1013349.9200000018,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:55:00',11.7,11.72,11.73,11.68,58300.0,682105.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:56:00',11.72,11.68,11.72,11.67,75700.0,885433.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:57:00',11.68,11.69,11.7,11.67,87200.0,1018693.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:58:00',11.69,11.68,11.69,11.67,33900.0,395908.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:59:00',11.68,11.68,11.69,11.67,51300.0,599246.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:00:00',11.67,11.67,11.67,11.65,79500.0,927279.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:01:00',11.68,11.65,11.68,11.64,100200.0,1167971.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:02:00',11.65,11.64,11.65,11.62,132400.0,1540544.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:03:00',11.64,11.66,11.66,11.62,101000.0,1175748.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:04:00',11.66,11.67,11.67,11.65,45900.0,535318.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:05:00',11.67,11.64,11.67,11.62,100900.0,1174982.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:06:00',11.64,11.7,11.71,11.62,270600.0,3159453.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:07:00',11.7,11.72,11.76,11.65,305700.0,3587904.9999999925,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:08:00',11.72,11.72,11.75,11.7,19300.0,225823.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:09:00',11.72,11.73,11.74,11.72,46000.0,539458.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:10:00',11.73,11.74,11.75,11.73,29100.0,341638.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:11:00',11.74,11.71,11.75,11.71,61000.0,715473.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:12:00',11.72,11.72,11.74,11.7,76400.0,894294.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:13:00',11.72,11.75,11.82,11.72,271300.0,3196803.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:14:00',11.8,11.84,11.85,11.8,210500.0,2489891.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:15:00',11.84,11.84,11.84,11.81,155800.0,1843780.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:16:00',11.84,11.94,11.95,11.84,355200.0,4231674.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
10:17:00',11.94,11.92,11.96,11.9,476500.0,5683660.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:18:00',11.92,11.91,11.93,11.9,213100.0,2538189.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:19:00',11.9,11.89,11.92,11.89,165800.0,1974602.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:20:00',11.89,11.91,11.91,11.89,168289.0,2003160.5900000036,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:21:00',11.91,11.9,11.91,11.88,212451.0,2528443.4099999964,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:22:00',11.89,11.92,11.93,11.89,180864.0,2153663.0900000036,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:23:00',11.92,11.95,11.96,11.92,137700.0,1644217.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:24:00',11.95,12.0,12.0,11.95,535700.0,6420378.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:25:00',12.0,12.04,12.04,12.0,297600.0,3578211.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:26:00',12.02,12.0,12.02,11.98,253100.0,3038148.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:27:00',12.0,12.0,12.01,11.98,194400.0,2332786.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:28:00',12.0,12.0,12.0,11.95,379000.0,4543717.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:29:00',12.0,12.04,12.04,12.0,198400.0,2385253.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:30:00',12.03,12.01,12.05,12.01,166000.0,1996865.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:31:00',12.0,11.97,12.01,11.97,181000.0,2169715.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:32:00',11.97,11.99,11.99,11.96,140900.0,1688177.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:33:00',11.98,11.95,11.98,11.94,111332.0,1331464.7600000054,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:34:00',11.96,12.0,12.01,11.94,167800.0,2009925.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:35:00',12.01,12.01,12.01,11.99,126400.0,1517584.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:36:00',11.99,12.0,12.01,11.98,212000.0,2543336.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:37:00',12.0,12.0,12.0,11.99,116900.0,1402478.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:38:00',12.0,12.0,12.0,11.98,129600.0,1555010.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:39:00',11.99,12.0,12.01,11.98,80900.0,970864.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:40:00',12.0,12.0,12.01,11.99,126700.0,1521066.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:41:00',12.0,12.0,12.01,11.99,76600.0,919096.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:42:00',12.0,12.0,12.01,11.99,74000.0,887990.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:43:00',12.0,12.0,12.02,12.0,83000.0,996610.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:44:00',12.0,12.02,12.03,12.0,58700.0,705384.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:45:00',12.02,12.0,12.03,12.0,52500.0,631011.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:46:00',12.0,11.95,12.0,11.95,130100.0,1559162.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:47:00',11.95,11.92,11.95,11.92,126168.0,1506029.2400000095,11.78)") + tdSql.execute(f"insert into tb values 
('2020-01-07 10:48:00',11.92,11.93,11.93,11.9,203532.0,2424166.799999982,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:49:00',11.93,11.93,11.96,11.93,101990.0,1216607.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:50:00',11.91,11.9,11.95,11.9,127800.0,1521916.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:51:00',11.91,11.91,11.94,11.9,25900.0,308867.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:52:00',11.91,11.95,11.95,11.91,53500.0,639014.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:53:00',11.94,11.96,11.97,11.93,51400.0,614166.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:54:00',11.97,11.95,11.97,11.95,58500.0,699540.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:55:00',11.95,11.97,11.98,11.95,38200.0,457136.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:56:00',11.98,11.97,11.99,11.95,158300.0,1894970.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:57:00',11.97,11.97,11.98,11.95,40600.0,485976.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:58:00',11.97,11.99,12.0,11.96,161300.0,1932126.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:59:00',11.99,12.01,12.01,11.99,51900.0,622760.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:00:00',12.0,11.98,12.01,11.98,47800.0,573467.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:01:00',11.98,11.99,12.0,11.98,27600.0,330951.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:02:00',11.99,11.99,11.99,11.98,18100.0,216966.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:03:00',11.99,11.98,11.99,11.98,35800.0,429223.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:04:00',11.98,11.98,11.99,11.98,68300.0,818610.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:05:00',11.98,11.97,11.99,11.97,42500.0,509153.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:06:00',11.97,11.99,11.99,11.96,34700.0,415733.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:07:00',11.99,11.97,12.0,11.96,21100.0,252874.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:08:00',11.97,11.96,11.99,11.96,24800.0,296937.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:09:00',11.96,11.95,11.98,11.95,29500.0,353064.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:10:00',11.95,11.95,11.96,11.95,33700.0,402950.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:11:00',11.96,11.96,11.97,11.95,36200.0,433011.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:12:00',11.96,11.96,11.97,11.96,16400.0,196199.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:13:00',11.96,11.97,11.98,11.96,10500.0,125645.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:14:00',11.97,11.97,11.98,11.97,15300.0,183249.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:15:00',11.98,11.99,11.99,11.98,45500.0,545452.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:16:00',11.99,12.01,12.02,11.99,151560.0,1819663.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:17:00',12.01,12.03,12.03,12.01,88900.0,1068800.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:18:00',12.02,12.03,12.03,12.01,145800.0,1754099.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
11:19:00',12.03,12.06,12.06,12.03,265200.0,3196467.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:20:00',12.06,12.08,12.08,12.06,94200.0,1136842.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:21:00',12.08,12.08,12.09,12.07,215467.0,2602713.0200000107,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:22:00',12.08,12.12,12.13,12.08,189310.0,2291281.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:23:00',12.13,12.09,12.14,12.09,150000.0,1818077.900000006,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:24:00',12.09,12.1,12.12,12.09,64300.0,778390.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:25:00',12.1,12.1,12.1,12.09,68500.0,828968.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:26:00',12.11,12.09,12.12,12.09,120700.0,1460715.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:27:00',12.09,12.08,12.1,12.08,109690.0,1326097.099999994,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:28:00',12.08,12.06,12.09,12.06,92400.0,1116280.900000006,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:29:00',12.06,12.06,12.08,12.06,57600.0,695067.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:30:00',12.07,12.09,12.09,12.06,26500.0,319971.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:01:00',12.09,12.07,12.1,12.07,129400.0,1564192.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:02:00',12.07,12.05,12.07,12.05,113333.0,1367652.9799999893,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:03:00',12.06,12.04,12.06,12.04,61400.0,739896.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:04:00',12.04,12.02,12.05,12.02,89300.0,1074425.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:05:00',12.02,12.01,12.03,12.01,106200.0,1276014.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:06:00',12.01,11.99,12.02,11.99,86840.0,1042156.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:07:00',11.99,12.0,12.02,11.99,92660.0,1111983.400000006,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:08:00',12.01,12.0,12.01,12.0,85900.0,1031328.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:09:00',12.0,11.98,12.0,11.97,57240.0,685716.599999994,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:10:00',11.97,11.96,11.97,11.96,68800.0,823265.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:11:00',11.96,11.95,11.96,11.93,97000.0,1158998.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:12:00',11.96,11.96,11.96,11.93,42860.0,512292.59999999404,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:13:00',11.96,11.98,11.98,11.96,59000.0,706154.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:14:00',11.98,11.98,12.0,11.97,73140.0,876933.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:15:00',11.97,11.95,11.98,11.95,73500.0,879475.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:16:00',11.96,11.96,11.98,11.95,44400.0,530969.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:17:00',11.96,11.94,11.96,11.94,36100.0,431383.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:18:00',11.94,11.93,11.95,11.93,46900.0,559719.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:19:00',11.93,11.89,11.93,11.89,145900.0,1737848.0,11.78)") + 
tdSql.execute(f"insert into tb values ('2020-01-07 13:20:00',11.89,11.91,11.93,11.88,65300.0,777135.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:21:00',11.93,11.92,11.93,11.9,72100.0,858756.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:22:00',11.91,11.92,11.92,11.91,43400.0,517449.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:23:00',11.92,11.92,11.93,11.92,76200.0,908618.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:24:00',11.9,11.9,11.91,11.89,77600.0,923978.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:25:00',11.9,11.89,11.91,11.89,77400.0,921197.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:26:00',11.89,11.89,11.91,11.89,52500.0,624914.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:27:00',11.9,11.92,11.92,11.9,73200.0,871577.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:28:00',11.92,11.94,11.94,11.92,68900.0,821981.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:29:00',11.94,11.92,11.94,11.91,34500.0,411353.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:30:00',11.92,11.91,11.92,11.91,23300.0,277678.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:31:00',11.91,11.91,11.92,11.91,57000.0,679335.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:32:00',11.91,11.89,11.91,11.89,83400.0,992819.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:33:00',11.89,11.87,11.89,11.86,210600.0,2501788.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:34:00',11.88,11.81,11.88,11.81,260200.0,3078808.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:35:00',11.8,11.86,11.86,11.8,162200.0,1920478.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:36:00',11.86,11.84,11.87,11.83,69900.0,828699.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:37:00',11.84,11.82,11.85,11.82,51600.0,610763.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:38:00',11.83,11.8,11.83,11.79,141300.0,1668275.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:39:00',11.8,11.8,11.8,11.77,165800.0,1954307.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:40:00',11.8,11.8,11.8,11.78,82800.0,976549.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:41:00',11.8,11.81,11.83,11.79,79800.0,942170.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:42:00',11.81,11.81,11.83,11.8,50600.0,597531.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:43:00',11.8,11.79,11.81,11.79,109500.0,1292148.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:44:00',11.79,11.78,11.79,11.78,79300.0,934697.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:45:00',11.78,11.8,11.81,11.78,82700.0,975597.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:46:00',11.81,11.82,11.82,11.8,40800.0,481780.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:47:00',11.81,11.81,11.82,11.8,80500.0,950273.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:48:00',11.79,11.79,11.81,11.78,94900.0,1119019.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:49:00',11.78,11.78,11.79,11.78,99700.0,1174646.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:50:00',11.78,11.76,11.78,11.75,90300.0,1061993.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
13:51:00',11.75,11.73,11.75,11.72,185300.0,2172829.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:52:00',11.73,11.73,11.76,11.71,97700.0,1145413.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:53:00',11.73,11.77,11.77,11.73,37800.0,444156.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:54:00',11.77,11.78,11.78,11.77,43000.0,506361.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:55:00',11.78,11.77,11.78,11.77,81200.0,955961.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:56:00',11.77,11.77,11.78,11.76,36400.0,428338.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:57:00',11.77,11.77,11.77,11.76,59800.0,703499.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:58:00',11.76,11.79,11.79,11.76,52300.0,616201.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:59:00',11.79,11.79,11.79,11.78,58100.0,684769.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:00:00',11.8,11.78,11.8,11.78,48100.0,567104.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:01:00',11.78,11.78,11.79,11.77,43800.0,515912.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:02:00',11.78,11.77,11.78,11.76,34500.0,406061.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:03:00',11.77,11.77,11.77,11.76,27700.0,326032.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:04:00',11.77,11.76,11.77,11.75,80000.0,940777.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:05:00',11.76,11.75,11.76,11.75,66500.0,781523.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:06:00',11.75,11.74,11.76,11.74,196900.0,2312718.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:07:00',11.75,11.75,11.76,11.73,153100.0,1797798.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:08:00',11.75,11.76,11.76,11.74,106600.0,1252299.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:09:00',11.74,11.76,11.76,11.74,84500.0,992785.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:10:00',11.76,11.76,11.77,11.76,47700.0,561058.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:11:00',11.76,11.77,11.78,11.76,47100.0,554192.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:12:00',11.76,11.77,11.78,11.76,27800.0,327194.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:13:00',11.77,11.78,11.78,11.75,51100.0,601394.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:14:00',11.76,11.8,11.8,11.75,135000.0,1588455.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:15:00',11.8,11.79,11.8,11.78,36100.0,425523.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:16:00',11.8,11.79,11.8,11.78,113500.0,1338296.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:17:00',11.79,11.79,11.8,11.78,45000.0,530429.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:18:00',11.79,11.8,11.8,11.78,49200.0,580234.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:19:00',11.8,11.82,11.82,11.8,43200.0,510430.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:20:00',11.83,11.86,11.86,11.83,58900.0,697146.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:21:00',11.85,11.81,11.86,11.81,78300.0,926567.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
14:22:00',11.82,11.82,11.82,11.81,37700.0,445321.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:23:00',11.8,11.8,11.82,11.8,27600.0,325903.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:24:00',11.82,11.78,11.82,11.78,33300.0,393267.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:25:00',11.79,11.8,11.8,11.78,38800.0,457648.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:26:00',11.8,11.8,11.8,11.79,11900.0,140406.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:27:00',11.81,11.83,11.83,11.8,62500.0,738130.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:28:00',11.83,11.87,11.87,11.83,61064.0,723757.1200000048,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:29:00',11.86,11.85,11.87,11.85,23400.0,277705.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:30:00',11.86,11.86,11.87,11.8,75100.0,888528.0799999833,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:31:00',11.86,11.87,11.87,11.82,45621.0,539986.2200000286,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:32:00',11.84,11.86,11.87,11.83,43600.0,516837.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:33:00',11.85,11.81,11.86,11.81,40200.0,475623.78999996185,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:34:00',11.83,11.85,11.85,11.81,41579.0,491491.15000003576,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:35:00',11.81,11.84,11.85,11.81,18100.0,214370.78999996185,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:36:00',11.83,11.83,11.85,11.83,20300.0,240353.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:37:00',11.83,11.85,11.86,11.83,59200.0,701452.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:38:00',11.85,11.86,11.86,11.82,101300.0,1200326.7900000215,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:39:00',11.85,11.86,11.87,11.84,23600.0,279738.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:40:00',11.87,11.84,11.87,11.84,45900.0,544341.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:41:00',11.85,11.83,11.86,11.83,48600.0,575773.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:42:00',11.86,11.84,11.86,11.82,77400.0,915829.6200000048,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:43:00',11.83,11.83,11.84,11.81,44300.0,523969.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:44:00',11.83,11.81,11.83,11.81,68900.0,814555.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:45:00',11.81,11.81,11.82,11.81,79100.0,934546.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:46:00',11.81,11.82,11.82,11.81,53900.0,636811.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:47:00',11.82,11.81,11.82,11.8,131100.0,1547666.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:48:00',11.8,11.81,11.81,11.8,38800.0,457967.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:49:00',11.8,11.82,11.82,11.8,58400.0,689759.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:50:00',11.82,11.81,11.82,11.81,66100.0,780881.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:51:00',11.81,11.81,11.82,11.81,13391.0,158199.70999997854,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:52:00',11.82,11.81,11.82,11.79,152600.0,1801575.9900000095,11.78)") + 
tdSql.execute(f"insert into tb values ('2020-01-07 14:53:00',11.82,11.81,11.82,11.8,104300.0,1231667.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:54:00',11.8,11.81,11.82,11.8,141260.0,1667581.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:55:00',11.81,11.8,11.82,11.8,126740.0,1496139.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:56:00',11.79,11.76,11.8,11.76,191345.0,2255231.949999988,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:57:00',11.78,11.78,11.78,11.76,99500.0,1171310.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 15:00:00',11.77,11.77,11.77,11.77,210600.0,2478762.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:31:00',11.86,11.99,11.99,11.86,969300.0,11544577.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:32:00',12.0,12.06,12.08,12.0,732300.0,8812888.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:33:00',12.06,11.93,12.06,11.9,1186100.0,14202394.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:34:00',11.94,11.88,11.95,11.88,725800.0,8640879.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:35:00',11.87,11.92,11.92,11.85,331400.0,3940248.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:36:00',11.91,11.82,11.91,11.82,333300.0,3952700.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:37:00',11.82,11.86,11.86,11.8,269600.0,3190195.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:38:00',11.86,11.8,11.86,11.8,500000.0,5915343.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:39:00',11.83,11.82,11.83,11.78,268300.0,3166061.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:40:00',11.82,11.88,11.88,11.77,246265.0,2911120.8999999985,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:41:00',11.89,11.9,11.94,11.89,207700.0,2475814.0000000075,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:42:00',11.93,11.92,11.93,11.89,149700.0,1782699.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:43:00',11.92,12.01,12.01,11.92,582907.0,6970150.439999998,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:44:00',12.01,11.97,12.01,11.97,283000.0,3392985.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:45:00',11.98,11.98,11.99,11.97,545200.0,6532243.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:46:00',11.98,12.02,12.02,11.97,668400.0,8014970.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:47:00',12.05,12.01,12.05,12.01,337700.0,4061646.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:48:00',12.01,12.02,12.02,12.0,139000.0,1669695.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:49:00',12.01,12.01,12.02,12.01,178100.0,2139348.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:50:00',12.0,12.0,12.01,12.0,121500.0,1458754.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:51:00',12.01,11.99,12.01,11.99,208800.0,2505681.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:52:00',12.0,11.98,12.0,11.98,140400.0,1682153.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:53:00',11.98,12.02,12.02,11.98,275800.0,3308836.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:54:00',12.02,12.02,12.03,12.01,140100.0,1684206.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
09:55:00',12.03,12.04,12.05,12.03,170400.0,2050992.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:56:00',12.05,12.09,12.09,12.03,317900.0,3833499.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:57:00',12.08,12.08,12.09,12.08,345800.0,4179299.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:58:00',12.06,12.03,12.07,12.03,130900.0,1577011.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:59:00',12.01,12.04,12.04,12.01,77900.0,937076.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:00:00',12.04,12.03,12.04,12.02,48800.0,587197.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:01:00',12.03,12.01,12.03,12.0,84900.0,1020105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:02:00',12.01,12.02,12.02,12.0,143300.0,1723109.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:03:00',12.02,12.05,12.07,12.02,156300.0,1883042.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:04:00',12.07,12.13,12.13,12.06,248800.0,3009978.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:05:00',12.13,12.13,12.13,12.11,227700.0,2759920.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:06:00',12.14,12.11,12.14,12.1,212300.0,2572077.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:07:00',12.1,12.14,12.14,12.1,304500.0,3689113.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:08:00',12.15,12.13,12.15,12.11,215100.0,2610990.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:09:00',12.13,12.12,12.13,12.12,44400.0,538354.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:10:00',12.12,12.12,12.13,12.11,72300.0,876546.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:11:00',12.12,12.08,12.12,12.08,220200.0,2665568.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:12:00',12.08,12.1,12.1,12.08,101600.0,1228260.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:13:00',12.11,12.11,12.12,12.1,63800.0,772511.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:14:00',12.12,12.11,12.12,12.1,83600.0,1012696.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:15:00',12.11,12.08,12.11,12.08,120500.0,1458134.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:16:00',12.1,12.05,12.1,12.05,103800.0,1253430.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:17:00',12.05,12.03,12.07,12.03,145000.0,1747767.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:18:00',12.04,12.02,12.04,12.01,164600.0,1979260.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:19:00',12.01,11.99,12.02,11.99,162400.0,1949130.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:20:00',11.98,11.98,12.0,11.97,110500.0,1324680.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:21:00',11.99,11.98,11.99,11.97,31700.0,379668.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:22:00',11.98,11.99,11.99,11.97,76700.0,919152.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:23:00',11.98,11.96,11.99,11.96,114800.0,1375071.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:24:00',11.96,11.93,11.98,11.91,163000.0,1945281.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:25:00',11.93,11.95,11.98,11.93,34500.0,411824.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
10:26:00',11.93,11.94,11.94,11.93,45800.0,546620.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:27:00',11.93,11.93,11.94,11.93,72200.0,861402.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:28:00',11.94,11.93,11.94,11.93,83900.0,1001456.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:29:00',11.94,11.95,11.96,11.93,31200.0,372555.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:30:00',11.96,11.98,11.98,11.96,20100.0,240573.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:31:00',11.97,11.96,11.98,11.96,22000.0,263405.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:32:00',11.96,11.98,12.0,11.96,30700.0,368045.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:33:00',12.0,11.97,12.0,11.97,38200.0,457647.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:34:00',11.97,12.0,12.0,11.97,17900.0,214645.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:35:00',12.0,11.96,12.0,11.96,102000.0,1221077.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:36:00',11.96,11.95,11.98,11.95,34100.0,407809.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:37:00',11.95,11.94,11.95,11.94,17100.0,204271.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:38:00',11.94,11.94,11.95,11.93,10700.0,127751.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:39:00',11.93,11.93,11.95,11.93,47100.0,562049.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:40:00',11.94,11.94,11.94,11.93,31900.0,380739.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:41:00',11.93,11.94,11.95,11.93,17200.0,205353.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:42:00',11.94,11.96,11.97,11.94,19800.0,236641.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:43:00',11.96,11.95,11.97,11.95,48200.0,576622.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:44:00',11.98,11.95,11.98,11.95,8200.0,98105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:45:00',11.96,11.97,11.97,11.95,23900.0,285778.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:46:00',11.97,11.97,11.98,11.95,33200.0,397359.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:47:00',11.97,11.97,11.98,11.95,30500.0,365205.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:48:00',11.96,11.94,11.97,11.94,41200.0,492462.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:49:00',11.94,11.93,11.94,11.92,104700.0,1248368.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:50:00',11.94,11.93,11.94,11.92,25200.0,300656.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:51:00',11.93,11.91,11.93,11.91,170200.0,2028315.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:52:00',11.91,11.91,11.91,11.9,65400.0,778657.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:53:00',11.9,11.88,11.9,11.88,86800.0,1032232.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:54:00',11.89,11.89,11.89,11.88,34400.0,408873.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:55:00',11.88,11.89,11.89,11.88,51000.0,606105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:56:00',11.89,11.92,11.92,11.89,82400.0,980813.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
10:57:00',11.92,11.95,11.95,11.92,79600.0,950668.4300000072,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:58:00',11.95,11.95,11.95,11.92,80900.0,965951.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:59:00',11.95,11.95,11.96,11.94,68300.0,816046.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:00:00',11.95,11.96,11.96,11.95,19100.0,228402.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:01:00',11.96,11.93,11.96,11.93,58700.0,701503.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:02:00',11.93,11.89,11.95,11.89,71800.0,855173.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:03:00',11.89,11.89,11.9,11.88,28300.0,336423.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:04:00',11.89,11.88,11.9,11.88,55900.0,664750.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:05:00',11.89,11.9,11.92,11.88,35100.0,417661.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:06:00',11.91,11.92,11.92,11.91,21600.0,257313.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:07:00',11.92,11.91,11.92,11.9,28000.0,333443.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:08:00',11.91,11.91,11.91,11.9,15500.0,184557.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:09:00',11.91,11.93,11.93,11.9,77500.0,923605.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:10:00',11.93,11.93,11.93,11.92,35200.0,419758.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:11:00',11.93,11.92,11.93,11.91,35200.0,419427.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:12:00',11.91,11.89,11.92,11.89,20400.0,242863.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:13:00',11.89,11.9,11.9,11.89,18400.0,218869.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:14:00',11.9,11.9,11.9,11.89,29800.0,354472.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:15:00',11.9,11.88,11.9,11.88,59500.0,707013.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:16:00',11.87,11.84,11.88,11.84,106200.0,1259777.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:17:00',11.84,11.82,11.84,11.81,62500.0,738601.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:18:00',11.82,11.81,11.82,11.8,130500.0,1541133.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:19:00',11.81,11.81,11.82,11.8,53200.0,628256.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:20:00',11.82,11.83,11.83,11.81,89400.0,1056532.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:21:00',11.83,11.9,11.9,11.82,294500.0,3497211.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:22:00',11.88,11.86,11.89,11.86,110100.0,1307920.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:23:00',11.86,11.86,11.87,11.86,23800.0,282433.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:24:00',11.87,11.86,11.87,11.84,22100.0,261970.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:25:00',11.86,11.85,11.87,11.85,78200.0,927491.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:26:00',11.86,11.86,11.86,11.85,38200.0,452428.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:27:00',11.85,11.86,11.86,11.84,9400.0,111425.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
11:28:00',11.87,11.86,11.87,11.85,6500.0,77089.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:29:00',11.86,11.85,11.86,11.85,8300.0,98418.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:30:00',11.85,11.84,11.86,11.83,9400.0,111329.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:01:00',11.84,11.81,11.84,11.81,227900.0,2694345.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:02:00',11.82,11.82,11.82,11.8,30600.0,361511.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:03:00',11.81,11.81,11.82,11.81,13600.0,160657.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:04:00',11.85,11.81,11.85,11.8,77900.0,920352.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:05:00',11.81,11.81,11.84,11.8,62800.0,742590.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:06:00',11.81,11.83,11.85,11.81,202900.0,2402548.419999987,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:07:00',11.82,11.9,11.9,11.82,50058.0,593918.3000000119,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:08:00',11.89,11.93,11.93,11.86,117800.0,1402898.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:09:00',11.93,11.93,11.94,11.9,111100.0,1324350.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:10:00',11.93,11.9,11.96,11.9,135900.0,1622071.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:11:00',11.9,11.94,11.95,11.9,82900.0,988800.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:12:00',11.94,11.97,11.97,11.93,62600.0,748523.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:13:00',11.98,12.0,12.0,11.97,105800.0,1268615.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:14:00',11.99,11.99,12.0,11.98,70600.0,846399.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:15:00',11.99,12.02,12.02,11.97,455658.0,5471984.159999996,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:16:00',12.03,12.08,12.09,12.01,190300.0,2294897.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:17:00',12.08,12.11,12.12,12.07,397000.0,4803242.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:18:00',12.1,12.1,12.17,12.01,924900.0,11236227.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:19:00',12.1,12.19,12.19,12.1,367100.0,4472429.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:20:00',12.19,12.16,12.19,12.16,200100.0,2437388.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:21:00',12.15,12.09,12.15,12.08,127600.0,1544602.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:22:00',12.1,12.12,12.13,12.08,113900.0,1378052.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:23:00',12.12,12.1,12.12,12.09,107600.0,1301942.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:24:00',12.11,12.08,12.11,12.08,109200.0,1319868.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:25:00',12.09,12.1,12.1,12.07,99300.0,1200979.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:26:00',12.09,12.09,12.1,12.08,48600.0,587562.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:27:00',12.09,12.05,12.09,12.05,90300.0,1089928.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:28:00',12.05,12.02,12.06,12.02,84500.0,1016340.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
13:29:00',12.03,12.04,12.04,12.02,64700.0,778018.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:30:00',12.04,12.05,12.05,12.03,52400.0,631005.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:31:00',12.05,12.09,12.1,12.05,53800.0,649968.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:32:00',12.09,12.12,12.12,12.08,262500.0,3176698.99999997,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:33:00',12.13,12.17,12.18,12.12,118900.0,1444982.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:34:00',12.17,12.13,12.18,12.13,122300.0,1487613.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:35:00',12.13,12.14,12.16,12.13,193400.0,2348903.1100000143,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:36:00',12.13,12.13,12.15,12.12,108900.0,1321690.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:37:00',12.12,12.1,12.15,12.1,113600.0,1376982.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:38:00',12.1,12.08,12.12,12.08,178300.0,2157748.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:39:00',12.07,12.08,12.08,12.06,55200.0,666637.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:40:00',12.09,12.12,12.12,12.08,86000.0,1040562.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:41:00',12.11,12.11,12.12,12.11,85800.0,1039755.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:42:00',12.11,12.11,12.13,12.1,243900.0,2952943.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:43:00',12.11,12.1,12.11,12.09,128700.0,1557646.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:44:00',12.09,12.06,12.09,12.06,93100.0,1124181.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:45:00',12.08,12.04,12.09,12.04,87000.0,1049309.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:46:00',12.05,12.08,12.08,12.05,114151.0,1375917.550000012,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:47:00',12.07,12.07,12.08,12.06,84200.0,1016780.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:48:00',12.07,12.06,12.08,12.05,146700.0,1770253.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:49:00',12.07,12.06,12.08,12.06,217800.0,2625084.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:50:00',12.07,12.07,12.08,12.06,102400.0,1236308.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:51:00',12.07,12.07,12.08,12.07,51900.0,626728.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:52:00',12.06,12.08,12.08,12.06,61712.0,744801.8399999738,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:53:00',12.07,12.08,12.08,12.07,76000.0,917855.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:54:00',12.07,12.07,12.08,12.06,100800.0,1216622.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:55:00',12.08,12.04,12.08,12.04,75888.0,915106.1600000262,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:56:00',12.04,12.01,12.04,12.01,49700.0,597576.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:57:00',12.02,12.02,12.02,12.0,117400.0,1409872.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:58:00',12.01,11.99,12.01,11.99,88500.0,1061844.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:59:00',11.99,11.99,11.99,11.97,459400.0,5500281.0,11.77)") + tdSql.execute(f"insert into 
tb values ('2020-01-08 14:00:00',11.97,11.98,11.99,11.97,65400.0,783356.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:01:00',11.99,12.02,12.02,11.98,95100.0,1140802.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:02:00',12.01,12.04,12.05,12.01,89100.0,1072340.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:03:00',12.05,12.04,12.07,12.03,122500.0,1477145.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:04:00',12.04,12.05,12.05,12.04,64300.0,774569.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:05:00',12.04,12.04,12.04,12.02,67900.0,817021.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:06:00',12.04,12.05,12.05,12.02,46200.0,555942.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:07:00',12.03,12.03,12.04,12.02,45500.0,547352.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:08:00',12.02,12.01,12.03,12.0,102300.0,1229247.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:09:00',12.0,12.01,12.02,12.0,31500.0,378402.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:10:00',12.01,12.0,12.01,12.0,41200.0,494819.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:11:00',12.0,11.99,12.02,11.99,56600.0,679339.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:12:00',12.0,11.99,12.0,11.98,48000.0,575773.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:13:00',11.98,11.99,12.0,11.98,25900.0,310571.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:14:00',11.99,11.99,11.99,11.98,26800.0,321210.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:15:00',11.98,11.98,11.99,11.97,22800.0,273152.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:16:00',11.98,11.95,11.98,11.95,99900.0,1195754.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:17:00',11.95,11.92,11.95,11.92,96600.0,1152911.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:18:00',11.91,11.91,11.92,11.9,79900.0,952248.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:19:00',11.92,11.91,11.93,11.91,34800.0,414584.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:20:00',11.91,11.9,11.91,11.9,41500.0,493898.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:21:00',11.89,11.89,11.89,11.88,21500.0,255622.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:22:00',11.88,11.87,11.89,11.87,33500.0,397937.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:23:00',11.88,11.85,11.88,11.85,74600.0,884573.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:24:00',11.84,11.84,11.86,11.83,113600.0,1345079.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:25:00',11.84,11.89,11.89,11.84,147000.0,1742627.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:26:00',11.89,11.9,11.9,11.86,28500.0,338871.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:27:00',11.9,11.92,11.93,11.89,55100.0,656600.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:28:00',11.93,11.9,11.93,11.9,31300.0,372966.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:29:00',11.9,11.92,11.92,11.9,15200.0,180980.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:30:00',11.92,11.92,11.93,11.9,36900.0,439541.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
14:31:00',11.92,11.93,11.93,11.92,31100.0,370979.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:32:00',11.93,11.97,11.98,11.93,47228.0,564658.0400000215,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:33:00',11.96,12.0,12.0,11.96,56100.0,672112.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:34:00',11.99,11.99,11.99,11.98,27700.0,331967.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:35:00',11.99,11.99,12.0,11.99,43400.0,520416.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:36:00',12.0,12.0,12.0,11.98,34000.0,407703.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:37:00',11.99,11.99,12.0,11.99,33500.0,401682.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:38:00',11.99,11.98,11.99,11.98,15100.0,181016.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:39:00',11.98,11.99,11.99,11.98,20700.0,248105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:40:00',11.98,11.96,11.99,11.96,22400.0,268301.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:41:00',11.96,11.97,11.97,11.94,24300.0,290581.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:42:00',11.97,11.97,11.97,11.95,35300.0,422383.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:43:00',11.96,11.95,11.97,11.9,174600.0,2082936.1599999666,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:44:00',11.95,11.94,11.95,11.91,5500.0,65679.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:45:00',11.94,11.94,11.96,11.92,95400.0,1139203.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:46:00',11.94,11.94,11.97,11.92,31400.0,375225.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:47:00',11.97,11.99,11.99,11.94,66957.0,802030.8600000143,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:48:00',11.98,11.99,12.0,11.98,30300.0,363151.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:49:00',11.99,11.98,12.0,11.98,90000.0,1079313.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:50:00',12.0,12.0,12.0,11.99,118883.0,1426176.1700000167,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:51:00',12.0,12.0,12.0,11.99,133000.0,1595759.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:52:00',12.0,11.99,12.0,11.98,122217.0,1465572.8299999833,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:53:00',11.99,11.99,12.0,11.98,210200.0,2520826.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:54:00',12.0,12.01,12.01,12.0,127800.0,1533947.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:55:00',12.02,12.05,12.05,12.01,123300.0,1482774.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:56:00',12.04,12.07,12.08,12.04,154483.0,1863283.3199999928,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:57:00',12.07,12.08,12.09,12.07,232000.0,2802836.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 15:00:00',12.09,12.09,12.09,12.09,293500.0,3548415.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:31:00',11.8,11.96,11.96,11.79,1181452.0,13952825.2,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:32:00',11.93,11.87,11.94,11.84,348500.0,4140560.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:33:00',11.87,11.89,11.92,11.87,277900.0,3304930.0,12.09)") + tdSql.execute(f"insert into tb values 
('2020-01-09 09:34:00',11.89,11.84,11.89,11.8,336600.0,3985417.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:35:00',11.85,11.84,11.87,11.81,303800.0,3595813.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:36:00',11.84,11.83,11.84,11.82,180000.0,2129381.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:37:00',11.84,11.84,11.85,11.83,117800.0,1394355.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:38:00',11.83,11.74,11.83,11.74,1094600.0,12901723.000000004,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:39:00',11.73,11.68,11.75,11.66,564800.0,6611564.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:40:00',11.69,11.76,11.8,11.69,269000.0,3151298.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:41:00',11.76,11.78,11.78,11.74,213500.0,2509286.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:42:00',11.78,11.8,11.8,11.77,155500.0,1832592.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:43:00',11.79,11.76,11.79,11.76,252800.0,2975994.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:44:00',11.77,11.77,11.79,11.75,269400.0,3170422.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:45:00',11.77,11.79,11.8,11.77,140400.0,1654537.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:46:00',11.79,11.8,11.8,11.78,110900.0,1307492.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:47:00',11.8,11.83,11.83,11.8,148400.0,1752863.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:48:00',11.83,11.82,11.83,11.8,169200.0,2000161.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:49:00',11.8,11.82,11.83,11.8,91600.0,1082658.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:50:00',11.83,11.82,11.84,11.81,126000.0,1489754.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:51:00',11.82,11.79,11.82,11.79,111300.0,1313866.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:52:00',11.79,11.76,11.79,11.76,282300.0,3323827.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:53:00',11.76,11.76,11.77,11.75,170500.0,2005499.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:54:00',11.76,11.79,11.79,11.75,130100.0,1531191.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:55:00',11.79,11.8,11.8,11.78,122400.0,1443704.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:56:00',11.8,11.81,11.82,11.79,86300.0,1019182.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:57:00',11.81,11.82,11.83,11.81,57800.0,683367.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:58:00',11.83,11.83,11.83,11.81,84100.0,994313.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:59:00',11.83,11.81,11.83,11.81,96600.0,1141702.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:00:00',11.81,11.84,11.84,11.81,71600.0,846272.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:01:00',11.82,11.82,11.83,11.81,65800.0,777777.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:02:00',11.81,11.83,11.83,11.81,71500.0,845057.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:03:00',11.83,11.86,11.86,11.83,63496.0,752347.6799999923,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:04:00',11.86,11.87,11.89,11.86,122300.0,1451942.0,12.09)") + tdSql.execute(f"insert into tb values 
('2020-01-09 10:05:00',11.86,11.87,11.88,11.86,79700.0,945965.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:06:00',11.88,11.88,11.88,11.86,65200.0,774223.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:07:00',11.87,11.89,11.9,11.86,114800.0,1364045.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:08:00',11.88,11.86,11.9,11.86,64400.0,764960.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:09:00',11.87,11.86,11.88,11.84,119900.0,1421962.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:10:00',11.84,11.85,11.86,11.82,73300.0,867638.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:11:00',11.83,11.82,11.84,11.82,53200.0,629552.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:12:00',11.82,11.83,11.85,11.82,133900.0,1584264.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:13:00',11.83,11.85,11.85,11.83,54500.0,645483.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:14:00',11.85,11.88,11.88,11.84,183000.0,2170850.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:15:00',11.88,11.89,11.9,11.86,74100.0,880572.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:16:00',11.9,11.9,11.92,11.89,78200.0,930818.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:17:00',11.92,11.89,11.92,11.88,125600.0,1494664.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:18:00',11.88,11.88,11.9,11.88,64100.0,761948.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:19:00',11.87,11.88,11.91,11.87,54500.0,647863.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:20:00',11.89,11.9,11.92,11.89,77000.0,916637.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:21:00',11.93,11.9,11.93,11.88,206800.0,2458801.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:22:00',11.88,11.85,11.88,11.84,106000.0,1256595.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:23:00',11.85,11.83,11.86,11.83,159200.0,1885375.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:24:00',11.83,11.84,11.86,11.83,148800.0,1761022.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:25:00',11.83,11.82,11.83,11.81,215200.0,2543641.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:26:00',11.82,11.82,11.84,11.81,225200.0,2662189.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:27:00',11.82,11.82,11.83,11.81,129000.0,1524894.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:28:00',11.82,11.82,11.83,11.82,114600.0,1354650.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:29:00',11.83,11.84,11.87,11.83,112600.0,1332867.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:30:00',11.84,11.85,11.85,11.82,110800.0,1311207.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:31:00',11.83,11.82,11.85,11.82,81500.0,964412.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:32:00',11.84,11.84,11.84,11.82,76700.0,907196.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:33:00',11.84,11.85,11.85,11.82,61500.0,728081.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:34:00',11.85,11.85,11.86,11.84,27500.0,325729.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:35:00',11.85,11.87,11.87,11.85,89300.0,1059118.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
10:36:00',11.85,11.87,11.88,11.82,92400.0,1096264.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:37:00',11.87,11.86,11.87,11.84,58700.0,696401.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:38:00',11.85,11.86,11.88,11.85,68100.0,808058.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:39:00',11.86,11.88,11.89,11.86,50500.0,599803.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:40:00',11.88,11.88,11.89,11.86,34200.0,406103.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:41:00',11.88,11.88,11.89,11.87,13200.0,156802.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:42:00',11.88,11.91,11.93,11.88,144500.0,1719635.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:43:00',11.91,11.94,11.95,11.91,158800.0,1894595.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:44:00',11.94,11.96,11.96,11.91,115300.0,1377235.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:45:00',11.95,11.91,11.95,11.91,66100.0,788861.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:46:00',11.92,11.93,11.94,11.92,75500.0,900353.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:47:00',11.94,11.95,11.96,11.93,89500.0,1069684.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:48:00',11.95,11.96,11.97,11.95,72000.0,861053.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:49:00',11.95,11.97,11.98,11.95,147200.0,1762197.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:50:00',11.97,11.99,11.99,11.97,161500.0,1935869.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:51:00',11.99,12.0,12.01,11.99,76500.0,917906.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:52:00',12.0,12.0,12.0,11.99,78300.0,939684.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:53:00',12.0,11.95,12.01,11.95,106000.0,1269869.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:54:00',11.95,12.0,12.0,11.95,89700.0,1074745.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:55:00',12.0,11.98,12.0,11.96,44900.0,538166.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:56:00',11.96,11.95,11.97,11.95,15200.0,181801.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:57:00',11.95,11.94,11.95,11.91,86600.0,1033371.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:58:00',11.94,11.94,11.95,11.94,12100.0,144526.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:59:00',11.94,11.95,11.96,11.94,35000.0,418281.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:00:00',11.96,11.97,11.97,11.96,21700.0,259540.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:01:00',11.96,11.97,11.97,11.96,8700.0,104090.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:02:00',11.97,11.95,11.97,11.95,36700.0,439063.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:03:00',11.96,11.96,11.96,11.95,22000.0,263109.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:04:00',11.96,11.96,11.97,11.96,22300.0,266865.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:05:00',11.96,11.95,11.96,11.95,13200.0,157878.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:06:00',11.95,11.99,11.99,11.95,20000.0,239539.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
11:07:00',11.99,11.99,12.0,11.99,31886.0,382583.1400000155,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:08:00',12.0,11.99,12.0,11.98,31500.0,377722.1399999857,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:09:00',11.99,11.99,11.99,11.98,27900.0,334285.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:10:00',11.99,11.97,11.99,11.97,11414.0,136729.7199999988,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:11:00',11.97,11.98,11.98,11.97,26100.0,312661.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:12:00',11.98,11.99,11.99,11.97,36500.0,437536.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:13:00',12.0,12.0,12.0,11.99,39400.0,472703.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:14:00',12.0,11.99,12.02,11.99,27400.0,329000.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:15:00',11.99,11.99,12.0,11.99,2000.0,23981.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:16:00',11.99,12.0,12.0,11.99,67000.0,803868.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:17:00',11.99,12.0,12.01,11.99,26000.0,312084.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:18:00',12.0,11.97,12.0,11.97,58600.0,702840.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:19:00',11.97,11.98,11.99,11.97,35100.0,420603.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:20:00',11.97,11.96,11.98,11.96,36200.0,433314.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:21:00',11.96,11.97,11.97,11.96,25200.0,301540.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:22:00',11.98,11.98,11.99,11.97,74700.0,895397.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:23:00',11.98,11.99,11.99,11.96,74100.0,888127.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:24:00',11.99,11.96,11.99,11.96,33800.0,404807.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:25:00',11.96,11.95,11.97,11.95,24500.0,292918.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:26:00',11.95,11.95,11.95,11.94,24500.0,292679.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:27:00',11.94,11.96,11.96,11.94,40900.0,488751.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:28:00',11.95,11.95,11.96,11.95,12400.0,148204.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:29:00',11.95,11.95,11.96,11.94,64457.0,770570.150000006,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:30:00',11.95,11.96,11.96,11.93,42743.0,510538.84999999404,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:01:00',11.97,11.96,11.98,11.96,70600.0,844816.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:02:00',11.96,11.98,11.98,11.96,18500.0,221566.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:03:00',11.97,11.98,11.98,11.97,29500.0,353483.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:04:00',11.98,11.98,12.0,11.98,54600.0,655162.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:05:00',11.98,12.0,12.01,11.98,88200.0,1057428.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:06:00',11.99,12.0,12.0,11.96,23700.0,284382.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:07:00',12.0,12.02,12.02,11.96,53800.0,646060.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
13:08:00',12.02,11.98,12.02,11.98,20800.0,249202.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:09:00',11.98,12.0,12.0,11.98,27700.0,332378.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:10:00',12.0,12.0,12.0,11.98,12200.0,146372.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:11:00',12.0,11.98,12.0,11.98,4400.0,52724.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:12:00',11.98,11.93,11.98,11.93,108700.0,1299979.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:13:00',11.93,11.91,11.95,11.9,82900.0,987624.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:14:00',11.93,11.93,11.98,11.92,103700.0,1240948.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:15:00',11.93,11.96,11.97,11.93,3900.0,46668.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:16:00',11.96,11.95,11.98,11.95,62600.0,749326.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:17:00',11.95,11.96,11.98,11.95,10700.0,128102.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:18:00',11.96,12.02,12.02,11.96,118400.0,1420798.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:19:00',12.01,12.01,12.02,12.0,35700.0,429016.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:20:00',12.01,12.02,12.02,12.01,19200.0,230699.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:21:00',12.02,12.01,12.02,12.01,14600.0,175379.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:22:00',12.01,12.0,12.04,12.0,103600.0,1245267.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:23:00',12.0,11.97,12.01,11.97,9600.0,115115.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:24:00',11.97,11.96,11.98,11.96,9100.0,108942.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:25:00',11.97,11.99,11.99,11.96,18800.0,225218.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:26:00',11.99,11.99,12.0,11.99,9000.0,107915.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:27:00',11.99,12.01,12.01,11.99,31500.0,378045.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:28:00',12.01,12.03,12.03,12.01,31100.0,373794.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:29:00',12.03,12.02,12.04,12.02,21300.0,256204.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:30:00',12.02,12.02,12.03,12.0,90000.0,1080841.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:31:00',12.02,12.02,12.03,12.02,24100.0,289965.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:32:00',12.02,12.03,12.03,12.02,25700.0,309124.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:33:00',12.03,12.02,12.04,12.02,23200.0,279098.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:34:00',12.02,12.06,12.06,12.02,79100.0,953326.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:35:00',12.06,12.11,12.12,12.06,298800.0,3612764.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:36:00',12.12,12.12,12.13,12.11,105900.0,1283501.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:37:00',12.11,12.13,12.13,12.11,92900.0,1126259.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:38:00',12.13,12.13,12.15,12.13,96400.0,1169894.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
13:39:00',12.12,12.11,12.13,12.1,42300.0,512549.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:40:00',12.1,12.1,12.1,12.09,22700.0,274581.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:41:00',12.1,12.11,12.13,12.1,46800.0,567083.099999994,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:42:00',12.11,12.13,12.14,12.11,104390.0,1265925.7000000179,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:43:00',12.13,12.1,12.13,12.09,129600.0,1569111.2999999821,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:44:00',12.1,12.1,12.12,12.1,35300.0,427724.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:45:00',12.11,12.1,12.13,12.1,27200.0,329237.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:46:00',12.1,12.11,12.12,12.1,25501.0,308947.10000002384,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:47:00',12.12,12.13,12.13,12.11,70300.0,852342.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:48:00',12.13,12.11,12.13,12.1,11400.0,138103.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:49:00',12.11,12.1,12.12,12.1,13300.0,161070.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:50:00',12.1,12.11,12.12,12.1,46438.0,562392.1799999774,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:51:00',12.11,12.11,12.13,12.1,83862.0,1015883.0600000024,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:52:00',12.11,12.13,12.13,12.11,59838.0,725375.9399999976,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:53:00',12.14,12.15,12.15,12.13,86000.0,1044086.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:54:00',12.14,12.15,12.15,12.14,42000.0,510247.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:55:00',12.13,12.13,12.14,12.13,82600.0,1002290.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:56:00',12.13,12.12,12.14,12.11,71162.0,863430.6299999952,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:57:00',12.12,12.12,12.15,12.11,45200.0,548489.8000000119,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:58:00',12.15,12.12,12.15,12.12,13400.0,162586.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:59:00',12.12,12.12,12.14,12.12,38900.0,471704.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:00:00',12.14,12.12,12.14,12.12,26300.0,318796.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:01:00',12.12,12.13,12.13,12.11,66328.0,804196.3600000143,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:02:00',12.13,12.11,12.14,12.11,33672.0,408110.0799999833,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:03:00',12.11,12.1,12.13,12.1,47100.0,570431.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:04:00',12.1,12.11,12.11,12.1,19369.0,234477.59000000358,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:05:00',12.12,12.12,12.12,12.11,15400.0,186643.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:06:00',12.12,12.11,12.13,12.11,15400.0,186660.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:07:00',12.11,12.12,12.12,12.11,20600.0,249528.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:08:00',12.12,12.1,12.12,12.1,31531.0,381865.7199999988,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:09:00',12.11,12.1,12.11,12.1,23900.0,289493.0,12.09)") + 
tdSql.execute(f"insert into tb values ('2020-01-09 14:10:00',12.1,12.1,12.11,12.09,53000.0,641330.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:11:00',12.1,12.09,12.1,12.08,49500.0,598315.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:12:00',12.07,12.04,12.07,12.04,61300.0,739610.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:13:00',12.04,12.01,12.04,12.01,164900.0,1981477.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:14:00',12.01,12.05,12.05,12.01,22100.0,265748.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:15:00',12.05,11.97,12.05,11.97,67800.0,813874.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:16:00',11.97,11.95,11.97,11.94,112800.0,1348763.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:17:00',11.95,11.95,11.96,11.95,26900.0,321502.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:18:00',11.96,11.97,11.99,11.96,33100.0,396230.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:19:00',11.98,11.96,11.98,11.96,27100.0,324405.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:20:00',11.96,11.99,11.99,11.96,17200.0,205815.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:21:00',11.98,12.0,12.0,11.98,19500.0,233949.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:22:00',12.0,11.98,12.0,11.98,5900.0,70682.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:23:00',11.98,11.98,11.98,11.97,23100.0,276745.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:24:00',11.98,11.97,11.98,11.95,59500.0,711852.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:25:00',11.97,11.96,11.97,11.95,36300.0,434076.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:26:00',11.95,11.95,11.95,11.94,51700.0,617761.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:27:00',11.96,11.97,11.97,11.95,102800.0,1229707.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:28:00',11.97,11.97,11.99,11.97,44100.0,528334.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:29:00',11.97,11.99,11.99,11.97,28800.0,345015.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:30:00',11.96,11.99,11.99,11.96,7500.0,89804.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:31:00',11.99,12.0,12.0,11.98,55100.0,661147.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:32:00',12.0,12.0,12.05,12.0,24500.0,294675.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:33:00',12.01,12.02,12.02,12.0,49100.0,589928.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:34:00',12.02,12.02,12.03,12.02,8200.0,98583.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:35:00',12.03,12.05,12.05,12.03,58200.0,700387.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:36:00',12.05,12.06,12.06,12.05,31500.0,379668.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:37:00',12.07,12.08,12.08,12.05,32900.0,396865.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:38:00',12.08,12.07,12.09,12.07,124200.0,1499433.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:39:00',12.06,12.05,12.06,12.04,29100.0,350621.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:40:00',12.05,12.02,12.05,12.02,35700.0,429752.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
14:41:00',12.03,12.03,12.04,12.02,107600.0,1294053.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:42:00',12.04,12.06,12.08,12.04,62900.0,757692.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:43:00',12.08,12.08,12.09,12.06,33400.0,403474.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:44:00',12.08,12.08,12.09,12.08,36700.0,443528.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:45:00',12.08,12.08,12.09,12.07,30900.0,373291.56999999285,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:46:00',12.08,12.06,12.08,12.06,57900.0,698897.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:47:00',12.06,12.05,12.07,12.05,11400.0,137457.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:48:00',12.06,12.07,12.08,12.05,112900.0,1362505.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:49:00',12.08,12.06,12.08,12.06,33669.0,406603.1400000155,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:50:00',12.07,12.08,12.09,12.07,97100.0,1172659.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:51:00',12.08,12.06,12.09,12.06,77131.0,931442.8599999845,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:52:00',12.06,12.08,12.09,12.06,47400.0,572668.25,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:53:00',12.07,12.06,12.08,12.05,44900.0,541687.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:54:00',12.05,12.08,12.08,12.05,117400.0,1417946.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:55:00',12.08,12.08,12.09,12.08,76200.0,920693.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:56:00',12.08,12.07,12.09,12.07,49800.0,601680.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:57:00',12.08,12.08,12.08,12.06,62000.0,748534.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 15:00:00',12.08,12.08,12.08,12.08,130900.0,1581272.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:31:00',12.06,12.1,12.1,12.02,316526.0,3816146.3,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:32:00',12.08,12.08,12.15,12.07,131200.0,1589154.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:33:00',12.06,12.05,12.08,12.05,49400.0,596016.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:34:00',12.05,12.08,12.08,12.05,75200.0,907711.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:35:00',12.07,12.12,12.12,12.07,86500.0,1045399.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:36:00',12.12,12.08,12.14,12.06,88100.0,1065477.000000001,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:37:00',12.08,12.09,12.09,12.06,22400.0,270362.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:38:00',12.07,12.05,12.07,12.0,113400.0,1364445.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:39:00',12.05,12.01,12.05,12.01,30400.0,365468.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:40:00',12.01,11.95,12.01,11.95,136200.0,1630108.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:41:00',11.95,12.0,12.04,11.95,169752.0,2035382.039999999,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:42:00',12.02,12.01,12.04,12.01,68700.0,825444.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:43:00',12.01,11.98,12.01,11.98,44900.0,538684.0,12.08)") + tdSql.execute(f"insert into tb values 
('2020-01-10 09:44:00',11.98,11.97,11.98,11.96,78000.0,933675.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:45:00',11.97,11.96,11.97,11.96,66000.0,789435.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:46:00',11.96,11.95,11.97,11.95,81400.0,973556.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:47:00',11.95,11.95,11.96,11.94,108600.0,1297634.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:48:00',11.95,11.95,11.96,11.94,46600.0,556907.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:49:00',11.95,11.95,11.96,11.95,51400.0,614276.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:50:00',11.95,11.98,11.98,11.95,104200.0,1246640.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:51:00',12.0,11.99,12.0,11.97,147700.0,1769823.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:52:00',11.98,11.99,11.99,11.96,102000.0,1221748.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:53:00',11.99,11.97,12.0,11.97,11600.0,138885.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:54:00',11.97,11.97,11.99,11.96,31600.0,378161.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:55:00',11.97,11.96,11.99,11.96,54300.0,649568.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:56:00',11.96,11.95,11.97,11.95,47200.0,564310.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:57:00',11.96,11.93,11.96,11.93,116800.0,1395667.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:58:00',11.93,11.91,11.93,11.91,89600.0,1067691.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:59:00',11.9,11.92,11.94,11.9,88398.0,1053290.1999999993,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:00:00',11.94,11.92,11.94,11.91,80500.0,960517.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:01:00',11.92,11.9,11.92,11.9,78400.0,933755.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:02:00',11.9,11.93,11.93,11.9,187100.0,2230130.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:03:00',11.93,11.95,11.95,11.91,172600.0,2055217.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:04:00',11.93,11.91,11.93,11.91,36300.0,432817.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:05:00',11.91,11.92,11.92,11.91,42600.0,507551.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:06:00',11.92,11.91,11.93,11.9,151400.0,1802617.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:07:00',11.9,11.9,11.91,11.9,74200.0,883104.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:08:00',11.9,11.9,11.91,11.9,112500.0,1339359.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:09:00',11.9,11.91,11.92,11.9,28500.0,339459.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:10:00',11.91,11.91,11.92,11.91,49700.0,591928.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:11:00',11.91,11.91,11.92,11.9,51100.0,608953.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:12:00',11.91,11.92,11.95,11.91,64600.0,770182.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:13:00',11.92,11.93,11.95,11.92,64400.0,767805.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:14:00',11.93,11.93,11.93,11.92,33100.0,394730.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
10:15:00',11.93,11.92,11.94,11.91,38000.0,453054.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:16:00',11.92,11.91,11.92,11.91,65200.0,776615.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:17:00',11.91,11.9,11.92,11.9,87300.0,1039071.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:18:00',11.9,11.88,11.91,11.88,369000.0,4390167.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:19:00',11.9,11.89,11.91,11.88,69500.0,826864.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:20:00',11.89,11.89,11.9,11.89,63000.0,749152.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:21:00',11.89,11.88,11.89,11.88,95890.0,1140070.200000003,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:22:00',11.88,11.88,11.89,11.87,26100.0,310042.1000000015,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:23:00',11.88,11.87,11.89,11.86,70900.0,841972.099999994,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:24:00',11.87,11.88,11.88,11.86,45100.0,535246.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:25:00',11.86,11.86,11.88,11.86,120900.0,1434415.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:26:00',11.86,11.87,11.87,11.85,62700.0,743781.1000000015,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:27:00',11.87,11.86,11.87,11.86,91400.0,1084428.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:28:00',11.87,11.88,11.89,11.87,79000.0,938308.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:29:00',11.88,11.88,11.89,11.88,20400.0,242424.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:30:00',11.88,11.9,11.9,11.88,57000.0,677808.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:31:00',11.9,11.89,11.91,11.88,37100.0,441204.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:32:00',11.9,11.91,11.91,11.89,23800.0,283159.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:33:00',11.91,11.91,11.91,11.9,22600.0,269057.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:34:00',11.91,11.92,11.92,11.9,12000.0,142938.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:35:00',11.92,11.9,11.93,11.9,29500.0,351508.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:36:00',11.9,11.9,11.9,11.9,16700.0,198730.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:37:00',11.9,11.91,11.93,11.89,9000.0,107172.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:38:00',11.91,11.93,11.93,11.91,9600.0,114429.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:39:00',11.93,11.93,11.94,11.92,10800.0,128874.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:40:00',11.92,11.94,11.94,11.92,17100.0,204038.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:41:00',11.94,11.97,11.97,11.93,52700.0,629486.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:42:00',11.97,11.94,11.97,11.93,36500.0,436116.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:43:00',11.94,11.94,11.94,11.94,13100.0,156482.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:44:00',11.94,11.94,11.96,11.94,8900.0,106304.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:45:00',11.94,11.96,11.96,11.92,24700.0,294770.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
10:46:00',11.93,11.93,11.94,11.93,20300.0,242182.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:47:00',11.93,11.95,11.96,11.93,20800.0,248404.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:48:00',11.94,11.94,11.95,11.94,38800.0,463298.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:49:00',11.94,11.94,11.94,11.93,41400.0,494259.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:50:00',11.94,11.95,11.96,11.94,83000.0,991746.0000000075,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:51:00',11.96,11.96,11.96,11.95,12100.0,144703.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:52:00',11.96,11.95,11.96,11.95,5100.0,60978.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:53:00',11.95,11.97,11.97,11.95,9900.0,118432.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:54:00',11.97,11.96,11.97,11.95,28700.0,343045.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:55:00',11.96,11.95,11.96,11.95,24600.0,294016.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:56:00',11.95,11.94,11.95,11.94,23700.0,283225.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:57:00',11.94,11.93,11.95,11.93,31000.0,370132.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:58:00',11.93,11.95,11.95,11.93,17500.0,208955.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:59:00',11.95,11.93,11.95,11.92,32900.0,392361.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:00:00',11.93,11.94,11.94,11.92,13200.0,157433.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:01:00',11.94,11.95,11.95,11.93,13500.0,161150.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:02:00',11.93,11.95,11.95,11.93,11900.0,142081.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:03:00',11.95,11.95,11.95,11.94,7700.0,92001.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:04:00',11.95,11.95,11.95,11.95,8700.0,103965.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:05:00',11.95,11.94,11.95,11.94,5900.0,70494.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:06:00',11.95,11.95,11.96,11.94,10700.0,127840.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:07:00',11.93,11.96,11.96,11.93,6400.0,76470.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:08:00',11.95,11.97,11.97,11.94,15300.0,183070.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:09:00',11.97,11.96,11.97,11.95,19000.0,227279.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:10:00',11.96,11.96,11.97,11.95,4900.0,58625.25,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:11:00',11.96,11.96,11.97,11.96,12300.0,147188.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:12:00',11.97,11.97,11.98,11.95,5900.0,70631.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:13:00',11.97,11.97,11.97,11.96,9600.0,114856.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:14:00',11.97,11.96,11.97,11.96,14900.0,178237.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:15:00',11.97,11.97,11.97,11.97,3500.0,41895.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:16:00',11.97,11.97,11.97,11.97,6500.0,77777.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:17:00',11.97,11.97,11.97,11.97,15600.0,186732.0,12.08)") 
+ tdSql.execute(f"insert into tb values ('2020-01-10 11:18:00',11.97,11.98,11.98,11.97,38900.0,465951.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:19:00',11.98,11.99,11.99,11.98,3500.0,41934.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:20:00',11.99,11.99,12.0,11.98,55500.0,665383.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:21:00',11.99,11.99,11.99,11.97,35200.0,421620.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:22:00',11.99,12.0,12.0,11.97,41000.0,491893.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:23:00',11.99,11.99,12.0,11.99,4700.0,56370.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:24:00',12.0,11.99,12.0,11.99,18200.0,218234.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:25:00',11.98,11.99,11.99,11.98,7800.0,93504.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:26:00',11.99,12.0,12.0,11.99,7400.0,88708.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:27:00',12.0,11.98,12.0,11.98,17100.0,204902.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:28:00',11.98,11.97,11.98,11.97,17500.0,209492.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:29:00',11.97,11.97,11.98,11.97,28200.0,337559.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:30:00',11.97,11.98,11.98,11.97,1800.0,21554.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:01:00',11.98,11.98,11.98,11.97,72300.0,865768.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:02:00',11.98,11.97,11.98,11.96,32400.0,387625.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:03:00',11.96,11.95,11.97,11.94,21600.0,258199.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:04:00',11.96,11.95,11.96,11.95,32600.0,389777.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:05:00',11.96,11.96,11.97,11.95,7900.0,94441.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:06:00',11.96,11.96,11.97,11.96,2400.0,28708.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:07:00',11.96,11.95,11.96,11.95,3100.0,37050.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:08:00',11.96,11.96,11.96,11.95,3500.0,41832.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:09:00',11.96,11.96,11.97,11.95,12500.0,149502.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:10:00',11.96,11.97,11.97,11.95,1600.0,19136.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:11:00',11.96,11.96,11.96,11.95,17300.0,206748.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:12:00',11.96,11.97,11.97,11.95,47600.0,568842.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:13:00',11.97,11.95,11.97,11.94,19100.0,228067.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:14:00',11.95,11.93,11.96,11.93,10700.0,127777.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:15:00',11.95,11.95,11.96,11.93,42300.0,505124.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:16:00',11.96,11.95,11.96,11.94,8800.0,105194.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:17:00',11.96,11.95,11.96,11.93,34000.0,405679.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:18:00',11.95,11.93,11.95,11.93,24800.0,295882.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
13:19:00',11.94,11.94,11.94,11.93,20900.0,249431.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:20:00',11.93,11.94,11.94,11.93,1200.0,14324.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:21:00',11.94,11.95,11.95,11.93,41100.0,490779.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:22:00',11.94,11.94,11.96,11.94,10000.0,119503.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:23:00',11.94,11.94,11.96,11.94,8800.0,105179.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:24:00',11.95,11.95,11.95,11.94,4500.0,53747.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:25:00',11.95,11.93,11.95,11.93,7700.0,91918.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:26:00',11.93,11.94,11.94,11.92,8600.0,102590.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:27:00',11.94,11.93,11.94,11.92,11600.0,138370.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:28:00',11.93,11.95,11.95,11.92,37200.0,443933.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:29:00',11.94,11.95,11.96,11.94,31301.0,374046.9499999881,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:30:00',11.95,11.96,11.96,11.94,41700.0,498496.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:31:00',11.96,11.99,11.99,11.96,127800.0,1530479.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:32:00',11.99,11.97,11.99,11.97,15900.0,190415.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:33:00',11.98,11.98,11.98,11.96,41400.0,495799.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:34:00',11.98,12.01,12.01,11.98,104700.0,1255640.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:35:00',12.01,11.98,12.03,11.98,141400.0,1695886.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:36:00',11.98,11.99,12.02,11.97,117300.0,1405537.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:37:00',12.0,12.01,12.02,12.0,37700.0,453031.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:38:00',12.01,12.06,12.07,12.0,237200.0,2857519.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:39:00',12.07,12.06,12.08,12.05,128500.0,1550267.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:40:00',12.06,12.04,12.07,12.04,36100.0,435110.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:41:00',12.04,12.04,12.05,12.02,18000.0,216603.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:42:00',12.04,12.03,12.04,12.02,36900.0,443792.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:43:00',12.04,12.04,12.04,12.03,29900.0,359889.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:44:00',12.04,12.04,12.04,12.03,53800.0,647334.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:45:00',12.04,12.03,12.04,12.02,17000.0,204427.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:46:00',12.03,12.03,12.03,12.02,22700.0,272989.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:47:00',12.03,12.03,12.03,12.02,70200.0,844460.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:48:00',12.03,12.01,12.03,12.01,34300.0,412246.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:49:00',12.03,12.02,12.03,12.01,34100.0,409855.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
13:50:00',12.02,12.01,12.03,12.01,21200.0,254718.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:51:00',12.02,12.02,12.03,12.01,48500.0,582911.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:52:00',12.02,12.01,12.03,12.01,21300.0,255961.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:53:00',12.02,12.0,12.02,12.0,37987.0,456280.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:54:00',12.0,11.99,12.01,11.99,34913.0,418955.13000001013,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:55:00',11.99,12.01,12.01,11.99,32600.0,391159.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:56:00',12.0,12.0,12.01,12.0,14000.0,168017.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:57:00',12.0,12.0,12.01,12.0,20900.0,250812.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:58:00',12.01,12.01,12.01,12.0,24300.0,291781.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:59:00',12.0,12.0,12.02,11.99,18487.0,221915.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:00:00',12.01,12.02,12.02,12.0,15300.0,183768.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:01:00',12.02,12.02,12.02,12.01,2300.0,27645.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:02:00',12.02,12.02,12.02,12.02,1900.0,22837.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:03:00',12.02,12.01,12.02,12.01,14500.0,174166.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:04:00',12.01,12.0,12.02,12.0,16700.0,200511.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:05:00',12.0,12.01,12.02,12.0,14100.0,169334.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:06:00',12.01,12.01,12.01,12.0,21100.0,253376.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:07:00',12.02,12.01,12.02,12.01,4400.0,52854.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:08:00',12.0,12.0,12.01,12.0,26513.0,318184.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:09:00',12.0,12.01,12.01,12.0,25687.0,308279.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:10:00',12.0,12.0,12.01,11.99,20200.0,242234.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:11:00',12.0,11.99,12.0,11.99,15500.0,185871.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:12:00',11.99,12.0,12.0,11.99,28100.0,336946.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:13:00',12.0,11.99,12.0,11.98,28800.0,345236.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:14:00',11.99,11.98,11.99,11.98,17400.0,208541.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:15:00',11.99,12.0,12.0,11.99,33300.0,399287.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:16:00',12.0,12.0,12.0,11.99,32200.0,386236.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:17:00',12.0,12.0,12.0,11.99,21159.0,253877.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:18:00',12.0,12.01,12.01,12.0,4200.0,50427.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:19:00',12.01,12.01,12.01,12.0,4100.0,49227.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:20:00',12.01,12.01,12.01,11.98,47200.0,566171.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:21:00',12.0,12.0,12.01,12.0,24800.0,297639.0,12.08)") + tdSql.execute(f"insert into tb 
values ('2020-01-10 14:22:00',12.0,12.0,12.02,12.0,64500.0,774303.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:23:00',12.02,12.02,12.03,12.0,17100.0,205518.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:24:00',12.02,12.02,12.03,12.01,33200.0,399297.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:25:00',12.02,12.02,12.03,12.02,17600.0,211601.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:26:00',12.02,12.01,12.02,12.01,16200.0,194668.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:27:00',12.01,12.01,12.02,12.0,83700.0,1005081.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:28:00',12.01,12.01,12.01,11.99,49300.0,591313.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:29:00',12.01,12.01,12.02,12.0,14200.0,170574.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:30:00',12.01,12.02,12.02,12.0,65800.0,789746.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:31:00',12.0,11.98,12.02,11.98,42900.0,514641.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:32:00',12.0,12.0,12.01,11.99,20100.0,241232.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:33:00',12.0,12.0,12.0,11.99,64400.0,772180.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:34:00',12.0,12.0,12.01,11.98,77800.0,932959.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:35:00',12.0,11.99,12.01,11.99,19600.0,235230.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:36:00',11.99,12.0,12.01,11.98,35200.0,422031.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:37:00',12.0,12.01,12.01,12.0,20000.0,240035.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:38:00',12.0,11.99,12.01,11.99,38900.0,466642.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:39:00',12.0,11.99,12.01,11.99,48500.0,581947.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:40:00',12.0,12.0,12.0,11.97,76600.0,918304.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:41:00',12.0,12.0,12.0,11.97,38500.0,461387.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:42:00',12.0,11.98,12.0,11.96,65200.0,781070.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:43:00',11.98,11.95,11.98,11.95,185896.0,2222256.199999988,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:44:00',11.99,11.99,12.0,11.95,82800.0,991636.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:45:00',11.98,11.98,11.99,11.94,45600.0,545571.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:46:00',11.94,11.95,11.99,11.94,75200.0,899330.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:47:00',11.95,11.97,11.98,11.94,93700.0,1119454.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:48:00',11.97,11.93,11.97,11.93,45200.0,539657.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:49:00',11.93,11.92,11.94,11.9,245600.0,2926711.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:50:00',11.91,11.9,11.91,11.89,162800.0,1937561.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:51:00',11.9,11.9,11.91,11.88,168604.0,2004458.600000009,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:52:00',11.9,11.89,11.91,11.88,169296.0,2010980.399999991,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
14:53:00',11.89,11.88,11.9,11.86,179600.0,2131752.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:54:00',11.86,11.84,11.88,11.84,366700.0,4348833.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:55:00',11.85,11.85,11.86,11.84,169100.0,2002880.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:56:00',11.86,11.9,11.98,11.86,164200.0,1952380.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:57:00',11.9,11.89,11.94,11.88,59500.0,708274.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 15:00:00',11.89,11.89,11.89,11.89,137000.0,1628930.0,12.08)") + + def check_max_min_results(self): + max_results = [11.48, 11.54, 11.82, 12.14, 12.19, 12.15, 12.15] + min_results = [10.90, 11.20, 11.41, 11.61, 11.82, 11.75, 11.86] + + for i in range(len(max_results)): + tdSql.checkData(i, 1, max_results[i]) + tdSql.checkData(i, 2, min_results[i]) + + def basic_query(self): + tdSql.query(f"select first(ts), max(high), min(high) from tb interval(1d)") + self.check_max_min_results() + + tdSql.query(f"select last(ts), max(high), min(high) from tb interval(1d)") + self.check_max_min_results() + + + def run(self): + dbname = "db" + tdSql.prepare() + self.prepare_data() + self.basic_query() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 349d7ee58cf506b1681d2e735cab46a590cdb9f4 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 10 Feb 2023 15:08:34 +0800 Subject: [PATCH 104/267] add test cases --- tests/parallel_test/cases.task | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 33f22896be..d6f9144245 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -549,6 +549,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py @@ -834,6 +836,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 2 @@ -930,6 +933,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3 @@ -1027,6 +1031,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 4 From 72287a3b5fed1d30855d5db7583280515db220ca Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 16:03:11 +0800 Subject: [PATCH 105/267] fix(query): set correct tag value during tag filter and do some internal refactor. --- include/common/tcommon.h | 4 +- source/common/src/tdatablock.c | 5 +- source/dnode/vnode/inc/vnode.h | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 26 +- source/libs/executor/src/executil.c | 417 ++++++++++++++++-------- 5 files changed, 297 insertions(+), 157 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index ea9bf1fcfd..d88747eb92 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -367,11 +367,11 @@ typedef struct SSortExecInfo { int32_t readBytes; // read io bytes } SSortExecInfo; -typedef struct SFilterTableInfo { +typedef struct STUidTagInfo { char* name; uint64_t uid; void* pTagVal; -} SFilterTableInfo; +} STUidTagInfo; // stream special block column diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 7d5f7097f9..2e633e7479 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1546,7 +1546,10 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) { } void colDataDestroy(SColumnInfoData* pColData) { - if (!pColData) return; + if (!pColData) { + return; + } + if (IS_VAR_DATA_TYPE(pColData->info.type)) { taosMemoryFreeClear(pColData->varmeta.offset); } else { diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index d6f59b125f..7d2ce44776 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -104,7 +104,7 @@ void metaReaderClear(SMetaReader *pReader); int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid); int32_t metaGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid); int metaGetTableEntryByName(SMetaReader *pReader, const char *name); -int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags); +int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList); int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList); int32_t metaReadNext(SMetaReader *pReader); const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index ce6a8de8c5..21c9c77d4d 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1378,7 +1378,7 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) { int32_t isLock = false; int32_t sz = uidList ? 
taosArrayGetSize(uidList) : 0; for (int i = 0; i < sz; i++) { - SFilterTableInfo *p = taosArrayGet(uidList, i); + STUidTagInfo *p = taosArrayGet(uidList, i); if (i % LIMIT == 0) { if (isLock) metaULock(pMeta); @@ -1404,18 +1404,18 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) { return 0; } -int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) { +int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *pUidTagInfo) { SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid, 1); // If len > 0 means there already have uids, and we only want the // tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept // in the hash map, that may require a lot of memory SHashObj *pSepecifiedUidMap = NULL; - size_t len = taosArrayGetSize(uidList); - if (len > 0) { - pSepecifiedUidMap = taosHashInit(len / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - for (int i = 0; i < len; i++) { - int64_t *uid = taosArrayGet(uidList, i); + size_t numOfElems = taosArrayGetSize(pUidTagInfo); + if (numOfElems > 0) { + pSepecifiedUidMap = taosHashInit(numOfElems / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + for (int i = 0; i < numOfElems; i++) { + int64_t *uid = taosArrayGet(pUidTagInfo, i); taosHashPut(pSepecifiedUidMap, uid, sizeof(int64_t), 0, 0); } } @@ -1426,13 +1426,15 @@ int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj break; } - if (len > 0 && taosHashGet(pSepecifiedUidMap, &uid, sizeof(int64_t)) == NULL) { + if (numOfElems > 0 && taosHashGet(pSepecifiedUidMap, &uid, sizeof(int64_t)) == NULL) { continue; - } else if (len == 0) { - taosArrayPush(uidList, &uid); - } + } else if (numOfElems == 0) { + STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal}; + info.pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(info.pTagVal, pCur->pVal, pCur->vLen); - taosHashPut(tags, &uid, sizeof(uint64_t), pCur->pVal, pCur->vLen); + taosArrayPush(pUidTagInfo, &info); + } } taosHashCleanup(pSepecifiedUidMap); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 190ab2a7d2..ce81408a91 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -44,10 +44,11 @@ typedef struct tagFilterAssist { } tagFilterAssist; static int32_t removeInvalidUid(SArray* uids, SHashObj* tags); -static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* pTagCond, SHashObj* tags); +static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* pTagCond); static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond); static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo); +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? 
-1 : ((SLimitNode*)pLimit)->offset; } @@ -393,7 +394,7 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara return TSDB_CODE_SUCCESS; } -static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* uidList, SNode* pTagCond) { +static void getColInfoResult(void* metaHandle, int64_t suid, SArray* pUidList, SNode* pTagCond) { int32_t code = TSDB_CODE_SUCCESS; SArray* pBlockList = NULL; SSDataBlock* pResBlock = NULL; @@ -401,7 +402,6 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* SScalarParam output = {0}; tagFilterAssist ctx = {0}; - ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); if (ctx.colHash == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -419,10 +419,10 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; // int64_t stt = taosGetTimestampUs(); - SArray* pRes = taosArrayInit(10, sizeof(SFilterTableInfo)); - int32_t filter = optimizeTbnameInCond(metaHandle, suid, pRes, pTagCond, tags); + SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo)); + int32_t filter = optimizeTbnameInCond(metaHandle, suid, pUidTagList, pTagCond); if (filter == 0) { // tbname in filter is activated, do nothing and return - int32_t numOfRows = taosArrayGetSize(pRes); + int32_t numOfRows = taosArrayGetSize(pUidTagList); code = createResultData(&type, numOfRows, &output); if (code != TSDB_CODE_SUCCESS) { terrno = code; @@ -430,20 +430,26 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* goto end; } - bool* b = (bool*)output.columnData->pData; - taosArrayEnsureCap(uidList, numOfRows); - + taosArrayEnsureCap(pUidList, numOfRows); for(int32_t i = 0; i < numOfRows; ++i) { - b[i] = true; - SFilterTableInfo* pInfo = taosArrayGet(pRes, i); - taosArrayPush(uidList, &pInfo->uid); + STUidTagInfo* pInfo = taosArrayGet(pUidTagList, i); + taosArrayPush(pUidList, &pInfo->uid); } terrno = 0; goto end; } else { // here we retrieve all tags from the vnode table-meta store - code = metaGetTableTags(metaHandle, suid, uidList, tags); + int32_t numOfExisted = taosArrayGetSize(pUidList); + if (numOfExisted) { + for(int32_t i = 0; i < numOfExisted; ++i) { + uint64_t* uid = taosArrayGet(pUidList, i); + STUidTagInfo info = {.uid = *uid}; + taosArrayPush(pUidTagList, &info); + } + } + + code = metaGetTableTags(metaHandle, suid, pUidTagList); if (code != TSDB_CODE_SUCCESS) { qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); terrno = code; @@ -451,10 +457,6 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* } } - if (suid != 0) { -// removeInvalidUid(uidList, tags); - } - pResBlock = createDataBlock(); if (pResBlock == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -467,12 +469,12 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* blockDataAppendColInfo(pResBlock, &colInfo); } - int32_t size = taosArrayGetSize(pRes); - if (size == 0) { + int32_t numOfTables = taosArrayGetSize(pUidTagList); + if (numOfTables == 0) { goto end; } - code = blockDataEnsureCapacity(pResBlock, size); + code = blockDataEnsureCapacity(pResBlock, numOfTables); if (code != TSDB_CODE_SUCCESS) { terrno = code; goto end; @@ -480,8 +482,8 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* int32_t numOfCols = taosArrayGetSize(pResBlock->pDataBlock); - 
for (int32_t i = 0; i < size; i++) { - SFilterTableInfo* p1 = taosArrayGet(pRes, i); + for (int32_t i = 0; i < numOfTables; i++) { + STUidTagInfo* p1 = taosArrayGet(pUidTagList, i); for (int32_t j = 0; j < numOfCols; j++) { SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); @@ -496,10 +498,14 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* } else { STagVal tagVal = {0}; tagVal.cid = pColInfo->info.colId; + if (p1->pTagVal == NULL) { + colDataAppendNULL(pColInfo, i); + } + const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { - colDataAppend(pColInfo, i, p, true); + colDataAppendNULL(pColInfo, i); } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { colDataAppend(pColInfo, i, p, false); } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { @@ -524,22 +530,39 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* } } - pResBlock->info.rows = size; + pResBlock->info.rows = numOfTables; // int64_t st1 = taosGetTimestampUs(); // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); - pBlockList = taosArrayInit(2, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); + code = createResultData(&type, numOfTables, &output); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + goto end; + } + code = scalarCalculate(pTagCond, pBlockList, &output); if (code != TSDB_CODE_SUCCESS) { qError("failed to calculate scalar, reason:%s", tstrerror(code)); terrno = code; goto end; } - // int64_t st2 = taosGetTimestampUs(); - // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); + + taosArrayClear(pUidList); + + bool* pResult = (bool*)output.columnData->pData; + for(int32_t i = 0; i < numOfTables; ++i) { + uint64_t uid = ((STUidTagInfo*)taosArrayGet(pUidTagList, i))->uid; + qDebug("tagfilter get uid:%" PRId64 ", res:%d", uid, pResult[i]); + + if (pResult[i]) { + taosArrayPush(pUidList, &uid); + } + + i += 1; + } end: taosHashCleanup(tags); @@ -547,7 +570,10 @@ end: taosArrayDestroy(ctx.cInfoList); blockDataDestroy(pResBlock); taosArrayDestroy(pBlockList); - return output.columnData; + + colDataDestroy(output.columnData); + taosMemoryFreeClear(output.columnData); +// return output.columnData; } static void releaseColInfoData(void* pCol) { @@ -604,77 +630,28 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis blockDataAppendColInfo(pResBlock, &colInfo); } + SArray* pUidTagList = taosArrayInit(8, sizeof(STUidTagInfo)); + uidList = taosArrayInit(rows, sizeof(uint64_t)); for (int32_t i = 0; i < rows; ++i) { STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i); - taosArrayPush(uidList, &pkeyInfo->uid); + STUidTagInfo info = {.uid = pkeyInfo->uid}; + taosArrayPush(pUidTagList, &info); } // int64_t stt = taosGetTimestampUs(); tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags); + code = metaGetTableTags(metaHandle, pTableListInfo->suid, pUidTagList); if (code != TSDB_CODE_SUCCESS) { goto end; } - // int64_t stt1 = taosGetTimestampUs(); - // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); - - code = blockDataEnsureCapacity(pResBlock, rows); + int32_t numOfTables = taosArrayGetSize(pUidTagList); + pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList); if (code != 
TSDB_CODE_SUCCESS) { goto end; } - // int64_t st = taosGetTimestampUs(); - for (int32_t i = 0; i < rows; i++) { - int64_t* uid = taosArrayGet(uidList, i); - for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { - SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); - - if (pColInfo->info.colId == -1) { // tbname - char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - metaGetTableNameByUid(metaHandle, *uid, str); - colDataAppend(pColInfo, i, str, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); -#endif - } else { - void* tag = taosHashGet(tags, uid, sizeof(int64_t)); - ASSERT(tag); - - STagVal tagVal = {0}; - tagVal.cid = pColInfo->info.colId; - const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); - - if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { - colDataAppend(pColInfo, i, p, true); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { - colDataAppend(pColInfo, i, p, false); - } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); - varDataSetLen(tmp, tagVal.nData); - memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); - colDataAppend(pColInfo, i, tmp, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp + 2); -#endif - taosMemoryFree(tmp); - } else { - colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); -#if TAG_FILTER_DEBUG - if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { - qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { - qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); - } -#endif - } - } - } - } - - pResBlock->info.rows = rows; - // int64_t st1 = taosGetTimestampUs(); // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); @@ -857,8 +834,8 @@ static int tableUidCompare(const void* a, const void* b) { } static int32_t filterTableInfoCompare(const void* a, const void* b) { - SFilterTableInfo* p1 = (SFilterTableInfo*) a; - SFilterTableInfo* p2 = (SFilterTableInfo*) b; + STUidTagInfo* p1 = (STUidTagInfo*) a; + STUidTagInfo* p2 = (STUidTagInfo*) b; if (p1->uid == p2->uid) { return 0; @@ -867,7 +844,7 @@ static int32_t filterTableInfoCompare(const void* a, const void* b) { return p1->uid < p2->uid? 
-1:1; } -static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* cond, SHashObj* tags) { +static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* cond) { int32_t ret = -1; int32_t ntype = nodeType(cond); @@ -903,12 +880,13 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes if (hasTbnameCond) { ret = metaGetTableTagsByUids(metaHandle, suid, pRes); - removeInvalidUid(pRes, tags); +// removeInvalidUid(pRes, tags); } return ret; } +#if 0 /* * handle invalid uid */ @@ -918,10 +896,10 @@ static int32_t removeInvalidUid(SArray* uids, SHashObj* tags) { return 0; } - SArray* validUid = taosArrayInit(size, sizeof(SFilterTableInfo)); + SArray* validUid = taosArrayInit(size, sizeof(STUidTagInfo)); for (int32_t i = 0; i < size; i++) { - SFilterTableInfo* p = taosArrayGet(uids, i); + STUidTagInfo* p = taosArrayGet(uids, i); if (taosHashGet(tags, &p->uid, sizeof(int64_t)) != NULL) { taosArrayPush(validUid, p); } @@ -932,6 +910,8 @@ static int32_t removeInvalidUid(SArray* uids, SHashObj* tags) { return 0; } +#endif + // only return uid that does not contained in pExistedUidList static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond) { if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) { @@ -961,7 +941,7 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidLis if (numOfExisted > 0) { uHash = taosHashInit(numOfExisted / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); for (int i = 0; i < numOfExisted; i++) { - SFilterTableInfo* pTInfo = taosArrayGet(pExistedUidList, i); + STUidTagInfo* pTInfo = taosArrayGet(pExistedUidList, i); taosHashPut(uHash, &pTInfo->uid, sizeof(uint64_t), &i, sizeof(i)); } } @@ -974,7 +954,7 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidLis ETableType tbType = TSDB_TABLE_MAX; if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) { if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) { - SFilterTableInfo s = {.uid = uid, .name = name, .pTagVal = NULL}; + STUidTagInfo s = {.uid = uid, .name = name, .pTagVal = NULL}; taosArrayPush(pExistedUidList, &s); } } else { @@ -1012,45 +992,198 @@ static void genTagFilterDigest(const SNode* pTagCond, T_MD5_CTX* pContext) { taosMemoryFree(payload); } -static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pRes, SNode* pTagCond, void* metaHandle) { +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList) { + SSDataBlock* pResBlock = createDataBlock(); + if (pResBlock == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + for (int32_t i = 0; i < taosArrayGetSize(pColList); ++i) { + SColumnInfoData colInfo = {0}; + colInfo.info = *(SColumnInfo*)taosArrayGet(pColList, i); + blockDataAppendColInfo(pResBlock, &colInfo); + } + + int32_t code = blockDataEnsureCapacity(pResBlock, numOfTables); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return NULL; + } + + pResBlock->info.rows = numOfTables; + + int32_t numOfCols = taosArrayGetSize(pResBlock->pDataBlock); + + for (int32_t i = 0; i < numOfTables; i++) { + STUidTagInfo* p1 = taosArrayGet(pUidTagList, i); + + for (int32_t j = 0; j < numOfCols; j++) { + SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); + + if (pColInfo->info.colId == -1) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + 
STR_TO_VARSTR(str, p1->name); + colDataAppend(pColInfo, i, str, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); +#endif + } else { + STagVal tagVal = {0}; + tagVal.cid = pColInfo->info.colId; + if (p1->pTagVal == NULL) { + colDataAppendNULL(pColInfo, i); + } + + const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataAppendNULL(pColInfo, i); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataAppend(pColInfo, i, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = alloca(tagVal.nData + VARSTR_HEADER_SIZE + 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataAppend(pColInfo, i, tmp, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter varch:%s", tmp + 2); +#endif + } else { + colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); +#if TAG_FILTER_DEBUG + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { + qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); + } +#endif + } + } + } + } + + return pResBlock; +} + +static void doSetQualifiedUid(SArray* pUidList, const SArray* pUidTagList, bool* pResultList) { + taosArrayClear(pUidList); + + int32_t numOfTables = taosArrayGetSize(pUidTagList); + for(int32_t i = 0; i < numOfTables; ++i) { + uint64_t uid = ((STUidTagInfo*)taosArrayGet(pUidTagList, i))->uid; + qDebug("tagfilter get uid:%" PRId64 ", res:%d", uid, pResultList[i]); + + if (pResultList[i]) { + taosArrayPush(pUidList, &uid); + } + + i += 1; + } +} + +static void copyExistedUids(SArray* pUidTagList, const SArray* pUidList) { + int32_t numOfExisted = taosArrayGetSize(pUidList); + if (numOfExisted) { + for(int32_t i = 0; i < numOfExisted; ++i) { + uint64_t* uid = taosArrayGet(pUidList, i); + STUidTagInfo info = {.uid = *uid}; + taosArrayPush(pUidTagList, &info); + } + } +} + +static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SNode* pTagCond, void* metaHandle) { if (pTagCond == NULL) { return TSDB_CODE_SUCCESS; } terrno = TDB_CODE_SUCCESS; - SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, pRes, pTagCond); - if (terrno != TDB_CODE_SUCCESS) { - colDataDestroy(pColInfoData); - taosMemoryFreeClear(pColInfoData); - taosArrayDestroy(pRes); - qError("failed to getColInfoResult, code: %s", tstrerror(terrno)); - return terrno; + + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; + SSDataBlock* pResBlock = NULL; + SScalarParam output = {0}; + + tagFilterAssist ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + if (ctx.colHash == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto end; } - int32_t i = 0; - int32_t len = taosArrayGetSize(pRes); + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (ctx.cInfoList == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } - if (pColInfoData != NULL) { - bool* pResult = (bool*)pColInfoData->pData; - SArray* p = taosArrayInit(taosArrayGetSize(pRes), sizeof(uint64_t)); + nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); - while (i < len && pColInfoData) { - int64_t* uid = taosArrayGet(pRes, i); - qDebug("tagfilter get uid:%" PRId64 ", res:%d", *uid, pResult[i]); + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes 
= sizeof(bool)}; - if (pResult[i]) { - taosArrayPush(p, uid); - } - - i += 1; + // int64_t stt = taosGetTimestampUs(); + SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo)); + int32_t filter = optimizeTbnameInCond(metaHandle, pListInfo->suid, pUidTagList, pTagCond); + if (filter == 0) { // tbname in filter is activated, do nothing and return + int32_t numOfRows = taosArrayGetSize(pUidTagList); + taosArrayEnsureCap(pUidList, numOfRows); + for(int32_t i = 0; i < numOfRows; ++i) { + STUidTagInfo* pInfo = taosArrayGet(pUidTagList, i); + taosArrayPush(pUidList, &pInfo->uid); } - taosArraySwap(pRes, p); - taosArrayDestroy(p); + terrno = 0; + goto end; + } else { + // here we retrieve all tags from the vnode table-meta store + copyExistedUids(pUidTagList, pUidList); + code = metaGetTableTags(metaHandle, pListInfo->suid, pUidTagList); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), pListInfo->suid); + terrno = code; + goto end; + } } - colDataDestroy(pColInfoData); - taosMemoryFreeClear(pColInfoData); + int32_t numOfTables = taosArrayGetSize(pUidTagList); + if (numOfTables == 0) { + goto end; + } + + pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + // int64_t st1 = taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + pBlockList = taosArrayInit(2, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + + code = createResultData(&type, numOfTables, &output); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + goto end; + } + + code = scalarCalculate(pTagCond, pBlockList, &output); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to calculate scalar, reason:%s", tstrerror(code)); + terrno = code; + goto end; + } + + doSetQualifiedUid(pUidList, pUidTagList, (bool*) output.columnData->pData); + + end: + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + + colDataDestroy(output.columnData); + taosMemoryFreeClear(output.columnData); return TSDB_CODE_SUCCESS; } @@ -1062,36 +1195,37 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, uint64_t tableUid = pScanNode->uid; pListInfo->suid = pScanNode->suid; - SArray* res = taosArrayInit(8, sizeof(uint64_t)); + + SArray* pRes = taosArrayInit(8, sizeof(uint64_t)); if (pScanNode->tableType != TSDB_SUPER_TABLE) { if (metaIsTableExist(metaHandle, tableUid)) { - taosArrayPush(res, &tableUid); + taosArrayPush(pRes, &tableUid); } - code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle); + code = doFilterByTagCond(pListInfo, pRes, pTagCond, metaHandle); if (code != TSDB_CODE_SUCCESS) { return code; } } else { - T_MD5_CTX context = {0}; if (tsTagFilterCache) { // try to retrieve the result from meta cache genTagFilterDigest(pTagCond, &context); + SArray* pUidList = taosArrayInit(8, sizeof(uint64_t)); bool acquired = false; - metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), res, &acquired); + metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pUidList, &acquired); if (acquired) { - qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(res)); + qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(pUidList)); goto _end; } } if (!pTagCond) { // no tag filter condition exists, let's 
fetch all tables of this super table ASSERT(pTagIndexCond == NULL); - vnodeGetCtbIdList(pVnode, pScanNode->suid, res); + vnodeGetCtbIdList(pVnode, pScanNode->suid, pRes); } else { // failed to find the result in the cache, let try to calculate the results if (pTagIndexCond) { @@ -1099,7 +1233,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; SIdxFltStatus status = SFLT_NOT_INDEX; - code = doFilterTag(pTagIndexCond, &metaArg, res, &status); + code = doFilterTag(pTagIndexCond, &metaArg, pRes, &status); if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake // qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); code = TDB_CODE_SUCCESS; @@ -1107,43 +1241,44 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } } - code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle); + code = doFilterByTagCond(pListInfo, pRes, pTagCond, metaHandle); if (code != TSDB_CODE_SUCCESS) { return code; } // let's add the filter results into meta-cache - numOfTables = taosArrayGetSize(res); - size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t); - char* pPayload = taosMemoryMalloc(size); - *(int32_t*)pPayload = numOfTables; - - if (numOfTables > 0) { - memcpy(pPayload + sizeof(int32_t), taosArrayGet(res, 0), numOfTables * sizeof(uint64_t)); - } + numOfTables = taosArrayGetSize(pRes); if (tsTagFilterCache) { - metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); - } + size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t); + char* pPayload = taosMemoryMalloc(size); - taosMemoryFree(pPayload); + // todo convert to uid list + if (numOfTables > 0) { + *(int32_t*)pPayload = numOfTables; + memcpy(pPayload + sizeof(int32_t), taosArrayGet(pRes, 0), numOfTables * sizeof(uint64_t)); + } + + metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); + taosMemoryFree(pPayload); + } } _end: - numOfTables = taosArrayGetSize(res); + numOfTables = taosArrayGetSize(pRes); for (int i = 0; i < numOfTables; i++) { - STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0}; + STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(pRes, i), .groupId = 0}; void* p = taosArrayPush(pListInfo->pTableList, &info); if (p == NULL) { - taosArrayDestroy(res); + taosArrayDestroy(pRes); return TSDB_CODE_OUT_OF_MEMORY; } qTrace("tagfilter get uid:%" PRIu64 "", info.uid); } - taosArrayDestroy(res); + taosArrayDestroy(pRes); return code; } From c8f1c3b6e4973521ca35cbd7564b2341daa208d5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 16:18:30 +0800 Subject: [PATCH 106/267] fix(query): set the max rows as the default block buffer rows. 
--- source/dnode/vnode/src/tsdb/tsdbRead.c | 26 +++---------------------- source/libs/executor/src/scanoperator.c | 6 +++++- 2 files changed, 8 insertions(+), 24 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index b8d0ea28da..e9bb7e3d09 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -423,22 +423,6 @@ static STimeWindow updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow) { return win; } -// note: currently not need this limitation -static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* capacity) { -#if 0 - int32_t rowLen = 0; - for (int32_t i = 0; i < pCond->numOfCols; ++i) { - rowLen += pCond->colList[i].bytes; - } - - // make sure the output SSDataBlock size be less than 2MB. - const int32_t TWOMB = 2 * 1024 * 1024; - if ((*capacity) * rowLen > TWOMB) { - (*capacity) = TWOMB / rowLen; - } -#endif -} - // init file iterator static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader) { size_t numOfFileset = taosArrayGetSize(aDFileSet); @@ -621,8 +605,6 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd goto _end; } - limitOutputBufferSize(pCond, &pReader->capacity); - // allocate buffer in order to load data blocks from file SBlockLoadSuppInfo* pSup = &pReader->suppInfo; pSup->pColAgg = taosArrayInit(pCond->numOfCols, sizeof(SColumnDataAgg)); @@ -3837,11 +3819,9 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL pCond->twindows.ekey -= 1; } - int32_t capacity = 0; - if (pResBlock == NULL) { - capacity = pVnode->config.tsdbCfg.maxRows; - } else { - capacity = pResBlock->info.capacity; + int32_t capacity = pVnode->config.tsdbCfg.maxRows; + if (pResBlock != NULL) { + blockDataEnsureCapacity(pResBlock, capacity); } int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6d3eb67f16..ac76f179a0 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -781,6 +781,10 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); } + + if (pInfo->pResBlock->info.capacity > pOperator->resultInfo.capacity) { + pOperator->resultInfo.capacity = pInfo->pResBlock->info.capacity; + } } SSDataBlock* result = doGroupedTableScan(pOperator); @@ -884,7 +888,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, initResultSizeInfo(&pOperator->resultInfo, 4096); pInfo->pResBlock = createDataBlockFromDescNode(pDescNode); - blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity); +// blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity); code = filterInitFromNode((SNode*)pTableScanNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); if (code != TSDB_CODE_SUCCESS) { From 81a2807e27c3b3f782b8c15debd825b0524cd2a9 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 10 Feb 2023 15:08:34 +0800 Subject: [PATCH 107/267] add test cases --- tests/parallel_test/cases.task | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index d6f9144245..ee647500cf 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -550,7 +550,6 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 
2-query/max_partition.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py From f6b2da8c064d9c96be1fc54f36fc4e9b6da4cf70 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 16:53:11 +0800 Subject: [PATCH 108/267] fix(query): add check for table/super table dropped, when retrieving cache rows. --- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 9 +++++++-- source/libs/executor/src/cachescanoperator.c | 11 +++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index a837543e62..240dbb5d0c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -117,8 +117,13 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, return TSDB_CODE_SUCCESS; } - STableKeyInfo* pKeyInfo = &((STableKeyInfo*)pTableIdList)[0]; - p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, pKeyInfo->uid, -1, 1); + p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, suid, -1, 1); + if (p->pSchema == NULL) { + taosMemoryFree(p); + tsdbWarn("stable:%"PRIu64" has been dropped, failed to retrieve cached rows, %s", suid, idstr); + return TSDB_CODE_PAR_TABLE_NOT_EXIST; + } + p->pTableList = pTableIdList; p->numOfTables = numOfTables; diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 294424746a..7ee186511a 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -215,8 +215,15 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { T_LONG_JMP(pTaskInfo->env, code); } - tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num, - taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, pTaskInfo->id.str); + code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num, + taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, + pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + pInfo->currentGroupIndex += 1; + taosArrayClear(pInfo->pUidList); + continue; + } + taosArrayClear(pInfo->pUidList); code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList); From 6839ed22f242f5f818a5ab0d685900c2c6cd285e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 18:28:35 +0800 Subject: [PATCH 109/267] fix(query): fix bug in tag filter. 
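The metaGetTableTags() rework below separates the two lookup paths: when no uids are specified, every child table of the super table is appended; when specific uids are passed in, each requested uid is first keyed to its slot in the caller's array, and a single pass over the child-table cursor then copies a tag blob only into slots that are still empty. A self-contained sketch of that second path follows; the types are simplified, and a sorted array with bsearch() stands in for the taosHash map the real code uses.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint64_t uid;
  char    *pTagVal;   /* owned copy of the tag blob, NULL until filled */
  int32_t  vLen;
} SUidTagSketch;

typedef struct { uint64_t uid; int32_t idx; } SUidIndex;

static int cmpUid(const void *a, const void *b) {
  uint64_t ua = ((const SUidIndex *)a)->uid, ub = ((const SUidIndex *)b)->uid;
  return (ua > ub) - (ua < ub);
}

/* fill tag values for the requested uids from one pass over (uid, tag) records */
static void fillRequestedTags(SUidTagSketch *req, int32_t nReq,
                              const uint64_t *scanUid, const char *const *scanTag,
                              const int32_t *scanLen, int32_t nScan) {
  SUidIndex *map = malloc(sizeof(SUidIndex) * nReq);
  for (int32_t i = 0; i < nReq; ++i) {
    map[i] = (SUidIndex){.uid = req[i].uid, .idx = i};
  }
  qsort(map, nReq, sizeof(SUidIndex), cmpUid);

  for (int32_t s = 0; s < nScan; ++s) {          /* one cursor pass */
    SUidIndex  key = {.uid = scanUid[s]};
    SUidIndex *hit = bsearch(&key, map, nReq, sizeof(SUidIndex), cmpUid);
    if (hit == NULL) {
      continue;                                  /* table was not requested */
    }
    SUidTagSketch *p = &req[hit->idx];
    if (p->pTagVal == NULL) {                    /* copy the tag blob once */
      p->pTagVal = malloc(scanLen[s]);
      memcpy(p->pTagVal, scanTag[s], scanLen[s]);
      p->vLen = scanLen[s];
    }
  }
  free(map);
}

Recording each uid's index (rather than just its membership) is what lets the single cursor pass write the value straight into the matching STUidTagInfo entry; the previous version only recorded membership and, for the specified-uid case, fell through without copying any tag value.
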
--- source/dnode/vnode/src/meta/metaQuery.c | 37 ++-- source/libs/executor/src/executil.c | 236 +++--------------------- 2 files changed, 51 insertions(+), 222 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 21c9c77d4d..20f789b348 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1415,26 +1415,41 @@ int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *pUidTagInfo) { if (numOfElems > 0) { pSepecifiedUidMap = taosHashInit(numOfElems / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); for (int i = 0; i < numOfElems; i++) { - int64_t *uid = taosArrayGet(pUidTagInfo, i); - taosHashPut(pSepecifiedUidMap, uid, sizeof(int64_t), 0, 0); + STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, i); + taosHashPut(pSepecifiedUidMap, &pTagInfo->uid, sizeof(uint64_t), &i, sizeof(int32_t)); } } - while (1) { - tb_uid_t uid = metaCtbCursorNext(pCur); - if (uid == 0) { - break; - } + if (numOfElems == 0) { // all data needs to be added into the pUidTagInfo list + while (1) { + tb_uid_t uid = metaCtbCursorNext(pCur); + if (uid == 0) { + break; + } - if (numOfElems > 0 && taosHashGet(pSepecifiedUidMap, &uid, sizeof(int64_t)) == NULL) { - continue; - } else if (numOfElems == 0) { STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal}; info.pTagVal = taosMemoryMalloc(pCur->vLen); memcpy(info.pTagVal, pCur->pVal, pCur->vLen); - taosArrayPush(pUidTagInfo, &info); } + } else { // only the specified tables need to be added + while (1) { + tb_uid_t uid = metaCtbCursorNext(pCur); + if (uid == 0) { + break; + } + + int32_t *index = taosHashGet(pSepecifiedUidMap, &uid, sizeof(uint64_t)); + if (index == NULL) { + continue; + } + + STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, *index); + if (pTagInfo->pTagVal == NULL) { + pTagInfo->pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(pTagInfo->pTagVal, pCur->pVal, pCur->vLen); + } + } } taosHashCleanup(pSepecifiedUidMap); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index ce81408a91..61aac2761f 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -48,7 +48,7 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond); static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo); -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList); +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* metaHandle); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? 
-1 : ((SLimitNode*)pLimit)->offset; } @@ -394,188 +394,6 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara return TSDB_CODE_SUCCESS; } -static void getColInfoResult(void* metaHandle, int64_t suid, SArray* pUidList, SNode* pTagCond) { - int32_t code = TSDB_CODE_SUCCESS; - SArray* pBlockList = NULL; - SSDataBlock* pResBlock = NULL; - SHashObj* tags = NULL; - SScalarParam output = {0}; - - tagFilterAssist ctx = {0}; - ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - if (ctx.colHash == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - if (ctx.cInfoList == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); - - SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; - - // int64_t stt = taosGetTimestampUs(); - SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo)); - int32_t filter = optimizeTbnameInCond(metaHandle, suid, pUidTagList, pTagCond); - if (filter == 0) { // tbname in filter is activated, do nothing and return - int32_t numOfRows = taosArrayGetSize(pUidTagList); - code = createResultData(&type, numOfRows, &output); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - qError("failed to create result, reason:%s", tstrerror(code)); - goto end; - } - - taosArrayEnsureCap(pUidList, numOfRows); - for(int32_t i = 0; i < numOfRows; ++i) { - STUidTagInfo* pInfo = taosArrayGet(pUidTagList, i); - taosArrayPush(pUidList, &pInfo->uid); - } - - terrno = 0; - goto end; - } else { - // here we retrieve all tags from the vnode table-meta store - int32_t numOfExisted = taosArrayGetSize(pUidList); - if (numOfExisted) { - for(int32_t i = 0; i < numOfExisted; ++i) { - uint64_t* uid = taosArrayGet(pUidList, i); - STUidTagInfo info = {.uid = *uid}; - taosArrayPush(pUidTagList, &info); - } - } - - code = metaGetTableTags(metaHandle, suid, pUidTagList); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); - terrno = code; - goto end; - } - } - - pResBlock = createDataBlock(); - if (pResBlock == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) { - SColumnInfoData colInfo = {0}; - colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i); - blockDataAppendColInfo(pResBlock, &colInfo); - } - - int32_t numOfTables = taosArrayGetSize(pUidTagList); - if (numOfTables == 0) { - goto end; - } - - code = blockDataEnsureCapacity(pResBlock, numOfTables); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - goto end; - } - - int32_t numOfCols = taosArrayGetSize(pResBlock->pDataBlock); - - for (int32_t i = 0; i < numOfTables; i++) { - STUidTagInfo* p1 = taosArrayGet(pUidTagList, i); - - for (int32_t j = 0; j < numOfCols; j++) { - SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); - - if (pColInfo->info.colId == -1) { // tbname - char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(str, p1->name); - colDataAppend(pColInfo, i, str, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); -#endif - } else { - STagVal tagVal = {0}; - tagVal.cid = pColInfo->info.colId; - if (p1->pTagVal == NULL) { - colDataAppendNULL(pColInfo, i); - } - - const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); - - if (p == 
NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { - colDataAppendNULL(pColInfo, i); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { - colDataAppend(pColInfo, i, p, false); - } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char* tmp = alloca(tagVal.nData + VARSTR_HEADER_SIZE + 1); - varDataSetLen(tmp, tagVal.nData); - memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); - colDataAppend(pColInfo, i, tmp, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp + 2); -#endif - } else { - colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); -#if TAG_FILTER_DEBUG - if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { - qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { - qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); - } -#endif - } - } - } - } - - pResBlock->info.rows = numOfTables; - - // int64_t st1 = taosGetTimestampUs(); - // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); - pBlockList = taosArrayInit(2, POINTER_BYTES); - taosArrayPush(pBlockList, &pResBlock); - - code = createResultData(&type, numOfTables, &output); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - goto end; - } - - code = scalarCalculate(pTagCond, pBlockList, &output); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to calculate scalar, reason:%s", tstrerror(code)); - terrno = code; - goto end; - } - - taosArrayClear(pUidList); - - bool* pResult = (bool*)output.columnData->pData; - for(int32_t i = 0; i < numOfTables; ++i) { - uint64_t uid = ((STUidTagInfo*)taosArrayGet(pUidTagList, i))->uid; - qDebug("tagfilter get uid:%" PRId64 ", res:%d", uid, pResult[i]); - - if (pResult[i]) { - taosArrayPush(pUidList, &uid); - } - - i += 1; - } - -end: - taosHashCleanup(tags); - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); - blockDataDestroy(pResBlock); - taosArrayDestroy(pBlockList); - - colDataDestroy(output.columnData); - taosMemoryFreeClear(output.columnData); -// return output.columnData; -} - static void releaseColInfoData(void* pCol) { if (pCol) { SColumnInfoData* col = (SColumnInfoData*)pCol; @@ -584,12 +402,17 @@ static void releaseColInfoData(void* pCol) { } } +void freeItem(void* p) { + STUidTagInfo *pInfo = p; + if (pInfo->pTagVal != NULL) { + taosMemoryFree(pInfo->pTagVal); + } +} + int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) { int32_t code = TSDB_CODE_SUCCESS; SArray* pBlockList = NULL; SSDataBlock* pResBlock = NULL; - SHashObj* tags = NULL; - SArray* uidList = NULL; void* keyBuf = NULL; SArray* groupData = NULL; @@ -618,21 +441,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis REPLACE_NODE(pNode); } - pResBlock = createDataBlock(); - if (pResBlock == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) { - SColumnInfoData colInfo = {0}; - colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i); - blockDataAppendColInfo(pResBlock, &colInfo); - } - SArray* pUidTagList = taosArrayInit(8, sizeof(STUidTagInfo)); - - uidList = taosArrayInit(rows, sizeof(uint64_t)); for (int32_t i = 0; i < rows; ++i) { STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i); STUidTagInfo info = {.uid = pkeyInfo->uid}; @@ -640,15 +449,15 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis } // int64_t stt = taosGetTimestampUs(); - tags = 
taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); code = metaGetTableTags(metaHandle, pTableListInfo->suid, pUidTagList); if (code != TSDB_CODE_SUCCESS) { goto end; } int32_t numOfTables = taosArrayGetSize(pUidTagList); - pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList); - if (code != TSDB_CODE_SUCCESS) { + pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle); + if (pResBlock == NULL) { + code = terrno; goto end; } @@ -759,12 +568,11 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis end: taosMemoryFreeClear(keyBuf); - taosHashCleanup(tags); taosHashCleanup(ctx.colHash); taosArrayDestroy(ctx.cInfoList); blockDataDestroy(pResBlock); taosArrayDestroy(pBlockList); - taosArrayDestroy(uidList); + taosArrayDestroyEx(pUidTagList, freeItem); taosArrayDestroyP(groupData, releaseColInfoData); return code; } @@ -992,7 +800,7 @@ static void genTagFilterDigest(const SNode* pTagCond, T_MD5_CTX* pContext) { taosMemoryFree(payload); } -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList) { +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* metaHandle) { SSDataBlock* pResBlock = createDataBlock(); if (pResBlock == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -1023,7 +831,12 @@ static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTa if (pColInfo->info.colId == -1) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(str, p1->name); + if (p1->name != NULL) { + STR_TO_VARSTR(str, p1->name); + } else { // name is not retrieved during filter + metaGetTableNameByUid(metaHandle, p1->uid, str); + } + colDataAppend(pColInfo, i, str, false); #if TAG_FILTER_DEBUG qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); @@ -1151,8 +964,9 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN goto end; } - pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList); - if (code != TSDB_CODE_SUCCESS) { + pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle); + if (pResBlock == NULL) { + code = terrno; goto end; } @@ -1163,7 +977,7 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN code = createResultData(&type, numOfTables, &output); if (code != TSDB_CODE_SUCCESS) { - terrno = code; + terrno = code k; goto end; } @@ -1181,11 +995,11 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN taosArrayDestroy(ctx.cInfoList); blockDataDestroy(pResBlock); taosArrayDestroy(pBlockList); + taosArrayDestroyEx(pUidTagList, freeItem); colDataDestroy(output.columnData); taosMemoryFreeClear(output.columnData); - - return TSDB_CODE_SUCCESS; + return code; } int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, From 8b481e3d5386d05a7ff63f85c98284153f93af55 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 18:30:01 +0800 Subject: [PATCH 110/267] fix(query): not return 0 for count by default. 
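This is a one-line default change for tsCountAlwaysReturnValue (flipped back to 1 by a later patch in this series, "output value for count if no data exists"). The flag only matters for count()/hyperloglog() over an empty or all-NULL input: either the query still emits a single row carrying 0, or it emits no row at all. A minimal sketch of the behaviour being toggled, not the real executor code:

#include <stdbool.h>
#include <stdint.h>

/* returns the number of output rows; *outCount is valid only when 1 is returned */
static int32_t finalizeCount(int64_t matchedRows, bool countAlwaysReturnValue,
                             int64_t *outCount) {
  if (matchedRows == 0 && !countAlwaysReturnValue) {
    return 0;               /* suppress the output row entirely */
  }
  *outCount = matchedRows;  /* a single row, possibly carrying 0 */
  return 1;
}
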
--- source/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d4849650e6..d726016dcf 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -124,7 +124,7 @@ int32_t tsCompressMsgSize = -1; int32_t tsCompressColData = -1; // count/hyperloglog function always return values in case of all NULL data or Empty data set. -int32_t tsCountAlwaysReturnValue = 1; +int32_t tsCountAlwaysReturnValue = 0; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; From 6122a2fb0c281a8b43dfc5e94831ec1de0aa600b Mon Sep 17 00:00:00 2001 From: xinsheng Ren <285808407@qq.com> Date: Fri, 10 Feb 2023 18:31:09 +0800 Subject: [PATCH 111/267] fix stackflow on mac (#19592) * fix stackflow on mac * ALL UPPER CASE for macro definition --------- Co-authored-by: facetosea <25808407@qq.com> --- source/os/src/osSysinfo.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index aeaa4fcafd..897e68a126 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -18,6 +18,7 @@ #include "taoserror.h" #define PROCESS_ITEM 12 +#define UUIDLEN37 37 typedef struct { uint64_t user; @@ -830,7 +831,8 @@ int32_t taosGetSystemUUID(char *uid, int32_t uidlen) { return 0; #elif defined(_TD_DARWIN_64) uuid_t uuid = {0}; - char buf[37] = {0}; + char buf[UUIDLEN37]; + memset(buf, 0, UUIDLEN37); uuid_generate(uuid); // it's caller's responsibility to make enough space for `uid`, that's 36-char + 1-null uuid_unparse_lower(uuid, buf); From 5dfa0e69a9745c1b863bc46a9613d45aca0740d9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 19:06:23 +0800 Subject: [PATCH 112/267] fix(query): set correct schema info for normal table. --- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 45 +++++++++++++++++---- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 240dbb5d0c..fd5e8eb6e0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -99,6 +99,38 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p return TSDB_CODE_SUCCESS; } +static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* idstr) { + int32_t numOfTables = p->numOfTables; + + if (suid != 0) { + p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, suid, -1, 1); + if (p->pSchema == NULL) { + taosMemoryFree(p); + tsdbWarn("stable:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", suid, idstr); + return TSDB_CODE_PAR_TABLE_NOT_EXIST; + } + } else { + for (int32_t i = 0; i < numOfTables; ++i) { + uint64_t uid = p->pTableList[i].uid; + p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, uid, -1, 1); + if (p->pSchema != NULL) { + break; + } + + tsdbWarn("table:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", uid, idstr); + } + + // all queried tables have been dropped already, return immediately. 
+ if (p->pSchema == NULL) { + taosMemoryFree(p); + tsdbWarn("all queried tables has been dropped, try next group, %s", idstr); + return TSDB_CODE_PAR_TABLE_NOT_EXIST; + } + } + + return TSDB_CODE_SUCCESS; +} + int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols, uint64_t suid, void** pReader, const char* idstr) { *pReader = NULL; @@ -117,16 +149,15 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, return TSDB_CODE_SUCCESS; } - p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, suid, -1, 1); - if (p->pSchema == NULL) { - taosMemoryFree(p); - tsdbWarn("stable:%"PRIu64" has been dropped, failed to retrieve cached rows, %s", suid, idstr); - return TSDB_CODE_PAR_TABLE_NOT_EXIST; - } - p->pTableList = pTableIdList; p->numOfTables = numOfTables; + int32_t code = setTableSchema(p, suid, idstr); + if (code != TSDB_CODE_SUCCESS) { + tsdbCacherowsReaderClose(p); + return code; + } + p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES); if (p->transferBuf == NULL) { tsdbCacherowsReaderClose(p); From 8781f31d1767a4af56572ccdd48b62c5f4d668f7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 19:22:38 +0800 Subject: [PATCH 113/267] fix(query): fix a typo. --- source/libs/executor/src/executil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 208f472224..232299ef6e 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -977,7 +977,7 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN code = createResultData(&type, numOfTables, &output); if (code != TSDB_CODE_SUCCESS) { - terrno = code k; + terrno = code; goto end; } From 595a57dfc9051b3bac8272f860be411f1552003b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 23:32:16 +0800 Subject: [PATCH 114/267] fix(query): fix a typo --- source/libs/executor/src/executil.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 232299ef6e..325e26b971 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -890,8 +890,6 @@ static void doSetQualifiedUid(SArray* pUidList, const SArray* pUidTagList, bool* if (pResultList[i]) { taosArrayPush(pUidList, &uid); } - - i += 1; } } From 120ee13d6d6ba7c17d93097a71e777268ffcfb22 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 23:34:03 +0800 Subject: [PATCH 115/267] refactor: do some internal refactor. 
--- source/libs/executor/src/executil.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 325e26b971..63b1ab1b05 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -895,12 +895,14 @@ static void doSetQualifiedUid(SArray* pUidList, const SArray* pUidTagList, bool* static void copyExistedUids(SArray* pUidTagList, const SArray* pUidList) { int32_t numOfExisted = taosArrayGetSize(pUidList); - if (numOfExisted) { - for(int32_t i = 0; i < numOfExisted; ++i) { - uint64_t* uid = taosArrayGet(pUidList, i); - STUidTagInfo info = {.uid = *uid}; - taosArrayPush(pUidTagList, &info); - } + if (numOfExisted == 0) { + return; + } + + for(int32_t i = 0; i < numOfExisted; ++i) { + uint64_t* uid = taosArrayGet(pUidList, i); + STUidTagInfo info = {.uid = *uid}; + taosArrayPush(pUidTagList, &info); } } @@ -1065,7 +1067,6 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t); char* pPayload = taosMemoryMalloc(size); - // todo convert to uid list if (numOfTables > 0) { *(int32_t*)pPayload = numOfTables; memcpy(pPayload + sizeof(int32_t), taosArrayGet(pRes, 0), numOfTables * sizeof(uint64_t)); From b14afeef0a94f55c7d0ea1f6fcde21747c822420 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Feb 2023 23:40:53 +0800 Subject: [PATCH 116/267] refactor: do some internal refactor. --- source/libs/executor/src/executil.c | 32 ++++++++++++++--------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 63b1ab1b05..7e46be6497 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -47,7 +47,7 @@ static int32_t removeInvalidUid(SArray* uids, SHashObj* tags); static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* pTagCond); static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond); static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, - SNode* pTagIndexCond, STableListInfo* pListInfo); + SNode* pTagIndexCond, STableListInfo* pListInfo, const char* idstr); static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* metaHandle); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? 
-1 : ((SLimitNode*)pLimit)->limit; } @@ -1003,21 +1003,21 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN } int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, - STableListInfo* pListInfo) { + STableListInfo* pListInfo, const char* idstr) { int32_t code = TSDB_CODE_SUCCESS; size_t numOfTables = 0; uint64_t tableUid = pScanNode->uid; pListInfo->suid = pScanNode->suid; - SArray* pRes = taosArrayInit(8, sizeof(uint64_t)); + SArray* pUidList = taosArrayInit(8, sizeof(uint64_t)); if (pScanNode->tableType != TSDB_SUPER_TABLE) { if (metaIsTableExist(metaHandle, tableUid)) { - taosArrayPush(pRes, &tableUid); + taosArrayPush(pUidList, &tableUid); } - code = doFilterByTagCond(pListInfo, pRes, pTagCond, metaHandle); + code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -1027,7 +1027,6 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, if (tsTagFilterCache) { // try to retrieve the result from meta cache genTagFilterDigest(pTagCond, &context); - SArray* pUidList = taosArrayInit(8, sizeof(uint64_t)); bool acquired = false; metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pUidList, &acquired); @@ -1039,7 +1038,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, if (!pTagCond) { // no tag filter condition exists, let's fetch all tables of this super table ASSERT(pTagIndexCond == NULL); - vnodeGetCtbIdList(pVnode, pScanNode->suid, pRes); + vnodeGetCtbIdList(pVnode, pScanNode->suid, pUidList); } else { // failed to find the result in the cache, let try to calculate the results if (pTagIndexCond) { @@ -1047,7 +1046,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; SIdxFltStatus status = SFLT_NOT_INDEX; - code = doFilterTag(pTagIndexCond, &metaArg, pRes, &status); + code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status); if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake // qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); code = TDB_CODE_SUCCESS; @@ -1055,13 +1054,13 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } } - code = doFilterByTagCond(pListInfo, pRes, pTagCond, metaHandle); + code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle); if (code != TSDB_CODE_SUCCESS) { return code; } // let's add the filter results into meta-cache - numOfTables = taosArrayGetSize(pRes); + numOfTables = taosArrayGetSize(pUidList); if (tsTagFilterCache) { size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t); @@ -1069,7 +1068,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, if (numOfTables > 0) { *(int32_t*)pPayload = numOfTables; - memcpy(pPayload + sizeof(int32_t), taosArrayGet(pRes, 0), numOfTables * sizeof(uint64_t)); + memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t)); } metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); @@ -1078,20 +1077,19 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } _end: - numOfTables = taosArrayGetSize(pRes); for (int i = 0; i < numOfTables; i++) 
{ - STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(pRes, i), .groupId = 0}; + STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(pUidList, i), .groupId = 0}; void* p = taosArrayPush(pListInfo->pTableList, &info); if (p == NULL) { - taosArrayDestroy(pRes); + taosArrayDestroy(pUidList); return TSDB_CODE_OUT_OF_MEMORY; } - qTrace("tagfilter get uid:%" PRIu64 "", info.uid); + qTrace("tagfilter get uid:%" PRIu64", %s", info.uid, idstr); } - taosArrayDestroy(pRes); + taosArrayDestroy(pUidList); return code; } @@ -1990,7 +1988,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags return TSDB_CODE_INVALID_PARA; } - int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo); + int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, idStr); if (code != TSDB_CODE_SUCCESS) { qError("failed to getTableList, code: %s", tstrerror(code)); return code; From ae7fc15170869d03fd95a5a0614b24cc602c1e46 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Feb 2023 00:11:21 +0800 Subject: [PATCH 117/267] fix(query): set correct table number. --- source/libs/executor/src/executil.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 7e46be6497..23251636bd 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1077,6 +1077,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } _end: + numOfTables = taosArrayGetSize(pUidList); for (int i = 0; i < numOfTables; i++) { STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(pUidList, i), .groupId = 0}; From 2de2733405930ecf142bc22b05d8849f9b0db148 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Feb 2023 00:27:17 +0800 Subject: [PATCH 118/267] fix(query): set correct table number. 
--- source/libs/executor/src/executil.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 23251636bd..13666af140 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1007,14 +1007,12 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, int32_t code = TSDB_CODE_SUCCESS; size_t numOfTables = 0; - uint64_t tableUid = pScanNode->uid; pListInfo->suid = pScanNode->suid; - SArray* pUidList = taosArrayInit(8, sizeof(uint64_t)); if (pScanNode->tableType != TSDB_SUPER_TABLE) { - if (metaIsTableExist(metaHandle, tableUid)) { - taosArrayPush(pUidList, &tableUid); + if (metaIsTableExist(metaHandle, pScanNode->uid)) { + taosArrayPush(pUidList, &pScanNode->uid); } code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle); @@ -1042,8 +1040,10 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } else { // failed to find the result in the cache, let try to calculate the results if (pTagIndexCond) { - SIndexMetaArg metaArg = { - .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; + SIndexMetaArg metaArg = {.metaEx = metaHandle, + .idx = tsdbGetIdx(metaHandle), + .ivtIdx = tsdbGetIvtIdx(metaHandle), + .suid = pScanNode->uid}; SIdxFltStatus status = SFLT_NOT_INDEX; code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status); From 38df6316292fd3f165f9258857f34e00d21b494f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Feb 2023 00:28:10 +0800 Subject: [PATCH 119/267] fix(query): set correct table number. --- source/libs/executor/src/executil.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 13666af140..79187b441e 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1040,10 +1040,9 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } else { // failed to find the result in the cache, let try to calculate the results if (pTagIndexCond) { - SIndexMetaArg metaArg = {.metaEx = metaHandle, - .idx = tsdbGetIdx(metaHandle), - .ivtIdx = tsdbGetIvtIdx(metaHandle), - .suid = pScanNode->uid}; + void* pIndex = tsdbGetIvtIdx(metaHandle); + SIndexMetaArg metaArg = { + .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = pIndex, .suid = pScanNode->uid}; SIdxFltStatus status = SFLT_NOT_INDEX; code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status); From 18cad4f33a9b8c6dd7fb992eb5e122f06b21b95b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 11 Feb 2023 00:45:42 +0800 Subject: [PATCH 120/267] fix: taosbenchmark fix sml-rest mem leak for main (#19916) * fix: taosbenchmark fix sml-rest mem leak for main * fix: update taos-tools 4097420 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 48f31c6daf..19895a7f05 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG e04f39b + GIT_TAG 4097420 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From f6a262eeb1a188f00dfc2590101e0a63e3e03374 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: 
Sat, 11 Feb 2023 01:56:28 +0800 Subject: [PATCH 121/267] fix(query): fix invalid write. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 9 +++++---- source/libs/executor/src/executil.c | 22 +++++++++++++-------- tests/script/tsim/parser/regressiontest.sim | 2 +- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 7eb01206dd..8ec076fb63 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -694,10 +694,12 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, continue; } - if (pBlockIdx->uid == pList->tableUidList[j]) { - i += 1; + if (pBlockIdx->uid > pList->tableUidList[j]) { j += 1; + continue; + } + if (pBlockIdx->uid == pList->tableUidList[j]) { // this block belongs to a table that is not queried. void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t)); if (p == NULL) { @@ -711,9 +713,8 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, } taosArrayPush(pIndexList, pBlockIdx); - } - if (pBlockIdx->uid > pList->tableUidList[j]) { + i += 1; j += 1; } } diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 79187b441e..b398f66e19 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -127,23 +127,29 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in pGroupResInfo->pRows = taosArrayInit(size, POINTER_BYTES); size_t keyLen = 0; - int32_t num = 0, iter = 0, itemSize = 0; + int32_t iter = 0; + int32_t bufLen = 0, offset = 0; + // todo move away and record this during create window + while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) { + /*void* key = */tSimpleHashGetKey(pData, &keyLen); + bufLen += keyLen + sizeof(SResultRowPosition); + } + + pGroupResInfo->pBuf = taosMemoryMalloc(bufLen); + + iter = 0; while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) { void* key = tSimpleHashGetKey(pData, &keyLen); - if (pGroupResInfo->pBuf == NULL) { - itemSize = keyLen + sizeof(SResultRowPosition); - pGroupResInfo->pBuf = taosMemoryMalloc(size * itemSize); - } - - SResKeyPos* p = (SResKeyPos*)(pGroupResInfo->pBuf + num * itemSize); + SResKeyPos* p = (SResKeyPos*) (pGroupResInfo->pBuf + offset); p->groupId = *(uint64_t*)key; p->pos = *(SResultRowPosition*)pData; memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t)); taosArrayPush(pGroupResInfo->pRows, &p); - num += 1; + + offset += keyLen + sizeof(struct SResultRowPosition); } if (order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC) { diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim index c08b1bbf27..8f3ad542d0 100644 --- a/tests/script/tsim/parser/regressiontest.sim +++ b/tests/script/tsim/parser/regressiontest.sim @@ -165,7 +165,7 @@ if $data00 != 10 then return -1 endi -sql select last_row(*) from st1 group by a +sql select last_row(*) from st1 group by a order by a desc if $rows != 2 then return -1 endi From c501c81266deede6ec81c95eb622d0f45180fc25 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 17:19:21 +0800 Subject: [PATCH 122/267] modify format and remove no use delete --- tools/shell/src/shellAuto.c | 84 +++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 41 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 
b391d59725..dbd378f718 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -60,23 +60,23 @@ SWords shellCommands[] = { {"alter database " " ;", 0, 0, NULL}, - {"alter dnode balance ", 0, 0, NULL}, - {"alter dnode resetlog;", 0, 0, NULL}, - {"alter dnode debugFlag 141;", 0, 0, NULL}, - {"alter dnode monitor 1;", 0, 0, NULL}, - {"alter all dnodes monitor ", 0, 0, NULL}, - {"alter alldnodes balance ", 0, 0, NULL}, - {"alter alldnodes resetlog;", 0, 0, NULL}, - {"alter alldnodes debugFlag 141;", 0, 0, NULL}, - {"alter alldnodes monitor 1;", 0, 0, NULL}, + {"alter dnode \"resetlog\";", 0, 0, NULL}, + {"alter dnode \"debugFlag\" \"141\";", 0, 0, NULL}, + {"alter dnode \"monitor\" \"0\";", 0, 0, NULL}, + {"alter dnode \"monitor\" \"1\";", 0, 0, NULL}, + {"alter all dnodes \"monitor\" ", 0, 0, NULL}, + {"alter all dnodes \"balance\" ", 0, 0, NULL}, + {"alter all dnodes \"resetlog\";", 0, 0, NULL}, + {"alter all dnodes \"debugFlag\" \"141\";", 0, 0, NULL}, + {"alter all dnodes \"monitor\" \"1\";", 0, 0, NULL}, {"alter table ;", 0, 0, NULL}, {"alter table modify column", 0, 0, NULL}, - {"alter local resetlog;", 0, 0, NULL}, - {"alter local DebugFlag 143;", 0, 0, NULL}, - {"alter local cDebugFlag 143;", 0, 0, NULL}, - {"alter local uDebugFlag 143;", 0, 0, NULL}, - {"alter local rpcDebugFlag 143;", 0, 0, NULL}, - {"alter local tmrDebugFlag 143;", 0, 0, NULL}, + {"alter local \"resetlog\";", 0, 0, NULL}, + {"alter local \"DebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"cDebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"uDebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"rpcDebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"tmrDebugFlag\" \"143\";", 0, 0, NULL}, {"alter topic", 0, 0, NULL}, {"alter user ;", 0, 0, NULL}, // 20 @@ -108,6 +108,7 @@ SWords shellCommands[] = { {"drop topic ;", 0, 0, NULL}, {"drop stream ;", 0, 0, NULL}, {"explain select", 0, 0, NULL}, // 44 append sub sql + {"flush database ;", 0, 0, NULL}, {"help;", 0, 0, NULL}, {"grant all on to ;", 0, 0, NULL}, {"grant read on to ;", 0, 0, NULL}, @@ -121,7 +122,6 @@ SWords shellCommands[] = { {"revoke read on from ;", 0, 0, NULL}, {"revoke write on from ;", 0, 0, NULL}, {"select * from ", 0, 0, NULL}, - {"select _block_dist() from \\G;", 0, 0, NULL}, {"select client_version();", 0, 0, NULL}, // 60 {"select current_user();", 0, 0, NULL}, @@ -247,7 +247,7 @@ char* db_options[] = {"keep ", "wal_retention_size ", "wal_segment_size "}; -char* alter_db_options[] = {"keep ", "cachemodel ", "cachesize ", "wal_fsync_period ", "wal_level "}; +char* alter_db_options[] = {"cachemodel ", "replica ", "keep ", "cachesize ", "wal_fsync_period ", "wal_level "}; char* data_types[] = {"timestamp", "int", "int unsigned", "varchar(16)", @@ -327,19 +327,19 @@ int cntDel = 0; // delete byte count after next press tab // show auto tab introduction void printfIntroduction() { - printf(" ****************************** Tab Completion **********************************\n"); - printf(" * The TDengine CLI supports tab completion for a variety of items, *\n"); - printf(" * including database names, table names, function names and keywords. *\n"); - printf(" * The full list of shortcut keys is as follows: *\n"); - printf(" * [ TAB ] ...... complete the current word *\n"); - printf(" * ...... if used on a blank line, display all valid commands *\n"); - printf(" * [ Ctrl + A ] ...... move cursor to the st[A]rt of the line *\n"); - printf(" * [ Ctrl + E ] ...... 
move cursor to the [E]nd of the line *\n"); - printf(" * [ Ctrl + W ] ...... move cursor to the middle of the line *\n"); - printf(" * [ Ctrl + L ] ...... clear the entire screen *\n"); - printf(" * [ Ctrl + K ] ...... clear the screen after the cursor *\n"); - printf(" * [ Ctrl + U ] ...... clear the screen before the cursor *\n"); - printf(" **********************************************************************************\n\n"); + printf(" ****************************** Tab Completion *************************************\n"); + printf(" * The TDengine CLI supports tab completion for a variety of items, *\n"); + printf(" * including database names, table names, function names and keywords. *\n"); + printf(" * The full list of shortcut keys is as follows: *\n"); + printf(" * [ TAB ] ...... complete the current word *\n"); + printf(" * ...... if used on a blank line, display all supported commands *\n"); + printf(" * [ Ctrl + A ] ...... move cursor to the st[A]rt of the line *\n"); + printf(" * [ Ctrl + E ] ...... move cursor to the [E]nd of the line *\n"); + printf(" * [ Ctrl + W ] ...... move cursor to the middle of the line *\n"); + printf(" * [ Ctrl + L ] ...... clear the entire screen *\n"); + printf(" * [ Ctrl + K ] ...... clear the screen after the cursor *\n"); + printf(" * [ Ctrl + U ] ...... clear the screen before the cursor *\n"); + printf(" *************************************************************************************\n\n"); } void showHelp() { @@ -348,23 +348,24 @@ void showHelp() { "\n\ ----- A ----- \n\ alter database \n\ - alter dnode balance \n\ - alter dnode resetlog;\n\ - alter all dnodes monitor \n\ - alter alldnodes balance \n\ - alter alldnodes resetlog;\n\ - alter alldnodes debugFlag \n\ - alter alldnodes monitor \n\ + alter dnode \"resetlog\";\n\ + alter dnode \"monitor\" \"0\";\n\ + alter dnode \"monitor\" \"1\";\n\ + alter dnode \"debugflag\" \"143\";\n\ + alter all dnodes \"monitor\" \"0\";\n\ + alter all dnodes \"monitor\" \"1\";\n\ + alter all dnodes \"resetlog\";\n\ + alter all dnodes \"debugFlag\" \n\ alter table ;\n\ alter table modify column\n\ - alter local resetlog;\n\ - alter local DebugFlag 143;\n\ + alter local \"resetlog\";\n\ + alter local \"DebugFlag\" \"143\";\n\ alter topic\n\ alter user ...\n\ ----- C ----- \n\ create table using tags ...\n\ create database ...\n\ - create dnode ...\n\ + create dnode \"fqdn:port\"n\ create index ...\n\ create mnode on dnode ;\n\ create qnode on dnode ;\n\ @@ -387,6 +388,8 @@ void showHelp() { drop stream ;\n\ ----- E ----- \n\ explain select clause ...\n\ + ----- F ----- \n\ + flush database ; ----- H ----- \n\ help;\n\ ----- I ----- \n\ @@ -409,7 +412,6 @@ void showHelp() { revoke write on from ;\n\ ----- S ----- \n\ select * from where ... 
\n\ - select _block_dist() from ;\n\ select client_version();\n\ select current_user();\n\ select database();\n\ From 503b03d661247e21296968d94091ef3cfc2ef385 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 18:29:49 +0800 Subject: [PATCH 123/267] fix: build error --- tools/shell/src/shellAuto.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index dbd378f718..2817ae2324 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -348,9 +348,9 @@ void showHelp() { "\n\ ----- A ----- \n\ alter database \n\ - alter dnode \"resetlog\";\n\ - alter dnode \"monitor\" \"0\";\n\ - alter dnode \"monitor\" \"1\";\n\ + alter dnode 'resetlog';\n\ + alter dnode 'monitor' '0';\n\ + alter dnode 'monitor' \"1\";\n\ alter dnode \"debugflag\" \"143\";\n\ alter all dnodes \"monitor\" \"0\";\n\ alter all dnodes \"monitor\" \"1\";\n\ @@ -389,7 +389,7 @@ void showHelp() { ----- E ----- \n\ explain select clause ...\n\ ----- F ----- \n\ - flush database ; + flush database ;\n\ ----- H ----- \n\ help;\n\ ----- I ----- \n\ From f5c2c6858bb5fae0fef394e89f4ca2f060635c91 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 18:33:45 +0800 Subject: [PATCH 124/267] remove alter balance cmd --- tools/shell/src/shellAuto.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 2817ae2324..0c8c801ae7 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -64,10 +64,9 @@ SWords shellCommands[] = { {"alter dnode \"debugFlag\" \"141\";", 0, 0, NULL}, {"alter dnode \"monitor\" \"0\";", 0, 0, NULL}, {"alter dnode \"monitor\" \"1\";", 0, 0, NULL}, - {"alter all dnodes \"monitor\" ", 0, 0, NULL}, - {"alter all dnodes \"balance\" ", 0, 0, NULL}, {"alter all dnodes \"resetlog\";", 0, 0, NULL}, {"alter all dnodes \"debugFlag\" \"141\";", 0, 0, NULL}, + {"alter all dnodes \"monitor\" \"0\";", 0, 0, NULL}, {"alter all dnodes \"monitor\" \"1\";", 0, 0, NULL}, {"alter table ;", 0, 0, NULL}, {"alter table modify column", 0, 0, NULL}, From 30981ece7d2ec1a59640352d176e1299ac6ac645 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Feb 2023 19:52:54 +0800 Subject: [PATCH 125/267] fix(query): output value for count if no data exists. --- source/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d726016dcf..d4849650e6 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -124,7 +124,7 @@ int32_t tsCompressMsgSize = -1; int32_t tsCompressColData = -1; // count/hyperloglog function always return values in case of all NULL data or Empty data set. 
-int32_t tsCountAlwaysReturnValue = 0; +int32_t tsCountAlwaysReturnValue = 1; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; From 7a3fc004f29f5c0d9c973a91957916371ba29837 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 11 Feb 2023 20:18:29 +0800 Subject: [PATCH 126/267] fix: taosbenchmark invalid read when ctrl-c (#19927) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 19895a7f05..3d10a0921e 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 4097420 + GIT_TAG 74c0357 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From c13b0e68d9aa648c2304226a44e5b4062effbfe5 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 22:19:45 +0800 Subject: [PATCH 127/267] feat: auto fill database name and sys table name --- tools/shell/src/shellAuto.c | 43 ++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 0c8c801ae7..d6538ddf57 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -261,6 +261,12 @@ char* key_tags[] = {"tags("}; char* key_select[] = {"select "}; +char* key_systable[] = { + "ins_dnodes", "ins_mnodes", "ins_modules", "ins_qnodes", "ins_snodes", "ins_cluster", + "ins_databases", "ins_functions", "ins_indexes", "ins_stables", "ins_tables", "ins_tags", + "ins_users", "ins_grants", "ins_vgroups", "ins_configs", "ins_dnode_variables", "ins_topics", + "ins_subscriptions", "ins_streams", "ins_stream_tasks", "ins_vnodes", "ins_user_privileges"}; + // // ------- gobal variant define --------- // @@ -292,8 +298,9 @@ bool waitAutoFill = false; #define WT_VAR_TBOPTION 16 #define WT_VAR_USERACTION 17 #define WT_VAR_KEYSELECT 18 +#define WT_VAR_SYSTABLE 19 -#define WT_VAR_CNT 19 +#define WT_VAR_CNT 20 #define WT_FROM_DB_MAX 6 // max get content from db #define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1) @@ -609,6 +616,10 @@ bool shellAutoInit() { // threads memset(threads, 0, sizeof(TdThread*) * WT_FROM_DB_CNT); + // init database and stable + tireSearchWord(WT_VAR_DBNAME, ""); + tireSearchWord(WT_VAR_STABLE, ""); + // generate varType GenerateVarType(WT_VAR_FUNC, functions, sizeof(functions) / sizeof(char*)); GenerateVarType(WT_VAR_KEYWORD, keywords, sizeof(keywords) / sizeof(char*)); @@ -620,6 +631,7 @@ bool shellAutoInit() { GenerateVarType(WT_VAR_TBOPTION, tb_options, sizeof(tb_options) / sizeof(char*)); GenerateVarType(WT_VAR_USERACTION, user_actions, sizeof(user_actions) / sizeof(char*)); GenerateVarType(WT_VAR_KEYSELECT, key_select, sizeof(key_select) / sizeof(char*)); + GenerateVarType(WT_VAR_SYSTABLE, key_systable, sizeof(key_systable) / sizeof(char*)); return true; } @@ -1661,6 +1673,32 @@ bool matchOther(TAOS* con, SShellCmd* cmd) { return false; } +// last match if nothing matched +bool matchEnd(TAOS* con, SShellCmd* cmd) { + // str dump + bool ret = false; + char* ps = strndup(cmd->command, cmd->commandSize); + char* last = lastWord(ps); + if(strlen(last) < 2 ) { + goto _return; + } + + // match database + if (fillWithType(con, last, WT_VAR_DBNAME)) { + ret = true + goto _return; + } + + if (fillWithType(con, last, WT_VAR_SYSTABLE)) { + ret = 
true + goto _return; + } + +_return: + taosMemoryFree(ps); + return ret; +} + // main key press tab void pressTabKey(SShellCmd* cmd) { // check @@ -1696,6 +1734,9 @@ void pressTabKey(SShellCmd* cmd) { matched = matchSelectQuery(varCon, cmd); if (matched) return; + // match end + matched = matchEnd(varCon, cmd); + return; } From af14254639f5e74c73fc626c1ee51848c008754b Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 22:22:17 +0800 Subject: [PATCH 128/267] feat: auto fill database name and sys table name1 --- tools/shell/src/shellAuto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index d6538ddf57..f1b6c47881 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1685,12 +1685,12 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { // match database if (fillWithType(con, last, WT_VAR_DBNAME)) { - ret = true + ret = true; goto _return; } if (fillWithType(con, last, WT_VAR_SYSTABLE)) { - ret = true + ret = true; goto _return; } From adfdb0967cb70928c27bb46b70bf8932316ef1d5 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 22:25:39 +0800 Subject: [PATCH 129/267] feat: auto fill database name and sys table name2 --- tools/shell/src/shellAuto.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index f1b6c47881..7db4a6f11e 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -32,6 +32,7 @@ void shellShowOnScreen(SShellCmd* cmd); void shellInsertChar(SShellCmd* cmd, char* c, int size); void shellInsertStr(SShellCmd* cmd, char* str, int size); bool appendAfterSelect(TAOS* con, SShellCmd* cmd, char* p, int32_t len); +char* tireSearchWord(int type, char* pre); typedef struct SAutoPtr { STire* p; @@ -1684,12 +1685,12 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { } // match database - if (fillWithType(con, last, WT_VAR_DBNAME)) { + if (fillWithType(con, cmd, last, WT_VAR_DBNAME)) { ret = true; goto _return; } - if (fillWithType(con, last, WT_VAR_SYSTABLE)) { + if (fillWithType(con, cmd, last, WT_VAR_SYSTABLE)) { ret = true; goto _return; } From 2fe1ee2aab9a8b3641e9959785a8204445ee5cfc Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 22:39:33 +0800 Subject: [PATCH 130/267] feat: get dbname after set conn --- tools/shell/src/shellAuto.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 7db4a6f11e..4be35255b7 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -617,10 +617,6 @@ bool shellAutoInit() { // threads memset(threads, 0, sizeof(TdThread*) * WT_FROM_DB_CNT); - // init database and stable - tireSearchWord(WT_VAR_DBNAME, ""); - tireSearchWord(WT_VAR_STABLE, ""); - // generate varType GenerateVarType(WT_VAR_FUNC, functions, sizeof(functions) / sizeof(char*)); GenerateVarType(WT_VAR_KEYWORD, keywords, sizeof(keywords) / sizeof(char*)); @@ -638,7 +634,12 @@ bool shellAutoInit() { } // set conn -void shellSetConn(TAOS* conn) { varCon = conn; } +void shellSetConn(TAOS* conn) { + varCon = conn; + // init database and stable + tireSearchWord(WT_VAR_DBNAME, ""); + tireSearchWord(WT_VAR_STABLE, ""); +} // exit shell auto funciton, shell exit call once void shellAutoExit() { @@ -817,6 +818,7 @@ void* varObtainThread(void* param) { // only match next one word from all match words, return valuue 
must free by caller char* matchNextPrefix(STire* tire, char* pre) { SMatch* match = NULL; + if(tire == NULL) return NULL; // re-use last result if (lastMatch) { From 8bde21894fd33f39312fe9ae296e26d3deaaeffa Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 22:44:43 +0800 Subject: [PATCH 131/267] feat: get dbname after set conn --- tools/shell/src/shellAuto.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 4be35255b7..fcf0d247f1 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1132,6 +1132,7 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) { // main key press tab , matched return true else false bool firstMatchCommand(TAOS* con, SShellCmd* cmd) { + if(con == NULL || cmd == NULL) return false; // parse command SWords* input = (SWords*)taosMemoryMalloc(sizeof(SWords)); memset(input, 0, sizeof(SWords)); From 095a6e0f82bc4dbe34ef4ba52d0737aeeff76c55 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 22:49:17 +0800 Subject: [PATCH 132/267] feat: auto get stable after use db --- tools/shell/src/shellAuto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index fcf0d247f1..9ea6626e22 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -638,7 +638,6 @@ void shellSetConn(TAOS* conn) { varCon = conn; // init database and stable tireSearchWord(WT_VAR_DBNAME, ""); - tireSearchWord(WT_VAR_STABLE, ""); } // exit shell auto funciton, shell exit call once @@ -1957,6 +1956,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) { if (dealUseDB(sql)) { // change to new db + tireSearchWord(WT_VAR_STABLE, ""); return; } From 932dc94f7ed13a7cac45d782f67c0b6a9fcb2e44 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 22:58:18 +0800 Subject: [PATCH 133/267] feat: auto get stable after use db --- tools/shell/src/shellAuto.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 9ea6626e22..bcb2143b4e 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1682,6 +1682,11 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { bool ret = false; char* ps = strndup(cmd->command, cmd->commandSize); char* last = lastWord(ps); + char* elast = strrchr(last, '.'); // find end last + if(elast) { + last = elast; + } + if(strlen(last) < 2 ) { goto _return; } From 17b77c44bed13f705ab2214f24608889359af84a Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 23:02:11 +0800 Subject: [PATCH 134/267] feat: auto get stable after use db --- tools/shell/src/shellAuto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index bcb2143b4e..97ed3dfdb9 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1684,7 +1684,7 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { char* last = lastWord(ps); char* elast = strrchr(last, '.'); // find end last if(elast) { - last = elast; + last = elast + 1; } if(strlen(last) < 2 ) { From 979ca40214384ce2fc0deb3c4317c96eb8c16f76 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 23:09:47 +0800 Subject: [PATCH 135/267] feat: add perf systable name --- tools/shell/src/shellAuto.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git 
a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 97ed3dfdb9..bf07eebf1d 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -263,10 +263,12 @@ char* key_tags[] = {"tags("}; char* key_select[] = {"select "}; char* key_systable[] = { - "ins_dnodes", "ins_mnodes", "ins_modules", "ins_qnodes", "ins_snodes", "ins_cluster", - "ins_databases", "ins_functions", "ins_indexes", "ins_stables", "ins_tables", "ins_tags", - "ins_users", "ins_grants", "ins_vgroups", "ins_configs", "ins_dnode_variables", "ins_topics", - "ins_subscriptions", "ins_streams", "ins_stream_tasks", "ins_vnodes", "ins_user_privileges"}; + "ins_dnodes", "ins_mnodes", "ins_modules", "ins_qnodes", "ins_snodes", "ins_cluster", + "ins_databases", "ins_functions", "ins_indexes", "ins_stables", "ins_tables", "ins_tags", + "ins_users", "ins_grants", "ins_vgroups", "ins_configs", "ins_dnode_variables", "ins_topics", + "ins_subscriptions", "ins_streams", "ins_stream_tasks", "ins_vnodes", "ins_user_privileges", "perf_connections", + "perf_queries", "perf_consumers", "perf_trans", "perf_apps"}; + // // ------- gobal variant define --------- From 494b1f1d8311df693b4cce97af61d90cc3bd451f Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sat, 11 Feb 2023 23:19:19 +0800 Subject: [PATCH 136/267] feat: less one char to match --- tools/shell/src/shellAuto.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index bf07eebf1d..ae020576cf 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1689,7 +1689,8 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { last = elast + 1; } - if(strlen(last) < 2 ) { + // less one char can match + if(strlen(last) == 0 ) { goto _return; } From 30f61ff9d207e4b631d2a78a309264f8d966e481 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 12 Feb 2023 11:44:15 +0800 Subject: [PATCH 137/267] fix: taosbenchmark schemaless refine main (#19932) * fix: taos-tools 143b9e4 for taosbenchmark schemaless refine for main * fix: update taos-tools 723f696 * test: fix ../tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py * test: check nchar temporarily as diff behavior between main and 3.0 branch * fix: update taos-tools 181bcac --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 3d10a0921e..63014500ef 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 74c0357 + GIT_TAG 05b61e0 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 9c296f6c7135da0c25010f5437bf702d58b8b0cc Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 12 Feb 2023 16:23:41 +0800 Subject: [PATCH 138/267] fix: dot not complete with dbname --- tools/shell/src/shellAuto.c | 77 ++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 32 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index ae020576cf..48ecf97622 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -33,6 +33,7 @@ void shellInsertChar(SShellCmd* cmd, char* c, int size); void shellInsertStr(SShellCmd* cmd, char* str, int size); bool appendAfterSelect(TAOS* con, SShellCmd* cmd, char* p, int32_t len); char* tireSearchWord(int 
type, char* pre); +bool updateTireValue(int type, bool autoFill) ; typedef struct SAutoPtr { STire* p; @@ -639,7 +640,7 @@ bool shellAutoInit() { void shellSetConn(TAOS* conn) { varCon = conn; // init database and stable - tireSearchWord(WT_VAR_DBNAME, ""); + updateTireValue(WT_VAR_DBNAME, false); } // exit shell auto funciton, shell exit call once @@ -740,6 +741,38 @@ void putBackAutoPtr(int type, STire* tire) { // ------------------- var Word -------------------------- // +// return true is need update value by async +bool updateTireValue(int type, bool autoFill) { + // TYPE CONTEXT GET FROM DB + taosThreadMutexLock(&tiresMutex); + + // check need obtain from server + if (tires[type] == NULL) { + waitAutoFill = autoFill; + // need async obtain var names from db sever + if (threads[type] != NULL) { + if (taosThreadRunning(threads[type])) { + // thread running , need not obtain again, return + taosThreadMutexUnlock(&tiresMutex); + return NULL; + } + // destroy previous thread handle for new create thread handle + taosDestroyThread(threads[type]); + threads[type] = NULL; + } + + // create new + void* param = taosMemoryMalloc(sizeof(int)); + *((int*)param) = type; + threads[type] = taosCreateThread(varObtainThread, param); + taosThreadMutexUnlock(&tiresMutex); + return true; + } + taosThreadMutexUnlock(&tiresMutex); + + return false; +} + #define MAX_CACHED_CNT 100000 // max cached rows 10w // write sql result to var name, return write rows cnt int writeVarNames(int type, TAOS_RES* tres) { @@ -905,39 +938,16 @@ char* tireSearchWord(int type, char* pre) { return matchNextPrefix(tire, pre); } - // TYPE CONTEXT GET FROM DB - taosThreadMutexLock(&tiresMutex); - - // check need obtain from server - if (tires[type] == NULL) { - waitAutoFill = true; - // need async obtain var names from db sever - if (threads[type] != NULL) { - if (taosThreadRunning(threads[type])) { - // thread running , need not obtain again, return - taosThreadMutexUnlock(&tiresMutex); - return NULL; - } - // destroy previous thread handle for new create thread handle - taosDestroyThread(threads[type]); - threads[type] = NULL; - } - - // create new - void* param = taosMemoryMalloc(sizeof(int)); - *((int*)param) = type; - threads[type] = taosCreateThread(varObtainThread, param); - taosThreadMutexUnlock(&tiresMutex); - return NULL; - } - taosThreadMutexUnlock(&tiresMutex); - // can obtain var names from local STire* tire = getAutoPtr(type); if (tire == NULL) { return NULL; } + if(updateTireValue(type, true)) { + return NULL; + } + char* str = matchNextPrefix(tire, pre); // used finish, put back pointer to autoptr array putBackAutoPtr(type, tire); @@ -1695,9 +1705,12 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { } // match database - if (fillWithType(con, cmd, last, WT_VAR_DBNAME)) { - ret = true; - goto _return; + if(elast == NUL) { + // dot need not completed with dbname + if (fillWithType(con, cmd, last, WT_VAR_DBNAME)) { + ret = true; + goto _return; + } } if (fillWithType(con, cmd, last, WT_VAR_SYSTABLE)) { @@ -1964,7 +1977,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) { if (dealUseDB(sql)) { // change to new db - tireSearchWord(WT_VAR_STABLE, ""); + updateTireValue(WT_VAR_STABLE, false); return; } From 8e28a63af8410f98436cf44047b10c592c43bcab Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 12 Feb 2023 16:28:03 +0800 Subject: [PATCH 139/267] fix: dot not complete with dbname --- tools/shell/src/shellAuto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 48ecf97622..14b85d4d3d 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1705,7 +1705,7 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { } // match database - if(elast == NUL) { + if(elast == NULL) { // dot need not completed with dbname if (fillWithType(con, cmd, last, WT_VAR_DBNAME)) { ret = true; From c823ad912f8355aebb8b6731d19e917bdd3c86ac Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 12 Feb 2023 16:31:24 +0800 Subject: [PATCH 140/267] fix: build error --- tools/shell/src/shellAuto.c | 64 ++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 14b85d4d3d..fdf6d1b117 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -741,38 +741,6 @@ void putBackAutoPtr(int type, STire* tire) { // ------------------- var Word -------------------------- // -// return true is need update value by async -bool updateTireValue(int type, bool autoFill) { - // TYPE CONTEXT GET FROM DB - taosThreadMutexLock(&tiresMutex); - - // check need obtain from server - if (tires[type] == NULL) { - waitAutoFill = autoFill; - // need async obtain var names from db sever - if (threads[type] != NULL) { - if (taosThreadRunning(threads[type])) { - // thread running , need not obtain again, return - taosThreadMutexUnlock(&tiresMutex); - return NULL; - } - // destroy previous thread handle for new create thread handle - taosDestroyThread(threads[type]); - threads[type] = NULL; - } - - // create new - void* param = taosMemoryMalloc(sizeof(int)); - *((int*)param) = type; - threads[type] = taosCreateThread(varObtainThread, param); - taosThreadMutexUnlock(&tiresMutex); - return true; - } - taosThreadMutexUnlock(&tiresMutex); - - return false; -} - #define MAX_CACHED_CNT 100000 // max cached rows 10w // write sql result to var name, return write rows cnt int writeVarNames(int type, TAOS_RES* tres) { @@ -849,6 +817,38 @@ void* varObtainThread(void* param) { return NULL; } +// return true is need update value by async +bool updateTireValue(int type, bool autoFill) { + // TYPE CONTEXT GET FROM DB + taosThreadMutexLock(&tiresMutex); + + // check need obtain from server + if (tires[type] == NULL) { + waitAutoFill = autoFill; + // need async obtain var names from db sever + if (threads[type] != NULL) { + if (taosThreadRunning(threads[type])) { + // thread running , need not obtain again, return + taosThreadMutexUnlock(&tiresMutex); + return NULL; + } + // destroy previous thread handle for new create thread handle + taosDestroyThread(threads[type]); + threads[type] = NULL; + } + + // create new + void* param = taosMemoryMalloc(sizeof(int)); + *((int*)param) = type; + threads[type] = taosCreateThread(varObtainThread, param); + taosThreadMutexUnlock(&tiresMutex); + return true; + } + taosThreadMutexUnlock(&tiresMutex); + + return false; +} + // only match next one word from all match words, return valuue must free by caller char* matchNextPrefix(STire* tire, char* pre) { SMatch* match = NULL; From f2b8f18661c7bdebf750fc57a8c09a86808135c0 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 12 Feb 2023 16:41:43 +0800 Subject: [PATCH 141/267] fix: first update var values then search --- tools/shell/src/shellAuto.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index fdf6d1b117..6e50a97c02 
100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -938,13 +938,13 @@ char* tireSearchWord(int type, char* pre) { return matchNextPrefix(tire, pre); } - // can obtain var names from local - STire* tire = getAutoPtr(type); - if (tire == NULL) { + if(updateTireValue(type, true)) { return NULL; } - if(updateTireValue(type, true)) { + // can obtain var names from local + STire* tire = getAutoPtr(type); + if (tire == NULL) { return NULL; } From f85cf08f876cf41e8bb28047a3a11ad072d18e4b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 12 Feb 2023 22:09:46 +0800 Subject: [PATCH 142/267] fix(query): set the initial iterator table before check data in buffer. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 80 ++++++++++++-------------- 1 file changed, 38 insertions(+), 42 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 8ec076fb63..4a81b615f0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -133,17 +133,17 @@ typedef struct SFileBlockDumpInfo { bool allDumped; } SFileBlockDumpInfo; -typedef struct SUidOrderedList { +typedef struct STableUidList { uint64_t* tableUidList; // access table uid list in uid ascending order list int32_t currentIndex; // index in table uid list -} SUidOrderedList; +} STableUidList; typedef struct SReaderStatus { bool loadFromFile; // check file stage bool composedDataBlock; // the returned data block is a composed block or not SHashObj* pTableMap; // SHash STableBlockScanInfo** pTableIter; // table iterator used in building in-memory buffer data blocks. - SUidOrderedList uidCheckInfo; // check all table in uid order + STableUidList uidList; // check tables in uid order, to avoid the repeatly load of blocks in STT. 
SFileBlockDumpInfo fBlockDumpInfo; SDFileSet* pCurrentFileset; // current opened file set SBlockData fileBlockData; @@ -341,7 +341,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf int64_t st = taosGetTimestampUs(); initBlockScanInfoBuf(pBuf, numOfTables); - SUidOrderedList* pOrderedCheckInfo = &pTsdbReader->status.uidCheckInfo; + STableUidList* pOrderedCheckInfo = &pTsdbReader->status.uidList; pOrderedCheckInfo->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t)); if (pOrderedCheckInfo->tableUidList == NULL) { @@ -679,7 +679,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, int64_t et1 = taosGetTimestampUs(); SBlockIdx* pBlockIdx = NULL; - SUidOrderedList* pList = &pReader->status.uidCheckInfo; + STableUidList* pList = &pReader->status.uidList; int32_t i = 0, j = 0; while(i < num && j < numOfTables) { @@ -2768,27 +2768,15 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) { return TSDB_CODE_SUCCESS; } -// reset the last del file index -static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t order) { - void* p = taosHashIterate(pStatus->pTableMap, NULL); - while (p != NULL) { - STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; - - // reset the last del file index - pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order); - p = taosHashIterate(pStatus->pTableMap, p); - } -} - static void resetTableListIndex(SReaderStatus *pStatus) { - SUidOrderedList* pList = &pStatus->uidCheckInfo; + STableUidList* pList = &pStatus->uidList; pList->currentIndex = 0; uint64_t uid = pList->tableUidList[0]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); } -static bool moveToNextTable(SUidOrderedList* pOrderedCheckInfo, SReaderStatus* pStatus) { +static bool moveToNextTable(STableUidList* pOrderedCheckInfo, SReaderStatus* pStatus) { pOrderedCheckInfo->currentIndex += 1; if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) { pStatus->pTableIter = NULL; @@ -2803,8 +2791,8 @@ static bool moveToNextTable(SUidOrderedList* pOrderedCheckInfo, SReaderStatus* p static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader; + STableUidList* pUidList = &pStatus->uidList; - SUidOrderedList* pOrderedCheckInfo = &pStatus->uidCheckInfo; if (taosHashGetSize(pStatus->pTableMap) == 0) { return TSDB_CODE_SUCCESS; } @@ -2817,7 +2805,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader); if (!hasVal) { - bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus); + bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { return TSDB_CODE_SUCCESS; } @@ -2852,7 +2840,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { } // current table is exhausted, let's try next table - bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus); + bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { return TSDB_CODE_SUCCESS; } @@ -2956,14 +2944,15 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; + STableUidList* pUidList = &pStatus->uidList; while (1) { - if (pStatus->pTableIter == NULL) { - pStatus->pTableIter = 
taosHashIterate(pStatus->pTableMap, NULL); - if (pStatus->pTableIter == NULL) { - return TSDB_CODE_SUCCESS; - } - } +// if (pStatus->pTableIter == NULL) { +// pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL); +// if (pStatus->pTableIter == NULL) { +// return TSDB_CODE_SUCCESS; +// } +// } STableBlockScanInfo** pBlockScanInfo = pStatus->pTableIter; initMemDataIterator(*pBlockScanInfo, pReader); @@ -2978,9 +2967,9 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) { return TSDB_CODE_SUCCESS; } - // current table is exhausted, let's try the next table - pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter); - if (pStatus->pTableIter == NULL) { + // current table is exhausted, let's try next table + bool hasNexTable = moveToNextTable(pUidList, pStatus); + if (!hasNexTable) { return TSDB_CODE_SUCCESS; } } @@ -3001,7 +2990,6 @@ void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) { static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) { SBlockNumber num = {0}; - int32_t code = moveToNextFile(pReader, &num); if (code != TSDB_CODE_SUCCESS) { return code; @@ -3767,7 +3755,7 @@ int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t n ASSERT(size >= num); taosHashClear(pReader->status.pTableMap); - SUidOrderedList* pUidList = &pReader->status.uidCheckInfo; + STableUidList* pUidList = &pReader->status.uidList; pUidList->currentIndex = 0; STableKeyInfo* pList = (STableKeyInfo*)pTableList; @@ -3799,18 +3787,24 @@ void* tsdbGetIvtIdx(SMeta* pMeta) { uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; } static int32_t doOpenReaderImpl(STsdbReader* pReader) { - SDataBlockIter* pBlockIter = &pReader->status.blockIter; + SReaderStatus* pStatus = &pReader->status; + SDataBlockIter* pBlockIter = &pStatus->blockIter; - initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); - resetDataBlockIterator(&pReader->status.blockIter, pReader->order); + initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); + resetDataBlockIterator(&pStatus->blockIter, pReader->order); - // no data in files, let's try buffer in memory - if (pReader->status.fileIter.numOfFiles == 0) { - pReader->status.loadFromFile = false; - return TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; + if (pStatus->fileIter.numOfFiles == 0) { + pStatus->loadFromFile = false; } else { - return initForFirstBlockInFile(pReader, pBlockIter); + code = initForFirstBlockInFile(pReader, pBlockIter); } + + if (!pStatus->loadFromFile) { + resetTableListIndex(pStatus); + } + + return code; } // ====================================== EXPOSED APIs ====================================== @@ -4012,7 +4006,7 @@ void tsdbReaderClose(STsdbReader* pReader) { tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap, pReader->idStr); - taosMemoryFree(pReader->status.uidCheckInfo.tableUidList); + taosMemoryFree(pReader->status.uidList.tableUidList); SIOCostSummary* pCost = &pReader->cost; SFilesetIter* pFilesetIter = &pReader->status.fileIter; @@ -4066,6 +4060,7 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) { if (pBlock->info.rows > 0) { return true; } else { + resetTableListIndex(&pReader->status); buildBlockFromBufferSequentially(pReader); return pBlock->info.rows > 0; } @@ -4309,6 +4304,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { // no data in files, let's try buffer in memory if 
(pStatus->fileIter.numOfFiles == 0) { pStatus->loadFromFile = false; + resetTableListIndex(pStatus); } else { code = initForFirstBlockInFile(pReader, pBlockIter); if (code != TSDB_CODE_SUCCESS) { From f4a17d16f2d231cbcedf3b9ccb0d76a578081018 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 00:25:26 +0800 Subject: [PATCH 143/267] fix(query): fix error in windows. --- source/util/src/tsimplehash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/tsimplehash.c b/source/util/src/tsimplehash.c index db73dac929..062d7a0ae4 100644 --- a/source/util/src/tsimplehash.c +++ b/source/util/src/tsimplehash.c @@ -108,7 +108,7 @@ static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) { taosArrayPush(pHashObj->pHashNodeBuf, &pNewPage); return pNewPage; } else { - void* pPos = (*p) + pHashObj->offset; + void* pPos = (char*)(*p) + pHashObj->offset; pHashObj->offset += size; return pPos; } From 34036b932f2c418d9622b289e61742411c069cad Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 01:05:40 +0800 Subject: [PATCH 144/267] fix(query): fix error in windows. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 4a81b615f0..bc37be580c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -329,7 +329,7 @@ static int32_t uidComparFunc(const void* p1, const void* p2) { // NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList, - int32_t numOfTables) { + STableUidList *pUidList, int32_t numOfTables) { // allocate buffer in order to load data blocks from file // todo use simple hash instead, optimize the memory consumption SHashObj* pTableMap = @@ -341,19 +341,17 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf int64_t st = taosGetTimestampUs(); initBlockScanInfoBuf(pBuf, numOfTables); - STableUidList* pOrderedCheckInfo = &pTsdbReader->status.uidList; - - pOrderedCheckInfo->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t)); - if (pOrderedCheckInfo->tableUidList == NULL) { + pUidList->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t)); + if (pUidList->tableUidList == NULL) { return NULL; } - pOrderedCheckInfo->currentIndex = 0; + pUidList->currentIndex = 0; for (int32_t j = 0; j < numOfTables; ++j) { STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(pBuf, j); pScanInfo->uid = idList[j].uid; - pOrderedCheckInfo->tableUidList[j] = idList[j].uid; + pUidList->tableUidList[j] = idList[j].uid; if (ASCENDING_TRAVERSE(pTsdbReader->order)) { int64_t skey = pTsdbReader->window.skey; @@ -368,7 +366,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf pScanInfo->lastKey, pTsdbReader->idStr); } - taosSort(pOrderedCheckInfo->tableUidList, numOfTables, sizeof(uint64_t), uidComparFunc); + taosSort(pUidList->tableUidList, numOfTables, sizeof(uint64_t), uidComparFunc); pTsdbReader->cost.createScanInfoList = (taosGetTimestampUs() - st) / 1000.0; tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, elapsed time:%.2f ms, %s", pTsdbReader, numOfTables, @@ -3891,7 +3889,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL } STsdbReader* p = (pReader->innerReader[0] != NULL) 
? pReader->innerReader[0] : pReader; - pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, numOfTables); + pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, &pReader->status.uidList, numOfTables); if (pReader->status.pTableMap == NULL) { *ppReader = NULL; code = TSDB_CODE_OUT_OF_MEMORY; @@ -3916,12 +3914,14 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL // we need only one row pPrevReader->capacity = 1; pPrevReader->status.pTableMap = pReader->status.pTableMap; + pPrevReader->status.uidList = pReader->status.uidList; pPrevReader->pSchema = pReader->pSchema; pPrevReader->pMemSchema = pReader->pMemSchema; pPrevReader->pReadSnap = pReader->pReadSnap; pNextReader->capacity = 1; pNextReader->status.pTableMap = pReader->status.pTableMap; + pNextReader->status.uidList = pReader->status.uidList; pNextReader->pSchema = pReader->pSchema; pNextReader->pMemSchema = pReader->pMemSchema; pNextReader->pReadSnap = pReader->pReadSnap; @@ -3952,6 +3952,7 @@ void tsdbReaderClose(STsdbReader* pReader) { STsdbReader* p = pReader->innerReader[0]; p->status.pTableMap = NULL; + p->status.uidList.tableUidList = NULL; p->pReadSnap = NULL; p->pSchema = NULL; p->pMemSchema = NULL; @@ -3959,6 +3960,7 @@ void tsdbReaderClose(STsdbReader* pReader) { p = pReader->innerReader[1]; p->status.pTableMap = NULL; + p->status.uidList.tableUidList = NULL; p->pReadSnap = NULL; p->pSchema = NULL; p->pMemSchema = NULL; From 73bb73085ce7d8558f40cfbce2e6b09dded21f9f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 13 Feb 2023 01:49:07 +0800 Subject: [PATCH 145/267] fix: sml rest ctrlc main (#19940) * fix: taosbenchmark fix sml-rest mem leak for main * fix: update taos-tools 4097420 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 63014500ef..05191138e5 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 05b61e0 + GIT_TAG 22627d7 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 1114fef3378f5aa6085da23e85c16ff02b36d33b Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 13 Feb 2023 09:47:30 +0800 Subject: [PATCH 146/267] add cfg --- source/libs/stream/src/streamState.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 330f67991d..563c59f419 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -107,8 +107,6 @@ static inline int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, } SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages) { - szPage = szPage < 0 ? 4096 * 8 : szPage; - pages = pages < 0 ? 
256 * 32 : pages; SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState)); if (pState == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -128,6 +126,30 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int memset(statePath, 0, 1024); tstrncpy(statePath, path, 1024); } + + char cfgPath[1024]; + sprintf(cfgPath, "%s/cfg", statePath); + + char cfg[1024]; + memset(cfg, 0, 1024); + TdFilePtr pCfgFile = taosOpenFile(cfgPath, TD_FILE_READ); + if (pCfgFile != NULL) { + int64_t size; + taosFStatFile(pCfgFile, &size, NULL); + taosReadFile(pCfgFile, cfg, size); + sscanf(cfg, "%d\n%d\n", &szPage, &pages); + } else { + taosMulModeMkDir(statePath, 0755); + pCfgFile = taosOpenFile(cfgPath, TD_FILE_WRITE | TD_FILE_CREATE); + szPage = szPage < 0 ? 4096 * 8 : szPage; + pages = pages < 0 ? 256 * 32 : pages; + /*szPage = szPage < 0 ? 4096 : szPage;*/ + /*pages = pages < 0 ? 256 : pages;*/ + sprintf(cfg, "%d\n%d\n", szPage, pages); + taosWriteFile(pCfgFile, cfg, strlen(cfg)); + } + taosCloseFile(&pCfgFile); + if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 1) < 0) { goto _err; } From 27a53a0c64252e48b252cbe1c041c31a81fecc7e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 10:05:26 +0800 Subject: [PATCH 147/267] fix(query): set the correct table iter. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index bc37be580c..c8f2b77246 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -4073,11 +4073,11 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) { } bool tsdbNextDataBlock(STsdbReader* pReader) { - if (isEmptyQueryTimeWindow(&pReader->window)) { + if (isEmptyQueryTimeWindow(&pReader->window) || pReader->step == EXTERNAL_ROWS_NEXT) { return false; } - if (pReader->innerReader[0] != NULL && pReader->step == 0) { + if (pReader->step == 0 && pReader->innerReader[0] != NULL) { bool ret = doTsdbNextDataBlock(pReader->innerReader[0]); pReader->step = EXTERNAL_ROWS_PREV; if (ret) { @@ -4102,7 +4102,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) { return ret; } - if (pReader->innerReader[1] != NULL && pReader->step == EXTERNAL_ROWS_MAIN) { + if (pReader->step == EXTERNAL_ROWS_MAIN && pReader->innerReader[1] != NULL) { // prepare for the next row scan int32_t code = doOpenReaderImpl(pReader->innerReader[1]); resetAllDataBlockScanInfo(pReader->innerReader[1]->status.pTableMap, pReader->window.ekey); @@ -4110,10 +4110,10 @@ bool tsdbNextDataBlock(STsdbReader* pReader) { return code; } - bool ret1 = doTsdbNextDataBlock(pReader->innerReader[1]); + ret = doTsdbNextDataBlock(pReader->innerReader[1]); pReader->step = EXTERNAL_ROWS_NEXT; - if (ret1) { - return ret1; + if (ret) { + return ret; } } From 08126ccde12660b9eaeebf65bc3d6e1ecb365126 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 10:07:56 +0800 Subject: [PATCH 148/267] fix(query): add kill check for cache scan operator. 
--- source/libs/executor/src/cachescanoperator.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 294424746a..d460f490ba 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -149,6 +149,10 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { // check if it is a group by tbname if ((pInfo->retrieveType & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) { + if (isTaskKilled(pTaskInfo)) { + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); + } + if (pInfo->indexOfBufferedRes >= pInfo->pBufferredRes->info.rows) { blockDataCleanup(pInfo->pBufferredRes); taosArrayClear(pInfo->pUidList); @@ -207,6 +211,10 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { size_t totalGroups = tableListGetOutputGroups(pTableList); while (pInfo->currentGroupIndex < totalGroups) { + if (isTaskKilled(pTaskInfo)) { + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); + } + STableKeyInfo* pList = NULL; int32_t num = 0; From b37bf5679499a6a41e1243fa4533c7875550f035 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 10:12:13 +0800 Subject: [PATCH 149/267] fix(query): add kill check for table merge scan. --- source/libs/executor/src/scanoperator.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ac76f179a0..6782b7f061 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2779,6 +2779,10 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { SSDataBlock* pBlock = NULL; while (pInfo->tableStartIndex < tableListSize) { + if (isTaskKilled(pTaskInfo)) { + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); + } + pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pOperator); if (pBlock != NULL) { From 7a4c8f30db95c341f7cbdfd4f2d8d6a4306c70e5 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 13 Feb 2023 10:12:49 +0800 Subject: [PATCH 150/267] reset default cfg --- source/libs/stream/src/streamState.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 563c59f419..b4e44cc5b7 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -127,7 +127,7 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int tstrncpy(statePath, path, 1024); } - char cfgPath[1024]; + char cfgPath[1030]; sprintf(cfgPath, "%s/cfg", statePath); char cfg[1024]; @@ -141,10 +141,8 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int } else { taosMulModeMkDir(statePath, 0755); pCfgFile = taosOpenFile(cfgPath, TD_FILE_WRITE | TD_FILE_CREATE); - szPage = szPage < 0 ? 4096 * 8 : szPage; - pages = pages < 0 ? 256 * 32 : pages; - /*szPage = szPage < 0 ? 4096 : szPage;*/ - /*pages = pages < 0 ? 256 : pages;*/ + szPage = szPage < 0 ? 4096 : szPage; + pages = pages < 0 ? 
256 : pages; sprintf(cfg, "%d\n%d\n", szPage, pages); taosWriteFile(pCfgFile, cfg, strlen(cfg)); } From 31a75f837bfb962ee3ad58431825fff85ed233d7 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Mon, 13 Feb 2023 09:38:47 +0800 Subject: [PATCH 151/267] fix:crash at generate session scan range --- source/libs/executor/src/scanoperator.c | 23 ++++- tests/script/tsim/stream/deleteSession.sim | 106 +++++++++++++++++++++ tests/script/tsim/stream/deleteState.sim | 106 +++++++++++++++++++++ 3 files changed, 234 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 29fe9817c9..0e6ca022a8 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1175,6 +1175,20 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32 } } +static int32_t getPreSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, + SSessionKey* pKey) { + pKey->win.skey = startTs; + pKey->win.ekey = endTs; + pKey->groupId = groupId; + + SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev(pAggSup->pState, pKey); + int32_t code = streamStateSessionGetKVByCur(pCur, pKey, NULL, 0); + if (code != TSDB_CODE_SUCCESS) { + SET_SESSION_WIN_KEY_INVALID(pKey); + } + return code; +} + static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) { blockDataCleanup(pDestBlock); if (pSrcBlock->info.rows == 0) { @@ -1210,7 +1224,14 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr } SSessionKey endWin = {0}; getCurSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, &endWin); - ASSERT(!IS_INVALID_SESSION_WIN_KEY(endWin)); + if(IS_INVALID_SESSION_WIN_KEY(endWin)) { + getPreSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, &endWin); + } + if (IS_INVALID_SESSION_WIN_KEY(startWin)) { + // window has been closed. + qError("generate session scan range failed. 
rang start:%" PRIx64 ", end:%" PRIx64 , startData[i], endData[i]); + continue; + } colDataAppend(pDestStartCol, i, (const char*)&startWin.win.skey, false); colDataAppend(pDestEndCol, i, (const char*)&endWin.win.ekey, false); diff --git a/tests/script/tsim/stream/deleteSession.sim b/tests/script/tsim/stream/deleteSession.sim index 541609633b..c3c64a5977 100644 --- a/tests/script/tsim/stream/deleteSession.sim +++ b/tests/script/tsim/stream/deleteSession.sim @@ -524,6 +524,112 @@ if $data42 != 14 then goto loop20 endi +sql drop database if exists test4; +sql drop stream if exists streams4; +sql create database test4 vgroups 1; +sql use test4; +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +print create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s); +sql create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s); + +sql insert into t1 values(1648791210000,1,2,3); +sql insert into t1 values(1648791220000,2,2,3); +sql insert into t1 values(1648791221000,2,2,3); +sql insert into t1 values(1648791222000,2,2,3); +sql insert into t1 values(1648791223000,2,2,3); +sql insert into t1 values(1648791231000,2,2,3); + +sql insert into t2 values(1648791210000,1,2,3); +sql insert into t2 values(1648791220000,2,2,3); +sql insert into t2 values(1648791221000,2,2,3); +sql insert into t2 values(1648791231000,2,2,3); + +$loop_count = 0 + +loop21: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 6 then + print =====rows=$rows + goto loop21 +endi + +if $data01 != 4 then + print =====data01=$data01 + goto loop21 +endi + +if $data11 != 2 then + print =====data11=$data11 + goto loop21 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop21 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop21 +endi + +if $data41 != 1 then + print =====data41=$data41 + goto loop21 +endi + +if $data51 != 1 then + print =====data51=$data51 + goto loop21 +endi + +print delete from st where ts >= 1648791220000 and ts <=1648791223000; +sql delete from st where ts >= 1648791220000 and ts <=1648791223000; + +loop22: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print =====rows=$rows + goto loop22 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop22 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop22 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop22 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop22 +endi + $loop_all = $loop_all + 1 print ============loop_all=$loop_all diff --git a/tests/script/tsim/stream/deleteState.sim b/tests/script/tsim/stream/deleteState.sim index ecd9f55340..45d9bc3e39 100644 --- a/tests/script/tsim/stream/deleteState.sim +++ b/tests/script/tsim/stream/deleteState.sim @@ -189,6 +189,112 @@ if $data12 != 4 then goto loop6 endi +sql drop database if exists test4; +sql drop stream if exists streams4; +sql create database test4 vgroups 1; +sql use test4; +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +print create 
stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname state_window(c); +sql create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname state_window(c); + +sql insert into t1 values(1648791210000,1,2,1); +sql insert into t1 values(1648791220000,2,2,2); +sql insert into t1 values(1648791221000,2,2,2); +sql insert into t1 values(1648791222000,2,2,2); +sql insert into t1 values(1648791223000,2,2,2); +sql insert into t1 values(1648791231000,2,2,3); + +sql insert into t2 values(1648791210000,1,2,1); +sql insert into t2 values(1648791220000,2,2,2); +sql insert into t2 values(1648791221000,2,2,2); +sql insert into t2 values(1648791231000,2,2,3); + +$loop_count = 0 + +loop21: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 6 then + print =====rows=$rows + goto loop21 +endi + +if $data01 != 4 then + print =====data01=$data01 + goto loop21 +endi + +if $data11 != 2 then + print =====data11=$data11 + goto loop21 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop21 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop21 +endi + +if $data41 != 1 then + print =====data41=$data41 + goto loop21 +endi + +if $data51 != 1 then + print =====data51=$data51 + goto loop21 +endi + +print delete from st where ts >= 1648791220000 and ts <=1648791223000; +sql delete from st where ts >= 1648791220000 and ts <=1648791223000; + +loop22: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print =====rows=$rows + goto loop22 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop22 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop22 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop22 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop22 +endi + $loop_all = $loop_all + 1 print ============loop_all=$loop_all From 767080922aed923a2780bd7c53421f9658edabbd Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Mon, 13 Feb 2023 14:16:19 +0800 Subject: [PATCH 152/267] fix: set SYNC_VNODE_LOG_RETENTION as TSDB_SYNC_LOG_BUFFER_RETENTION + 1 --- include/libs/sync/sync.h | 2 +- include/util/tdef.h | 1 + source/libs/sync/src/syncPipeline.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index defafce30e..5e37da4f3f 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -36,7 +36,7 @@ extern "C" { #define SYNC_DEL_WAL_MS (1000 * 60) #define SYNC_ADD_QUORUM_COUNT 3 #define SYNC_MNODE_LOG_RETENTION 10000 -#define SYNC_VNODE_LOG_RETENTION 20 +#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1) #define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10 #define SNAPSHOT_WAIT_MS 1000 * 30 diff --git a/include/util/tdef.h b/include/util/tdef.h index 3a152a36a1..e03352d98b 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -283,6 +283,7 @@ typedef enum ELogicConditionType { #define TSDB_MAX_REPLICA 5 #define TSDB_SYNC_LOG_BUFFER_SIZE 4096 +#define TSDB_SYNC_LOG_BUFFER_RETENTION (TSDB_SYNC_LOG_BUFFER_SIZE >> 4) #define TSDB_TBNAME_COLUMN_INDEX (-1) #define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 
b3eb5684cf..d1ef8701be 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -563,7 +563,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm } // recycle - SyncIndex until = pBuf->commitIndex - (pBuf->size >> 4); + SyncIndex until = pBuf->commitIndex - TSDB_SYNC_LOG_BUFFER_RETENTION; for (SyncIndex index = pBuf->startIndex; index < until; index++) { SSyncRaftEntry* pEntry = pBuf->entries[(index + pBuf->size) % pBuf->size].pItem; ASSERT(pEntry != NULL); From de92def9885fbb73e3152bfa8b4cafaaa17c42ac Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 13 Feb 2023 15:25:00 +0800 Subject: [PATCH 153/267] fix: query compatible with lower version messages --- source/libs/nodes/src/nodesMsgFuncs.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 274cac405d..104f066fa9 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -315,6 +315,11 @@ static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int32_t len) { } static int32_t tlvDecodeValueImpl(STlvDecoder* pDecoder, void* pValue, int32_t len) { + // compatible with lower version messages + if (pDecoder->bufSize == pDecoder->offset) { + memset(pValue, 0, len); + return TSDB_CODE_SUCCESS; + } if (len > pDecoder->bufSize - pDecoder->offset) { return TSDB_CODE_FAILED; } From c597a1e4ce9d86c1a5af461f829744efe83bdea4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 15:46:47 +0800 Subject: [PATCH 154/267] refactor: do some internal refactor. --- include/util/tarray.h | 8 -------- source/common/src/tdatablock.c | 4 +--- source/dnode/mnode/impl/src/mndStream.c | 7 +++---- source/dnode/vnode/src/tsdb/tsdbUtil.c | 4 +--- source/libs/wal/src/walMeta.c | 20 +++++++++++--------- source/libs/wal/src/walWrite.c | 10 +++++----- source/util/src/tarray.c | 7 +------ source/util/src/tjson.c | 3 +-- 8 files changed, 23 insertions(+), 40 deletions(-) diff --git a/include/util/tarray.h b/include/util/tarray.h index f2fe5bc844..af9cf5fde1 100644 --- a/include/util/tarray.h +++ b/include/util/tarray.h @@ -150,14 +150,6 @@ void* taosArrayGetLast(const SArray* pArray); */ size_t taosArrayGetSize(const SArray* pArray); -/** - * set the size of array - * @param pArray - * @param size size of the array - * @return - */ -void taosArraySetSize(SArray* pArray, size_t size); - /** * insert data into array * @param pArray diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2e633e7479..86dcd1eceb 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2356,9 +2356,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) { pStart += sizeof(uint64_t); if (pBlock->pDataBlock == NULL) { - pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); - - taosArraySetSize(pBlock->pDataBlock, numOfCols); + pBlock->pDataBlock = taosArrayInit_s(numOfCols, sizeof(SColumnInfoData), numOfCols); } for (int32_t i = 0; i < numOfCols; ++i) { diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 61e9fc5366..fa1ac88ab6 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -477,9 +477,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre tstrncpy(createReq.name, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); createReq.numOfColumns = pStream->outputSchema.nCols; 
createReq.numOfTags = 1; // group id - createReq.pColumns = taosArrayInit(createReq.numOfColumns, sizeof(SField)); + createReq.pColumns = taosArrayInit_s(createReq.numOfColumns, sizeof(SField), createReq.numOfColumns); // build fields - taosArraySetSize(createReq.pColumns, createReq.numOfColumns); for (int32_t i = 0; i < createReq.numOfColumns; i++) { SField *pField = taosArrayGet(createReq.pColumns, i); tstrncpy(pField->name, pStream->outputSchema.pSchema[i].name, TSDB_COL_NAME_LEN); @@ -487,8 +486,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre pField->type = pStream->outputSchema.pSchema[i].type; pField->bytes = pStream->outputSchema.pSchema[i].bytes; } - createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField)); - taosArraySetSize(createReq.pTags, 1); + createReq.pTags = taosArrayInit_s(createReq.numOfTags, sizeof(SField), 1); + // build tags SField *pField = taosArrayGet(createReq.pTags, 0); strcpy(pField->name, "group_id"); diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index a252c6deb6..853f0bcc21 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -1054,9 +1054,7 @@ static int32_t tsdbMergeSkyline(SArray *pSkyline1, SArray *pSkyline2, SArray *pS i2++; } - taosArraySetSize(pSkyline, TARRAY_ELEM_IDX(pSkyline, pItem)); - -_exit: + pSkyline->size = TARRAY_ELEM_IDX(pSkyline, pItem); return code; } diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index a547378967..707c47f6b1 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -759,28 +759,30 @@ int walMetaDeserialize(SWal* pWal, const char* bytes) { // deserialize SArray* pArray = pWal->fileInfoSet; taosArrayEnsureCap(pArray, sz); - SWalFileInfo* pData = pArray->pData; + for (int i = 0; i < sz; i++) { - cJSON* pInfoJson = cJSON_GetArrayItem(pFiles, i); + pInfoJson = cJSON_GetArrayItem(pFiles, i); if (!pInfoJson) goto _err; - SWalFileInfo* pInfo = &pData[i]; + + SWalFileInfo info = {0}; + pField = cJSON_GetObjectItem(pInfoJson, "firstVer"); if (!pField) goto _err; - pInfo->firstVer = atoll(cJSON_GetStringValue(pField)); + info.firstVer = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "lastVer"); if (!pField) goto _err; - pInfo->lastVer = atoll(cJSON_GetStringValue(pField)); + info.lastVer = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "createTs"); if (!pField) goto _err; - pInfo->createTs = atoll(cJSON_GetStringValue(pField)); + info.createTs = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "closeTs"); if (!pField) goto _err; - pInfo->closeTs = atoll(cJSON_GetStringValue(pField)); + info.closeTs = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "fileSize"); if (!pField) goto _err; - pInfo->fileSize = atoll(cJSON_GetStringValue(pField)); + info.fileSize = atoll(cJSON_GetStringValue(pField)); + taosArrayPush(pArray, &info); } - taosArraySetSize(pArray, sz); pWal->fileInfoSet = pArray; pWal->writeCur = sz - 1; cJSON_Delete(pRoot); diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index d4ea526b78..643fc83999 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -122,16 +122,16 @@ int32_t walRollback(SWal *pWal, int64_t ver) { // delete files in descending order int fileSetSize = taosArrayGetSize(pWal->fileInfoSet); - for (int i = fileSetSize - 1; i >= pWal->writeCur + 1; 
i--) { - walBuildLogName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr); + for (int i = pWal->writeCur + 1; i < fileSetSize; i++) { + SWalFileInfo* pInfo = taosArrayPop(pWal->fileInfoSet); + + walBuildLogName(pWal, pInfo->firstVer, fnameStr); wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr); taosRemoveFile(fnameStr); - walBuildIdxName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr); + walBuildIdxName(pWal, pInfo->firstVer, fnameStr); wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr); taosRemoveFile(fnameStr); } - // pop from fileInfoSet - taosArraySetSize(pWal->fileInfoSet, pWal->writeCur + 1); } walBuildIdxName(pWal, walGetCurFileFirstVer(pWal), fnameStr); diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 8da1a1ec4a..64701574bb 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -36,7 +36,7 @@ SArray* taosArrayInit(size_t size, size_t elemSize) { } pArray->size = 0; - pArray->pData = taosMemoryMalloc(size * elemSize); + pArray->pData = taosMemoryCalloc(size, elemSize); if (pArray->pData == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(pArray); @@ -258,11 +258,6 @@ size_t taosArrayGetSize(const SArray* pArray) { return pArray->size; } -void taosArraySetSize(SArray* pArray, size_t size) { - assert(size <= pArray->capacity); - pArray->size = size; -} - void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { if (pArray == NULL || pData == NULL) { return NULL; diff --git a/source/util/src/tjson.c b/source/util/src/tjson.c index 48638af8d5..27d14d05b1 100644 --- a/source/util/src/tjson.c +++ b/source/util/src/tjson.c @@ -325,11 +325,10 @@ int32_t tjsonToTArray(const SJson* pJson, const char* pName, FToObject func, SAr const cJSON* jArray = tjsonGetObjectItem(pJson, pName); int32_t size = tjsonGetArraySize(jArray); if (size > 0) { - *pArray = taosArrayInit(size, itemSize); + *pArray = taosArrayInit_s(size, itemSize, size); if (NULL == *pArray) { return TSDB_CODE_OUT_OF_MEMORY; } - taosArraySetSize(*pArray, size); for (int32_t i = 0; i < size; ++i) { int32_t code = func(tjsonGetArrayItem(jArray, i), taosArrayGet(*pArray, i)); if (TSDB_CODE_SUCCESS != code) { From 1aee609d3646bb90d619a8e7e6a9e1a1ac662354 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 13 Feb 2023 16:46:42 +0800 Subject: [PATCH 155/267] fix: change socket opt --- source/os/src/osSocket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index 6b0bfb3a02..3a93a26b85 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -1078,7 +1078,7 @@ int32_t taosCreateSocketWithTimeout(uint32_t timeout) { // return -1; //} #else // Linux like systems - uint32_t conn_timeout_ms = timeout * 1000; + uint32_t conn_timeout_ms = timeout; if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) { taosCloseSocketNoCheck1(fd); return -1; From 99bfc95d546f9134965de183ef0c8a85934d7c6e Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Mon, 13 Feb 2023 19:00:10 +0800 Subject: [PATCH 156/267] fix: synchronize access to raftStore --- source/libs/sync/inc/syncInt.h | 3 +- source/libs/sync/inc/syncRaftStore.h | 5 +- source/libs/sync/src/syncAppendEntries.c | 6 +- source/libs/sync/src/syncAppendEntriesReply.c | 6 +- source/libs/sync/src/syncCommit.c | 2 +- source/libs/sync/src/syncElection.c | 10 +- source/libs/sync/src/syncMain.c | 159 
++++-------------- source/libs/sync/src/syncMessage.c | 2 +- source/libs/sync/src/syncPipeline.c | 13 +- source/libs/sync/src/syncRaftStore.c | 33 +++- source/libs/sync/src/syncReplication.c | 2 +- source/libs/sync/src/syncRequestVote.c | 14 +- source/libs/sync/src/syncRequestVoteReply.c | 14 +- source/libs/sync/src/syncRespMgr.c | 2 +- source/libs/sync/src/syncSnapshot.c | 40 ++--- source/libs/sync/src/syncUtil.c | 6 +- .../test/sync_test_lib/src/syncMainDebug.c | 2 +- .../sync_test_lib/src/syncSnapshotDebug.c | 2 +- 18 files changed, 130 insertions(+), 191 deletions(-) diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 7e08e195c1..6f2c1a1ad0 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -71,6 +71,7 @@ typedef struct SRaftId { typedef struct SRaftStore { SyncTerm currentTerm; SRaftId voteFor; + TdThreadMutex mutex; } SRaftStore; typedef struct SSyncHbTimerData { @@ -282,7 +283,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode); // raft vote -------------- void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId); -void syncNodeVoteForSelf(SSyncNode* pSyncNode); +void syncNodeVoteForSelf(SSyncNode* pSyncNode, SyncTerm term); // log replication SSyncLogReplMgr* syncNodeGetLogReplMgr(SSyncNode* pNode, SRaftId* pDestId); diff --git a/source/libs/sync/inc/syncRaftStore.h b/source/libs/sync/inc/syncRaftStore.h index 21a8fc64a8..38a8ed234b 100644 --- a/source/libs/sync/inc/syncRaftStore.h +++ b/source/libs/sync/inc/syncRaftStore.h @@ -26,14 +26,15 @@ extern "C" { #define RAFT_STORE_PATH_LEN (TSDB_FILENAME_LEN * 2) #define EMPTY_RAFT_ID ((SRaftId){.addr = 0, .vgId = 0}) -int32_t raftStoreReadFile(SSyncNode *pNode); -int32_t raftStoreWriteFile(SSyncNode *pNode); +int32_t raftStoreOpen(SSyncNode *pNode); +void raftStoreClose(SSyncNode *pNode); bool raftStoreHasVoted(SSyncNode *pNode); void raftStoreVote(SSyncNode *pNode, SRaftId *pRaftId); void raftStoreClearVote(SSyncNode *pNode); void raftStoreNextTerm(SSyncNode *pNode); void raftStoreSetTerm(SSyncNode *pNode, SyncTerm term); +SyncTerm raftStoreGetTerm(SSyncNode *pNode); #ifdef __cplusplus } diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 835e5d248e..b04bcb86c6 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -120,17 +120,17 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { // prepare response msg pReply->srcId = ths->myRaftId; pReply->destId = pMsg->srcId; - pReply->term = ths->raftStore.currentTerm; + pReply->term = raftStoreGetTerm(ths); pReply->success = false; pReply->matchIndex = SYNC_INDEX_INVALID; pReply->lastSendIndex = pMsg->prevLogIndex + 1; pReply->startTime = ths->startTime; - if (pMsg->term < ths->raftStore.currentTerm) { + if (pMsg->term < raftStoreGetTerm(ths)) { goto _SEND_RESPONSE; } - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(ths)) { pReply->term = pMsg->term; } diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 44a29da3ea..f81699b9f6 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -50,19 +50,19 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } // drop stale response - if (pMsg->term < ths->raftStore.currentTerm) { + if (pMsg->term < raftStoreGetTerm(ths)) { syncLogRecvAppendEntriesReply(ths, 
pMsg, "drop stale response"); return 0; } if (ths->state == TAOS_SYNC_STATE_LEADER) { - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(ths)) { syncLogRecvAppendEntriesReply(ths, pMsg, "error term"); syncNodeStepDown(ths, pMsg->term); return -1; } - ASSERT(pMsg->term == ths->raftStore.currentTerm); + ASSERT(pMsg->term == raftStoreGetTerm(ths)); sTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index 6d256a735d..2501b4df8b 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -111,7 +111,7 @@ int64_t syncNodeCheckCommitIndex(SSyncNode* ths, SyncIndex indexLikely) { SyncIndex commitIndex = indexLikely; syncNodeUpdateCommitIndex(ths, commitIndex); sTrace("vgId:%d, agreed upon. role:%d, term:%" PRId64 ", index:%" PRId64 "", ths->vgId, ths->state, - ths->raftStore.currentTerm, commitIndex); + raftStoreGetTerm(ths), commitIndex); } return ths->commitIndex; } diff --git a/source/libs/sync/src/syncElection.c b/source/libs/sync/src/syncElection.c index 682ace83ec..e53b8ade1c 100644 --- a/source/libs/sync/src/syncElection.c +++ b/source/libs/sync/src/syncElection.c @@ -51,7 +51,7 @@ static int32_t syncNodeRequestVotePeers(SSyncNode* pNode) { SyncRequestVote* pMsg = rpcMsg.pCont; pMsg->srcId = pNode->myRaftId; pMsg->destId = pNode->peersId[i]; - pMsg->term = pNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pNode); ret = syncNodeGetLastIndexTerm(pNode, &pMsg->lastLogIndex, &pMsg->lastLogTerm); if (ret < 0) { @@ -85,10 +85,12 @@ int32_t syncNodeElect(SSyncNode* pSyncNode) { // start election raftStoreNextTerm(pSyncNode); raftStoreClearVote(pSyncNode); - voteGrantedReset(pSyncNode->pVotesGranted, pSyncNode->raftStore.currentTerm); - votesRespondReset(pSyncNode->pVotesRespond, pSyncNode->raftStore.currentTerm); - syncNodeVoteForSelf(pSyncNode); + SyncTerm currentTerm = raftStoreGetTerm(pSyncNode); + voteGrantedReset(pSyncNode->pVotesGranted, currentTerm); + votesRespondReset(pSyncNode->pVotesRespond, currentTerm); + syncNodeVoteForSelf(pSyncNode, currentTerm); + if (voteGrantedMajority(pSyncNode->pVotesGranted)) { // only myself, to leader ASSERT(!pSyncNode->pVotesGranted->toLeader); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 803df6b662..7f7a3f113b 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -41,7 +41,6 @@ static void syncNodeEqPingTimer(void* param, void* tmrId); static void syncNodeEqElectTimer(void* param, void* tmrId); static void syncNodeEqHeartbeatTimer(void* param, void* tmrId); -static int32_t syncNodeEqNoop(SSyncNode* ths); static int32_t syncNodeAppendNoop(SSyncNode* ths); static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId); static bool syncIsConfigChanged(const SSyncCfg* pOldCfg, const SSyncCfg* pNewCfg); @@ -468,7 +467,7 @@ bool syncNodeIsReadyForRead(SSyncNode* pSyncNode) { } if (code == 0 && pEntry != NULL) { - if (pEntry->originalRpcType == TDMT_SYNC_NOOP && pEntry->term == pSyncNode->raftStore.currentTerm) { + if (pEntry->originalRpcType == TDMT_SYNC_NOOP && pEntry->term == raftStoreGetTerm(pSyncNode)) { ready = true; } @@ -664,7 +663,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ int32_t code = syncNodeOnClientRequest(pSyncNode, pMsg, &retIndex); if (code == 0) { 
pMsg->info.conn.applyIndex = retIndex; - pMsg->info.conn.applyTerm = pSyncNode->raftStore.currentTerm; + pMsg->info.conn.applyTerm = raftStoreGetTerm(pSyncNode); sTrace("vgId:%d, propose optimized msg, index:%" PRId64 " type:%s", pSyncNode->vgId, retIndex, TMSG_INFO(pMsg->msgType)); return 1; @@ -911,7 +910,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { // init TLA+ server vars pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER; - if (raftStoreReadFile(pSyncNode) != 0) { + if (raftStoreOpen(pSyncNode) != 0) { sError("vgId:%d, failed to open raft store at path %s", pSyncNode->vgId, pSyncNode->raftStorePath); goto _error; } @@ -1212,7 +1211,12 @@ void syncNodeClose(SSyncNode* pSyncNode) { if (pSyncNode == NULL) return; sNInfo(pSyncNode, "sync close, node:%p", pSyncNode); + syncNodeStopPingTimer(pSyncNode); + syncNodeStopElectTimer(pSyncNode); + syncNodeStopHeartbeatTimer(pSyncNode); + syncNodeLogReplMgrDestroy(pSyncNode); + syncRespMgrDestroy(pSyncNode->pSyncRespMgr); pSyncNode->pSyncRespMgr = NULL; voteGrantedDestroy(pSyncNode->pVotesGranted); @@ -1228,10 +1232,6 @@ void syncNodeClose(SSyncNode* pSyncNode) { syncLogBufferDestroy(pSyncNode->pLogBuf); pSyncNode->pLogBuf = NULL; - syncNodeStopPingTimer(pSyncNode); - syncNodeStopElectTimer(pSyncNode); - syncNodeStopHeartbeatTimer(pSyncNode); - for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { if (pSyncNode->senders[i] != NULL) { sDebug("vgId:%d, snapshot sender destroy while close, data:%p", pSyncNode->vgId, pSyncNode->senders[i]); @@ -1259,6 +1259,8 @@ void syncNodeClose(SSyncNode* pSyncNode) { taosMemoryFree(pSyncNode->pFsm); } + raftStoreClose(pSyncNode); + taosMemoryFree(pSyncNode); } @@ -1633,7 +1635,7 @@ _END: // raft state change -------------- void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) { - if (term > pSyncNode->raftStore.currentTerm) { + if (term > raftStoreGetTerm(pSyncNode)) { raftStoreSetTerm(pSyncNode, term); char tmpBuf[64]; snprintf(tmpBuf, sizeof(tmpBuf), "update term to %" PRId64, term); @@ -1643,24 +1645,23 @@ void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) { } void syncNodeUpdateTermWithoutStepDown(SSyncNode* pSyncNode, SyncTerm term) { - if (term > pSyncNode->raftStore.currentTerm) { + if (term > raftStoreGetTerm(pSyncNode)) { raftStoreSetTerm(pSyncNode, term); } } void syncNodeStepDown(SSyncNode* pSyncNode, SyncTerm newTerm) { - if (pSyncNode->raftStore.currentTerm > newTerm) { - sNTrace(pSyncNode, "step down, ignore, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, - pSyncNode->raftStore.currentTerm); + SyncTerm currentTerm = raftStoreGetTerm(pSyncNode); + if (currentTerm > newTerm) { + sNTrace(pSyncNode, "step down, ignore, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, currentTerm); return; } do { - sNTrace(pSyncNode, "step down, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, - pSyncNode->raftStore.currentTerm); + sNTrace(pSyncNode, "step down, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, currentTerm); } while (0); - if (pSyncNode->raftStore.currentTerm < newTerm) { + if (currentTerm < newTerm) { raftStoreSetTerm(pSyncNode, newTerm); char tmpBuf[64]; snprintf(tmpBuf, sizeof(tmpBuf), "step down, update term to %" PRId64, newTerm); @@ -1820,8 +1821,8 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) { SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); ASSERT(lastIndex >= 0); - sInfo("vgId:%d, become leader. 
term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64 "", - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + sInfo("vgId:%d, become leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64 "", pSyncNode->vgId, + raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); } bool syncNodeIsMnode(SSyncNode* pSyncNode) { return (pSyncNode->vgId == 1); } @@ -1840,7 +1841,7 @@ void syncNodeFollower2Candidate(SSyncNode* pSyncNode) { pSyncNode->state = TAOS_SYNC_STATE_CANDIDATE; SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become candidate from follower. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64, - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "follower to candidate"); } @@ -1850,7 +1851,7 @@ void syncNodeLeader2Follower(SSyncNode* pSyncNode) { syncNodeBecomeFollower(pSyncNode, "leader to follower"); SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become follower from leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64, - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "leader to follower"); } @@ -1860,7 +1861,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) { syncNodeBecomeFollower(pSyncNode, "candidate to follower"); SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become follower from candidate. 
term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64, - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "candidate to follower"); } @@ -1868,7 +1869,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) { // just called by syncNodeVoteForSelf // need assert void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId) { - ASSERT(term == pSyncNode->raftStore.currentTerm); + ASSERT(term == raftStoreGetTerm(pSyncNode)); bool voted = raftStoreHasVoted(pSyncNode); ASSERT(!voted); @@ -1876,8 +1877,8 @@ void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId) } // simulate get vote from outside -void syncNodeVoteForSelf(SSyncNode* pSyncNode) { - syncNodeVoteForTerm(pSyncNode, pSyncNode->raftStore.currentTerm, &pSyncNode->myRaftId); +void syncNodeVoteForSelf(SSyncNode* pSyncNode, SyncTerm currentTerm) { + syncNodeVoteForTerm(pSyncNode, currentTerm, &pSyncNode->myRaftId); SRpcMsg rpcMsg = {0}; int32_t ret = syncBuildRequestVoteReply(&rpcMsg, pSyncNode->vgId); @@ -1886,7 +1887,7 @@ void syncNodeVoteForSelf(SSyncNode* pSyncNode) { SyncRequestVoteReply* pMsg = rpcMsg.pCont; pMsg->srcId = pSyncNode->myRaftId; pMsg->destId = pSyncNode->myRaftId; - pMsg->term = pSyncNode->raftStore.currentTerm; + pMsg->term = currentTerm; pMsg->voteGranted = true; voteGrantedVote(pSyncNode->pVotesGranted, pMsg); @@ -2199,7 +2200,7 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { SyncHeartbeat* pSyncMsg = rpcMsg.pCont; pSyncMsg->srcId = pSyncNode->myRaftId; pSyncMsg->destId = pData->destId; - pSyncMsg->term = pSyncNode->raftStore.currentTerm; + pSyncMsg->term = raftStoreGetTerm(pSyncNode); pSyncMsg->commitIndex = pSyncNode->commitIndex; pSyncMsg->minMatchIndex = syncMinMatchIndex(pSyncNode); pSyncMsg->privateTerm = 0; @@ -2238,30 +2239,6 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { syncNodeRelease(pSyncNode); } -static int32_t syncNodeEqNoop(SSyncNode* pNode) { - if (pNode->state == TAOS_SYNC_STATE_LEADER) { - terrno = TSDB_CODE_SYN_NOT_LEADER; - return -1; - } - - SyncIndex index = pNode->pLogStore->syncLogWriteIndex(pNode->pLogStore); - SyncTerm term = pNode->raftStore.currentTerm; - SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, pNode->vgId); - if (pEntry == NULL) return -1; - - SRpcMsg rpcMsg = {0}; - int32_t code = syncBuildClientRequestFromNoopEntry(&rpcMsg, pEntry, pNode->vgId); - syncEntryDestroy(pEntry); - - sNTrace(pNode, "propose msg, type:noop"); - code = (*pNode->syncEqMsg)(pNode->msgcb, &rpcMsg); - if (code != 0) { - sError("failed to propose noop msg while enqueue since %s", terrstr()); - } - - return code; -} - static void deleteCacheEntry(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); } int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h) { @@ -2291,7 +2268,7 @@ int32_t syncNodeAppend(SSyncNode* ths, SSyncRaftEntry* pEntry) { if (syncLogBufferAppend(ths->pLogBuf, ths, pEntry) < 0) { sError("vgId:%d, failed to enqueue sync log buffer, index:%" PRId64, ths->vgId, pEntry->index); terrno = TSDB_CODE_SYN_BUFFER_FULL; - (void)syncLogFsmExecute(ths, ths->pFsm, ths->state, ths->raftStore.currentTerm, pEntry, TSDB_CODE_SYN_BUFFER_FULL); + (void)syncLogFsmExecute(ths, ths->pFsm, ths->state, raftStoreGetTerm(ths), pEntry, TSDB_CODE_SYN_BUFFER_FULL); syncEntryDestroy(pEntry); return -1; } @@ -2364,7 
+2341,7 @@ bool syncNodeSnapshotRecving(SSyncNode* pSyncNode) { static int32_t syncNodeAppendNoop(SSyncNode* ths) { SyncIndex index = syncLogBufferGetEndIndex(ths->pLogBuf); - SyncTerm term = ths->raftStore.currentTerm; + SyncTerm term = raftStoreGetTerm(ths); SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId); if (pEntry == NULL) { @@ -2380,7 +2357,7 @@ static int32_t syncNodeAppendNoopOld(SSyncNode* ths) { int32_t ret = 0; SyncIndex index = ths->pLogStore->syncLogWriteIndex(ths->pLogStore); - SyncTerm term = ths->raftStore.currentTerm; + SyncTerm term = raftStoreGetTerm(ths); SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId); ASSERT(pEntry != NULL); @@ -2418,16 +2395,17 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SRpcMsg rpcMsg = {0}; (void)syncBuildHeartbeatReply(&rpcMsg, ths->vgId); + SyncTerm currentTerm = raftStoreGetTerm(ths); SyncHeartbeatReply* pMsgReply = rpcMsg.pCont; pMsgReply->destId = pMsg->srcId; pMsgReply->srcId = ths->myRaftId; - pMsgReply->term = ths->raftStore.currentTerm; + pMsgReply->term = currentTerm; pMsgReply->privateTerm = 8864; // magic number pMsgReply->startTime = ths->startTime; pMsgReply->timeStamp = tsMs; - if (pMsg->term == ths->raftStore.currentTerm && ths->state != TAOS_SYNC_STATE_LEADER) { + if (pMsg->term == currentTerm && ths->state != TAOS_SYNC_STATE_LEADER) { syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), tsMs); syncNodeResetElectTimer(ths); @@ -2456,7 +2434,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } } - if (pMsg->term >= ths->raftStore.currentTerm && ths->state != TAOS_SYNC_STATE_FOLLOWER) { + if (pMsg->term >= currentTerm && ths->state != TAOS_SYNC_STATE_FOLLOWER) { // syncNodeStepDown(ths, pMsg->term); SRpcMsg rpcMsgLocalCmd = {0}; (void)syncBuildLocalCmd(&rpcMsgLocalCmd, ths->vgId); @@ -2565,7 +2543,7 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn int32_t code = 0; SyncIndex index = syncLogBufferGetEndIndex(ths->pLogBuf); - SyncTerm term = ths->raftStore.currentTerm; + SyncTerm term = raftStoreGetTerm(ths); SSyncRaftEntry* pEntry = NULL; if (pMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { pEntry = syncEntryBuildFromClientRequest(pMsg->pCont, term, index); @@ -2609,73 +2587,6 @@ const char* syncStr(ESyncState state) { } } -#if 0 -int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry) { - if (ths->state != TAOS_SYNC_STATE_FOLLOWER) { - sNTrace(ths, "I am not follower, can not do leader transfer"); - return 0; - } - - if (!ths->restoreFinish) { - sNTrace(ths, "restore not finish, can not do leader transfer"); - return 0; - } - - if (pEntry->term < ths->raftStore.currentTerm) { - sNTrace(ths, "little term:%" PRId64 ", can not do leader transfer", pEntry->term); - return 0; - } - - if (pEntry->index < syncNodeGetLastIndex(ths)) { - sNTrace(ths, "little index:%" PRId64 ", can not do leader transfer", pEntry->index); - return 0; - } - - /* - if (ths->vgId > 1) { - sNTrace(ths, "I am vnode, can not do leader transfer"); - return 0; - } - */ - - SyncLeaderTransfer* pSyncLeaderTransfer = pRpcMsg->pCont; - sNTrace(ths, "do leader transfer, index:%" PRId64, pEntry->index); - - bool sameId = syncUtilSameId(&(pSyncLeaderTransfer->newLeaderId), &(ths->myRaftId)); - bool sameNodeInfo = strcmp(pSyncLeaderTransfer->newNodeInfo.nodeFqdn, ths->myNodeInfo.nodeFqdn) == 0 && - pSyncLeaderTransfer->newNodeInfo.nodePort == ths->myNodeInfo.nodePort; - - bool same = sameId || sameNodeInfo; - if (same) 
{ - // reset elect timer now! - int32_t electMS = 1; - int32_t ret = syncNodeRestartElectTimer(ths, electMS); - ASSERT(ret == 0); - - sNTrace(ths, "maybe leader transfer to %s:%d %" PRId64, pSyncLeaderTransfer->newNodeInfo.nodeFqdn, - pSyncLeaderTransfer->newNodeInfo.nodePort, pSyncLeaderTransfer->newLeaderId.addr); - } - - if (ths->pFsm->FpLeaderTransferCb != NULL) { - SFsmCbMeta cbMeta = { - .code = 0, - .currentTerm = ths->raftStore.currentTerm, - .flag = 0, - .index = pEntry->index, - .lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index), - .isWeak = pEntry->isWeak, - .seqNum = pEntry->seqNum, - .state = ths->state, - .term = pEntry->term, - }; - ths->pFsm->FpLeaderTransferCb(ths->pFsm, pRpcMsg, &cbMeta); - } - - return 0; -} - -#endif - int32_t syncNodeUpdateNewConfigIndex(SSyncNode* ths, SSyncCfg* pNewCfg) { for (int32_t i = 0; i < pNewCfg->replicaNum; ++i) { SRaftId raftId = { diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index 7d534c671e..2a44588eef 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -176,7 +176,7 @@ int32_t syncBuildAppendEntriesFromRaftEntry(SSyncNode* pNode, SSyncRaftEntry* pE pMsg->prevLogTerm = prevLogTerm; pMsg->vgId = pNode->vgId; pMsg->srcId = pNode->myRaftId; - pMsg->term = pNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pNode); pMsg->commitIndex = pNode->commitIndex; pMsg->privateTerm = 0; return 0; diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index b3eb5684cf..0a34c370da 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -61,6 +61,7 @@ int32_t syncLogBufferAppend(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt SSyncRaftEntry* pMatch = pBuf->entries[(index - 1 + pBuf->size) % pBuf->size].pItem; ASSERTS(pMatch != NULL, "no matched log entry"); ASSERT(pMatch->index + 1 == index); + ASSERT(pMatch->term <= pEntry->term); SSyncLogBufEntry tmp = {.pItem = pEntry, .prevLogIndex = pMatch->index, .prevLogTerm = pMatch->term}; pBuf->entries[index % pBuf->size] = tmp; @@ -514,7 +515,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm SSyncLogStore* pLogStore = pNode->pLogStore; SSyncFSM* pFsm = pNode->pFsm; ESyncState role = pNode->state; - SyncTerm term = pNode->raftStore.currentTerm; + SyncTerm currentTerm = raftStoreGetTerm(pNode); SyncGroupId vgId = pNode->vgId; int32_t ret = -1; int64_t upperIndex = TMIN(commitIndex, pBuf->matchIndex); @@ -529,7 +530,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm } sTrace("vgId:%d, commit. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), role:%d, term:%" PRId64, - pNode->vgId, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, role, term); + pNode->vgId, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, role, currentTerm); // execute in fsm for (int64_t index = pBuf->commitIndex + 1; index <= upperIndex; index++) { @@ -545,16 +546,16 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm pEntry->term, TMSG_INFO(pEntry->originalRpcType)); } - if (syncLogFsmExecute(pNode, pFsm, role, term, pEntry, 0) != 0) { + if (syncLogFsmExecute(pNode, pFsm, role, currentTerm, pEntry, 0) != 0) { sError("vgId:%d, failed to execute sync log entry. 
index:%" PRId64 ", term:%" PRId64 ", role:%d, current term:%" PRId64, - vgId, pEntry->index, pEntry->term, role, term); + vgId, pEntry->index, pEntry->term, role, currentTerm); goto _out; } pBuf->commitIndex = index; sTrace("vgId:%d, committed index:%" PRId64 ", term:%" PRId64 ", role:%d, current term:%" PRId64 "", pNode->vgId, - pEntry->index, pEntry->term, role, term); + pEntry->index, pEntry->term, role, currentTerm); if (!inBuf) { syncEntryDestroy(pEntry); @@ -576,7 +577,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm _out: // mark as restored if needed if (!pNode->restoreFinish && pBuf->commitIndex >= pNode->commitIndex && pEntry != NULL && - pNode->raftStore.currentTerm <= pEntry->term) { + currentTerm <= pEntry->term) { pNode->pFsm->FpRestoreFinishCb(pNode->pFsm); pNode->restoreFinish = true; sInfo("vgId:%d, restore finished. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pNode->vgId, diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index 197d1463fd..68e735cf0d 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -18,6 +18,9 @@ #include "syncUtil.h" #include "tjson.h" +int32_t raftStoreReadFile(SSyncNode *pNode); +int32_t raftStoreWriteFile(SSyncNode *pNode); + static int32_t raftStoreDecode(const SJson *pJson, SRaftStore *pStore) { int32_t code = 0; @@ -150,27 +153,53 @@ _OVER: return code; } +int32_t raftStoreOpen(SSyncNode *pNode) { + taosThreadMutexInit(&pNode->raftStore.mutex, NULL); + return raftStoreReadFile(pNode); +} + +void raftStoreClose(SSyncNode *pNode) { taosThreadMutexDestroy(&pNode->raftStore.mutex); } + bool raftStoreHasVoted(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); bool b = syncUtilEmptyId(&pNode->raftStore.voteFor); + taosThreadMutexUnlock(&pNode->raftStore.mutex); return (!b); } void raftStoreVote(SSyncNode *pNode, SRaftId *pRaftId) { + taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.voteFor = *pRaftId; (void)raftStoreWriteFile(pNode); + taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreClearVote(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.voteFor = EMPTY_RAFT_ID; (void)raftStoreWriteFile(pNode); + taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreNextTerm(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.currentTerm++; (void)raftStoreWriteFile(pNode); + taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreSetTerm(SSyncNode *pNode, SyncTerm term) { - pNode->raftStore.currentTerm = term; - (void)raftStoreWriteFile(pNode); + taosThreadMutexLock(&pNode->raftStore.mutex); + if (pNode->raftStore.currentTerm < term) { + pNode->raftStore.currentTerm = term; + (void)raftStoreWriteFile(pNode); + } + taosThreadMutexUnlock(&pNode->raftStore.mutex); +} + +SyncTerm raftStoreGetTerm(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); + SyncTerm term = pNode->raftStore.currentTerm; + taosThreadMutexUnlock(&pNode->raftStore.mutex); + return term; } diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 3df203221b..8cdf821cff 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -107,7 +107,7 @@ int32_t syncNodeHeartbeatPeers(SSyncNode* pSyncNode) { SyncHeartbeat* pSyncMsg = rpcMsg.pCont; pSyncMsg->srcId = pSyncNode->myRaftId; pSyncMsg->destId = pSyncNode->peersId[i]; - pSyncMsg->term 
= pSyncNode->raftStore.currentTerm; + pSyncMsg->term = raftStoreGetTerm(pSyncNode); pSyncMsg->commitIndex = pSyncNode->commitIndex; pSyncMsg->minMatchIndex = syncMinMatchIndex(pSyncNode); pSyncMsg->privateTerm = 0; diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c index 069ea2ea88..2fda2a19b8 100644 --- a/source/libs/sync/src/syncRequestVote.c +++ b/source/libs/sync/src/syncRequestVote.c @@ -97,15 +97,14 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } bool logOK = syncNodeOnRequestVoteLogOK(ths, pMsg); - // maybe update term - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(ths)) { syncNodeStepDown(ths, pMsg->term); - // syncNodeUpdateTerm(ths, pMsg->term); } - ASSERT(pMsg->term <= ths->raftStore.currentTerm); + SyncTerm currentTerm = raftStoreGetTerm(ths); + ASSERT(pMsg->term <= currentTerm); - bool grant = (pMsg->term == ths->raftStore.currentTerm) && logOK && + bool grant = (pMsg->term == currentTerm) && logOK && ((!raftStoreHasVoted(ths)) || (syncUtilSameId(&ths->raftStore.voteFor, &pMsg->srcId))); if (grant) { // maybe has already voted for pMsg->srcId @@ -113,7 +112,7 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { raftStoreVote(ths, &(pMsg->srcId)); // candidate ? - syncNodeStepDown(ths, ths->raftStore.currentTerm); + syncNodeStepDown(ths, currentTerm); // forbid elect for this round syncNodeResetElectTimer(ths); @@ -127,8 +126,9 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SyncRequestVoteReply* pReply = rpcMsg.pCont; pReply->srcId = ths->myRaftId; pReply->destId = pMsg->srcId; - pReply->term = ths->raftStore.currentTerm; + pReply->term = currentTerm; pReply->voteGranted = grant; + ASSERT(!grant || pMsg->term == pReply->term); // trace log syncLogRecvRequestVote(ths, pMsg, pReply->voteGranted, ""); diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c index a0d6cbf597..25c9f813a6 100644 --- a/source/libs/sync/src/syncRequestVoteReply.c +++ b/source/libs/sync/src/syncRequestVoteReply.c @@ -47,27 +47,21 @@ int32_t syncNodeOnRequestVoteReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { syncLogRecvRequestVoteReply(ths, pMsg, "not in my config"); return -1; } - + SyncTerm currentTerm = raftStoreGetTerm(ths); // drop stale response - if (pMsg->term < ths->raftStore.currentTerm) { + if (pMsg->term < currentTerm) { syncLogRecvRequestVoteReply(ths, pMsg, "drop stale response"); return -1; } - // ASSERT(!(pMsg->term > ths->raftStore.currentTerm)); - // no need this code, because if I receive reply.term, then I must have sent for that term. - // if (pMsg->term > ths->raftStore.currentTerm) { - // syncNodeUpdateTerm(ths, pMsg->term); - // } - - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > currentTerm) { syncLogRecvRequestVoteReply(ths, pMsg, "error term"); syncNodeStepDown(ths, pMsg->term); return -1; } syncLogRecvRequestVoteReply(ths, pMsg, ""); - ASSERT(pMsg->term == ths->raftStore.currentTerm); + ASSERT(pMsg->term == currentTerm); // This tallies votes even when the current state is not Candidate, // but they won't be looked at, so it doesn't matter. 
diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c index 9373eccaef..f9f14c2e00 100644 --- a/source/libs/sync/src/syncRespMgr.c +++ b/source/libs/sync/src/syncRespMgr.c @@ -143,7 +143,7 @@ static void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl, bool rsp) { .state = pNode->state, .seqNum = *pSeqNum, .term = SYNC_TERM_INVALID, - .currentTerm = pNode->raftStore.currentTerm, + .currentTerm = SYNC_TERM_INVALID, .flag = 0, }; diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 18f263cc95..a83a19928e 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -43,7 +43,7 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI pSender->sendingMS = SYNC_SNAPSHOT_RETRY_MS; pSender->pSyncNode = pSyncNode; pSender->replicaIndex = replicaIndex; - pSender->term = pSyncNode->raftStore.currentTerm; + pSender->term = raftStoreGetTerm(pSyncNode); pSender->startTime = 0; pSender->endTime = 0; pSender->pSyncNode->pFsm->FpGetSnapshotInfo(pSender->pSyncNode->pFsm, &pSender->snapshot); @@ -90,7 +90,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { memset(&pSender->lastConfig, 0, sizeof(pSender->lastConfig)); pSender->sendingMS = 0; - pSender->term = pSender->pSyncNode->raftStore.currentTerm; + pSender->term = raftStoreGetTerm(pSender->pSyncNode); pSender->startTime = taosGetTimestampMs(); pSender->lastSendTime = pSender->startTime; pSender->finish = false; @@ -105,7 +105,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pSender->pSyncNode); pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -185,7 +185,7 @@ static int32_t snapshotSend(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pSender->pSyncNode); pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -226,7 +226,7 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pSender->pSyncNode); pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -314,7 +314,7 @@ SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId from pReceiver->pWriter = NULL; pReceiver->pSyncNode = pSyncNode; pReceiver->fromId = fromId; - pReceiver->term = pSyncNode->raftStore.currentTerm; + pReceiver->term = raftStoreGetTerm(pSyncNode); pReceiver->snapshot.data = NULL; pReceiver->snapshot.lastApplyIndex = SYNC_INDEX_INVALID; pReceiver->snapshot.lastApplyTerm = 0; @@ -380,7 +380,7 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p pReceiver->start = true; 
pReceiver->ack = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT; - pReceiver->term = pReceiver->pSyncNode->raftStore.currentTerm; + pReceiver->term = raftStoreGetTerm(pReceiver->pSyncNode); pReceiver->fromId = pPreMsg->srcId; pReceiver->startTime = pPreMsg->startTime; @@ -437,9 +437,8 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap } // maybe update term - if (pReceiver->snapshot.lastApplyTerm > pReceiver->pSyncNode->raftStore.currentTerm) { - pReceiver->pSyncNode->raftStore.currentTerm = pReceiver->snapshot.lastApplyTerm; - (void)raftStoreWriteFile(pReceiver->pSyncNode); + if (pReceiver->snapshot.lastApplyTerm > raftStoreGetTerm(pReceiver->pSyncNode)) { + raftStoreSetTerm(pReceiver->pSyncNode, pReceiver->snapshot.lastApplyTerm); } // stop writer, apply data @@ -584,7 +583,7 @@ _SEND_REPLY: SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -640,7 +639,7 @@ _SEND_REPLY: SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -690,7 +689,7 @@ static int32_t syncNodeOnSnapshotReceive(SSyncNode *pSyncNode, SyncSnapshotSend SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -737,7 +736,7 @@ static int32_t syncNodeOnSnapshotEnd(SSyncNode *pSyncNode, SyncSnapshotSend *pMs SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -786,13 +785,13 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { return -1; } - if (pMsg->term < pSyncNode->raftStore.currentTerm) { + if (pMsg->term < raftStoreGetTerm(pSyncNode)) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "reject since small term"); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; return -1; } - if (pMsg->term > pSyncNode->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(pSyncNode)) { syncNodeStepDown(pSyncNode, pMsg->term); } syncNodeResetElectTimer(pSyncNode); @@ -800,7 +799,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { // state, term, seq/ack int32_t code = 0; if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) { - if (pMsg->term == pSyncNode->raftStore.currentTerm) { + if (pMsg->term == raftStoreGetTerm(pSyncNode)) { if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq pre-snapshot"); code = syncNodeOnSnapshotPrep(pSyncNode, pMsg); @@ -884,7 +883,7 @@ static int32_t syncNodeOnSnapshotPrepRsp(SSyncNode *pSyncNode, SSyncSnapshotSend SyncSnapshotSend *pSendMsg = rpcMsg.pCont; pSendMsg->srcId = pSender->pSyncNode->myRaftId; 
pSendMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pSendMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pSendMsg->term = raftStoreGetTerm(pSender->pSyncNode); pSendMsg->beginIndex = pSender->snapshotParam.start; pSendMsg->lastIndex = pSender->snapshot.lastApplyIndex; pSendMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -943,10 +942,11 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { goto _ERROR; } - if (pMsg->term != pSyncNode->raftStore.currentTerm) { + SyncTerm currentTerm = raftStoreGetTerm(pSyncNode); + if (pMsg->term != currentTerm) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "snapshot sender and receiver term not match"); sSError(pSender, "snapshot sender term not equal, msg term:%" PRId64 " currentTerm:%" PRId64, pMsg->term, - pSyncNode->raftStore.currentTerm); + currentTerm); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; goto _ERROR; } diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 97641b8f41..a519c76cda 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -154,7 +154,7 @@ static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) { if (pNode == NULL || pNode->pLogStore == NULL) return; - int64_t currentTerm = pNode->raftStore.currentTerm; + int64_t currentTerm = raftStoreGetTerm(pNode); // save error code, otherwise it will be overwritten int32_t errCode = terrno; @@ -260,7 +260,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla pNode->vgId, eventLog, syncStr(pNode->state), pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->replicaIndex, - DID(&pNode->replicasId[pSender->replicaIndex]), pNode->raftStore.currentTerm, pNode->commitIndex, + DID(&pNode->replicasId[pSender->replicaIndex]), raftStoreGetTerm(pNode), pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, @@ -308,7 +308,7 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex, - pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, + raftStoreGetTerm(pNode), pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); diff --git a/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c b/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c index 1dbf4fb4fb..18a75934fd 100644 --- 
a/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c @@ -199,7 +199,7 @@ inline char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { ", sby:%d, " "r-num:%d, " "lcfg:%" PRId64 ", chging:%d, rsto:%d", - pSyncNode->vgId, syncStr(pSyncNode->state), pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, + pSyncNode->vgId, syncStr(pSyncNode->state), raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, pSyncNode->raftCfg.isStandBy, pSyncNode->replicaNum, pSyncNode->raftCfg.lastConfigIndex, pSyncNode->changing, pSyncNode->restoreFinish); diff --git a/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c b/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c index d8740de16a..2edcb0ad4d 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c @@ -137,7 +137,7 @@ int32_t syncNodeOnPreSnapshot(SSyncNode *ths, SyncPreSnapshot *pMsg) { SyncPreSnapshotReply *pMsgReply = syncPreSnapshotReplyBuild(ths->vgId); pMsgReply->srcId = ths->myRaftId; pMsgReply->destId = pMsg->srcId; - pMsgReply->term = ths->raftStore.currentTerm; + pMsgReply->term = raftStoreGetTerm(ths); SSyncLogStoreData *pData = ths->pLogStore->data; SWal *pWal = pData->pWal; From c1e7b3be662e98e6b2316e75a6c86189228b4485 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 19:10:31 +0800 Subject: [PATCH 157/267] fix(query): 1. reset the pointer, when initialize the reader failed. 2. add check for null pointer when extracting cached rows. --- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 7 +++++++ source/dnode/vnode/src/tsdb/tsdbRead.c | 1 + 2 files changed, 8 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index fd5e8eb6e0..cf2e348fb2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -41,6 +41,13 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p int32_t slotId = slotIds[i]; SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId); + // add check for null value, caused by the modification of table schema (new column added). + if (pColVal == NULL) { + p->ts = 0; + p->isNull = true; + continue; + } + p->ts = pColVal->ts; p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal); allNullRow = p->isNull & allNullRow; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index e9bb7e3d09..eb083a75cd 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -3942,6 +3942,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL _err: tsdbError("failed to create data reader, code:%s %s", tstrerror(code), idstr); tsdbReaderClose(pReader); + *ppReader = NULL; // reset the pointer value. 
return code; } From ee9c59dc778ec622d0e84e5bf95337dea306353b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 13 Feb 2023 19:30:48 +0800 Subject: [PATCH 158/267] fix(tsdb/cache): invalidate cache entry if schema changed --- source/dnode/vnode/src/tsdb/tsdbCache.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index fb2efda8e4..e2ffe398bf 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -244,6 +244,11 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST int16_t nCol = taosArrayGetSize(pLast); int16_t iCol = 0; + if (nCol != pTSchema->numOfCols) { + invalidate = true; + goto _invalidate; + } + SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol); if (keyTs > tTsVal->ts) { STColumn *pTColumn = &pTSchema->columns[0]; @@ -259,6 +264,12 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST SColVal colVal = {0}; tTSRowGetVal(row, pTSchema, iCol, &colVal); + + if (colVal.cid != tColVal->cid) { + invalidate = true; + goto _invalidate; + } + if (!COL_VAL_IS_NONE(&colVal)) { if (keyTs == tTsVal1->ts && !COL_VAL_IS_NONE(tColVal)) { invalidate = true; @@ -315,6 +326,11 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb int16_t nCol = taosArrayGetSize(pLast); int16_t iCol = 0; + if (nCol != pTSchema->numOfCols) { + invalidate = true; + goto _invalidate; + } + SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol); if (keyTs > tTsVal->ts) { STColumn *pTColumn = &pTSchema->columns[0]; @@ -330,6 +346,12 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb SColVal colVal = {0}; tTSRowGetVal(row, pTSchema, iCol, &colVal); + + if (colVal.cid != tColVal->cid) { + invalidate = true; + goto _invalidate; + } + if (COL_VAL_IS_VALUE(&colVal)) { if (keyTs == tTsVal1->ts && COL_VAL_IS_VALUE(tColVal)) { invalidate = true; From fab32ae9307fd145e16939d98188c8ee89aac596 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 13 Feb 2023 23:19:09 +0800 Subject: [PATCH 159/267] fix(query): fix bug in "tbname in" query. 
--- source/libs/executor/src/executil.c | 5 ++++- tests/script/tsim/parser/nestquery.sim | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index b398f66e19..8a2d18228a 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -943,8 +943,12 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN // int64_t stt = taosGetTimestampUs(); SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo)); + copyExistedUids(pUidTagList, pUidList); + int32_t filter = optimizeTbnameInCond(metaHandle, pListInfo->suid, pUidTagList, pTagCond); if (filter == 0) { // tbname in filter is activated, do nothing and return + taosArrayClear(pUidList); + int32_t numOfRows = taosArrayGetSize(pUidTagList); taosArrayEnsureCap(pUidList, numOfRows); for(int32_t i = 0; i < numOfRows; ++i) { @@ -956,7 +960,6 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN goto end; } else { // here we retrieve all tags from the vnode table-meta store - copyExistedUids(pUidTagList, pUidList); code = metaGetTableTags(metaHandle, pListInfo->suid, pUidTagList); if (code != TSDB_CODE_SUCCESS) { qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), pListInfo->suid); diff --git a/tests/script/tsim/parser/nestquery.sim b/tests/script/tsim/parser/nestquery.sim index 494c3de99f..2a363de43d 100644 --- a/tests/script/tsim/parser/nestquery.sim +++ b/tests/script/tsim/parser/nestquery.sim @@ -351,7 +351,7 @@ sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0 print ===========>td-4805 sql_error select tbname, i from (select * from nest_tb0) group by i; -sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1; +sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1 order by c1; if $rows != 2 then return -1 endi From 61a7751b574d1d0acbeb740b0a92f226da40b0f1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 14 Feb 2023 10:12:13 +0800 Subject: [PATCH 160/267] fix(query): fix bug in tIntToHex and add test case. 
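The change below guards the zero-value case in the hex conversion: the original skip-leading-zero-nibbles loop never finds a non-zero nibble when val is 0. The following is a minimal standalone C sketch of that edge case for illustration only, assuming plain standard C; it is not the patched tintToHex function itself, and the helper name and the sprintf-based cross-check are additions made here for the example.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative sketch: convert an unsigned 64-bit value to lowercase hex
     * by skipping leading zero nibbles. Without the explicit zero guard, the
     * skip loop cannot terminate on a non-zero nibble for val == 0. */
    static int toHexSketch(uint64_t val, char *hex) {
      const char digits[16] = {'0','1','2','3','4','5','6','7',
                               '8','9','a','b','c','d','e','f'};
      int j = 0, k = 0;
      if (val == 0) {           /* guard for the zero-value edge case */
        hex[j++] = digits[0];
        hex[j] = '\0';          /* terminate; the sketch does not pre-zero the buffer */
        return j;
      }
      while ((val & (((uint64_t)0xfULL) << ((15 - k) * 4))) == 0) {
        k += 1;                 /* skip leading zero nibbles */
      }
      for (; k < 16; ++k) {     /* emit the remaining nibbles, most significant first */
        hex[j++] = digits[(val >> ((15 - k) * 4)) & 0xf];
      }
      hex[j] = '\0';
      return j;
    }

    int main(void) {
      char buf[32], ref[32];
      uint64_t samples[] = {0, 1, 100000000ULL, 0xdeadbeefULL};
      for (int i = 0; i < 4; ++i) {
        toHexSketch(samples[i], buf);
        snprintf(ref, sizeof(ref), "%llx", (unsigned long long)samples[i]);
        printf("%llu -> %s (sprintf: %s)\n", (unsigned long long)samples[i], buf, ref);
      }
      return 0;
    }

Running the sketch prints matching strings for all samples, including "0", which is the case the patch fixes and the new utilTests.cpp case exercises.
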
--- source/libs/executor/src/executorimpl.c | 4 +--- source/util/src/tutil.c | 9 ++++++-- source/util/test/utilTests.cpp | 28 +++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index ef76402d34..d28c3cfe58 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1988,14 +1988,12 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTaskInfo->schemaInfo.dbname = strdup(dbFName); - pTaskInfo->id.queryId = queryId; pTaskInfo->execModel = model; pTaskInfo->pTableInfoList = tableListCreate(); pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo)); pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES); -// char* p = taosMemoryMalloc(64); -// snprintf(p, 64, "TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, queryId); + pTaskInfo->id.queryId = queryId; pTaskInfo->id.str = buildTaskId(taskId, queryId); return pTaskInfo; } diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index 8beda55c79..55d7d4f6e7 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -323,8 +323,13 @@ char *strbetween(char *string, char *begin, char *end) { int32_t tintToHex(uint64_t val, char hex[]) { const char hexstr[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; - int32_t j = 0; - int32_t k = 0; + int32_t j = 0, k = 0; + if (val == 0) { + hex[j++] = hexstr[0]; + return j; + } + + // ignore the initial 0 while((val & (((uint64_t)0xfL) << ((15 - k) * 4))) == 0) { k += 1; } diff --git a/source/util/test/utilTests.cpp b/source/util/test/utilTests.cpp index c56ef348cc..a355125410 100644 --- a/source/util/test/utilTests.cpp +++ b/source/util/test/utilTests.cpp @@ -294,4 +294,32 @@ TEST(utilTest, tstrncspn) { const char* reject5 = "911"; v = tstrncspn(p2, strlen(p2), reject5, 0); ASSERT_EQ(v, 14); +} + +TEST(utilTest, intToHextStr) { + char buf[64] = {0}; + + int64_t v = 0; + tintToHex(0, buf); + ASSERT_STREQ(buf, "0"); + + v = 100000000; + tintToHex(v, buf); + + char destBuf[128]; + sprintf(destBuf, "%" PRIx64, v); + ASSERT_STREQ(buf, destBuf); + + taosSeedRand(taosGetTimestampSec()); + + for(int32_t i = 0; i < 100000; ++i) { + memset(buf, 0, tListLen(buf)); + memset(destBuf, 0, tListLen(destBuf)); + + v = taosRand(); + tintToHex(v, buf); + + sprintf(destBuf, "%" PRIx64, v); + ASSERT_STREQ(buf, destBuf); + } } \ No newline at end of file From 8ec5df8d7dd6be81bc955acaae554fb29e65754c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 14 Feb 2023 10:20:55 +0800 Subject: [PATCH 161/267] fix(tsdb/cache): Not free zero length var data --- source/dnode/vnode/src/tsdb/tsdbCache.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index e2ffe398bf..ec0944193a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -279,7 +279,8 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST SLastCol lastCol = {.ts = keyTs, .colVal = colVal}; if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) { SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol); - taosMemoryFree(pLastCol->colVal.value.pData); + if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData) + taosMemoryFree(pLastCol->colVal.value.pData); 
lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData); if (lastCol.colVal.value.pData == NULL) { @@ -361,7 +362,8 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb SLastCol lastCol = {.ts = keyTs, .colVal = colVal}; if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) { SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol); - taosMemoryFree(pLastCol->colVal.value.pData); + if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData) + taosMemoryFree(pLastCol->colVal.value.pData); lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData); if (lastCol.colVal.value.pData == NULL) { From 606993dc1de5a447603bb85cb632381171d6c424 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 14 Feb 2023 10:22:09 +0800 Subject: [PATCH 162/267] fix: restrict interp query on stable for now --- source/libs/parser/src/parTranslater.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 57b13f0218..0d052846f7 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1467,6 +1467,15 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; + + if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) || + (TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType && + TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, + "%s is only supported in single table query", pFunc->functionName); + } + if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc || pSelect->hasIndefiniteRowsFunc) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } From 3f38d56c0d7cc054abb37316705e1145aac2efdf Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 14 Feb 2023 10:40:16 +0800 Subject: [PATCH 163/267] fix:number of ssdatablock rows exceeds the capacity --- source/libs/executor/src/filloperator.c | 37 +-- .../script/tsim/stream/fillIntervalRange.sim | 225 ++++++++++++++++++ 2 files changed, 247 insertions(+), 15 deletions(-) create mode 100644 tests/script/tsim/stream/fillIntervalRange.sim diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index 41e4c990f8..16983cb507 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -832,10 +832,13 @@ static bool checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t group return true; } -static void buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFillSup, TSKEY ts, SSDataBlock* pBlock) { +static bool buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFillSup, TSKEY ts, SSDataBlock* pBlock) { + if (pBlock->info.rows >= pBlock->info.capacity) { + return false; + } uint64_t groupId = pBlock->info.id.groupId; if (pFillSup->hasDelete && !checkResult(pFillSup, ts, groupId)) { - return; + return true; } for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) { SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; @@ -853,6 +856,7 @@ static void buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFill } } pBlock->info.rows++; + return true; } static bool hasRemainCalc(SStreamFillInfo* pFillInfo) { 
@@ -932,7 +936,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* } if (pFillInfo->pos == FILL_POS_START) { - buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes); + if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) { + pFillInfo->pos = FILL_POS_INVALID; + } } if (pFillInfo->type != TSDB_FILL_LINEAR) { doStreamFillNormal(pFillSup, pFillInfo, pRes); @@ -940,7 +946,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* doStreamFillLinear(pFillSup, pFillInfo, pRes); if (pFillInfo->pos == FILL_POS_MID) { - buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes); + if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) { + pFillInfo->pos = FILL_POS_INVALID; + } } if (pFillInfo->current > pFillInfo->end && pFillInfo->pLinearInfo->hasNext) { @@ -954,7 +962,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* } } if (pFillInfo->pos == FILL_POS_END) { - buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes); + if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) { + pFillInfo->pos = FILL_POS_INVALID; + } } } @@ -989,10 +999,6 @@ static void doStreamFillImpl(SOperatorInfo* pOperator) { uint64_t groupId = pBlock->info.id.groupId; SSDataBlock* pRes = pInfo->pRes; pRes->info.id.groupId = groupId; - if (hasRemainCalc(pFillInfo)) { - doStreamFillRange(pFillInfo, pFillSup, pRes); - } - SColumnInfoData* pTsCol = taosArrayGet(pInfo->pSrcBlock->pDataBlock, pInfo->primaryTsCol); TSKEY* tsCol = (TSKEY*)pTsCol->pData; @@ -1204,13 +1210,14 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) { return NULL; } blockDataCleanup(pInfo->pRes); - if (pOperator->status == OP_RES_TO_RETURN) { - if (hasRemainCalc(pInfo->pFillInfo)) { - doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes); - if (pInfo->pRes->info.rows > 0) { - return pInfo->pRes; - } + if (hasRemainCalc(pInfo->pFillInfo) || (pInfo->pFillInfo->pos != FILL_POS_INVALID && pInfo->pFillInfo->needFill == true )) { + doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes); + if (pInfo->pRes->info.rows > 0) { + printDataBlock(pInfo->pRes, "stream fill"); + return pInfo->pRes; } + } + if (pOperator->status == OP_RES_TO_RETURN) { doDeleteFillFinalize(pOperator); if (pInfo->pRes->info.rows > 0) { printDataBlock(pInfo->pRes, "stream fill"); diff --git a/tests/script/tsim/stream/fillIntervalRange.sim b/tests/script/tsim/stream/fillIntervalRange.sim new file mode 100644 index 0000000000..a0905141f2 --- /dev/null +++ b/tests/script/tsim/stream/fillIntervalRange.sim @@ -0,0 +1,225 @@ +$loop_all = 0 +looptest: + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start + +sleep 500 +sql connect + +sql drop database if exists test; +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));; +sql create stream streams1 trigger at_once into streamt as select _wstart ts, count(*) c1 from t1 interval(1s) fill(NULL); +sql insert into t1 values(1648791211000,1,2,3,1.0,'aaa'); +sleep 100 +sql insert into t1 values(1648795308000,1,2,3,1.0,'aaa'); + +$loop_count = 0 + +loop0: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 2 then + print =====rows=$rows + goto loop0 +endi + +sql select count(*) from streamt; + +if $data00 != 4098 then + print 
=====data00=$data00 + goto loop0 +endi + +sql insert into t1 values(1648800308000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop1: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 3 then + print =====rows=$rows + goto loop1 +endi + +sql select count(*) from streamt; + +if $data00 != 9098 then + print =====rows=$rows + goto loop1 +endi + +sql insert into t1 values(1648786211000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop2: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 4 then + print =====rows=$rows + goto loop2 +endi + +sql select count(*) from streamt; + +if $data00 != 14098 then + print =====rows=$rows + goto loop2 +endi + +sql insert into t1 values(1648801308000,1,1,1,1.0,'aaa') (1648802308000,1,1,1,1.0,'aaa') (1648803308000,1,1,1,1.0,'aaa') (1648804308000,1,1,1,1.0,'aaa') (1648805308000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop21: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 9 then + print =====rows=$rows + goto loop21 +endi + +sql select count(*) from streamt; + +if $data00 != 19098 then + print =====rows=$rows + goto loop21 +endi + +sql drop database if exists test; +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); +print create stream streams1 trigger at_once into streamt as select _wstart ts, max(a) c1 from t1 interval(1s) fill(linear); +sql create stream streams1 trigger at_once into streamt as select _wstart ts, max(a) c1 from t1 interval(1s) fill(linear); + +print create stream streams2 trigger at_once into streamt2 as select _wstart ts, max(a) c1 from t1 interval(1s) fill(prev); +sql create stream streams2 trigger at_once into streamt2 as select _wstart ts, max(a) c1 from t1 interval(1s) fill(prev); + +sql insert into t1 values(1648791211000,1,2,3,1.0,'aaa'); +sleep 100 +sql insert into t1 values(1648795308000,1,2,3,1.0,'aaa'); + +$loop_count = 0 + +loop3: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 + +print select count(*) from streamt; +sql select count(*) from streamt; + +if $data00 != 4098 then + print =====data00=$data00 + goto loop3 +endi + +print select count(*) from streamt2; +sql select count(*) from streamt2; + +if $data00 != 4098 then + print =====data00=$data00 + goto loop3 +endi + +sql insert into t1 values(1648800308000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop4: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 + +print select count(*) from streamt; +sql select count(*) from streamt; + +if $data00 != 9098 then + print =====rows=$rows + goto loop4 +endi + +print select count(*) from streamt2; +sql select count(*) from streamt2; + +if $data00 != 9098 then + print =====rows=$rows + goto loop4 +endi + +sql insert into t1 values(1648786211000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop5: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 + +print select count(*) from streamt; +sql select count(*) from streamt; + +if $data00 != 14098 then + print =====rows=$rows + goto loop5 +endi + +print select count(*) from streamt2; +sql select count(*) from streamt2; + +if $data00 != 14098 then + print =====rows=$rows + goto loop5 +endi + +system sh/stop_dnodes.sh + 
+#goto looptest From 6fffc717857b6897d30ff0aeb697b1f397a0186a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 14 Feb 2023 10:41:17 +0800 Subject: [PATCH 164/267] fix(query): check validation of suid, --- source/libs/executor/src/sysscanoperator.c | 2 +- tests/script/tsim/parser/regressiontest.sim | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 3f9e61dbbb..88d9fb1a1f 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -2001,7 +2001,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi } pInfo->readHandle = *readHandle; - pInfo->uid = pBlockScanNode->suid; + pInfo->uid = (pBlockScanNode->suid != 0)? pBlockScanNode->suid:pBlockScanNode->uid; int32_t numOfCols = 0; SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols); diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim index c08b1bbf27..1a18d1795c 100644 --- a/tests/script/tsim/parser/regressiontest.sim +++ b/tests/script/tsim/parser/regressiontest.sim @@ -143,9 +143,11 @@ sql delete from t1 where ts<=1537146409500 sql flush database $db +print ======================================>TS-2639 +sql show table distributed t1; + print =====================================>TD-22007 sql select count(*) from t1 interval(10a) - sql drop table t1 sql create table st1 (ts timestamp, k int) tags(a int); From ea81fc0b309c57f5d03fee349810ebba630df9b5 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 14 Feb 2023 10:56:00 +0800 Subject: [PATCH 165/267] fix(vnd): zero meta in vnode when closing --- source/dnode/vnode/src/inc/vnodeInt.h | 2 +- source/dnode/vnode/src/meta/metaOpen.c | 6 ++++-- source/dnode/vnode/src/vnd/vnodeOpen.c | 4 ++-- source/dnode/vnode/test/tsdbSmaTest.cpp | 6 +++--- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 2501af7f04..ec925087d0 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -107,7 +107,7 @@ typedef struct STbUidStore STbUidStore; #define META_BEGIN_HEAP_NIL 2 int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback); -int metaClose(SMeta* pMeta); +int metaClose(SMeta** pMeta); int metaBegin(SMeta* pMeta, int8_t fromSys); TXN* metaGetTxn(SMeta* pMeta); int metaCommit(SMeta* pMeta, TXN* txn); diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 35677d6f07..550d7b587c 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -201,7 +201,8 @@ _err: return -1; } -int metaClose(SMeta *pMeta) { +int metaClose(SMeta **ppMeta) { + SMeta *pMeta = *ppMeta; if (pMeta) { if (pMeta->pEnv) metaAbort(pMeta); if (pMeta->pCache) metaCacheClose(pMeta); @@ -221,7 +222,8 @@ int metaClose(SMeta *pMeta) { if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb); if (pMeta->pEnv) tdbClose(pMeta->pEnv); metaDestroyLock(pMeta); - taosMemoryFree(pMeta); + + taosMemoryFreeClear(*ppMeta); } return 0; diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 58d9f1a049..97ee2e4a89 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -239,7 +239,7 @@ _err: if (pVnode->pWal) walClose(pVnode->pWal); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); if 
(pVnode->pSma) smaClose(pVnode->pSma); - if (pVnode->pMeta) metaClose(pVnode->pMeta); + if (pVnode->pMeta) metaClose(&pVnode->pMeta); if (pVnode->pPool) vnodeCloseBufPool(pVnode); tsem_destroy(&(pVnode->canCommit)); @@ -263,7 +263,7 @@ void vnodeClose(SVnode *pVnode) { tqClose(pVnode->pTq); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); smaClose(pVnode->pSma); - metaClose(pVnode->pMeta); + if (pVnode->pMeta) metaClose(&pVnode->pMeta); vnodeCloseBufPool(pVnode); tsem_post(&pVnode->canCommit); diff --git a/source/dnode/vnode/test/tsdbSmaTest.cpp b/source/dnode/vnode/test/tsdbSmaTest.cpp index be101059f2..43eaacfff9 100644 --- a/source/dnode/vnode/test/tsdbSmaTest.cpp +++ b/source/dnode/vnode/test/tsdbSmaTest.cpp @@ -283,7 +283,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) { metaRemoveSmaFromDb(pMeta, indexUid2); tDestroyTSma(&tSma); - metaClose(pMeta); + metaClose(&pMeta); } #endif @@ -577,9 +577,9 @@ TEST(testCase, tSma_Data_Insert_Query_Test) { tDestroyTSma(&tSma); tfsClose(pTsdb->pTfs); tsdbClose(pTsdb); - metaClose(pMeta); + metaClose(&pMeta); } #endif -#pragma GCC diagnostic pop \ No newline at end of file +#pragma GCC diagnostic pop From e6b0560d470c7e47db1234a161eae7c5b69eabcb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 14 Feb 2023 11:55:21 +0800 Subject: [PATCH 166/267] test(query): update the sim. --- tests/script/tsim/compute/block_dist.sim | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/script/tsim/compute/block_dist.sim b/tests/script/tsim/compute/block_dist.sim index 4fdcf63e34..772959644e 100644 --- a/tests/script/tsim/compute/block_dist.sim +++ b/tests/script/tsim/compute/block_dist.sim @@ -81,7 +81,6 @@ $nt = $ntPrefix . $i #sql select _block_dist() from $nt print show table distributed $nt -sql_error show table distributed $nt #if $rows == 0 then # return -1 From b9918b0eda5bc84690d8cc53f95ca76b6bc617b3 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 14 Feb 2023 13:49:07 +0800 Subject: [PATCH 167/267] fix: add alter dnode configuration validdation --- source/client/src/clientMain.c | 1 + source/dnode/mnode/impl/src/mndDnode.c | 14 +++++++++++++- source/libs/scalar/src/filter.c | 2 +- tests/script/tsim/alter/dnode.sim | 4 ++++ 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index e5f677637e..2042ff141c 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -934,6 +934,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { } if (TSDB_CODE_SUCCESS == code) { + pRequest->stmtType = pRequest->pQuery->pRoot->type; phaseAsyncQuery(pWrapper); } else { tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code), diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index f4e6aad7a7..b8bafff104 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -880,6 +880,12 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (strcasecmp(cfgReq.config, "resetlog") == 0) { strcpy(dcfgReq.config, "resetlog"); } else if (strncasecmp(cfgReq.config, "monitor", 7) == 0) { + if (' ' != cfgReq.config[7] && 0 != cfgReq.config[7]) { + mError("dnode:%d, failed to config monitor since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + const char *value = cfgReq.value; int32_t flag = atoi(value); if (flag <= 0) { @@ -900,12 +906,18 @@ static int32_t 
mndProcessConfigDnodeReq(SRpcMsg *pReq) { int32_t optLen = strlen(optName); if (strncasecmp(cfgReq.config, optName, optLen) != 0) continue; + if (' ' != cfgReq.config[optLen] && 0 != cfgReq.config[optLen]) { + mError("dnode:%d, failed to config since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + const char *value = cfgReq.value; int32_t flag = atoi(value); if (flag <= 0) { flag = atoi(cfgReq.config + optLen + 1); } - if (flag <= 0 || flag > 255) { + if (flag < 0 || flag > 255) { mError("dnode:%d, failed to config %s since value:%d", cfgReq.dnodeId, optName, flag); terrno = TSDB_CODE_INVALID_CFG; return -1; diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 74d555af77..25e65d2588 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -1057,7 +1057,7 @@ static FORCE_INLINE int32_t filterAddColFieldFromField(SFilterInfo *info, SFilte int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *fid) { if (node == NULL) { - fltError("empty node"); + fltDebug("empty node"); FLT_ERR_RET(TSDB_CODE_APP_ERROR); } diff --git a/tests/script/tsim/alter/dnode.sim b/tests/script/tsim/alter/dnode.sim index 8cfa86a88a..f9b794924b 100644 --- a/tests/script/tsim/alter/dnode.sim +++ b/tests/script/tsim/alter/dnode.sim @@ -58,6 +58,8 @@ sql_error alter dnode 1 'monDebugFlag 131' sql_error alter dnode 1 'cqDebugFlag 131' sql_error alter dnode 1 'httpDebugFlag 131' sql_error alter dnode 1 'mqttDebugFlag 131' +sql_error alter dnode 1 'qDebugFlaga 131' +sql_error alter all dnodes 'qDebugFlaga 131' sql_error alter dnode 2 'wDebugFlag' '135' sql_error alter dnode 2 'tmrDebugFlag' '135' @@ -65,6 +67,8 @@ sql_error alter dnode 1 'monDebugFlag' '131' sql_error alter dnode 1 'cqDebugFlag' '131' sql_error alter dnode 1 'httpDebugFlag' '131' sql_error alter dnode 1 'mqttDebugFlag' '131' +sql_error alter dnode 1 'qDebugFlaga' '131' +sql_error alter all dnodes 'qDebugFlaga' '131' print ======== step3 sql_error alter $hostname1 debugFlag 135 From e010d35839f318bcb6ac0297a1d22c001d96d4a1 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 14 Feb 2023 14:12:17 +0800 Subject: [PATCH 168/267] fix: remove code --- tools/shell/src/shellAuto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 6e50a97c02..0492a76f8d 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -640,7 +640,7 @@ bool shellAutoInit() { void shellSetConn(TAOS* conn) { varCon = conn; // init database and stable - updateTireValue(WT_VAR_DBNAME, false); + //updateTireValue(WT_VAR_DBNAME, false); } // exit shell auto funciton, shell exit call once @@ -1977,7 +1977,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) { if (dealUseDB(sql)) { // change to new db - updateTireValue(WT_VAR_STABLE, false); + //updateTireValue(WT_VAR_STABLE, false); return; } From 10e01c0512de659d91d6b622389db379681289d3 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 14 Feb 2023 14:25:03 +0800 Subject: [PATCH 169/267] add test cases --- tests/system-test/2-query/interp.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index ce57357abd..d30575aaa3 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -1850,15 +1850,16 @@ class TDTestCase: tdSql.error(f"select interp('abcd') from 
{dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdLog.printNoPrefix("==========step13:stable cases") - #tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") #tdSql.checkRows(13) #tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") #tdSql.checkRows(13) - #tdSql.query(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)") #tdSql.checkRows(13) #tdSql.query(f"select _irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)") From a1eafe88ac78812dbea8d25d831901959f742547 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 14 Feb 2023 14:48:33 +0800 Subject: [PATCH 170/267] fix(query): fix some errors. --- source/libs/executor/src/executil.c | 4 +- source/libs/executor/src/timewindowoperator.c | 2 + source/util/src/tsimplehash.c | 22 +++++- .../develop-test/2-query/table_count_scan.py | 68 ++++++++++--------- tests/script/tsim/scalar/caseWhen.sim | 2 +- 5 files changed, 61 insertions(+), 37 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 8a2d18228a..da4d8317a8 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1026,7 +1026,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _end; } } else { T_MD5_CTX context = {0}; @@ -1064,7 +1064,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _end; } // let's add the filter results into meta-cache diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 1cefc6b0ec..1fe1b9081b 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -847,6 +847,7 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_ if (newPos == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } + newPos->groupId = groupId; newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; *(int64_t*)newPos->key = ts; @@ -854,6 +855,7 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_ if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { taosMemoryFree(newPos); } + return TSDB_CODE_SUCCESS; } diff --git a/source/util/src/tsimplehash.c b/source/util/src/tsimplehash.c index 062d7a0ae4..70acffed5d 100644 --- a/source/util/src/tsimplehash.c +++ b/source/util/src/tsimplehash.c @@ -81,6 +81,7 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) { terrno = TSDB_CODE_OUT_OF_MEMORY; return 
NULL; } + return pHashObj; } @@ -92,6 +93,7 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) { } static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) { +#if 0 void** p = taosArrayGetLast(pHashObj->pHashNodeBuf); if (p == NULL || (pHashObj->offset + size) > DEFAULT_BUF_PAGE_SIZE) { // let's allocate one new page @@ -112,6 +114,9 @@ static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) { pHashObj->offset += size; return pPos; } +#else + return taosMemoryMalloc(size); +#endif } static SHNode *doCreateHashNode(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen, @@ -356,7 +361,22 @@ void tSimpleHashClear(SSHashObj *pHashObj) { return; } - memset(pHashObj->hashList, 0, pHashObj->capacity * sizeof(void*)); + SHNode *pNode = NULL, *pNext = NULL; + for (int32_t i = 0; i < pHashObj->capacity; ++i) { + pNode = pHashObj->hashList[i]; + if (!pNode) { + continue; + } + + while (pNode) { + pNext = pNode->next; + FREE_HASH_NODE(pNode); + pNode = pNext; + } + + pHashObj->hashList[i] = NULL; + } + taosArrayClearEx(pHashObj->pHashNodeBuf, destroyItems); pHashObj->offset = 0; pHashObj->size = 0; diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py index 1ef65bfc67..5bdc915cdd 100644 --- a/tests/develop-test/2-query/table_count_scan.py +++ b/tests/develop-test/2-query/table_count_scan.py @@ -75,7 +75,7 @@ class TDTestCase: tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;') tdSql.checkRows(3) tdSql.checkData(0, 0, 23) tdSql.checkData(0, 1, 'information_schema') @@ -87,12 +87,12 @@ class TDTestCase: tdSql.checkData(2, 1, 'tbl_count') tdSql.checkData(2, 2, 'stb1') - tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name') + tdSql.query('select count(1) v,db_name from information_schema.ins_tables group by db_name order by v asc') tdSql.checkRows(3) - tdSql.checkData(0, 0, 5) - tdSql.checkData(0, 1, 'performance_schema') - tdSql.checkData(1, 0, 3) - tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(1, 0, 5) + tdSql.checkData(1, 1, 'performance_schema') + tdSql.checkData(0, 0, 3) + tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') @@ -177,42 +177,44 @@ class TDTestCase: tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:29\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') - tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.query('select count(*) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v;') tdSql.checkRows(4) tdSql.checkData(0, 0, 1) tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(0, 2, 'stba') - tdSql.checkData(1, 0, 23) - tdSql.checkData(1, 1, 'information_schema') - tdSql.checkData(1, 2, None) - tdSql.checkData(2, 0, 3) - tdSql.checkData(2, 1, 'tbl_count') - tdSql.checkData(2, 2, 'stb1') - tdSql.checkData(3, 0, 5) - tdSql.checkData(3, 1, 'performance_schema') + + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(1, 2, 'stb1') + tdSql.checkData(2, 0, 5) + tdSql.checkData(2, 1, 'performance_schema') + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 0, 23) + 
tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) - tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v;') tdSql.checkRows(4) - tdSql.checkData(0, 0, 23) - tdSql.checkData(0, 1, 'information_schema') - tdSql.checkData(0, 2, None) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 'tbl_count') + tdSql.checkData(0, 2, 'stba') + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(1, 2, 'stb1') + tdSql.checkData(2, 0, 5) + tdSql.checkData(2, 1, 'performance_schema') + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 0, 23) + tdSql.checkData(3, 1, 'information_schema') + tdSql.checkData(3, 2, None) + + tdSql.query('select count(1) v,db_name from information_schema.ins_tables group by db_name order by v') + tdSql.checkRows(3) + + tdSql.checkData(0, 0, 4) + tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(1, 2, None) - tdSql.checkData(2, 0, 1) - tdSql.checkData(2, 1, 'tbl_count') - tdSql.checkData(2, 2, 'stba') - tdSql.checkData(3, 0, 3) - tdSql.checkData(3, 1, 'tbl_count') - tdSql.checkData(3, 2, 'stb1') - - tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name') - tdSql.checkRows(3) - tdSql.checkData(0, 0, 5) - tdSql.checkData(0, 1, 'performance_schema') - tdSql.checkData(1, 0, 4) - tdSql.checkData(1, 1, 'tbl_count') tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') diff --git a/tests/script/tsim/scalar/caseWhen.sim b/tests/script/tsim/scalar/caseWhen.sim index f6b9c3ff08..c10413f23c 100644 --- a/tests/script/tsim/scalar/caseWhen.sim +++ b/tests/script/tsim/scalar/caseWhen.sim @@ -519,7 +519,7 @@ if $rows != 0 then return -1 endi -sql select sum(f1),count(f1) from tba1 partition by case when f1 then f1 when 1 then 1 end; +sql select sum(f1) v,count(f1) from tba1 partition by case when f1 then f1 when 1 then 1 end order by v; if $rows != 2 then return -1 endi From b147ba2812b4671a1d1a54b9a6f977fc2977a784 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 14 Feb 2023 16:18:40 +0800 Subject: [PATCH 171/267] fix: handle insufficient resource --- source/dnode/vnode/src/tq/tq.c | 10 ++-------- source/dnode/vnode/src/tq/tqMeta.c | 26 ++++++++++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index cca241a1cf..3e13eaa6e8 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -816,7 +816,6 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL // TODO version should be assigned and refed during preprocess SWalRef* pRef = walRefCommittedVer(pTq->pVnode->pWal); if (pRef == NULL) { - ASSERT(0); return -1; } int64_t ver = pRef->refVer; @@ -837,12 +836,9 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL pHandle->execHandle.task = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols, NULL); - ASSERT(pHandle->execHandle.task); void* scanner = NULL; qExtractStreamScanner(pHandle->execHandle.task, &scanner); - ASSERT(scanner); pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner); - ASSERT(pHandle->execHandle.pExecReader); } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) { 
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode); @@ -875,8 +871,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle)); tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId); if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) { - // TODO - ASSERT(0); + return -1; } } else { /*ASSERT(pExec->consumerId == req.oldConsumerId);*/ @@ -886,8 +881,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL atomic_add_fetch_32(&pHandle->epoch, 1); taosMemoryFree(req.qmsg); if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) { - // TODO - ASSERT(0); + return -1; } // close handle } diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 3ad01e2370..34f57bc697 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -71,17 +71,14 @@ int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { int32_t tqMetaOpen(STQ* pTq) { if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaDB, 0) < 0) { - ASSERT(0); return -1; } if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pExecStore, 0) < 0) { - ASSERT(0); return -1; } if (tdbTbOpen("tq.check.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pCheckStore, 0) < 0) { - ASSERT(0); return -1; } @@ -197,40 +194,49 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { int32_t code; int32_t vlen; tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code); - ASSERT(code == 0); tqDebug("tq save %s(%d) consumer %" PRId64 " vgId:%d", pHandle->subKey, (int32_t)strlen(pHandle->subKey), pHandle->consumerId, TD_VID(pTq->pVnode)); void* buf = taosMemoryCalloc(1, vlen); if (buf == NULL) { - ASSERT(0); + return -1; } SEncoder encoder; tEncoderInit(&encoder, buf, vlen); if (tEncodeSTqHandle(&encoder, pHandle) < 0) { - ASSERT(0); + tEncoderClear(&encoder); + taosMemoryFree(buf); + return -1; } TXN* txn; if (tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { - ASSERT(0); + tEncoderClear(&encoder); + taosMemoryFree(buf); + return -1; } if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, txn) < 0) { - ASSERT(0); + tEncoderClear(&encoder); + taosMemoryFree(buf); + return -1; } if (tdbCommit(pTq->pMetaDB, txn) < 0) { - ASSERT(0); + tEncoderClear(&encoder); + taosMemoryFree(buf); + return -1; } if (tdbPostCommit(pTq->pMetaDB, txn) < 0) { - ASSERT(0); + tEncoderClear(&encoder); + taosMemoryFree(buf); + return -1; } tEncoderClear(&encoder); From e4a365c2658fcc7c5216324832fbf163fe94bc5c Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 14 Feb 2023 16:36:55 +0800 Subject: [PATCH 172/267] remove assert --- source/libs/wal/src/walWrite.c | 35 +++------------------------------- 1 file changed, 3 insertions(+), 32 deletions(-) diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index d4ea526b78..232e3e3b39 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -87,8 +87,6 @@ int32_t walApplyVer(SWal *pWal, int64_t ver) { } int32_t walCommit(SWal *pWal, int64_t ver) { - ASSERT(pWal->vers.commitVer >= pWal->vers.snapshotVer); - ASSERT(pWal->vers.commitVer <= pWal->vers.lastVer); if (ver < pWal->vers.commitVer) { return 0; } @@ -138,25 +136,21 @@ int32_t walRollback(SWal *pWal, int64_t ver) { TdFilePtr pIdxFile = 
taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); if (pIdxFile == NULL) { - ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); return -1; } int64_t idxOff = walGetVerIdxOffset(pWal, ver); code = taosLSeekFile(pIdxFile, idxOff, SEEK_SET); if (code < 0) { - ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); return -1; } // read idx file and get log file pos SWalIdxEntry entry; if (taosReadFile(pIdxFile, &entry, sizeof(SWalIdxEntry)) != sizeof(SWalIdxEntry)) { - ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); return -1; } - ASSERT(entry.ver == ver); walBuildLogName(pWal, walGetCurFileFirstVer(pWal), fnameStr); TdFilePtr pLogFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); @@ -176,24 +170,19 @@ int32_t walRollback(SWal *pWal, int64_t ver) { } // validate offset SWalCkHead head; - ASSERT(taosValidFile(pLogFile)); - int64_t size = taosReadFile(pLogFile, &head, sizeof(SWalCkHead)); + int64_t size = taosReadFile(pLogFile, &head, sizeof(SWalCkHead)); if (size != sizeof(SWalCkHead)) { - ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); return -1; } code = walValidHeadCksum(&head); - ASSERT(code == 0); if (code != 0) { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); return -1; } if (head.head.version != ver) { - ASSERT(0); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; taosThreadMutexUnlock(&pWal->mutex); return -1; @@ -202,22 +191,17 @@ int32_t walRollback(SWal *pWal, int64_t ver) { // truncate old files code = taosFtruncateFile(pLogFile, entry.offset); if (code < 0) { - ASSERT(0); terrno = TAOS_SYSTEM_ERROR(errno); taosThreadMutexUnlock(&pWal->mutex); return -1; } code = taosFtruncateFile(pIdxFile, idxOff); if (code < 0) { - ASSERT(0); terrno = TAOS_SYSTEM_ERROR(errno); taosThreadMutexUnlock(&pWal->mutex); return -1; } pWal->vers.lastVer = ver - 1; - if (pWal->vers.lastVer < pWal->vers.firstVer) { - ASSERT(pWal->vers.lastVer == pWal->vers.firstVer - 1); - } ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->lastVer = ver - 1; ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize = entry.offset; taosCloseFile(&pIdxFile); @@ -386,7 +370,7 @@ int32_t walEndSnapshot(SWal *pWal) { walBuildIdxName(pWal, pInfo->firstVer, fnameStr); wDebug("vgId:%d, wal remove file %s", pWal->cfg.vgId, fnameStr); if (taosRemoveFile(fnameStr) < 0 && errno != ENOENT) { - ASSERT(0); + goto END; } } taosArrayClear(pWal->toDeleteFiles); @@ -441,7 +425,6 @@ int32_t walRollImpl(SWal *pWal) { pWal->pIdxFile = pIdxFile; pWal->pLogFile = pLogFile; pWal->writeCur = taosArrayGetSize(pWal->fileInfoSet) - 1; - ASSERT(pWal->writeCur >= 0); pWal->lastRollSeq = walGetSeq(); @@ -458,9 +441,7 @@ END: static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) { SWalIdxEntry entry = {.ver = ver, .offset = offset}; SWalFileInfo *pFileInfo = walGetCurFileInfo(pWal); - ASSERT(pFileInfo != NULL); - ASSERT(pFileInfo->firstVer >= 0); - int64_t idxOffset = (entry.ver - pFileInfo->firstVer) * sizeof(SWalIdxEntry); + int64_t idxOffset = (entry.ver - pFileInfo->firstVer) * sizeof(SWalIdxEntry); wDebug("vgId:%d, write index, index:%" PRId64 ", offset:%" PRId64 ", at %" PRId64, pWal->cfg.vgId, ver, offset, idxOffset); @@ -476,7 +457,6 @@ static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) { if (endOffset < 0) { wFatal("vgId:%d, failed to seek end of idxfile due to %s. 
ver:%" PRId64 "", pWal->cfg.vgId, strerror(errno), ver); } - ASSERT(endOffset == idxOffset + sizeof(SWalIdxEntry) && "Offset of idx entries misaligned"); return 0; } @@ -486,9 +466,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy int64_t offset = walGetCurFileOffset(pWal); SWalFileInfo *pFileInfo = walGetCurFileInfo(pWal); - ASSERT(pFileInfo != NULL); - ASSERT(pFileInfo->firstVer != -1); pWal->writeHead.head.version = index; pWal->writeHead.head.bodyLen = bodyLen; pWal->writeHead.head.msgType = msgType; @@ -525,7 +503,6 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy // set status if (pWal->vers.firstVer == -1) { - ASSERT(index == 0); pWal->vers.firstVer = 0; } pWal->vers.lastVer = index; @@ -541,7 +518,6 @@ END: wFatal("vgId:%d, failed to ftruncate logfile to offset:%" PRId64 " during recovery due to %s", pWal->cfg.vgId, offset, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); - ASSERT(0 && "failed to recover from error"); } int64_t idxOffset = (index - pFileInfo->firstVer) * sizeof(SWalIdxEntry); @@ -549,7 +525,6 @@ END: wFatal("vgId:%d, failed to ftruncate idxfile to offset:%" PRId64 "during recovery due to %s", pWal->cfg.vgId, idxOffset, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); - ASSERT(0 && "failed to recover from error"); } return -1; } @@ -576,8 +551,6 @@ int64_t walAppendLog(SWal *pWal, int64_t index, tmsg_t msgType, SWalSyncInfo syn } } - ASSERT(pWal->pLogFile != NULL && pWal->pIdxFile != NULL && pWal->writeCur >= 0); - if (walWriteImpl(pWal, index, msgType, syncMeta, body, bodyLen) < 0) { taosThreadMutexUnlock(&pWal->mutex); return -1; @@ -614,8 +587,6 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SWalSync } } - ASSERT(pWal->pIdxFile != NULL && pWal->pLogFile != NULL && pWal->writeCur >= 0); - if (walWriteImpl(pWal, index, msgType, syncMeta, body, bodyLen) < 0) { taosThreadMutexUnlock(&pWal->mutex); return -1; From 6aa44b2fa9105eef3d57e2951cc8f58d974665d2 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 14 Feb 2023 17:54:24 +0800 Subject: [PATCH 173/267] fix:[TS638]diable tsdbReader open in streamScanOperator --- source/libs/executor/src/scanoperator.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6ce161560a..b556733254 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2297,13 +2297,14 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys if (pHandle->initTableReader) { pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER; pTSInfo->base.dataReader = NULL; - code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, pTSInfo->pResBlock, - &pTSInfo->base.dataReader, NULL); - if (code != 0) { - terrno = code; - destroyTableScanOperatorInfo(pTableScanOp); - goto _error; - } + pTaskInfo->streamInfo.lastStatus.uid = -1; +// code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, pTSInfo->pResBlock, +// &pTSInfo->base.dataReader, NULL); +// if (code != 0) { +// terrno = code; +// destroyTableScanOperatorInfo(pTableScanOp); +// goto _error; +// } } if (pHandle->initTqReader) { From 34bf2f061b3a90bbbc8bd3a7ec5f1fd6646c6fa8 Mon Sep 17 00:00:00 2001 From: xinsheng Ren <285808407@qq.com> Date: Tue, 14 Feb 2023 18:59:09 +0800 Subject: [PATCH 174/267] fix: TD-22352 sudo required to start service on mac (#19962) Co-authored-by: facetosea <25808407@qq.com> 
--- README-CN.md | 2 +- README.md | 2 +- packaging/tools/make_install.sh | 8 ++++---- packaging/tools/post.sh | 8 ++++---- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/README-CN.md b/README-CN.md index a5d239a532..b5a2564f91 100644 --- a/README-CN.md +++ b/README-CN.md @@ -276,7 +276,7 @@ sudo make install 安装成功后,可以在应用程序中双击 TDengine 图标启动服务,或者在终端中启动 TDengine 服务: ```bash -launchctl start com.tdengine.taosd +sudo launchctl start com.tdengine.taosd ``` 用户可以使用 TDengine CLI 来连接 TDengine 服务,在终端中,输入: diff --git a/README.md b/README.md index 885f91cb4e..3f7208dfb9 100644 --- a/README.md +++ b/README.md @@ -286,7 +286,7 @@ Installing from source code will also configure service management for TDengine. To start the service after installation, double-click the /applications/TDengine to start the program, or in a terminal, use: ```bash -launchctl start com.tdengine.taosd +sudo launchctl start com.tdengine.taosd ``` Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use: diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 9034fd85f5..aae3c3b593 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -531,13 +531,13 @@ function install_taosadapter_service() { } function install_service_on_launchctl() { - ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : + ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist - ${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : + ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : - ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : + ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist - ${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : + ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : } function install_service() { diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 482345dcd8..4441e0ba1f 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -512,14 +512,14 @@ function install_service_on_systemd() { function install_service_on_launchctl() { if [ -f ${install_main_dir}/service/com.taosdata.taosd.plist ]; then - ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : + ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : ${csudo}cp ${install_main_dir}/service/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist || : - ${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist || : + ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist || : fi if [ -f ${install_main_dir}/service/com.taosdata.taosadapter.plist ]; then - ${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : + ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : ${csudo}cp 
${install_main_dir}/service/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist || : - ${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist || : + ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist || : fi } From 4e9146f061a6671bf169d6c794600c342a3ddd12 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 14 Feb 2023 22:46:31 +0800 Subject: [PATCH 175/267] fix(query): fix memory leak. --- source/libs/executor/inc/executil.h | 2 +- source/libs/executor/src/executil.c | 46 ++++++++++++++++-- source/libs/executor/src/timewindowoperator.c | 47 +++++++------------ tests/system-test/2-query/unique.py | 2 +- 4 files changed, 62 insertions(+), 35 deletions(-) diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 523957b54d..e6fbcc242f 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -127,7 +127,7 @@ static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRo void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order); void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo); -void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList); +int32_t initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SHashObj* pResultHash); bool hasRemainResults(SGroupResInfo* pGroupResInfo); int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index da4d8317a8..11b176ad29 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -162,14 +162,54 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in assert(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo)); } -void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) { +int32_t initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SHashObj* pResultHash) { + int32_t itemSize = sizeof(SResKeyPos) + sizeof(uint64_t); + int32_t bufLen = taosHashGetSize(pResultHash) * itemSize; + int32_t offset = 0; + void* pIter = NULL; + + int32_t numOfRows = taosHashGetSize(pResultHash); if (pGroupResInfo->pRows != NULL) { - taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree); + taosArrayClear(pGroupResInfo->pRows); + } else { + pGroupResInfo->pRows = taosArrayInit(numOfRows, sizeof(void*)); } - pGroupResInfo->pRows = pArrayList; + if (numOfRows == 0) { + pGroupResInfo->index = 0; + return TSDB_CODE_SUCCESS; + } + + if (pGroupResInfo->pBuf == NULL) { + pGroupResInfo->pBuf = taosMemoryMalloc(bufLen); + if (pGroupResInfo->pBuf == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } else { + char* p = taosMemoryRealloc(pGroupResInfo->pBuf, bufLen); + if (p == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + pGroupResInfo->pBuf = p; + } + + while ((pIter = taosHashIterate(pResultHash, pIter)) != NULL) { + SResKeyPos* p = (SResKeyPos*) (pGroupResInfo->pBuf + offset); + SResKeyPos* p1 = pIter; + + qDebug("key:%"PRId64", gid:%"PRId64, *(uint64_t*)p1->key, p1->groupId); + + memcpy(p, p1, itemSize); + taosArrayPush(pGroupResInfo->pRows, &p); + offset += itemSize; + } + + taosSort(pGroupResInfo->pRows->pData, taosArrayGetSize(pGroupResInfo->pRows), sizeof(void*), resultrowComparAsc); pGroupResInfo->index = 0; ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo)); + + return TSDB_CODE_SUCCESS; } bool hasRemainResults(SGroupResInfo* 
pGroupResInfo) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 1fe1b9081b..4ca1593b09 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -843,19 +843,15 @@ static int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated) { } static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SHashObj* pUpdatedMap) { - SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - if (newPos == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } + char buf[sizeof(SResKeyPos) + sizeof(uint64_t)] = {0}; + SResKeyPos* pResPos = (SResKeyPos*)buf; + + *(int64_t*) pResPos->key = ts; + pResPos->groupId = groupId; + pResPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; - newPos->groupId = groupId; - newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; - *(int64_t*)newPos->key = ts; SWinKey key = {.ts = ts, .groupId = groupId}; - if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { - taosMemoryFree(newPos); - } - + taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), pResPos, sizeof(SResKeyPos) + sizeof(uint64_t)); return TSDB_CODE_SUCCESS; } @@ -2568,7 +2564,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } } - SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); +// SArray* pUpdated = taosArrayInit(4, sizeof(SResKeyPos)); + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); while (1) { @@ -2610,9 +2607,9 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { continue; } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { doDeleteWindows(pOperator, &pInfo->interval, pBlock, NULL, pUpdatedMap); - if (taosArrayGetSize(pUpdated) > 0) { - break; - } +// if (taosArrayGetSize(pUpdated) > 0) { +// break; +// } continue; } else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) { processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval); @@ -2659,14 +2656,10 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } pInfo->binfo.pRes->info.watermark = pInfo->twAggSup.maxTs; - void* pIte = NULL; - while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) { - taosArrayPush(pUpdated, pIte); - } - taosHashCleanup(pUpdatedMap); - taosArraySort(pUpdated, resultrowComparAsc); + // todo + int32_t code = initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdatedMap); - initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + taosHashCleanup(pUpdatedMap); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildPullDataBlock(pInfo->pPullWins, &pInfo->pullIndex, pInfo->pPullDataRes); @@ -4755,7 +4748,6 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); @@ -4808,13 +4800,8 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap, pInfo->pDelWins, pOperator); - void* pIte = NULL; - while ((pIte = taosHashIterate(pUpdatedMap, 
pIte)) != NULL) { - taosArrayPush(pUpdated, pIte); - } - taosArraySort(pUpdated, resultrowComparAsc); - - initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + // todo + int32_t code = initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdatedMap); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); taosHashCleanup(pUpdatedMap); diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index 6af9b130ef..9b5da50e1f 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -433,7 +433,7 @@ class TDTestCase: tdSql.checkRows(11) tdSql.checkData(1,0,0) tdSql.checkData(10,0,9) - tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )") + tdSql.query(f"select unique(t1) v from (select _rowts , t1 , tbname from {dbname}.stb1 ) order by v desc") tdSql.checkRows(2) tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) From f6ced36c87ac00a4d18373057089cc3343b2febb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 15 Feb 2023 01:28:50 +0800 Subject: [PATCH 176/267] fix(query): fix memory leak. --- source/libs/executor/inc/executil.h | 3 +- source/libs/executor/src/executil.c | 60 +++++-------------- source/libs/executor/src/timewindowoperator.c | 48 ++++++++++----- 3 files changed, 50 insertions(+), 61 deletions(-) diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index e6fbcc242f..f99c7de93d 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -45,6 +45,7 @@ typedef struct SGroupResInfo { int32_t index; SArray* pRows; // SArray char* pBuf; + bool freeItem; } SGroupResInfo; typedef struct SResultRow { @@ -127,7 +128,7 @@ static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRo void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order); void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo); -int32_t initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SHashObj* pResultHash); +void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList); bool hasRemainResults(SGroupResInfo* pGroupResInfo); int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 11b176ad29..ec2c819cf3 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -89,9 +89,20 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { return rowSize; } +static void freeEx(void* p) { + taosMemoryFree(*(void**)p); +} + void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) { taosMemoryFreeClear(pGroupResInfo->pBuf); - pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows); + if (pGroupResInfo->freeItem) { + taosArrayDestroy(pGroupResInfo->pRows); +// taosArrayDestroyEx(pGroupResInfo->pRows, freeEx); +// pGroupResInfo->freeItem = false; + pGroupResInfo->pRows = NULL; + } else { + pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows); + } pGroupResInfo->index = 0; } @@ -162,54 +173,15 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in assert(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo)); } -int32_t initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SHashObj* pResultHash) { - int32_t itemSize = sizeof(SResKeyPos) + sizeof(uint64_t); - int32_t bufLen = taosHashGetSize(pResultHash) * itemSize; - int32_t offset = 0; - void* pIter = NULL; - - int32_t 
numOfRows = taosHashGetSize(pResultHash); +void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) { if (pGroupResInfo->pRows != NULL) { - taosArrayClear(pGroupResInfo->pRows); - } else { - pGroupResInfo->pRows = taosArrayInit(numOfRows, sizeof(void*)); + taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree); } - if (numOfRows == 0) { - pGroupResInfo->index = 0; - return TSDB_CODE_SUCCESS; - } - - if (pGroupResInfo->pBuf == NULL) { - pGroupResInfo->pBuf = taosMemoryMalloc(bufLen); - if (pGroupResInfo->pBuf == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - } else { - char* p = taosMemoryRealloc(pGroupResInfo->pBuf, bufLen); - if (p == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - pGroupResInfo->pBuf = p; - } - - while ((pIter = taosHashIterate(pResultHash, pIter)) != NULL) { - SResKeyPos* p = (SResKeyPos*) (pGroupResInfo->pBuf + offset); - SResKeyPos* p1 = pIter; - - qDebug("key:%"PRId64", gid:%"PRId64, *(uint64_t*)p1->key, p1->groupId); - - memcpy(p, p1, itemSize); - taosArrayPush(pGroupResInfo->pRows, &p); - offset += itemSize; - } - - taosSort(pGroupResInfo->pRows->pData, taosArrayGetSize(pGroupResInfo->pRows), sizeof(void*), resultrowComparAsc); + pGroupResInfo->freeItem = true; + pGroupResInfo->pRows = pArrayList; pGroupResInfo->index = 0; ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo)); - - return TSDB_CODE_SUCCESS; } bool hasRemainResults(SGroupResInfo* pGroupResInfo) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 4ca1593b09..0472b90338 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -843,15 +843,19 @@ static int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated) { } static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SHashObj* pUpdatedMap) { - char buf[sizeof(SResKeyPos) + sizeof(uint64_t)] = {0}; - SResKeyPos* pResPos = (SResKeyPos*)buf; - - *(int64_t*) pResPos->key = ts; - pResPos->groupId = groupId; - pResPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; + SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (newPos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + newPos->groupId = groupId; + newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; + *(int64_t*)newPos->key = ts; SWinKey key = {.ts = ts, .groupId = groupId}; - taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), pResPos, sizeof(SResKeyPos) + sizeof(uint64_t)); + if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { + taosMemoryFree(newPos); + } + return TSDB_CODE_SUCCESS; } @@ -2564,7 +2568,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } } -// SArray* pUpdated = taosArrayInit(4, sizeof(SResKeyPos)); + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); @@ -2607,9 +2611,9 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { continue; } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { doDeleteWindows(pOperator, &pInfo->interval, pBlock, NULL, pUpdatedMap); -// if (taosArrayGetSize(pUpdated) > 0) { -// break; -// } + if (taosArrayGetSize(pUpdated) > 0) { + break; + } continue; } else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) { 
processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval); @@ -2656,10 +2660,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } pInfo->binfo.pRes->info.watermark = pInfo->twAggSup.maxTs; - // todo - int32_t code = initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdatedMap); - + void* pIte = NULL; + while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) { + taosArrayPush(pUpdated, pIte); + } taosHashCleanup(pUpdatedMap); + taosArraySort(pUpdated, resultrowComparAsc); + + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildPullDataBlock(pInfo->pPullWins, &pInfo->pullIndex, pInfo->pPullDataRes); @@ -4748,6 +4756,8 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); @@ -4800,8 +4810,14 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap, pInfo->pDelWins, pOperator); - // todo - int32_t code = initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdatedMap); + void* pIte = NULL; + while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) { + taosArrayPush(pUpdated, pIte); + } + taosArraySort(pUpdated, resultrowComparAsc); + + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); taosHashCleanup(pUpdatedMap); From 7597dec434c229b5bc2f834efc6f9dc1639cdbba Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 15 Feb 2023 09:18:44 +0800 Subject: [PATCH 177/267] fix: taosbenchmark print qps for main (#19983) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 05191138e5..5f9a44084c 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 22627d7 + GIT_TAG 7c641c5 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From c2370fcdccb3f0cba35b9192aaccd3d92ef195b6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 15 Feb 2023 09:57:05 +0800 Subject: [PATCH 178/267] fix(query): set correct flag in create dummy ctx. 
--- source/libs/executor/src/executil.c | 6 +++--- source/libs/executor/src/timewindowoperator.c | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index ec2c819cf3..a98accba77 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -96,9 +96,9 @@ static void freeEx(void* p) { void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) { taosMemoryFreeClear(pGroupResInfo->pBuf); if (pGroupResInfo->freeItem) { - taosArrayDestroy(pGroupResInfo->pRows); -// taosArrayDestroyEx(pGroupResInfo->pRows, freeEx); -// pGroupResInfo->freeItem = false; +// taosArrayDestroy(pGroupResInfo->pRows); + taosArrayDestroyEx(pGroupResInfo->pRows, freeEx); + pGroupResInfo->freeItem = false; pGroupResInfo->pRows = NULL; } else { pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 0472b90338..c5dc927bd1 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -2880,6 +2880,8 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t nums) { for (int i = 0; i < nums; i++) { pDummy[i].functionId = pCtx[i].functionId; + pDummy[i].isNotNullFunc = pCtx[i].isNotNullFunc; + pDummy[i].isPseudoFunc = pCtx[i].isPseudoFunc; } } From f78fffadf36c86ef1bc2c64a6670d6fbd01e9fb9 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Wed, 15 Feb 2023 10:17:44 +0800 Subject: [PATCH 179/267] increase the time out of win ci test --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index dd15807308..5a0e7972c6 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -387,7 +387,7 @@ pipeline { } steps { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 55, unit: 'MINUTES'){ + timeout(time: 75, unit: 'MINUTES'){ pre_test_win() pre_test_build_win() run_win_ctest() From ca5dfb28e15f87bb585c78d295b3935a9f956d11 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 15 Feb 2023 10:28:37 +0800 Subject: [PATCH 180/267] fix: showHelp return line format error --- tools/shell/src/shellAuto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 6e50a97c02..a55aaeb999 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -375,7 +375,7 @@ void showHelp() { ----- C ----- \n\ create table using tags ...\n\ create database ...\n\ - create dnode \"fqdn:port\"n\ + create dnode \"fqdn:port\" ...\n\ create index ...\n\ create mnode on dnode ;\n\ create qnode on dnode ;\n\ From 2f32b3a2d803ae8c763d0a1c5188316f6862b7cf Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 15 Feb 2023 11:17:34 +0800 Subject: [PATCH 181/267] fix: need not obtain db name if input argument include -s --- tools/shell/src/shellAuto.c | 18 ++++++++++++++---- tools/shell/src/shellEngine.c | 6 ++++-- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index a55aaeb999..b8964298cb 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -332,6 +332,7 @@ bool varMode = false; // enter var names list mode TAOS* varCon = NULL; SShellCmd* varCmd = NULL; +bool varRunOnce = false; SMatch* lastMatch = 
NULL; // save last match result int cntDel = 0; // delete byte count after next press tab @@ -637,10 +638,11 @@ bool shellAutoInit() { } // set conn -void shellSetConn(TAOS* conn) { - varCon = conn; +void shellSetConn(TAOS* conn, bool runOnce) { + varCon = conn; + varRunOnce = runOnce; // init database and stable - updateTireValue(WT_VAR_DBNAME, false); + if (!runOnce) updateTireValue(WT_VAR_DBNAME, false); } // exit shell auto funciton, shell exit call once @@ -784,6 +786,12 @@ int writeVarNames(int type, TAOS_RES* tres) { return numOfRows; } +void setThreadNull(int type) { + taosThreadMutexLock(&tiresMutex); + threads[type] = NULL; + taosThreadMutexUnlock(&tiresMutex); +} + bool firstMatchCommand(TAOS* con, SShellCmd* cmd); // // thread obtain var thread from db server @@ -799,6 +807,7 @@ void* varObtainThread(void* param) { TAOS_RES* pSql = taos_query(varCon, varSqls[type]); if (taos_errno(pSql)) { taos_free_result(pSql); + setThreadNull(type); return NULL; } @@ -814,6 +823,7 @@ void* varObtainThread(void* param) { firstMatchCommand(varCon, varCmd); } + setThreadNull(type); return NULL; } @@ -1977,7 +1987,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) { if (dealUseDB(sql)) { // change to new db - updateTireValue(WT_VAR_STABLE, false); + if (varRunOnce) updateTireValue(WT_VAR_STABLE, false); return; } diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 479c2cf39a..812f1755cd 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -1097,10 +1097,11 @@ int32_t shellExecute() { } #endif - shellSetConn(shell.conn); + bool runOnce = pArgs->commands != NULL || pArgs->file[0] != 0; + shellSetConn(shell.conn, runOnce); shellReadHistory(); - if (pArgs->commands != NULL || pArgs->file[0] != 0) { + if (runOnce) { if (pArgs->commands != NULL) { printf("%s%s\r\n", shell.info.promptHeader, pArgs->commands); char *cmd = strdup(pArgs->commands); @@ -1116,6 +1117,7 @@ int32_t shellExecute() { ws_close(shell.ws_conn); } else { #endif + taos_kill_query(shell.conn); taos_close(shell.conn); #ifdef WEBSOCKET } From 3f5d6ca9889537f51e44ef9ebec71de33b075432 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 15 Feb 2023 11:21:08 +0800 Subject: [PATCH 182/267] fix : build error --- tools/shell/inc/shellAuto.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/inc/shellAuto.h b/tools/shell/inc/shellAuto.h index b7bf5fa101..151f6da8c6 100644 --- a/tools/shell/inc/shellAuto.h +++ b/tools/shell/inc/shellAuto.h @@ -28,7 +28,7 @@ void pressOtherKey(char c); bool shellAutoInit(); // set conn -void shellSetConn(TAOS* conn); +void shellSetConn(TAOS* conn, bool runOnce); // exit shell auto funciton, shell exit call once void shellAutoExit(); From 128880c1cfe9c277bd884cd8fcde8e1b90686d4e Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 15 Feb 2023 11:27:44 +0800 Subject: [PATCH 183/267] add taos_close --- tools/shell/src/shellEngine.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 812f1755cd..9b842a9e6f 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -1117,7 +1117,6 @@ int32_t shellExecute() { ws_close(shell.ws_conn); } else { #endif - taos_kill_query(shell.conn); taos_close(shell.conn); #ifdef WEBSOCKET } @@ -1162,5 +1161,8 @@ int32_t shellExecute() { taosThreadJoin(spid, NULL); shellCleanupHistory(); + taos_kill_query(shell.conn); + taos_close(shell.conn); + 
return 0; } From 6676ee7de84192ea5351a3855b9e577a96cc2bce Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 15 Feb 2023 11:34:01 +0800 Subject: [PATCH 184/267] fix: run once not check right --- tools/shell/src/shellAuto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index b8964298cb..f58a5d0931 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1987,7 +1987,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) { if (dealUseDB(sql)) { // change to new db - if (varRunOnce) updateTireValue(WT_VAR_STABLE, false); + if (!varRunOnce) updateTireValue(WT_VAR_STABLE, false); return; } From 3a6d32ae0b293a9418a12ac6d785333c650d7574 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 15 Feb 2023 12:57:09 +0800 Subject: [PATCH 185/267] refactor: enable the log. --- source/dnode/mgmt/node_util/inc/dmUtil.h | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 2609422ccc..784bb3c5e1 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -53,20 +53,12 @@ extern "C" { #define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND ", DEBUG_DEBUG, dDebugFlag, __VA_ARGS__); }} #define dTrace(...) { if (dDebugFlag & DEBUG_TRACE) { taosPrintLog("DND ", DEBUG_TRACE, dDebugFlag, __VA_ARGS__); }} -//#define dGFatal(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dFatal(param ", gtid:%s", __VA_ARGS__, buf);} -//#define dGError(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dError(param ", gtid:%s", __VA_ARGS__, buf);} -//#define dGWarn(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dWarn (param ", gtid:%s", __VA_ARGS__, buf);} -//#define dGInfo(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dInfo (param ", gtid:%s", __VA_ARGS__, buf);} -//#define dGDebug(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dDebug(param ", gtid:%s", __VA_ARGS__, buf);} -//#define dGTrace(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dTrace(param ", gtid:%s", __VA_ARGS__, buf);} - -// TODO: disable it temporarily -#define dGFatal(param, ...) -#define dGError(param, ...) -#define dGWarn(param, ...) -#define dGInfo(param, ...) -#define dGDebug(param, ...) -#define dGTrace(param, ...) +#define dGFatal(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dFatal(param ", gtid:%s", __VA_ARGS__, buf);} +#define dGError(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dError(param ", gtid:%s", __VA_ARGS__, buf);} +#define dGWarn(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dWarn (param ", gtid:%s", __VA_ARGS__, buf);} +#define dGInfo(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dInfo (param ", gtid:%s", __VA_ARGS__, buf);} +#define dGDebug(param, ...) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dDebug(param ", gtid:%s", __VA_ARGS__, buf);} +#define dGTrace(param, ...) 
{ char buf[40] = {0}; TRACE_TO_STR(trace, buf); dTrace(param ", gtid:%s", __VA_ARGS__, buf);} // clang-format on From 09e5ca7a0f8a3574efc0dc9fbabddf039003fac1 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 15:34:55 +0800 Subject: [PATCH 186/267] handle fastfail --- source/libs/transport/src/transCli.c | 85 +++++++++++++--------------- source/libs/transport/src/transSvr.c | 4 +- 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 111742a6f4..d8ea21c335 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -579,7 +579,7 @@ static void addConnToPool(void* pool, SCliConn* conn) { QUEUE_PUSH(&conn->list->conns, &conn->q); conn->list->size += 1; - if (conn->list->size >= 50) { + if (conn->list->size >= 250) { STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg)); arg->param1 = conn; arg->param2 = thrd; @@ -882,47 +882,50 @@ void cliSend(SCliConn* pConn) { _RETURN: return; } +static void cliHandleFastFail(SCliConn* pConn, int status) { + SCliThrd* pThrd = pConn->hostThrd; + STrans* pTransInst = pThrd->pTransInst; + SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); + STraceId* trace = &pMsg->msg.info.traceId; + + tGError("%s msg %s failed to send, conn %p failed to connect to %s:%d, reason: %s", CONN_GET_INST_LABEL(pConn), + pMsg ? TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, pConn->port, uv_strerror(status)); + uv_timer_stop(pConn->timer); + pConn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &pConn->timer); + pConn->timer = NULL; + + if (pMsg != NULL && REQUEST_NO_RESP(&pMsg->msg) && + (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { + char* ip = pConn->ip; + uint32_t port = pConn->port; + char key[TSDB_FQDN_LEN + 64] = {0}; + CONN_CONSTRUCT_HASH_KEY(key, ip, port); + + SFailFastItem* item = taosHashGet(pThrd->failFastCache, key, strlen(key)); + int64_t cTimestamp = taosGetTimestampMs(); + if (item != NULL) { + int32_t elapse = cTimestamp - item->timestamp; + if (elapse >= 0 && elapse <= pTransInst->failFastInterval) { + item->count++; + } else { + item->count = 1; + item->timestamp = cTimestamp; + } + } else { + SFailFastItem item = {.count = 1, .timestamp = cTimestamp}; + taosHashPut(pThrd->failFastCache, key, strlen(key), &item, sizeof(SFailFastItem)); + } + } + cliHandleExcept(pConn); +} void cliConnCb(uv_connect_t* req, int status) { SCliConn* pConn = req->data; SCliThrd* pThrd = pConn->hostThrd; - if (pConn->timer != NULL) { - uv_timer_stop(pConn->timer); - pConn->timer->data = NULL; - taosArrayPush(pThrd->timerList, &pConn->timer); - pConn->timer = NULL; - } - if (status != 0) { - SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); - STrans* pTransInst = pThrd->pTransInst; - - tError("%s msg %s failed to send, conn %p failed to connect to %s:%d, reason: %s", CONN_GET_INST_LABEL(pConn), - pMsg ? 
TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, pConn->port, uv_strerror(status)); - if (pMsg != NULL && REQUEST_NO_RESP(&pMsg->msg) && - (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { - char* ip = pConn->ip; - uint32_t port = pConn->port; - char key[TSDB_FQDN_LEN + 64] = {0}; - CONN_CONSTRUCT_HASH_KEY(key, ip, port); - - SFailFastItem* item = taosHashGet(pThrd->failFastCache, key, strlen(key)); - int64_t cTimestamp = taosGetTimestampMs(); - if (item != NULL) { - int32_t elapse = cTimestamp - item->timestamp; - if (elapse >= 0 && elapse <= pTransInst->failFastInterval) { - item->count++; - } else { - item->count = 1; - item->timestamp = cTimestamp; - } - } else { - SFailFastItem item = {.count = 1, .timestamp = cTimestamp}; - taosHashPut(pThrd->failFastCache, key, strlen(key), &item, sizeof(SFailFastItem)); - } - } - cliHandleExcept(pConn); + cliHandleFastFail(pConn, status); return; } struct sockaddr peername, sockname; @@ -1163,15 +1166,7 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); if (ret != 0) { - tGError("%s conn %p failed to connect to %s:%d, reason:%s", pTransInst->label, conn, conn->ip, conn->port, - uv_err_name(ret)); - - uv_timer_stop(conn->timer); - conn->timer->data = NULL; - taosArrayPush(pThrd->timerList, &conn->timer); - conn->timer = NULL; - - cliHandleExcept(conn); + cliHandleFastFail(conn, ret); return; } uv_timer_start(conn->timer, cliConnTimeout, TRANS_CONN_TIMEOUT, 0); diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index fa8929f7d9..eecd260d35 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -246,11 +246,11 @@ static bool uvHandleReq(SSvrConn* pConn) { } } else { if (cost >= EXCEPTION_LIMIT_US) { - tGWarn("%s conn %p %s received from %s, local info:%s, len:%d, resp:%d, code:%d, cost:%dus, recv exception", + tGWarn("%s conn %p %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus, recv exception", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, pHead->noResp, transMsg.code, (int)(cost)); } else { - tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, resp:%d, code:%d, cost:%dus", + tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, pHead->noResp, transMsg.code, (int)(cost)); } From 957ed637b042f19c407c661d375193f933092bae Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 15 Feb 2023 16:49:21 +0800 Subject: [PATCH 187/267] fix: rsp type mismatch after link broken is processed --- source/libs/scheduler/inc/schInt.h | 1 + source/libs/scheduler/src/schRemote.c | 8 ++++---- source/libs/scheduler/src/schTask.c | 5 ++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index e8216fcd7c..14eb21565b 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -230,6 +230,7 @@ typedef struct SSchTask { SSchRedirectCtx redirectCtx; // task redirect context bool waitRetry; // wait for retry int32_t execId; // task current execute index + int32_t failedExecId; // last failed task execute index SSchLevel *level; // level SRWLatch planLock; // task update plan lock SSubplan *plan; // subplan diff --git 
a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index b6de9383d7..9c4ed65dd2 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -34,12 +34,12 @@ int32_t schValidateRspMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) { if (lastMsgType != reqMsgType) { SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } if (taskStatus != JOB_TASK_STATUS_PART_SUCC) { SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } return TSDB_CODE_SUCCESS; @@ -60,13 +60,13 @@ int32_t schValidateRspMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) { if (lastMsgType != reqMsgType) { SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } if (taskStatus != JOB_TASK_STATUS_EXEC) { SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } return TSDB_CODE_SUCCESS; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 8e60222ca6..bdab739327 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -64,6 +64,7 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel * pTask->plan = pPlan; pTask->level = pLevel; pTask->execId = -1; + pTask->failedExecId = -2; pTask->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC; pTask->taskId = schGenTaskId(); @@ -166,7 +167,7 @@ int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, v schUpdateTaskExecNode(pJob, pTask, handle, execId); - if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it + if ((execId != pTask->execId || execId <= pTask->failedExecId) || pTask->waitRetry) { // ignore it SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry); SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR); @@ -182,6 +183,8 @@ int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) return TSDB_CODE_SCH_IGNORE_ERROR; } + pTask->failedExecId = pTask->execId; + int8_t jobStatus = 0; if (schJobNeedToStop(pJob, &jobStatus)) { SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(jobStatus)); From 009b6a61fa7d252d2cdc8a2c8bdba12ca776f376 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 17:25:37 +0800 Subject: [PATCH 188/267] enh: limit tcp session between cluster --- include/util/taoserror.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index d8eecdfc64..75bdf81a27 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -67,6 +67,10 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) // #define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected" #define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 
0x0022) // + + + //common & util #define TSDB_CODE_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) // From e44704b20ed534c2c204029fb3b5f8bc151489d2 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 17:26:11 +0800 Subject: [PATCH 189/267] opt: opt tag index --- include/libs/transport/trpc.h | 2 ++ source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 ++ source/libs/transport/inc/transportInt.h | 1 + source/libs/transport/src/trans.c | 1 + source/libs/transport/src/transCli.c | 19 ++++++++++++++++++- source/util/src/terror.c | 1 + 6 files changed, 25 insertions(+), 1 deletion(-) diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index de3c2a9f52..ff68b72fc2 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -112,6 +112,8 @@ typedef struct SRpcInit { // fail fast fp RpcFFfp ffp; + int32_t connLimit; + void *parent; } SRpcInit; diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index dcb63f6524..dc539ac15e 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -284,6 +284,8 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; + rpcInit.connLimit = 7500; + pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { dError("failed to init dnode rpc client"); diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 2db4a72795..92477bb514 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -64,6 +64,7 @@ typedef struct { void (*destroyFp)(void* ahandle); bool (*failFastFp)(tmsg_t msgType); + int32_t connLimit; int index; void* parent; void* tcphandle; // returned handle from TCP initialization diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 47b1ac5ca7..61ca9743b3 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -67,6 +67,7 @@ void* rpcOpen(const SRpcInit* pInit) { pRpc->startTimer = pInit->tfp; pRpc->destroyFp = pInit->dfp; pRpc->failFastFp = pInit->ffp; + pRpc->connLimit = pInit->connLimit; pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS : pInit->numOfThreads; if (pRpc->numOfThreads <= 0) { diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index d8ea21c335..dfbc8a5af2 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -80,6 +80,7 @@ typedef struct SCliThrd { uint64_t nextTimeout; // next timeout void* pTransInst; // + int connCount; void (*destroyAhandleFp)(void* ahandle); SHashObj* fqdn2ipCache; SCvtAddr cvtAddr; @@ -671,7 +672,6 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) { conn->stream = (uv_stream_t*)taosMemoryMalloc(sizeof(uv_tcp_t)); uv_tcp_init(pThrd->loop, (uv_tcp_t*)(conn->stream)); conn->stream->data = conn; - // transSetConnOption((uv_tcp_t*)conn->stream); uv_timer_t* timer = taosArrayGetSize(pThrd->timerList) > 0 ? 
*(uv_timer_t**)taosArrayPop(pThrd->timerList) : NULL; if (timer == NULL) { @@ -694,6 +694,7 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) { conn->broken = 0; transRefCliHandle(conn); + atomic_add_fetch_32(&pThrd->connCount, 1); allocConnRef(conn, false); return conn; @@ -738,6 +739,8 @@ static void cliDestroy(uv_handle_t* handle) { conn->timer = NULL; } + atomic_sub_fetch_32(&pThrd->connCount, 1); + transReleaseExHandle(transGetRefMgt(), conn->refId); transRemoveExHandle(transGetRefMgt(), conn->refId); taosMemoryFree(conn->ip); @@ -1861,6 +1864,13 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran return TSDB_CODE_RPC_BROKEN_LINK; } + // read only + if (pTransInst->connLimit != 0 && atomic_load_32(&pThrd->connCount) >= pTransInst->connLimit) { + transFreeMsg(pReq->pCont); + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); + return TSDB_CODE_RPC_MAX_SESSIONS; + } + TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); STransConnCtx* pCtx = taosMemoryCalloc(1, sizeof(STransConnCtx)); @@ -1902,6 +1912,13 @@ int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMs transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return TSDB_CODE_RPC_BROKEN_LINK; } + // not limit sync req + // read only + // if (pTransInst->connLimit != 0 && atomic_load_32(&pThrd->connCount) >= pTransInst->connLimit) { + // transFreeMsg(pReq->pCont); + // transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); + // return TSDB_CODE_RPC_MAX_SESSIONS; + //} tsem_t* sem = taosMemoryCalloc(1, sizeof(tsem_t)); tsem_init(sem, 0, 0); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 57b1998155..c07fa88af5 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -52,6 +52,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_BROKEN_LINK, "Conn is broken") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_TIMEOUT, "Conn read timeout") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED, "some vnode/qnode/mnode(s) out of service") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MAX_SESSIONS, "rpc open too many session") //common & util TAOS_DEFINE_ERROR(TSDB_CODE_TIME_UNSYNCED, "Client and server's time is not synchronized") From 43966abbc6f29f5d5fe2572c78c3cd637374a96b Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Wed, 15 Feb 2023 18:39:05 +0800 Subject: [PATCH 190/267] feat:add disable stream option --- include/common/tglobal.h | 2 ++ source/common/src/tglobal.c | 6 ++++++ source/dnode/vnode/src/tq/tqPush.c | 2 +- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 2331f0b23c..5a0c0e0777 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -159,6 +159,8 @@ extern int32_t tsUptimeInterval; extern int32_t tsRpcRetryLimit; extern int32_t tsRpcRetryInterval; +extern bool tsDisableStream; + // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d4849650e6..46c2a32535 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -189,6 +189,7 @@ int32_t tsGrantHBInterval = 60; int32_t tsUptimeInterval = 300; // seconds char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits char tsUdfdLdLibPath[512] = ""; +bool tsDisableStream = true; 
#ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { @@ -469,6 +470,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "udfdResFuncs", tsUdfdResFuncs, 0) != 0) return -1; if (cfgAddString(pCfg, "udfdLdLibPath", tsUdfdLdLibPath, 0) != 0) return -1; + if (cfgAddBool(pCfg, "disableStream", tsDisableStream, 0) != 0) return -1; + GRANT_CFG_ADD; return 0; } @@ -770,6 +773,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { if (tsQueryBufferSize >= 0) { tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL; } + + tsDisableStream = cfgGetItem(pCfg, "disableStream")->bval; + GRANT_CFG_GET; return 0; } diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index f89bc20362..b9df3e5826 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -307,7 +307,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) taosWUnLockLatch(&pTq->pushLock); } - if (vnodeIsRoleLeader(pTq->pVnode)) { + if (!tsDisableStream && vnodeIsRoleLeader(pTq->pVnode)) { if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0; if (msgType == TDMT_VND_SUBMIT) { void* data = taosMemoryMalloc(msgLen); From e681bb02cd161a6dd525fe8e732715736b05a727 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 15 Feb 2023 19:23:49 +0800 Subject: [PATCH 191/267] fix(query): set the initial number of tables values. --- source/libs/executor/src/executil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index a98accba77..903151b254 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1086,8 +1086,8 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t); char* pPayload = taosMemoryMalloc(size); + *(int32_t*)pPayload = numOfTables; if (numOfTables > 0) { - *(int32_t*)pPayload = numOfTables; memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t)); } From b8dfc7144624fcfbbea1ae8f0c3c35a2bafc4bd7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 19:24:48 +0800 Subject: [PATCH 192/267] handle too many session --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 4 +- source/libs/transport/src/transCli.c | 93 +++++++++++-------- 2 files changed, 57 insertions(+), 40 deletions(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index dc539ac15e..bd08eda954 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -280,11 +280,11 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.retryMaxInterval = tsRedirectMaxPeriod; rpcInit.retryMaxTimouet = tsMaxRetryWaitTime; - rpcInit.failFastInterval = 1000; // interval threshold(ms) + rpcInit.failFastInterval = 5000; // interval threshold(ms) rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; - rpcInit.connLimit = 7500; + rpcInit.connLimit = 3000; pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index dfbc8a5af2..4d7b6b5b2b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -40,9 +40,8 @@ typedef struct SCliConn { bool broken; // link broken or not ConnStatus status; // - int64_t refId; - char* ip; - uint32_t port; + int64_t 
refId; + char* ip; SDelayTask* task; @@ -86,6 +85,7 @@ typedef struct SCliThrd { SCvtAddr cvtAddr; SHashObj* failFastCache; + SHashObj* connLimit; SCliMsg* stopMsg; @@ -570,10 +570,8 @@ static void addConnToPool(void* pool, SCliConn* conn) { conn->status = ConnInPool; if (conn->list == NULL) { - char key[TSDB_FQDN_LEN + 64] = {0}; - CONN_CONSTRUCT_HASH_KEY(key, conn->ip, conn->port); tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap); - conn->list = taosHashGet((SHashObj*)pool, key, strlen(key)); + conn->list = taosHashGet((SHashObj*)pool, conn->ip, strlen(conn->ip)); } else { tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap); } @@ -751,6 +749,11 @@ static void cliDestroy(uv_handle_t* handle) { tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn); transReqQueueClear(&conn->wreqQueue); transDestroyBuffer(&conn->readBuf); + + int32_t* oVal = taosHashGet(pThrd->connLimit, conn->ip, strlen(conn->ip)); + int32_t nVal = oVal == NULL ? 0 : (*oVal) - 1; + taosHashPut(pThrd->connLimit, conn->ip, strlen(conn->ip), &nVal, sizeof(nVal)); + taosMemoryFree(conn); } static bool cliHandleNoResp(SCliConn* conn) { @@ -892,8 +895,8 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); STraceId* trace = &pMsg->msg.info.traceId; - tGError("%s msg %s failed to send, conn %p failed to connect to %s:%d, reason: %s", CONN_GET_INST_LABEL(pConn), - pMsg ? TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, pConn->port, uv_strerror(status)); + tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + pMsg ? TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, uv_strerror(status)); uv_timer_stop(pConn->timer); pConn->timer->data = NULL; taosArrayPush(pThrd->timerList, &pConn->timer); @@ -901,12 +904,7 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { if (pMsg != NULL && REQUEST_NO_RESP(&pMsg->msg) && (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { - char* ip = pConn->ip; - uint32_t port = pConn->port; - char key[TSDB_FQDN_LEN + 64] = {0}; - CONN_CONSTRUCT_HASH_KEY(key, ip, port); - - SFailFastItem* item = taosHashGet(pThrd->failFastCache, key, strlen(key)); + SFailFastItem* item = taosHashGet(pThrd->failFastCache, pConn->ip, strlen(pConn->ip)); int64_t cTimestamp = taosGetTimestampMs(); if (item != NULL) { int32_t elapse = cTimestamp - item->timestamp; @@ -918,7 +916,7 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { } } else { SFailFastItem item = {.count = 1, .timestamp = cTimestamp}; - taosHashPut(pThrd->failFastCache, key, strlen(key), &item, sizeof(SFailFastItem)); + taosHashPut(pThrd->failFastCache, pConn->ip, strlen(pConn->ip), &item, sizeof(SFailFastItem)); } } cliHandleExcept(pConn); @@ -931,9 +929,13 @@ void cliConnCb(uv_connect_t* req, int status) { cliHandleFastFail(pConn, status); return; } - struct sockaddr peername, sockname; - int addrlen = sizeof(peername); + int32_t* oVal = taosHashGet(pThrd->connLimit, pConn->ip, strlen(pConn->ip)); + int32_t nVal = oVal == NULL ? 
0 : (*oVal) + 1; + taosHashPut(pThrd->connLimit, pConn->ip, strlen(pConn->ip), &nVal, sizeof(nVal)); + + struct sockaddr peername, sockname; + int addrlen = sizeof(peername); uv_tcp_getpeername((uv_tcp_t*)pConn->stream, &peername, &addrlen); transSockInfo2Str(&peername, pConn->dst); @@ -1068,6 +1070,24 @@ static FORCE_INLINE void cliUpdateFqdnCache(SHashObj* cache, char* fqdn) { return; } +static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, SCliMsg* pMsg) { + STrans* pTransInst = pThrd->pTransInst; + + STransConnCtx* pCtx = pMsg->ctx; + char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + int32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + + char key[TSDB_FQDN_LEN + 64] = {0}; + CONN_CONSTRUCT_HASH_KEY(key, ip, port); + + int32_t* val = taosHashGet(pThrd->connLimit, key, strlen(key)); + if (val == NULL) return 0; + + if (*val >= pTransInst->connLimit) { + return -1; + } + return 0; +} void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { STrans* pTransInst = pThrd->pTransInst; STransConnCtx* pCtx = pMsg->ctx; @@ -1091,7 +1111,6 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { if (item != NULL) { int32_t elapse = (int32_t)(taosGetTimestampMs() - item->timestamp); if (item->count >= pTransInst->failFastThreshold && (elapse >= 0 && elapse <= pTransInst->failFastInterval)) { - STraceId* trace = &(pMsg->msg.info.traceId); tGTrace("%s, msg %s cancel to send, reason: failed to connect %s:%d: count: %d, at %d", pTransInst->label, TMSG_INFO(pMsg->msg.msgType), ip, port, item->count, elapse); destroyCmsg(pMsg); @@ -1113,6 +1132,13 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { return; } + if (conn == NULL && REQUEST_NO_RESP(&pMsg->msg) && 0 != cliPreCheckSessionLimit(pThrd, pMsg)) { + tGTrace("%s, msg %s cancel to send, reason: %s", pTransInst->label, TMSG_INFO(pMsg->msg.msgType), + tstrerror(TSDB_CODE_RPC_MAX_SESSIONS)); + destroyCmsg(pMsg); + return; + } + if (conn != NULL) { transCtxMerge(&conn->ctx, &pCtx->appCtx); transQueuePush(&conn->cliMsgs, pMsg); @@ -1126,10 +1152,14 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { transCtxMerge(&conn->ctx, &pCtx->appCtx); transQueuePush(&conn->cliMsgs, pMsg); - conn->ip = strdup(EPSET_GET_INUSE_IP(&pCtx->epSet)); - conn->port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + char key[TSDB_FQDN_LEN + 64] = {0}; + char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + uint16_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + CONN_CONSTRUCT_HASH_KEY(key, ip, port); - uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, conn->ip); + conn->ip = strdup(key); + + uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, EPSET_GET_INUSE_IP(&pCtx->epSet)); if (ipaddr == 0xffffffff) { uv_timer_stop(conn->timer); conn->timer->data = NULL; @@ -1143,9 +1173,9 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { struct sockaddr_in addr; addr.sin_family = AF_INET; addr.sin_addr.s_addr = ipaddr; - addr.sin_port = (uint16_t)htons((uint16_t)conn->port); + addr.sin_port = (uint16_t)htons(port); - tGTrace("%s conn %p try to connect to %s:%d", pTransInst->label, conn, conn->ip, conn->port); + tGTrace("%s conn %p try to connect to %s", pTransInst->label, conn, conn->ip); int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); if (fd == -1) { tGError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, @@ -1199,7 +1229,6 @@ static void cliAsyncCb(uv_async_t* handle) { if (count >= 2) { tTrace("cli process batch size:%d", count); } - // if (!uv_is_active((uv_handle_t*)pThrd->prepare)) uv_prepare_start(pThrd->prepare, cliPrepareCb); if 
(pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd); } @@ -1412,6 +1441,7 @@ static SCliThrd* createThrdObj(void* trans) { pThrd->destroyAhandleFp = pTransInst->destroyFp; pThrd->fqdn2ipCache = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pThrd->failFastCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pThrd->connLimit = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pThrd->quit = false; return pThrd; @@ -1440,6 +1470,7 @@ static void destroyThrdObj(SCliThrd* pThrd) { taosMemoryFree(pThrd->loop); taosHashCleanup(pThrd->fqdn2ipCache); taosHashCleanup(pThrd->failFastCache); + taosHashCleanup(pThrd->connLimit); taosMemoryFree(pThrd); } @@ -1864,13 +1895,6 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran return TSDB_CODE_RPC_BROKEN_LINK; } - // read only - if (pTransInst->connLimit != 0 && atomic_load_32(&pThrd->connCount) >= pTransInst->connLimit) { - transFreeMsg(pReq->pCont); - transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); - return TSDB_CODE_RPC_MAX_SESSIONS; - } - TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); STransConnCtx* pCtx = taosMemoryCalloc(1, sizeof(STransConnCtx)); @@ -1912,13 +1936,6 @@ int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMs transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return TSDB_CODE_RPC_BROKEN_LINK; } - // not limit sync req - // read only - // if (pTransInst->connLimit != 0 && atomic_load_32(&pThrd->connCount) >= pTransInst->connLimit) { - // transFreeMsg(pReq->pCont); - // transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); - // return TSDB_CODE_RPC_MAX_SESSIONS; - //} tsem_t* sem = taosMemoryCalloc(1, sizeof(tsem_t)); tsem_init(sem, 0, 0); From 717392782a93ffbd0c03805c4ca76347abfeded2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 15 Feb 2023 19:50:56 +0800 Subject: [PATCH 193/267] fix(query): remove the invalid free. 
--- source/dnode/vnode/src/tsdb/tsdbCache.c | 4 ++-- source/libs/executor/src/executil.c | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index ec0944193a..10ab541978 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -17,7 +17,7 @@ static int32_t tsdbOpenBICache(STsdb *pTsdb) { int32_t code = 0; - SLRUCache *pCache = taosLRUCacheInit(5 * 1024 * 1024, -1, .5); + SLRUCache *pCache = taosLRUCacheInit(5 * 1024 * 1024, 1, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; @@ -48,7 +48,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { SLRUCache *pCache = NULL; size_t cfgCapacity = pTsdb->pVnode->config.cacheLastSize * 1024 * 1024; - pCache = taosLRUCacheInit(cfgCapacity, -1, .5); + pCache = taosLRUCacheInit(cfgCapacity, 1, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 903151b254..040e67713d 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1092,7 +1092,6 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); - taosMemoryFree(pPayload); } } From f22d07319f36669143489e5d5e0d20e0a21d25c0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 20:06:04 +0800 Subject: [PATCH 194/267] handle too many session --- include/libs/transport/trpc.h | 3 +- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 3 +- source/libs/transport/inc/transportInt.h | 4 ++- source/libs/transport/src/trans.c | 3 +- source/libs/transport/src/transCli.c | 32 +++++++++++++------ 5 files changed, 32 insertions(+), 13 deletions(-) diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index ff68b72fc2..5787f41772 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -112,7 +112,8 @@ typedef struct SRpcInit { // fail fast fp RpcFFfp ffp; - int32_t connLimit; + int32_t connLimitNum; + int32_t connLimitLock; void *parent; } SRpcInit; diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index bd08eda954..d23e67b195 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -284,7 +284,8 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; - rpcInit.connLimit = 3000; + rpcInit.connLimitNum = 3000; + rpcInit.connLimitLock = 1; pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 92477bb514..1fe32955b9 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -64,7 +64,9 @@ typedef struct { void (*destroyFp)(void* ahandle); bool (*failFastFp)(tmsg_t msgType); - int32_t connLimit; + int32_t connLimitNum; + int8_t connLimitLock; // 0: no lock. 1. 
lock + int index; void* parent; void* tcphandle; // returned handle from TCP initialization diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 61ca9743b3..6eec54b370 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -67,7 +67,8 @@ void* rpcOpen(const SRpcInit* pInit) { pRpc->startTimer = pInit->tfp; pRpc->destroyFp = pInit->dfp; pRpc->failFastFp = pInit->ffp; - pRpc->connLimit = pInit->connLimit; + pRpc->connLimitNum = pInit->connLimitNum; + pRpc->connLimitLock = pInit->connLimitLock; pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS : pInit->numOfThreads; if (pRpc->numOfThreads <= 0) { diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 4d7b6b5b2b..a1b4766e80 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -85,7 +85,7 @@ typedef struct SCliThrd { SCvtAddr cvtAddr; SHashObj* failFastCache; - SHashObj* connLimit; + SHashObj* connLimitCache; SCliMsg* stopMsg; @@ -750,9 +750,9 @@ static void cliDestroy(uv_handle_t* handle) { transReqQueueClear(&conn->wreqQueue); transDestroyBuffer(&conn->readBuf); - int32_t* oVal = taosHashGet(pThrd->connLimit, conn->ip, strlen(conn->ip)); + int32_t* oVal = taosHashGet(pThrd->connLimitCache, conn->ip, strlen(conn->ip)); int32_t nVal = oVal == NULL ? 0 : (*oVal) - 1; - taosHashPut(pThrd->connLimit, conn->ip, strlen(conn->ip), &nVal, sizeof(nVal)); + taosHashPut(pThrd->connLimitCache, conn->ip, strlen(conn->ip), &nVal, sizeof(nVal)); taosMemoryFree(conn); } @@ -930,9 +930,9 @@ void cliConnCb(uv_connect_t* req, int status) { return; } - int32_t* oVal = taosHashGet(pThrd->connLimit, pConn->ip, strlen(pConn->ip)); + int32_t* oVal = taosHashGet(pThrd->connLimitCache, pConn->ip, strlen(pConn->ip)); int32_t nVal = oVal == NULL ? 0 : (*oVal) + 1; - taosHashPut(pThrd->connLimit, pConn->ip, strlen(pConn->ip), &nVal, sizeof(nVal)); + taosHashPut(pThrd->connLimitCache, pConn->ip, strlen(pConn->ip), &nVal, sizeof(nVal)); struct sockaddr peername, sockname; int addrlen = sizeof(peername); @@ -1080,10 +1080,10 @@ static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, SCliMsg* pMsg) { char key[TSDB_FQDN_LEN + 64] = {0}; CONN_CONSTRUCT_HASH_KEY(key, ip, port); - int32_t* val = taosHashGet(pThrd->connLimit, key, strlen(key)); + int32_t* val = taosHashGet(pThrd->connLimitCache, key, strlen(key)); if (val == NULL) return 0; - if (*val >= pTransInst->connLimit) { + if (*val >= pTransInst->connLimitNum) { return -1; } return 0; @@ -1441,7 +1441,8 @@ static SCliThrd* createThrdObj(void* trans) { pThrd->destroyAhandleFp = pTransInst->destroyFp; pThrd->fqdn2ipCache = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pThrd->failFastCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - pThrd->connLimit = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pThrd->connLimitCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, + pTransInst->connLimitLock == 0 ? 
HASH_NO_LOCK : HASH_ENTRY_LOCK); pThrd->quit = false; return pThrd; @@ -1470,7 +1471,7 @@ static void destroyThrdObj(SCliThrd* pThrd) { taosMemoryFree(pThrd->loop); taosHashCleanup(pThrd->fqdn2ipCache); taosHashCleanup(pThrd->failFastCache); - taosHashCleanup(pThrd->connLimit); + taosHashCleanup(pThrd->connLimitCache); taosMemoryFree(pThrd); } @@ -1894,6 +1895,19 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return TSDB_CODE_RPC_BROKEN_LINK; } + if (pTransInst->connLimitNum > 0 && REQUEST_NO_RESP(pReq)) { + char key[TSDB_FQDN_LEN + 64] = {0}; + char* ip = EPSET_GET_INUSE_IP((SEpSet*)pEpSet); + uint16_t port = EPSET_GET_INUSE_PORT((SEpSet*)pEpSet); + CONN_CONSTRUCT_HASH_KEY(key, ip, port); + + int32_t* val = taosHashGet(pThrd->connLimitCache, key, strlen(key)); + if (val != NULL && *val >= pTransInst->connLimitNum) { + transFreeMsg(pReq->pCont); + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); + return TSDB_CODE_RPC_BROKEN_LINK; + } + } TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); From 9c050c0ea626254e4bd45f8a202ee655383c59a0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 20:15:34 +0800 Subject: [PATCH 195/267] handle too many session --- source/libs/transport/src/transCli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index a1b4766e80..2330b871d7 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1905,7 +1905,7 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran if (val != NULL && *val >= pTransInst->connLimitNum) { transFreeMsg(pReq->pCont); transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); - return TSDB_CODE_RPC_BROKEN_LINK; + return TSDB_CODE_RPC_MAX_SESSIONS; } } From 4a8d5355a779891144aed07011a1c4d48fdb60a9 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Wed, 15 Feb 2023 20:21:41 +0800 Subject: [PATCH 196/267] fix: parse multi-line sql include tab key --- tools/shell/src/shellAuto.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index f58a5d0931..3589558d64 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -1735,10 +1735,12 @@ _return: // main key press tab void pressTabKey(SShellCmd* cmd) { - // check + // check empty tab key if (cmd->commandSize == 0) { - // empty - showHelp(); + // have multi line tab key + if(cmd->bufferSize == 0) { + showHelp(); + } shellShowOnScreen(cmd); return; } From 015dd658e67c4141051a15070456d68e382ce822 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 21:16:08 +0800 Subject: [PATCH 197/267] fix: invalid read --- source/libs/transport/src/transCli.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 2330b871d7..22bcb93218 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -736,6 +736,9 @@ static void cliDestroy(uv_handle_t* handle) { conn->timer->data = NULL; conn->timer = NULL; } + int32_t* oVal = taosHashGet(pThrd->connLimitCache, conn->ip, strlen(conn->ip)); + int32_t nVal = oVal == NULL ? 
0 : (*oVal) - 1; + taosHashPut(pThrd->connLimitCache, conn->ip, strlen(conn->ip), &nVal, sizeof(nVal)); atomic_sub_fetch_32(&pThrd->connCount, 1); @@ -750,10 +753,6 @@ static void cliDestroy(uv_handle_t* handle) { transReqQueueClear(&conn->wreqQueue); transDestroyBuffer(&conn->readBuf); - int32_t* oVal = taosHashGet(pThrd->connLimitCache, conn->ip, strlen(conn->ip)); - int32_t nVal = oVal == NULL ? 0 : (*oVal) - 1; - taosHashPut(pThrd->connLimitCache, conn->ip, strlen(conn->ip), &nVal, sizeof(nVal)); - taosMemoryFree(conn); } static bool cliHandleNoResp(SCliConn* conn) { From 5c6b5f0d0fd3b27312e65c38c4cd8e36ebcddf83 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 21:58:56 +0800 Subject: [PATCH 198/267] fix: invalid read --- source/libs/transport/src/transCli.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 22bcb93218..681644d3bf 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -891,11 +891,17 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; - SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); - STraceId* trace = &pMsg->msg.info.traceId; + SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); + + if (pMsg) { + STraceId* trace = &pMsg->msg.info.traceId; + tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + pMsg ? TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, uv_strerror(status)); + } else { + tError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), 0, + pConn, pConn->ip, uv_strerror(status)); + } - tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), - pMsg ? TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, uv_strerror(status)); uv_timer_stop(pConn->timer); pConn->timer->data = NULL; taosArrayPush(pThrd->timerList, &pConn->timer); From 3fc8f6b73225a3988a499cab0928d2312c4fe513 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 15 Feb 2023 22:29:17 +0800 Subject: [PATCH 199/267] fix: invalid read --- source/libs/transport/src/transCli.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 681644d3bf..5170a68dbc 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -896,10 +896,10 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { if (pMsg) { STraceId* trace = &pMsg->msg.info.traceId; tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), - pMsg ? 
TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, uv_strerror(status)); + TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); } else { - tError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), 0, - pConn, pConn->ip, uv_strerror(status)); + tError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + TMSG_INFO(0), pConn, pConn->ip, uv_strerror(status)); } uv_timer_stop(pConn->timer); From c7a9ed34d08b85054132a35e245ce4aa775bbbca Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 15 Feb 2023 23:24:43 +0800 Subject: [PATCH 200/267] fix(query): fix memory leak and invalid free for tag filter cache. --- source/dnode/vnode/src/meta/metaCache.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 21a1014e87..366c072b6f 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -508,11 +508,11 @@ static void freePayload(const void* key, size_t keyLen, void* value) { const uint64_t* p = key; if (keyLen != sizeof(int64_t) * 4) { - metaError("key length is invalid, length:%d, expect:%d", (int32_t) keyLen, (int32_t) sizeof(uint64_t)*2); + metaError("key length is invalid, length:%d, expect:%d", (int32_t)keyLen, (int32_t)sizeof(uint64_t) * 2); return; } - SHashObj* pHashObj = (SHashObj*)p[0]; + SHashObj* pHashObj = (SHashObj*)p[0]; STagFilterResEntry** pEntry = taosHashGet(pHashObj, &p[1], sizeof(uint64_t)); { @@ -525,12 +525,13 @@ static void freePayload(const void* key, size_t keyLen, void* value) { while ((pNode = tdListNext(&iter)) != NULL) { uint64_t* digest = (uint64_t*)pNode->data; if (digest[0] == p[2] && digest[1] == p[3]) { - tdListPopNode(&((*pEntry)->list), pNode); + void* tmp = tdListPopNode(&((*pEntry)->list), pNode); + taosMemoryFree(tmp); int64_t et = taosGetTimestampUs(); metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)), - (et - st)/1000.0); - return; + (et - st) / 1000.0); + break; } } } From 05e61bac89d6c757f9f3b20a50025ab236fbaeb5 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 00:12:26 +0800 Subject: [PATCH 201/267] fix: invalid read --- source/libs/transport/src/transCli.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 5170a68dbc..e91fe337fc 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -893,14 +893,14 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); - if (pMsg) { - STraceId* trace = &pMsg->msg.info.traceId; - tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), - TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); - } else { - tError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), - TMSG_INFO(0), pConn, pConn->ip, uv_strerror(status)); - } + // if (pMsg) { + STraceId* trace = &pMsg->msg.info.traceId; + tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); + //} else { + // tError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", 
CONN_GET_INST_LABEL(pConn), + // TMSG_INFO(0), pConn, pConn->ip, uv_strerror(status)); + //} uv_timer_stop(pConn->timer); pConn->timer->data = NULL; From ebcbdcfddc27b684d160f4fbfb0e644c1a8b1c8e Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 00:30:26 +0800 Subject: [PATCH 202/267] fix: change parameter --- source/common/src/tglobal.c | 14 ++++++-------- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 6 +++++- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d4849650e6..ce4f2aa334 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -76,11 +76,11 @@ bool tsEnableTelem = true; int32_t tsTelemInterval = 43200; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com"; uint16_t tsTelemPort = 80; -char* tsTelemUri = "/report"; +char *tsTelemUri = "/report"; -bool tsEnableCrashReport = true; -char* tsClientCrashReportUri = "/ccrashreport"; -char* tsSvrCrashReportUri = "/dcrashreport"; +bool tsEnableCrashReport = true; +char *tsClientCrashReportUri = "/ccrashreport"; +char *tsSvrCrashReportUri = "/dcrashreport"; // schemaless char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null"; @@ -211,9 +211,7 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif -struct SConfig *taosGetCfg() { - return tsCfg; -} +struct SConfig *taosGetCfg() { return tsCfg; } static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -498,7 +496,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem = cfgGetItem(tsCfg, "numOfRpcThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfRpcThreads = numOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); pItem->i32 = tsNumOfRpcThreads; pItem->stype = stype; } diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index d23e67b195..b16ff6efac 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -284,7 +284,11 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; - rpcInit.connLimitNum = 3000; + int32_t connLimitNum = 30000 / (tsNumOfRpcThreads * 3); + connLimitNum = TMAX(connLimitNum, 500); + connLimitNum = TMIN(connLimitNum, 3000); + + rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; pTrans->clientRpc = rpcOpen(&rpcInit); From b614c287136d3e0cc4684ca9d8c2e7ba3825e912 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 16 Feb 2023 09:12:46 +0800 Subject: [PATCH 203/267] feat:set default value --- source/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 46c2a32535..94f1a8f730 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -189,7 +189,7 @@ int32_t tsGrantHBInterval = 60; int32_t tsUptimeInterval = 300; // seconds char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits char tsUdfdLdLibPath[512] = ""; -bool tsDisableStream = true; +bool tsDisableStream = false; #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { From 56f1afb19c4c3a031dcc44264e4ae8195e0fbbe6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Feb 2023 09:28:35 +0800 Subject: [PATCH 204/267] fix(tsdb/cache): 
use block index cache with last/last_row --- source/dnode/vnode/src/tsdb/tsdbCache.c | 28 ++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index ec0944193a..43c55d103b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -17,7 +17,7 @@ static int32_t tsdbOpenBICache(STsdb *pTsdb) { int32_t code = 0; - SLRUCache *pCache = taosLRUCacheInit(5 * 1024 * 1024, -1, .5); + SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; @@ -48,7 +48,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { SLRUCache *pCache = NULL; size_t cfgCapacity = pTsdb->pVnode->config.cacheLastSize * 1024 * 1024; - pCache = taosLRUCacheInit(cfgCapacity, -1, .5); + pCache = taosLRUCacheInit(cfgCapacity, 1, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; @@ -644,6 +644,7 @@ typedef struct SFSNextRowIter { SArray *aDFileSet; SDataFReader **pDataFReader; SArray *aBlockIdx; + LRUHandle *aBlockIdxHandle; SBlockIdx *pBlockIdx; SMapData blockMap; int32_t nBlock; @@ -697,6 +698,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { } // tMapDataReset(&state->blockIdxMap); + /* if (!state->aBlockIdx) { state->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); } else { @@ -704,6 +706,13 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { } code = tsdbReadBlockIdx(*state->pDataFReader, state->aBlockIdx); if (code) goto _err; + */ + int32_t code = + tsdbCacheGetBlockIdx((*state->pDataFReader)->pTsdb->biCache, *state->pDataFReader, &state->aBlockIdxHandle); + if (code != TSDB_CODE_SUCCESS || state->aBlockIdxHandle == NULL) { + goto _err; + } + state->aBlockIdx = (SArray *)taosLRUCacheValue((*state->pDataFReader)->pTsdb->biCache, state->aBlockIdxHandle); /* if (state->pBlockIdx) { */ /* } */ @@ -772,7 +781,10 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { // resetLastBlockLoadInfo(state->pLoadInfo); if (state->aBlockIdx) { - taosArrayDestroy(state->aBlockIdx); + // taosArrayDestroy(state->aBlockIdx); + tsdbBICacheRelease((*state->pDataFReader)->pTsdb->biCache, state->aBlockIdxHandle); + + state->aBlockIdxHandle = NULL; state->aBlockIdx = NULL; } @@ -795,7 +807,10 @@ _err: resetLastBlockLoadInfo(state->pLoadInfo); }*/ if (state->aBlockIdx) { - taosArrayDestroy(state->aBlockIdx); + // taosArrayDestroy(state->aBlockIdx); + tsdbBICacheRelease((*state->pDataFReader)->pTsdb->biCache, state->aBlockIdxHandle); + + state->aBlockIdxHandle = NULL; state->aBlockIdx = NULL; } if (state->pBlockData) { @@ -821,7 +836,10 @@ int32_t clearNextRowFromFS(void *iter) { state->pDataFReader = NULL; }*/ if (state->aBlockIdx) { - taosArrayDestroy(state->aBlockIdx); + // taosArrayDestroy(state->aBlockIdx); + tsdbBICacheRelease((*state->pDataFReader)->pTsdb->biCache, state->aBlockIdxHandle); + + state->aBlockIdxHandle = NULL; state->aBlockIdx = NULL; } if (state->pBlockData) { From 892d6c61c9961f834962cf20ada0e73ae24e150e Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 16 Feb 2023 10:06:01 +0800 Subject: [PATCH 205/267] fix: task dropped error code --- source/libs/qworker/inc/qwInt.h | 2 ++ source/libs/qworker/src/qwUtil.c | 26 +++++++------------------- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index bde05d4116..f14df8e57c 100644 --- a/source/libs/qworker/inc/qwInt.h +++ 
b/source/libs/qworker/inc/qwInt.h @@ -206,6 +206,8 @@ typedef struct SQWorkerMgmt { int32_t paramIdx; } SQWorkerMgmt; +#define QW_CTX_NOT_EXISTS_ERR_CODE(mgmt) (atomic_load_8(&(mgmt)->nodeStopped) ? TSDB_CODE_VND_STOPPED : TSDB_CODE_QRY_TASK_CTX_NOT_EXIST) + #define QW_FPARAMS_DEF SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId #define QW_IDS() sId, qId, tId, rId, eId #define QW_FPARAMS() mgmt, QW_IDS() diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index 7ee7c50c96..a342e48cc1 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -213,15 +213,9 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { QW_SET_QTID(id, qId, tId, eId); *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id)); - int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped); if (NULL == (*ctx)) { - if (!nodeStopped) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } else { - QW_TASK_DLOG_E("node stopped"); - QW_ERR_RET(TSDB_CODE_VND_STOPPED); - } + QW_TASK_DLOG_E("acquired task ctx not exist, may be dropped"); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } return TSDB_CODE_SUCCESS; @@ -232,16 +226,9 @@ int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { QW_SET_QTID(id, qId, tId, eId); *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); - int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped); - if (NULL == (*ctx)) { - if (!nodeStopped) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } else { - QW_TASK_DLOG_E("node stopped"); - QW_ERR_RET(TSDB_CODE_VND_STOPPED); - } + QW_TASK_DLOG_E("get task ctx not exist, may be dropped"); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } return TSDB_CODE_SUCCESS; @@ -334,7 +321,8 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); if (NULL == ctx) { - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + QW_TASK_DLOG_E("drop task ctx not exist, may be dropped"); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } octx = *ctx; @@ -346,7 +334,7 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) { QW_TASK_ELOG_E("taosHashRemove from ctx hash failed"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } qwFreeTaskCtx(&octx); From 792e16befce5563751c41d67465d728ead159dd0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 10:36:49 +0800 Subject: [PATCH 206/267] fix invalid read --- source/libs/transport/src/transCli.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index e91fe337fc..415005a1e1 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -893,14 +893,9 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); - // if (pMsg) { STraceId* trace = &pMsg->msg.info.traceId; tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); - //} else { - // tError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), - // TMSG_INFO(0), pConn, pConn->ip, uv_strerror(status)); - //} uv_timer_stop(pConn->timer); pConn->timer->data = NULL; @@ -1158,13 +1153,13 
@@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { transQueuePush(&conn->cliMsgs, pMsg); char key[TSDB_FQDN_LEN + 64] = {0}; - char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + char* fqdn = EPSET_GET_INUSE_IP(&pCtx->epSet); uint16_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); - CONN_CONSTRUCT_HASH_KEY(key, ip, port); + CONN_CONSTRUCT_HASH_KEY(key, fqdn, port); conn->ip = strdup(key); - uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, EPSET_GET_INUSE_IP(&pCtx->epSet)); + uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, fqdn); if (ipaddr == 0xffffffff) { uv_timer_stop(conn->timer); conn->timer->data = NULL; From 7abe4a782404c93471224358fd36c030a3fa4c41 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 16 Feb 2023 10:55:55 +0800 Subject: [PATCH 207/267] fix: memory leak of tfs --- source/libs/tfs/src/tfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 943611ee27..6c96bd1dcb 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -80,7 +80,7 @@ STfs *tfsOpen(SDiskCfg *pCfg, int32_t ndisk) { void tfsClose(STfs *pTfs) { if (pTfs == NULL) return; - for (int32_t level = 0; level < TFS_MAX_LEVEL; level++) { + for (int32_t level = 0; level <= TFS_MAX_LEVEL; level++) { tfsDestroyTier(&pTfs->tiers[level]); } From 27643f7c21ee4402c546ced4cd866a0c47bcc442 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 11:04:38 +0800 Subject: [PATCH 208/267] fix invalid read --- source/libs/transport/src/transCli.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 415005a1e1..2d09822dc7 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -158,6 +158,7 @@ static void cliHandleResp(SCliConn* conn); // handle except about conn static void cliHandleExcept(SCliConn* conn); static void cliReleaseUnfinishedMsg(SCliConn* conn); +static void cliHandleFastFail(SCliConn* pConn, int status); // handle req from app static void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd); @@ -488,9 +489,9 @@ void cliConnTimeout(uv_timer_t* handle) { uv_timer_stop(handle); handle->data = NULL; taosArrayPush(pThrd->timerList, &conn->timer); - conn->timer = NULL; - cliHandleExceptImpl(conn, -1); + + cliHandleFastFail(conn, UV_ECANCELED); } void cliReadTimeoutCb(uv_timer_t* handle) { // set up timeout cb @@ -897,11 +898,6 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); - uv_timer_stop(pConn->timer); - pConn->timer->data = NULL; - taosArrayPush(pThrd->timerList, &pConn->timer); - pConn->timer = NULL; - if (pMsg != NULL && REQUEST_NO_RESP(&pMsg->msg) && (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { SFailFastItem* item = taosHashGet(pThrd->failFastCache, pConn->ip, strlen(pConn->ip)); @@ -921,12 +917,23 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { } cliHandleExcept(pConn); } + void cliConnCb(uv_connect_t* req, int status) { SCliConn* pConn = req->data; SCliThrd* pThrd = pConn->hostThrd; + bool timeout = false; + + if (pConn->timer == NULL) { + timeout = true; + } else { + uv_timer_stop(pConn->timer); + pConn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &pConn->timer); + pConn->timer = NULL; + } if 
(status != 0) { - cliHandleFastFail(pConn, status); + if (timeout == false) cliHandleFastFail(pConn, status); return; } @@ -1199,6 +1206,11 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); if (ret != 0) { + uv_timer_stop(conn->timer); + conn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &conn->timer); + conn->timer = NULL; + cliHandleFastFail(conn, ret); return; } From a6a8daec23f83c5553c688ab15ba3db44fbd7ab7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 15:09:39 +0800 Subject: [PATCH 209/267] fix: batch write --- include/libs/transport/trpc.h | 3 +- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 1 + source/libs/transport/inc/transportInt.h | 1 + source/libs/transport/src/trans.c | 1 + source/libs/transport/src/transCli.c | 271 +++++++++++++++++- 5 files changed, 266 insertions(+), 11 deletions(-) diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index 5787f41772..acfd5dfb51 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -115,7 +115,8 @@ typedef struct SRpcInit { int32_t connLimitNum; int32_t connLimitLock; - void *parent; + int8_t supportBatch; // 0: no batch, 1. batch + void *parent; } SRpcInit; typedef struct { diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index b16ff6efac..8751b575f3 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -290,6 +290,7 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; + rpcInit.supportBatch = 1; pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 1fe32955b9..13adb4d2b4 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -66,6 +66,7 @@ typedef struct { int32_t connLimitNum; int8_t connLimitLock; // 0: no lock. 1. lock + int8_t supportBatch; // 0: no batch, 1: support batch int index; void* parent; diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 6eec54b370..38ec1c7fdc 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -69,6 +69,7 @@ void* rpcOpen(const SRpcInit* pInit) { pRpc->failFastFp = pInit->ffp; pRpc->connLimitNum = pInit->connLimitNum; pRpc->connLimitLock = pInit->connLimitLock; + pRpc->supportBatch = pInit->supportBatch; pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? 
TSDB_MAX_RPC_THREADS : pInit->numOfThreads; if (pRpc->numOfThreads <= 0) { diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 2d09822dc7..8063ac838b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -40,6 +40,8 @@ typedef struct SCliConn { bool broken; // link broken or not ConnStatus status; // + SCliBatch* pBatch; + int64_t refId; char* ip; @@ -62,6 +64,16 @@ typedef struct SCliMsg { int sent; //(0: no send, 1: alread sent) } SCliMsg; +typedef struct { + queue wq; + int32_t wLen; + int32_t batchSize; // + int32_t batch; + char* dst; + char* ip; + uint16_t port; +} SCliBatch; + typedef struct SCliThrd { TdThread thread; // tid int64_t pid; // pid @@ -86,6 +98,7 @@ typedef struct SCliThrd { SHashObj* failFastCache; SHashObj* connLimitCache; + SHashObj* batchCache; SCliMsg* stopMsg; @@ -132,6 +145,11 @@ static void cliAsyncCb(uv_async_t* handle); static void cliIdleCb(uv_idle_t* handle); static void cliPrepareCb(uv_prepare_t* handle); +static void cliSendBatch(const SCliBatch* pBatch, SCliThrd* pThrd); +static void cliSendBatchCb(uv_write_t* req, int status); +// callback after conn to server +static void cliConnBatchCb(uv_connect_t* req, int status); + static bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead); static int32_t allocConnRef(SCliConn* conn, bool update); @@ -167,6 +185,8 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd); static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd); static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, NULL, cliHandleUpdate}; +/// static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, +/// NULL,cliHandleUpdate}; static FORCE_INLINE void destroyUserdata(STransMsg* userdata); static FORCE_INLINE void destroyCmsg(void* cmsg); @@ -287,6 +307,7 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) { } destroyCmsg(msg); } + transQueueClear(&conn->cliMsgs); memset(&conn->ctx, 0, sizeof(conn->ctx)); } bool cliMaySendCachedMsg(SCliConn* conn) { @@ -888,6 +909,169 @@ void cliSend(SCliConn* pConn) { _RETURN: return; } + +static SCliBatch* cliDumpBatch(SCliBatch* pBatch) { + SCliBatch* pNewBatch = taosMemCalloc(1, sizeof(SClicBatch)); + pNewBatch->wq = pBatch->wq; + + pNewBatch->batchSize = pBatch->batchSize; + pNewBatch->batch = pBatch->batch; + pNewBatch->wLen = pBatch->wLen; + pNewBatch->dst = strdup(pBatch->dst); + pNewBatch->ip = strdup(pBatch->ip); + pNewBatch->port = pBatch->port; + + QUEUE_INIT(&pBatch->wq); + pBatch->batchSize = 0; + pBatch->batch = 0; + pBatch->wLen = 0; + + return pNewBatch; +} +static void cliDestroyBatch(SCliBatch* pBatch) { + while (!EMPTY_IS_EMPTY(&pBatch->wq)) { + queue* h = QUEUE_HEAD(&pBatch->wq); + SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); + + QUEUE_REMOVE(&pMsg->q); + destroyCmsg(p); + } + taosMemoryFree(pBatch->ip); + taosMemoryFree(pBatch->dst); + taosMemoryFree(pBatch); +} +static void cliSendBatch(SCliBatch* pBatch, SCliThrd* pThrd) { + if (pBatch->wLen == 0 || EMPTY_IS_EMPTY(&pBatch->wq)) { + return; + } + STrans* pTransInst = pThrd->pTransInst; + + SCliBatch* pNewBatch = cliDumpBatch(pBatch); + + SCliConn* conn = getConnFromPool(pThrd->pool, pBatch->ip, pBatch->port); + if (conn == NULL) { + conn = cliCreateConn(pThrd); + conn->pBatch = pNewBatch; + conn->ip = strdup(conn->pBatch->ip); + + uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, conn->ip); + if (ipaddr == 
0xffffffff) { + uv_timer_stop(conn->timer); + conn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &conn->timer); + conn->timer = NULL; + + cliHandleExcept(conn); + return; + } + struct sockaddr_in addr; + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = ipaddr; + addr.sin_port = (uint16_t)htons(port); + + tGTrace("%s conn %p try to connect to %s", pTransInst->label, conn, conn->ip); + int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); + if (fd == -1) { + tGError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, + tstrerror(TAOS_SYSTEM_ERROR(errno))); + cliHandleExcept(conn); + errno = 0; + return; + } + int ret = uv_tcp_open((uv_tcp_t*)conn->stream, fd); + if (ret != 0) { + tGError("%s conn %p failed to set stream, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); + cliHandleExcept(conn); + return; + } + ret = transSetConnOption((uv_tcp_t*)conn->stream); + if (ret != 0) { + tGError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); + cliHandleExcept(conn); + return; + } + + ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); + if (ret != 0) { + uv_timer_stop(conn->timer); + conn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &conn->timer); + conn->timer = NULL; + + cliHandleFastFail(conn, ret); + return; + } + uv_timer_start(conn->timer, cliConnTimeout, TRANS_CONN_TIMEOUT, 0); + return; + } + + conn->pBatch = pNewBatch; + + int32_t wLen = pBatch->wLen; + uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); + int i = 0; + + while (!EMPTY_IS_EMPTY(&pBatch->wq)) { + queue* h = QUEUE_HEAD(&pBatch->wq); + SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); + QUEUE_REMOVE(&pMsg->q); + + transQueuePush(conn->cliMsgs, pMsg); + + STransConnCtx* pCtx = pCliMsg->ctx; + + STransMsg* pMsg = (STransMsg*)(&pCliMsg->msg); + if (pMsg->pCont == 0) { + pMsg->pCont = (void*)rpcMallocCont(0); + pMsg->contLen = 0; + } + + int msgLen = transMsgLenFromCont(pMsg->contLen); + STransMsgHead* pHead = transHeadFromCont(pMsg->pCont); + + if (pHead->comp == 0) { + pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0; + pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0; + pHead->persist = REQUEST_PERSIS_HANDLE(pMsg) ? 1 : 0; + pHead->msgType = pMsg->msgType; + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + pHead->release = REQUEST_RELEASE_HANDLE(pCliMsg) ? 
1 : 0; + memcpy(pHead->user, pTransInst->user, strlen(pTransInst->user)); + pHead->traceId = pMsg->info.traceId; + pHead->magicNum = htonl(TRANS_MAGIC_NUM); + } + pHead->timestamp = taosHton64(taosGetTimestampUs()); + + if (pHead->comp == 0) { + if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { + msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead); + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + } + } else { + msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); + } + + wb[i++] = uv_buf_init((char*)pHead, msgLen); + } + + pBatch->wLen = 0; + uv_write_t* req = taosMemCalloc(1, sizeof(uv_write_t)); + req->data = pConn; + uv_write(req, (uv_stream_t*)conn->stream, wb, wLen, cliSendBatchCb); + taosMemoryFree(wb); +} +static void cliSendBatchCb(uv_write_t* req, int status) { + SCliConn* conn = req->data; + SCliThrd* thrd = conn->hostThrd; + cliDestroyBatch(conn->pBatch); + conn->pBatch = NULL; + + if (status != 0) { + cliHandleExcept(conn); + } else { + addConnToPool(thrd->pool, conn); + } +} static void cliHandleFastFail(SCliConn* pConn, int status) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; @@ -1218,29 +1402,93 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { } tGTrace("%s conn %p ready", pTransInst->label, conn); } -static void cliAsyncCb(uv_async_t* handle) { - SAsyncItem* item = handle->data; - SCliThrd* pThrd = item->pThrd; - SCliMsg* pMsg = NULL; - - // batch process to avoid to lock/unlock frequently - queue wq; - taosThreadMutexLock(&item->mtx); - QUEUE_MOVE(&item->qmsg, &wq); - taosThreadMutexUnlock(&item->mtx); +static void cliNoBatchDealReq(queue wq, SCliThrd* pThrd) { int count = 0; + while (!QUEUE_IS_EMPTY(&wq)) { queue* h = QUEUE_HEAD(&wq); QUEUE_REMOVE(h); SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); + count++; } if (count >= 2) { tTrace("cli process batch size:%d", count); } +} + +static void cliHandleBatch() static void cliBatchDealReq(queue wq, SCliThrd* pThrd) { + int count = 0; + while (!QUEUE_IS_EMPTY(&wq)) { + queue* h = QUEUE_HEAD(&wq); + QUEUE_REMOVE(h); + + SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); + if (REQUEST_NO_RESP(&pMsg->msg)) { + STransConnCtx* pCtx = pMsg->ctx; + + char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + uint32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + char key[TSDB_FQDN_LEN + 64] = {0}; + CONN_CONSTRUCT_HASH_KEY(key, ip, port); + + SCliBatch *ppBatch = taosHashGet(pThrd->batchCache, key, sizeof(key))); + if (*ppBatch == NULL) { + SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); + QUEUE_INIT(&pBatch->wq); + QUEUE_PUSH(&pBatch->wq, h); + pBatch->wLen += 1; + pBatch->batchSize += pMsg->msg.contLen; + + pBatch->dst = strdup(key); + pBatch->ip = strdup(ip); + pBatch->port = (uint16_t)port; + + taosHashPut(pThrd->batchCache, key, sizeof(key), &pBatch, sizeof(void*)); + } else { + QUEUE_PUSH(&(*ppBatch)->wq, h); + (*pBatch)->wLen += 1; + (*pBatch)->batchSize += pMsg->msg.contLen; + } + } + (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); + count++; + } + + void** pIter = taoskHashIterate(pThrd->batchCache, NULL); + while (pIter != NULL) { + SCliBatch* batch = (SCliBatch*)(*pIter); + + cliSendBatch(batch, pThrd); + pIter = (void**)taosHashIterate(info, pIter); + } + + if (count >= 2) { + tTrace("cli process batch size:%d", count); + } +} + +static void cliAsyncCb(uv_async_t* handle) { + SAsyncItem* item = handle->data; + SCliThrd* pThrd = item->pThrd; + STrans* pTransInst = pThrd->pTransInst; + + SCliMsg* 
pMsg = NULL; + // batch process to avoid to lock/unlock frequently + queue wq; + taosThreadMutexLock(&item->mtx); + QUEUE_MOVE(&item->qmsg, &wq); + taosThreadMutexUnlock(&item->mtx); + + int8_t supportBatch = pTransInst->supprtBatch; + if (supportBatch == 0) { + cliNotBatchDealReq(wq, pThrd); + } else if (supportBatch == 1) { + cliBatchDealReq(wq, pThrd); + } if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd); } @@ -1456,6 +1704,8 @@ static SCliThrd* createThrdObj(void* trans) { pThrd->connLimitCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, pTransInst->connLimitLock == 0 ? HASH_NO_LOCK : HASH_ENTRY_LOCK); + pThrd->batchCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, hash_no_lock); + pThrd->quit = false; return pThrd; } @@ -1484,6 +1734,7 @@ static void destroyThrdObj(SCliThrd* pThrd) { taosHashCleanup(pThrd->fqdn2ipCache); taosHashCleanup(pThrd->failFastCache); taosHashCleanup(pThrd->connLimitCache); + taosHashCleanup(pThrd->batchCache); taosMemoryFree(pThrd); } From 742a6d6c95ce2d361aecc8ef16856be35612e4d1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Feb 2023 15:25:25 +0800 Subject: [PATCH 210/267] fix(mnd/stream): fix ignore update compatibility --- source/dnode/mnode/impl/src/mndDef.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 75177f4158..658fa75d9c 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -146,7 +146,9 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) { // 3.0.20 if (sver >= 2) { if (tDecodeI64(pDecoder, &pObj->checkpointFreq) < 0) return -1; - if (tDecodeI8(pDecoder, &pObj->igCheckUpdate) < 0) return -1; + if (!tDecodeIsEnd(pDecoder)) { + if (tDecodeI8(pDecoder, &pObj->igCheckUpdate) < 0) return -1; + } } tEndDecode(pDecoder); return 0; From de9cafb04e21751645d688166b281eae169cc99c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 16 Feb 2023 15:50:19 +0800 Subject: [PATCH 211/267] fix(tsdb/cache): disable sharding for block index cache --- source/dnode/vnode/src/tsdb/tsdbCache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 43c55d103b..0b671e2bf5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -17,7 +17,7 @@ static int32_t tsdbOpenBICache(STsdb *pTsdb) { int32_t code = 0; - SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5); + SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; From bcf6d5b641c889f96d85dd96d85cab5f1f50bf97 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 16 Feb 2023 16:06:12 +0800 Subject: [PATCH 212/267] fix: clear some asserts in walRead.c --- source/libs/wal/src/walRead.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 0f8f140bd8..5e09af5b2e 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -241,7 +241,6 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { if (pRead->curInvalid || pRead->curVersion != fetchVer) { if (walReadSeekVer(pRead, fetchVer) < 0) { - ASSERT(0); pRead->curVersion = fetchVer; pRead->curInvalid = 1; return -1; @@ -262,7 +261,6 @@ static int32_t walFetchHeadNew(SWalReader 
*pRead, int64_t fetchVer) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } - ASSERT(0); pRead->curInvalid = 1; return -1; } @@ -299,7 +297,6 @@ static int32_t walFetchBodyNew(SWalReader *pRead) { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } pRead->curInvalid = 1; - ASSERT(0); return -1; } @@ -308,7 +305,6 @@ static int32_t walFetchBodyNew(SWalReader *pRead) { pRead->pHead->head.version, ver); pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - ASSERT(0); return -1; } @@ -316,7 +312,6 @@ static int32_t walFetchBodyNew(SWalReader *pRead) { wError("vgId:%d, wal fetch body error:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver); pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - ASSERT(0); return -1; } @@ -335,7 +330,6 @@ static int32_t walSkipFetchBodyNew(SWalReader *pRead) { if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); pRead->curInvalid = 1; - ASSERT(0); return -1; } @@ -384,7 +378,6 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } - ASSERT(0); pRead->curInvalid = 1; return -1; } @@ -447,7 +440,6 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { if (pReadHead->bodyLen != taosReadFile(pRead->pLogFile, pReadHead->body, pReadHead->bodyLen)) { if (pReadHead->bodyLen < 0) { - ASSERT(0); terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s", pRead->pWal->cfg.vgId, pReadHead->version, ver, tstrerror(terrno)); @@ -457,12 +449,10 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } pRead->curInvalid = 1; - ASSERT(0); return -1; } if (pReadHead->version != ver) { - ASSERT(0); wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId, pReadHead->version, ver); pRead->curInvalid = 1; @@ -471,7 +461,6 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { } if (walValidBodyCksum(*ppHead) != 0) { - ASSERT(0); wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver); pRead->curInvalid = 1; From 2a08e2a5492ae2b9a0dec42bbd9c2b49cdc24356 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 16 Feb 2023 15:02:53 +0800 Subject: [PATCH 213/267] enh: refactor some asserts in doQueueScan --- source/libs/executor/src/scanoperator.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index b556733254..94c5b2e0b7 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1573,7 +1573,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { qError("submit msg messed up when initing stream submit block %p", pSubmit); pInfo->tqReader->pMsg = NULL; pTaskInfo->streamInfo.pReq = NULL; - ASSERT(0); + return NULL; } } @@ -1628,11 +1628,14 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { while (1) { SFetchRet ret = {0}; - tqNextBlock(pInfo->tqReader, &ret); + if (tqNextBlock(pInfo->tqReader, &ret) < 0) { + qError("failed to get next log block since %s", terrstr()); + return NULL; + } if (ret.fetchType == FETCH_TYPE__DATA) { blockDataCleanup(pInfo->pRes); if (setBlockIntoRes(pInfo, &ret.data, true) < 0) { - ASSERT(0); + return NULL; } if (pInfo->pRes->info.rows > 0) { pOperator->status = 
OP_EXEC_RECV; @@ -1640,7 +1643,8 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { return pInfo->pRes; } } else if (ret.fetchType == FETCH_TYPE__META) { - ASSERT(0); + qError("unexpected ret.fetchType:%d", ret.fetchType); + return NULL; // pTaskInfo->streamInfo.lastStatus = ret.offset; // pTaskInfo->streamInfo.metaBlk = ret.meta; // return NULL; @@ -1667,7 +1671,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { return NULL; #endif } else { - ASSERT(0); + qError("unexpected streamInfo prepare type: %d", pTaskInfo->streamInfo.prepareStatus.type); return NULL; } } From c35b938698f913073b7ce52fc6cc8dd6354508c6 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 17:01:09 +0800 Subject: [PATCH 214/267] fix invalid read/write --- source/libs/transport/src/transCli.c | 265 ++++++++++++++------------- 1 file changed, 141 insertions(+), 124 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 8063ac838b..54203e8d7c 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -20,6 +20,15 @@ typedef struct SConnList { int32_t size; } SConnList; +typedef struct { + queue wq; + int32_t wLen; + int32_t batchSize; // + int32_t batch; + char* dst; + char* ip; + uint16_t port; +} SCliBatch; typedef struct SCliConn { T_REF_DECLARE() uv_connect_t connReq; @@ -64,16 +73,6 @@ typedef struct SCliMsg { int sent; //(0: no send, 1: alread sent) } SCliMsg; -typedef struct { - queue wq; - int32_t wLen; - int32_t batchSize; // - int32_t batch; - char* dst; - char* ip; - uint16_t port; -} SCliBatch; - typedef struct SCliThrd { TdThread thread; // tid int64_t pid; // pid @@ -145,10 +144,12 @@ static void cliAsyncCb(uv_async_t* handle); static void cliIdleCb(uv_idle_t* handle); static void cliPrepareCb(uv_prepare_t* handle); -static void cliSendBatch(const SCliBatch* pBatch, SCliThrd* pThrd); +static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd); +// static void cliConnBatchCb(uv_connect_t* req, int status); static void cliSendBatchCb(uv_write_t* req, int status); -// callback after conn to server -static void cliConnBatchCb(uv_connect_t* req, int status); +// static void cliConnBatchCb(uv_connect_t* req, int status); +// callback after conn to server +// static void cliConnBatchCb(uv_connect_t* req, int status); static bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead); @@ -160,6 +161,7 @@ static SCliConn* cliCreateConn(SCliThrd* thrd); static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/); static void cliDestroy(uv_handle_t* handle); static void cliSend(SCliConn* pConn); +static void cliSendBatch(SCliConn* pConn); static void cliDestroyConnMsgs(SCliConn* conn, bool destroy); // cli util func @@ -825,7 +827,63 @@ static void cliSendCb(uv_write_t* req, int status) { } uv_read_start((uv_stream_t*)pConn->stream, cliAllocRecvBufferCb, cliRecvCb); } +void cliSendBatch(SCliConn* pConn) { + SCliThrd* pThrd = pConn->hostThrd; + STrans* pTransInst = pThrd->pTransInst; + SCliBatch* pBatch = pConn->pBatch; + int32_t wLen = pBatch->wLen; + + uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); + int i = 0; + + while (!QUEUE_IS_EMPTY(&pBatch->wq)) { + queue* h = QUEUE_HEAD(&pBatch->wq); + SCliMsg* pCliMsg = QUEUE_DATA(h, SCliMsg, q); + QUEUE_REMOVE(&pCliMsg->q); + + STransConnCtx* pCtx = pCliMsg->ctx; + + STransMsg* pMsg = (STransMsg*)(&pCliMsg->msg); + if (pMsg->pCont == 0) { + pMsg->pCont = (void*)rpcMallocCont(0); + pMsg->contLen = 0; + } + + 
int msgLen = transMsgLenFromCont(pMsg->contLen); + STransMsgHead* pHead = transHeadFromCont(pMsg->pCont); + + if (pHead->comp == 0) { + pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0; + pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0; + pHead->persist = REQUEST_PERSIS_HANDLE(pMsg) ? 1 : 0; + pHead->msgType = pMsg->msgType; + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + pHead->release = REQUEST_RELEASE_HANDLE(pCliMsg) ? 1 : 0; + memcpy(pHead->user, pTransInst->user, strlen(pTransInst->user)); + pHead->traceId = pMsg->info.traceId; + pHead->magicNum = htonl(TRANS_MAGIC_NUM); + } + pHead->timestamp = taosHton64(taosGetTimestampUs()); + + if (pHead->comp == 0) { + if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { + msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead); + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + } + } else { + msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); + } + + wb[i++] = uv_buf_init((char*)pHead, msgLen); + } + + pBatch->wLen = 0; + uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t)); + req->data = pConn; + uv_write(req, (uv_stream_t*)pConn->stream, wb, wLen, cliSendBatchCb); + taosMemoryFree(wb); +} void cliSend(SCliConn* pConn) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; @@ -911,8 +969,8 @@ _RETURN: } static SCliBatch* cliDumpBatch(SCliBatch* pBatch) { - SCliBatch* pNewBatch = taosMemCalloc(1, sizeof(SClicBatch)); - pNewBatch->wq = pBatch->wq; + SCliBatch* pNewBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); + memcpy(pNewBatch->wq, pBatch->wq, sizeof(pBatch->wq)); pNewBatch->batchSize = pBatch->batchSize; pNewBatch->batch = pBatch->batch; @@ -929,19 +987,19 @@ static SCliBatch* cliDumpBatch(SCliBatch* pBatch) { return pNewBatch; } static void cliDestroyBatch(SCliBatch* pBatch) { - while (!EMPTY_IS_EMPTY(&pBatch->wq)) { + while (!QUEUE_IS_EMPTY(&pBatch->wq)) { queue* h = QUEUE_HEAD(&pBatch->wq); - SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); + SCliMsg* p = QUEUE_DATA(h, SCliMsg, q); - QUEUE_REMOVE(&pMsg->q); + QUEUE_REMOVE(&p->q); destroyCmsg(p); } taosMemoryFree(pBatch->ip); taosMemoryFree(pBatch->dst); taosMemoryFree(pBatch); } -static void cliSendBatch(SCliBatch* pBatch, SCliThrd* pThrd) { - if (pBatch->wLen == 0 || EMPTY_IS_EMPTY(&pBatch->wq)) { +static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { + if (pBatch->wLen == 0 || QUEUE_IS_EMPTY(&pBatch->wq)) { return; } STrans* pTransInst = pThrd->pTransInst; @@ -961,33 +1019,32 @@ static void cliSendBatch(SCliBatch* pBatch, SCliThrd* pThrd) { taosArrayPush(pThrd->timerList, &conn->timer); conn->timer = NULL; - cliHandleExcept(conn); + cliHandleFastFail(conn, -1); return; } struct sockaddr_in addr; addr.sin_family = AF_INET; addr.sin_addr.s_addr = ipaddr; - addr.sin_port = (uint16_t)htons(port); + addr.sin_port = (uint16_t)htons(pBatch->port); - tGTrace("%s conn %p try to connect to %s", pTransInst->label, conn, conn->ip); + tTrace("%s conn %p try to connect to %s", pTransInst->label, conn, pBatch->ip); int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); if (fd == -1) { - tGError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, - tstrerror(TAOS_SYSTEM_ERROR(errno))); - cliHandleExcept(conn); - errno = 0; + tError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, + tstrerror(TAOS_SYSTEM_ERROR(errno))); + cliHandleFastFail(conn, -1); return; } int ret = uv_tcp_open((uv_tcp_t*)conn->stream, fd); if (ret != 0) { - 
tGError("%s conn %p failed to set stream, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); - cliHandleExcept(conn); + tError("%s conn %p failed to set stream, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); + cliHandleFastFail(conn, -1); return; } ret = transSetConnOption((uv_tcp_t*)conn->stream); if (ret != 0) { - tGError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); - cliHandleExcept(conn); + tError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); + cliHandleFastFail(conn, -1); return; } @@ -997,8 +1054,7 @@ static void cliSendBatch(SCliBatch* pBatch, SCliThrd* pThrd) { conn->timer->data = NULL; taosArrayPush(pThrd->timerList, &conn->timer); conn->timer = NULL; - - cliHandleFastFail(conn, ret); + cliHandleFastFail(conn, -1); return; } uv_timer_start(conn->timer, cliConnTimeout, TRANS_CONN_TIMEOUT, 0); @@ -1006,59 +1062,7 @@ static void cliSendBatch(SCliBatch* pBatch, SCliThrd* pThrd) { } conn->pBatch = pNewBatch; - - int32_t wLen = pBatch->wLen; - uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); - int i = 0; - - while (!EMPTY_IS_EMPTY(&pBatch->wq)) { - queue* h = QUEUE_HEAD(&pBatch->wq); - SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); - QUEUE_REMOVE(&pMsg->q); - - transQueuePush(conn->cliMsgs, pMsg); - - STransConnCtx* pCtx = pCliMsg->ctx; - - STransMsg* pMsg = (STransMsg*)(&pCliMsg->msg); - if (pMsg->pCont == 0) { - pMsg->pCont = (void*)rpcMallocCont(0); - pMsg->contLen = 0; - } - - int msgLen = transMsgLenFromCont(pMsg->contLen); - STransMsgHead* pHead = transHeadFromCont(pMsg->pCont); - - if (pHead->comp == 0) { - pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0; - pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0; - pHead->persist = REQUEST_PERSIS_HANDLE(pMsg) ? 1 : 0; - pHead->msgType = pMsg->msgType; - pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); - pHead->release = REQUEST_RELEASE_HANDLE(pCliMsg) ? 
1 : 0; - memcpy(pHead->user, pTransInst->user, strlen(pTransInst->user)); - pHead->traceId = pMsg->info.traceId; - pHead->magicNum = htonl(TRANS_MAGIC_NUM); - } - pHead->timestamp = taosHton64(taosGetTimestampUs()); - - if (pHead->comp == 0) { - if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { - msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead); - pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); - } - } else { - msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); - } - - wb[i++] = uv_buf_init((char*)pHead, msgLen); - } - - pBatch->wLen = 0; - uv_write_t* req = taosMemCalloc(1, sizeof(uv_write_t)); - req->data = pConn; - uv_write(req, (uv_stream_t*)conn->stream, wb, wLen, cliSendBatchCb); - taosMemoryFree(wb); + cliSendBatch(conn); } static void cliSendBatchCb(uv_write_t* req, int status) { SCliConn* conn = req->data; @@ -1075,29 +1079,34 @@ static void cliSendBatchCb(uv_write_t* req, int status) { static void cliHandleFastFail(SCliConn* pConn, int status) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; + tError("conn %p free twice", pConn); + if (pConn->pBatch == NULL) { + SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); - SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); + STraceId* trace = &pMsg->msg.info.traceId; + tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); - STraceId* trace = &pMsg->msg.info.traceId; - tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), - TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); - - if (pMsg != NULL && REQUEST_NO_RESP(&pMsg->msg) && - (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { - SFailFastItem* item = taosHashGet(pThrd->failFastCache, pConn->ip, strlen(pConn->ip)); - int64_t cTimestamp = taosGetTimestampMs(); - if (item != NULL) { - int32_t elapse = cTimestamp - item->timestamp; - if (elapse >= 0 && elapse <= pTransInst->failFastInterval) { - item->count++; + if (pMsg != NULL && REQUEST_NO_RESP(&pMsg->msg) && + (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { + SFailFastItem* item = taosHashGet(pThrd->failFastCache, pConn->ip, strlen(pConn->ip)); + int64_t cTimestamp = taosGetTimestampMs(); + if (item != NULL) { + int32_t elapse = cTimestamp - item->timestamp; + if (elapse >= 0 && elapse <= pTransInst->failFastInterval) { + item->count++; + } else { + item->count = 1; + item->timestamp = cTimestamp; + } } else { - item->count = 1; - item->timestamp = cTimestamp; + SFailFastItem item = {.count = 1, .timestamp = cTimestamp}; + taosHashPut(pThrd->failFastCache, pConn->ip, strlen(pConn->ip), &item, sizeof(SFailFastItem)); } - } else { - SFailFastItem item = {.count = 1, .timestamp = cTimestamp}; - taosHashPut(pThrd->failFastCache, pConn->ip, strlen(pConn->ip), &item, sizeof(SFailFastItem)); } + } else { + cliDestroyBatch(pConn->pBatch); + pConn->pBatch = NULL; } cliHandleExcept(pConn); } @@ -1117,7 +1126,11 @@ void cliConnCb(uv_connect_t* req, int status) { } if (status != 0) { - if (timeout == false) cliHandleFastFail(pConn, status); + if (timeout == false) { + cliHandleFastFail(pConn, status); + } else if (timeout == true) { + // already deal by timeout + } return; } @@ -1135,8 +1148,11 @@ void cliConnCb(uv_connect_t* req, int status) { transSockInfo2Str(&sockname, pConn->src); tTrace("%s 
conn %p connect to server successfully", CONN_GET_INST_LABEL(pConn), pConn); - - cliSend(pConn); + if (pConn->pBatch != NULL) { + cliSendBatch(pConn); + } else { + cliSend(pConn); + } } static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd) { @@ -1403,11 +1419,11 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { tGTrace("%s conn %p ready", pTransInst->label, conn); } -static void cliNoBatchDealReq(queue wq, SCliThrd* pThrd) { +static void cliNoBatchDealReq(queue* wq, SCliThrd* pThrd) { int count = 0; - while (!QUEUE_IS_EMPTY(&wq)) { - queue* h = QUEUE_HEAD(&wq); + while (!QUEUE_IS_EMPTY(wq)) { + queue* h = QUEUE_HEAD(wq); QUEUE_REMOVE(h); SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); @@ -1420,10 +1436,10 @@ static void cliNoBatchDealReq(queue wq, SCliThrd* pThrd) { } } -static void cliHandleBatch() static void cliBatchDealReq(queue wq, SCliThrd* pThrd) { +static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { int count = 0; - while (!QUEUE_IS_EMPTY(&wq)) { - queue* h = QUEUE_HEAD(&wq); + while (!QUEUE_IS_EMPTY(wq)) { + queue* h = QUEUE_HEAD(wq); QUEUE_REMOVE(h); SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); @@ -1435,8 +1451,8 @@ static void cliHandleBatch() static void cliBatchDealReq(queue wq, SCliThrd* pTh char key[TSDB_FQDN_LEN + 64] = {0}; CONN_CONSTRUCT_HASH_KEY(key, ip, port); - SCliBatch *ppBatch = taosHashGet(pThrd->batchCache, key, sizeof(key))); - if (*ppBatch == NULL) { + SCliBatch** ppBatch = taosHashGet(pThrd->batchCache, key, sizeof(key)); + if (ppBatch == NULL || *ppBatch == NULL) { SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); QUEUE_INIT(&pBatch->wq); QUEUE_PUSH(&pBatch->wq, h); @@ -1450,20 +1466,21 @@ static void cliHandleBatch() static void cliBatchDealReq(queue wq, SCliThrd* pTh taosHashPut(pThrd->batchCache, key, sizeof(key), &pBatch, sizeof(void*)); } else { QUEUE_PUSH(&(*ppBatch)->wq, h); - (*pBatch)->wLen += 1; - (*pBatch)->batchSize += pMsg->msg.contLen; + (*ppBatch)->wLen += 1; + (*ppBatch)->batchSize += pMsg->msg.contLen; } + return; } (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); count++; } - void** pIter = taoskHashIterate(pThrd->batchCache, NULL); + void** pIter = taosHashIterate(pThrd->batchCache, NULL); while (pIter != NULL) { SCliBatch* batch = (SCliBatch*)(*pIter); - cliSendBatch(batch, pThrd); - pIter = (void**)taosHashIterate(info, pIter); + cliHandleBatchReq(batch, pThrd); + pIter = (void**)taosHashIterate(pThrd->batchCache, pIter); } if (count >= 2) { @@ -1483,11 +1500,11 @@ static void cliAsyncCb(uv_async_t* handle) { QUEUE_MOVE(&item->qmsg, &wq); taosThreadMutexUnlock(&item->mtx); - int8_t supportBatch = pTransInst->supprtBatch; + int8_t supportBatch = pTransInst->supportBatch; if (supportBatch == 0) { - cliNotBatchDealReq(wq, pThrd); + cliNoBatchDealReq(&wq, pThrd); } else if (supportBatch == 1) { - cliBatchDealReq(wq, pThrd); + cliBatchDealReq(&wq, pThrd); } if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd); @@ -1704,7 +1721,7 @@ static SCliThrd* createThrdObj(void* trans) { pThrd->connLimitCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, pTransInst->connLimitLock == 0 ? 
HASH_NO_LOCK : HASH_ENTRY_LOCK); - pThrd->batchCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, hash_no_lock); + pThrd->batchCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pThrd->quit = false; return pThrd; From 2a4708d82af9ce8eeec02cb061572d12524e2469 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 16 Feb 2023 16:02:32 +0800 Subject: [PATCH 215/267] fix: save fill history state --- include/libs/stream/tstream.h | 3 ++- source/dnode/vnode/src/tq/tq.c | 5 ++++- source/libs/stream/src/streamMeta.c | 31 +++++++++++++++++++---------- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index c5352eee46..ac09d5dfde 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -616,6 +616,7 @@ typedef struct SStreamMeta { SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId); void streamMetaClose(SStreamMeta* streamMeta); +int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask); int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen); SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId); @@ -627,7 +628,7 @@ void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); int32_t streamMetaBegin(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaRollBack(SStreamMeta* pMeta); -int32_t streamLoadTasks(SStreamMeta* pMeta); +int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver); // checkpoint int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 3e13eaa6e8..276de64bbd 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -106,7 +106,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { return NULL; } - if (streamLoadTasks(pTq->pStreamMeta) < 0) { + if (streamLoadTasks(pTq->pStreamMeta, walGetCommittedVer(pVnode->pWal)) < 0) { return NULL; } @@ -1196,6 +1196,9 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t version, char* msg, int32_t m return -1; } + atomic_store_8(&pTask->fillHistory, 0); + streamMetaSaveTask(pTq->pStreamMeta, pTask); + streamMetaReleaseTask(pTq->pStreamMeta, pTask); return 0; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index e345bc1c6e..63527e2b1c 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -129,13 +129,8 @@ FAIL: } #endif -#if 1 -int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { - void* buf = NULL; - if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { - return -1; - } - +int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) { + void* buf = NULL; int32_t len; int32_t code; tEncodeSize(tEncodeSStreamTask, pTask, len, code); @@ -153,11 +148,23 @@ int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { tEncoderClear(&encoder); if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), buf, len, pMeta->txn) < 0) { - ASSERT(0); return -1; } taosMemoryFree(buf); + return 0; +} + +#if 1 +int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { + if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { + return -1; + } 
+ + if (streamMetaSaveTask(pMeta, pTask) < 0) { + return -1; + } + taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); return 0; @@ -255,7 +262,7 @@ int32_t streamMetaAbort(SStreamMeta* pMeta) { return 0; } -int32_t streamLoadTasks(SStreamMeta* pMeta) { +int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { TBC* pCur = NULL; if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) { return -1; @@ -294,7 +301,11 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) { tdbTbcClose(pCur); return -1; } - pTask->taskStatus = TASK_STATUS__NORMAL; + /*pTask->taskStatus = TASK_STATUS__NORMAL;*/ + if (pTask->fillHistory) { + pTask->taskStatus = TASK_STATUS__WAIT_DOWNSTREAM; + streamTaskCheckDownstream(pTask, ver); + } } tdbFree(pKey); From ee28ab3bf7271aee41c634d9ab44714c276df8fe Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 18:06:12 +0800 Subject: [PATCH 216/267] enh: batch send --- source/libs/transport/src/transCli.c | 29 +++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 54203e8d7c..47845b6336 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -837,10 +837,9 @@ void cliSendBatch(SCliConn* pConn) { uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); int i = 0; - while (!QUEUE_IS_EMPTY(&pBatch->wq)) { - queue* h = QUEUE_HEAD(&pBatch->wq); + queue* h = NULL; + QUEUE_FOREACH(h, &pBatch->wq) { SCliMsg* pCliMsg = QUEUE_DATA(h, SCliMsg, q); - QUEUE_REMOVE(&pCliMsg->q); STransConnCtx* pCtx = pCliMsg->ctx; @@ -878,7 +877,6 @@ void cliSendBatch(SCliConn* pConn) { wb[i++] = uv_buf_init((char*)pHead, msgLen); } - pBatch->wLen = 0; uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t)); req->data = pConn; uv_write(req, (uv_stream_t*)pConn->stream, wb, wLen, cliSendBatchCb); @@ -970,7 +968,13 @@ _RETURN: static SCliBatch* cliDumpBatch(SCliBatch* pBatch) { SCliBatch* pNewBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); - memcpy(pNewBatch->wq, pBatch->wq, sizeof(pBatch->wq)); + + QUEUE_INIT(&pNewBatch->wq); + while (!QUEUE_IS_EMPTY(&pBatch->wq)) { + queue* h = QUEUE_HEAD(&pBatch->wq); + QUEUE_REMOVE(h); + QUEUE_PUSH(&pNewBatch->wq, h); + } pNewBatch->batchSize = pBatch->batchSize; pNewBatch->batch = pBatch->batch; @@ -1027,7 +1031,7 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { addr.sin_addr.s_addr = ipaddr; addr.sin_port = (uint16_t)htons(pBatch->port); - tTrace("%s conn %p try to connect to %s", pTransInst->label, conn, pBatch->ip); + tTrace("%s conn %p try to connect to %s", pTransInst->label, conn, pBatch->dst); int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); if (fd == -1) { tError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, @@ -1079,7 +1083,7 @@ static void cliSendBatchCb(uv_write_t* req, int status) { static void cliHandleFastFail(SCliConn* pConn, int status) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; - tError("conn %p free twice", pConn); + tError("conn %p free twice, reason:%s", pConn, uv_err_name(status)); if (pConn->pBatch == NULL) { SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); @@ -1443,7 +1447,7 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { QUEUE_REMOVE(h); SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); - if (REQUEST_NO_RESP(&pMsg->msg)) { + if (pMsg->type == Normal && REQUEST_NO_RESP(&pMsg->msg)) { STransConnCtx* pCtx = pMsg->ctx; char* ip = 
EPSET_GET_INUSE_IP(&pCtx->epSet); @@ -1469,7 +1473,7 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { (*ppBatch)->wLen += 1; (*ppBatch)->batchSize += pMsg->msg.contLen; } - return; + continue; } (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); count++; @@ -1751,6 +1755,13 @@ static void destroyThrdObj(SCliThrd* pThrd) { taosHashCleanup(pThrd->fqdn2ipCache); taosHashCleanup(pThrd->failFastCache); taosHashCleanup(pThrd->connLimitCache); + + void** pIter = taosHashIterate(pThrd->batchCache, NULL); + while (pIter != NULL) { + SCliBatch* batch = (SCliBatch*)(*pIter); + cliDestroyBatch(batch); + pIter = (void**)taosHashIterate(pThrd->batchCache, pIter); + } taosHashCleanup(pThrd->batchCache); taosMemoryFree(pThrd); } From c20fb3878da9ecc09e46b58382f01897fe4eb131 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 19:07:49 +0800 Subject: [PATCH 217/267] fix crash --- source/libs/transport/src/transCli.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 47845b6336..9324e7b96f 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1014,9 +1014,11 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { if (conn == NULL) { conn = cliCreateConn(pThrd); conn->pBatch = pNewBatch; - conn->ip = strdup(conn->pBatch->ip); + conn->ip = strdup(pNewBatch->dst); - uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, conn->ip); + char* ip = pNewBatch->ip; + uint16_t port = pNewBatch->port; + uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, ip); if (ipaddr == 0xffffffff) { uv_timer_stop(conn->timer); conn->timer->data = NULL; @@ -1029,7 +1031,7 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { struct sockaddr_in addr; addr.sin_family = AF_INET; addr.sin_addr.s_addr = ipaddr; - addr.sin_port = (uint16_t)htons(pBatch->port); + addr.sin_port = (uint16_t)htons(port); tTrace("%s conn %p try to connect to %s", pTransInst->label, conn, pBatch->dst); int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); From 23e1cb8fd5fc939a8564382a751a699dc1140b92 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 19:17:39 +0800 Subject: [PATCH 218/267] fix mem leak --- source/libs/transport/src/transCli.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 9324e7b96f..316b312095 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1072,6 +1072,8 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { } static void cliSendBatchCb(uv_write_t* req, int status) { SCliConn* conn = req->data; + taosMemoryFree(req); + SCliThrd* thrd = conn->hostThrd; cliDestroyBatch(conn->pBatch); conn->pBatch = NULL; From 666cfd870103b88fe36e64271db66c3768df3700 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 19:29:06 +0800 Subject: [PATCH 219/267] add debug info --- source/libs/transport/src/transCli.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 316b312095..991d4e85c5 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -879,6 +879,8 @@ void cliSendBatch(SCliConn* pConn) { uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t)); req->data = pConn; + tDebug("%p conn %p start to send batch msg, batch size:%d, msgLen:%d", 
CONN_GET_INST_LABEL(pConn), pConn, + pBatch->wLen, pBatch->batchSize); uv_write(req, (uv_stream_t*)pConn->stream, wb, wLen, cliSendBatchCb); taosMemoryFree(wb); } @@ -1074,6 +1076,9 @@ static void cliSendBatchCb(uv_write_t* req, int status) { SCliConn* conn = req->data; taosMemoryFree(req); + tDebug("%p conn %p send batch msg out, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, conn->pBatch->wLen, + conn->pBatch->batchSize); + SCliThrd* thrd = conn->hostThrd; cliDestroyBatch(conn->pBatch); conn->pBatch = NULL; From 1f643f301df09590a3e941c681b3376a804dd9ed Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 19:31:40 +0800 Subject: [PATCH 220/267] change default value --- source/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index ce4f2aa334..7853458a63 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -388,7 +388,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, 0) != 0) return -1; tsNumOfRpcThreads = tsNumOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, 4); if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1; tsNumOfCommitThreads = tsNumOfCores / 2; From 60e148d52460c93b04127788601209cdd283de2d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 19:31:46 +0800 Subject: [PATCH 221/267] change default value --- source/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 7853458a63..37e356da01 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -496,7 +496,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem = cfgGetItem(tsCfg, "numOfRpcThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfRpcThreads = numOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, 4); pItem->i32 = tsNumOfRpcThreads; pItem->stype = stype; } From 2220b7e9436cf40f51b8ba9aa925527947a104ce Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 20:26:41 +0800 Subject: [PATCH 222/267] update benchmark case --- source/libs/transport/test/cliBench.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/source/libs/transport/test/cliBench.c b/source/libs/transport/test/cliBench.c index 01e88b9988..8db1001995 100644 --- a/source/libs/transport/test/cliBench.c +++ b/source/libs/transport/test/cliBench.c @@ -32,22 +32,21 @@ typedef struct { void *pRpc; } SInfo; - void initLogEnv() { - const char *logDir = "/tmp/trans_cli"; - const char* defaultLogFileNamePrefix = "taoslog"; + const char *logDir = "/tmp/trans_cli"; + const char *defaultLogFileNamePrefix = "taoslog"; const int32_t maxLogFileNum = 10000; tsAsyncLog = 0; - //idxDebugFlag = 143; + // idxDebugFlag = 143; strcpy(tsLogDir, (char *)logDir); taosRemoveDir(tsLogDir); - taosMkDir(tsLogDir); - + taosMkDir(tsLogDir); + if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { - printf("failed to open log file in directory:%s\n", tsLogDir); + printf("failed to open log file in directory:%s\n", tsLogDir); } } - + static void processResponse(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { SInfo *pInfo = (SInfo *)pMsg->info.ahandle; 
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, @@ -72,11 +71,12 @@ static void *sendRequest(void *param) { rpcMsg.pCont = rpcMallocCont(pInfo->msgSize); rpcMsg.contLen = pInfo->msgSize; rpcMsg.info.ahandle = pInfo; + rpcMsg.info.noResp = 1; rpcMsg.msgType = 1; tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num); rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL); if (pInfo->num % 20000 == 0) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); - tsem_wait(&pInfo->rspSem); + // tsem_wait(&pInfo->rspSem); } tDebug("thread:%d, it is over", pInfo->index); @@ -112,7 +112,11 @@ int main(int argc, char *argv[]) { rpcInit.sessions = 100; rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.user = "michael"; + rpcInit.connType = TAOS_CONN_CLIENT; + rpcInit.connLimitNum = 300; + rpcInit.connLimitLock = 1; + rpcInit.supportBatch = 0; rpcDebugFlag = 135; for (int i = 1; i < argc; ++i) { @@ -148,7 +152,6 @@ int main(int argc, char *argv[]) { exit(0); } } - initLogEnv(); From f975d8e0d921339f83a9bc74c5db11716811c357 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 16 Feb 2023 21:58:32 +0800 Subject: [PATCH 223/267] change default value --- include/util/tdef.h | 6 +++--- source/common/src/tglobal.c | 4 ++-- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 +- source/libs/transport/test/cliBench.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/util/tdef.h b/include/util/tdef.h index e03352d98b..aeb8d08936 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -281,8 +281,8 @@ typedef enum ELogicConditionType { #define TSDB_DNODE_ROLE_MGMT 1 #define TSDB_DNODE_ROLE_VNODE 2 -#define TSDB_MAX_REPLICA 5 -#define TSDB_SYNC_LOG_BUFFER_SIZE 4096 +#define TSDB_MAX_REPLICA 5 +#define TSDB_SYNC_LOG_BUFFER_SIZE 4096 #define TSDB_SYNC_LOG_BUFFER_RETENTION (TSDB_SYNC_LOG_BUFFER_SIZE >> 4) #define TSDB_TBNAME_COLUMN_INDEX (-1) @@ -413,7 +413,7 @@ typedef enum ELogicConditionType { #ifdef WINDOWS #define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections. 
#else -#define TSDB_MAX_RPC_THREADS 20 +#define TSDB_MAX_RPC_THREADS 10 #endif #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 37e356da01..e0b89aa32b 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -388,7 +388,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, 0) != 0) return -1; tsNumOfRpcThreads = tsNumOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, 4); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS); if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1; tsNumOfCommitThreads = tsNumOfCores / 2; @@ -496,7 +496,7 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem = cfgGetItem(tsCfg, "numOfRpcThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfRpcThreads = numOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, 4); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS); pItem->i32 = tsNumOfRpcThreads; pItem->stype = stype; } diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 8751b575f3..1ddab769d8 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -286,7 +286,7 @@ int32_t dmInitClient(SDnode *pDnode) { int32_t connLimitNum = 30000 / (tsNumOfRpcThreads * 3); connLimitNum = TMAX(connLimitNum, 500); - connLimitNum = TMIN(connLimitNum, 3000); + connLimitNum = TMIN(connLimitNum, 1000); rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; diff --git a/source/libs/transport/test/cliBench.c b/source/libs/transport/test/cliBench.c index 8db1001995..5901a71929 100644 --- a/source/libs/transport/test/cliBench.c +++ b/source/libs/transport/test/cliBench.c @@ -116,7 +116,7 @@ int main(int argc, char *argv[]) { rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.connLimitNum = 300; rpcInit.connLimitLock = 1; - rpcInit.supportBatch = 0; + rpcInit.supportBatch = 1; rpcDebugFlag = 135; for (int i = 1; i < argc; ++i) { From 27e6a0c344c608fbe4473cd2d75c0c61e32d80eb Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 08:45:10 +0800 Subject: [PATCH 224/267] change default value --- source/libs/transport/src/transCli.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 991d4e85c5..9cd9b4f7b5 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1697,7 +1697,11 @@ static SCliThrd* createThrdObj(void* trans) { taosMemoryFree(pThrd); return NULL; } - pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 8, pThrd, cliAsyncCb); + if (pTransInst->supportBatch) { + pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 4, pThrd, cliAsyncCb); + } else { + pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 8, pThrd, cliAsyncCb); + } if (pThrd->asyncPool == NULL) { tError("failed to init async pool"); uv_loop_close(pThrd->loop); From dea3c0285be630b129c620f394b0a7d152fccea5 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 17 Feb 2023 09:16:44 +0800 Subject: [PATCH 225/267] fix: keep processing on tqNextBlock failure --- source/libs/executor/src/scanoperator.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 
94c5b2e0b7..c06fc40b9b 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1630,13 +1630,10 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { SFetchRet ret = {0}; if (tqNextBlock(pInfo->tqReader, &ret) < 0) { qError("failed to get next log block since %s", terrstr()); - return NULL; } if (ret.fetchType == FETCH_TYPE__DATA) { blockDataCleanup(pInfo->pRes); - if (setBlockIntoRes(pInfo, &ret.data, true) < 0) { - return NULL; - } + setBlockIntoRes(pInfo, &ret.data, true); if (pInfo->pRes->info.rows > 0) { pOperator->status = OP_EXEC_RECV; qDebug("queue scan log return %d rows", pInfo->pRes->info.rows); @@ -1644,7 +1641,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { } } else if (ret.fetchType == FETCH_TYPE__META) { qError("unexpected ret.fetchType:%d", ret.fetchType); - return NULL; + continue; // pTaskInfo->streamInfo.lastStatus = ret.offset; // pTaskInfo->streamInfo.metaBlk = ret.meta; // return NULL; From 76e5213a233dfe68e527d9300ed43a9dd302f8c9 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 12:01:12 +0800 Subject: [PATCH 226/267] opt queue --- source/libs/transport/inc/transComm.h | 4 ++-- source/libs/transport/src/transCli.c | 16 ++-------------- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 5f964f6b1a..a41cc0068c 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -94,8 +94,8 @@ typedef void* queue[2]; /* Return the structure holding the given element. */ #define QUEUE_DATA(e, type, field) ((type*)((void*)((char*)(e)-offsetof(type, field)))) -//#define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit -//#define TRANS_RETRY_INTERVAL 15 // retry interval (ms) +// #define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit +// #define TRANS_RETRY_INTERVAL 15 // retry interval (ms) #define TRANS_CONN_TIMEOUT 3000 // connect timeout (ms) #define TRANS_READ_TIMEOUT 3000 // read timeout (ms) #define TRANS_PACKET_LIMIT 1024 * 1024 * 512 diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 9cd9b4f7b5..fbdc17040f 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -864,15 +864,7 @@ void cliSendBatch(SCliConn* pConn) { pHead->magicNum = htonl(TRANS_MAGIC_NUM); } pHead->timestamp = taosHton64(taosGetTimestampUs()); - - if (pHead->comp == 0) { - if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { - msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead); - pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); - } - } else { - msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); - } + msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); wb[i++] = uv_buf_init((char*)pHead, msgLen); } @@ -972,11 +964,7 @@ static SCliBatch* cliDumpBatch(SCliBatch* pBatch) { SCliBatch* pNewBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); QUEUE_INIT(&pNewBatch->wq); - while (!QUEUE_IS_EMPTY(&pBatch->wq)) { - queue* h = QUEUE_HEAD(&pBatch->wq); - QUEUE_REMOVE(h); - QUEUE_PUSH(&pNewBatch->wq, h); - } + QUEUE_MOVE(&pBatch->wq, &pNewBatch->wq); pNewBatch->batchSize = pBatch->batchSize; pNewBatch->batch = pBatch->batch; From a75e170151fc914938bbc262bdb011109e33be44 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 12:07:25 +0800 Subject: [PATCH 227/267] fix err code --- source/libs/transport/src/transCli.c | 6 +++++- 1 file changed, 5 
insertions(+), 1 deletion(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index fbdc17040f..8309a41abd 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1080,7 +1080,9 @@ static void cliSendBatchCb(uv_write_t* req, int status) { static void cliHandleFastFail(SCliConn* pConn, int status) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; - tError("conn %p free twice, reason:%s", pConn, uv_err_name(status)); + + if (status == -1) status = ENETUNREACH; + if (pConn->pBatch == NULL) { SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); @@ -1106,6 +1108,8 @@ static void cliHandleFastFail(SCliConn* pConn, int status) { } } } else { + tError("%s batch msg failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + pConn, pConn->ip, uv_strerror(status)); cliDestroyBatch(pConn->pBatch); pConn->pBatch = NULL; } From 3de71e39b421ff126a47be7cffe685a490de4a20 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 13:27:36 +0800 Subject: [PATCH 228/267] opt code --- source/libs/transport/src/transCli.c | 34 ++++++++++++++++++---------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 8309a41abd..473467d753 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -164,6 +164,8 @@ static void cliSend(SCliConn* pConn); static void cliSendBatch(SCliConn* pConn); static void cliDestroyConnMsgs(SCliConn* conn, bool destroy); +static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, char* ip, uint16_t port); + // cli util func static FORCE_INLINE bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx); static FORCE_INLINE void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr); @@ -1000,7 +1002,14 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { SCliBatch* pNewBatch = cliDumpBatch(pBatch); - SCliConn* conn = getConnFromPool(pThrd->pool, pBatch->ip, pBatch->port); + SCliConn* conn = getConnFromPool(pThrd->pool, pNewBatch->ip, pNewBatch->port); + + if (conn == NULL && 0 != cliPreCheckSessionLimit(pThrd, pNewBatch->ip, pNewBatch->port)) { + tError("%s failed to send batch msg, batch size:%d, msgLen: %d", pTransInst->label, pNewBatch->wLen, + pNewBatch->batchSize); + cliDestroyBatch(pNewBatch); + return; + } if (conn == NULL) { conn = cliCreateConn(pThrd); conn->pBatch = pNewBatch; @@ -1064,16 +1073,17 @@ static void cliSendBatchCb(uv_write_t* req, int status) { SCliConn* conn = req->data; taosMemoryFree(req); - tDebug("%p conn %p send batch msg out, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, conn->pBatch->wLen, - conn->pBatch->batchSize); - SCliThrd* thrd = conn->hostThrd; cliDestroyBatch(conn->pBatch); conn->pBatch = NULL; if (status != 0) { + tDebug("%p conn %p failed to send batch msg, batch size:%d, msgLen:%d, reason:%s", CONN_GET_INST_LABEL(conn), conn, + conn->pBatch->wLen, conn->pBatch->batchSize, uv_err_name(status)); cliHandleExcept(conn); } else { + tDebug("%p conn %p succ to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, + conn->pBatch->wLen, conn->pBatch->batchSize); addConnToPool(thrd->pool, conn); } } @@ -1282,12 +1292,12 @@ static FORCE_INLINE void cliUpdateFqdnCache(SHashObj* cache, char* fqdn) { return; } -static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, SCliMsg* pMsg) { +static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, char* 
ip, uint16_t port) { STrans* pTransInst = pThrd->pTransInst; - STransConnCtx* pCtx = pMsg->ctx; - char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); - int32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + // STransConnCtx* pCtx = pMsg->ctx; + // char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + // int32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); char key[TSDB_FQDN_LEN + 64] = {0}; CONN_CONSTRUCT_HASH_KEY(key, ip, port); @@ -1306,6 +1316,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr); STraceId* trace = &pMsg->msg.info.traceId; + char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + uint16_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); if (!EPSET_IS_VALID(&pCtx->epSet)) { tGError("%s, msg %s sent with invalid epset", pTransInst->label, TMSG_INFO(pMsg->msg.msgType)); @@ -1314,9 +1326,7 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { } if (REQUEST_NO_RESP(&pMsg->msg) && (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { - char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); - uint32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); - char key[TSDB_FQDN_LEN + 64] = {0}; + char key[TSDB_FQDN_LEN + 64] = {0}; CONN_CONSTRUCT_HASH_KEY(key, ip, port); SFailFastItem* item = taosHashGet(pThrd->failFastCache, key, strlen(key)); @@ -1344,7 +1354,7 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { return; } - if (conn == NULL && REQUEST_NO_RESP(&pMsg->msg) && 0 != cliPreCheckSessionLimit(pThrd, pMsg)) { + if (conn == NULL && REQUEST_NO_RESP(&pMsg->msg) && 0 != cliPreCheckSessionLimit(pThrd, ip, port)) { tGTrace("%s, msg %s cancel to send, reason: %s", pTransInst->label, TMSG_INFO(pMsg->msg.msgType), tstrerror(TSDB_CODE_RPC_MAX_SESSIONS)); destroyCmsg(pMsg); From d22e97d256debf168b6e38542a546c2cbdab71d5 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 13:56:42 +0800 Subject: [PATCH 229/267] fix invalid code --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 6 +++--- source/libs/transport/src/transCli.c | 16 +++++++++------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 1ddab769d8..f35352268f 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -284,9 +284,9 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; - int32_t connLimitNum = 30000 / (tsNumOfRpcThreads * 3); - connLimitNum = TMAX(connLimitNum, 500); - connLimitNum = TMIN(connLimitNum, 1000); + int32_t connLimitNum = 10000 / (tsNumOfRpcThreads * 3); + connLimitNum = TMAX(connLimitNum, 100); + connLimitNum = TMIN(connLimitNum, 600); rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 473467d753..e4fa91170d 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1070,22 +1070,24 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { cliSendBatch(conn); } static void cliSendBatchCb(uv_write_t* req, int status) { - SCliConn* conn = req->data; - taosMemoryFree(req); + SCliConn* conn = req->data; + SCliThrd* thrd = conn->hostThrd; + SCliBatch* p = conn->pBatch; - SCliThrd* thrd = conn->hostThrd; - cliDestroyBatch(conn->pBatch); conn->pBatch = NULL; if (status != 0) { tDebug("%p conn %p failed to send batch msg, batch size:%d, msgLen:%d, reason:%s", 
CONN_GET_INST_LABEL(conn), conn, - conn->pBatch->wLen, conn->pBatch->batchSize, uv_err_name(status)); + p->wLen, p->batchSize, uv_err_name(status)); cliHandleExcept(conn); } else { - tDebug("%p conn %p succ to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, - conn->pBatch->wLen, conn->pBatch->batchSize); + tDebug("%p conn %p succ to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, p->wLen, + p->batchSize); addConnToPool(thrd->pool, conn); } + + cliDestroyBatch(p); + taosMemoryFree(req); } static void cliHandleFastFail(SCliConn* pConn, int status) { SCliThrd* pThrd = pConn->hostThrd; From bdfb342ec893898c6d9d9896e0fab65fac29c0eb Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Fri, 17 Feb 2023 14:44:00 +0800 Subject: [PATCH 230/267] fix: free thread save memory before set null --- tools/shell/src/shellAuto.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 3589558d64..480421b5c0 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -788,6 +788,9 @@ int writeVarNames(int type, TAOS_RES* tres) { void setThreadNull(int type) { taosThreadMutexLock(&tiresMutex); + if(threads[type]) { + taosMemoryFree(threads[type]); + } threads[type] = NULL; taosThreadMutexUnlock(&tiresMutex); } From e2bc0383f2b011fcb81ce97229ffa3f2757b1ae6 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 17 Feb 2023 16:30:14 +0800 Subject: [PATCH 231/267] fix: refact client statistics collection --- source/client/inc/clientInt.h | 16 +++++++--------- source/client/src/clientEnv.c | 34 ++++++++++++++++----------------- source/client/src/clientImpl.c | 35 +++++++++++++++------------------- source/client/src/clientMain.c | 25 ++++++------------------ 4 files changed, 45 insertions(+), 65 deletions(-) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 903a6a22ca..fb011ce3e4 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -97,16 +97,14 @@ typedef struct { typedef struct SQueryExecMetric { int64_t start; // start timestamp, us - int64_t syntaxStart; // start to parse, us - int64_t syntaxEnd; // end to parse, us int64_t ctgStart; // start to parse, us - int64_t ctgEnd; // end to parse, us - int64_t semanticEnd; - int64_t planEnd; - int64_t resultReady; - int64_t execEnd; - int64_t send; // start to send to server, us - int64_t rsp; // receive response from server, us + int64_t execStart; // start to parse, us + + int64_t parseCostUs; + int64_t ctgCostUs; + int64_t analyseCostUs; + int64_t planCostUs; + int64_t execCostUs; } SQueryExecMetric; struct SAppInstInfo { diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 593a8fd20a..06ef7b7c9c 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -79,22 +79,21 @@ static void deregisterRequest(SRequestObj *pRequest) { "current:%d, app current:%d", pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst); - if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) { - // tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 - // "us, exec:%" PRId64 "us", - // duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, - // pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - - // pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd); - atomic_add_fetch_64((int64_t 
*)&pActivity->insertElapsedTime, duration); - } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { - // tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 - // "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64, - // duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, - // pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - - // pRequest->metric.ctgEnd, pRequest->metric.planEnd - pRequest->metric.semanticEnd, - // pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId); + if (pRequest->pQuery && pRequest->pQuery->pRoot) { + if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->pQuery->pRoot->type && (0 == ((SVnodeModifOpStmt*)pRequest->pQuery->pRoot)->sqlNodeType)) { + tscDebug("insert duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64 + "us, planCost:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs, + pRequest->metric.planCostUs, pRequest->metric.execCostUs); + atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); + } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { + tscDebug("query duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64 + "us, planCost:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs, + pRequest->metric.planCostUs, pRequest->metric.execCostUs); - atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); + atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); + } } if (duration >= SLOW_QUERY_INTERVAL) { @@ -362,8 +361,6 @@ void doDestroyRequest(void *p) { taosArrayDestroy(pRequest->tableList); taosArrayDestroy(pRequest->dbList); taosArrayDestroy(pRequest->targetTableList); - qDestroyQuery(pRequest->pQuery); - nodesDestroyAllocator(pRequest->allocatorRefId); destroyQueryExecRes(&pRequest->body.resInfo.execRes); @@ -378,6 +375,9 @@ void doDestroyRequest(void *p) { taosMemoryFree(pRequest->body.param); } + qDestroyQuery(pRequest->pQuery); + nodesDestroyAllocator(pRequest->allocatorRefId); + taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFree(pRequest); tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 67fa1e7781..079edcd667 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -946,7 +946,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { removeMeta(pTscObj, pRequest->targetTableList); } - pRequest->metric.execEnd = taosGetTimestampUs(); + pRequest->metric.execCostUs = taosGetTimestampUs() - pRequest->metric.execStart; int32_t code1 = handleQueryExecRsp(pRequest); if (pRequest->code == TSDB_CODE_SUCCESS && pRequest->code != code1) { pRequest->code = code1; @@ -1072,11 +1072,10 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat pRequest->body.subplanNum = pDag->numOfSubplans; } - pRequest->metric.planEnd = taosGetTimestampUs(); - if (code == TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " create query plan success, elapsed time:%.2f ms, 0x%" PRIx64, pRequest->self, - (pRequest->metric.planEnd - st) / 1000.0, pRequest->requestId); - } + pRequest->metric.execStart = taosGetTimestampUs(); + + pRequest->metric.planCostUs = pRequest->metric.execStart 
- st; + if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) { SArray* pNodeList = NULL; if (QUERY_NODE_VNODE_MODIF_STMT != nodeType(pQuery->pRoot)) { @@ -1124,6 +1123,16 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM destorySqlCallbackWrapper(pWrapper); } + if (pQuery->pRoot && !pRequest->inRetry) { + STscObj* pTscObj = pRequest->pTscObj; + SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary; + if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type && (0 == ((SVnodeModifOpStmt*)pQuery->pRoot)->sqlNodeType)) { + atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1); + } else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) { + atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1); + } + } + switch (pQuery->execMode) { case QUERY_EXEC_MODE_LOCAL: asyncExecLocalCmd(pRequest, pQuery); @@ -1393,21 +1402,7 @@ int32_t doProcessMsgFromServer(void* param) { SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); if (pRequest) { assert(pRequest->self == pSendInfo->requestObjRefId); - - pRequest->metric.rsp = taosGetTimestampUs(); pTscObj = pRequest->pTscObj; - /* - * There is not response callback function for submit response. - * The actual inserted number of points is the first number. - */ - int32_t elapsed = pRequest->metric.rsp - pRequest->metric.start; - if (pMsg->code == TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " rsp msg:%s, code:%s rspLen:%d, elapsed:%d ms, reqId:0x%" PRIx64, pRequest->self, - TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), pMsg->contLen, elapsed / 1000, pRequest->requestId); - } else { - tscError("0x%" PRIx64 " rsp msg:%s, code:%s rspLen:%d, elapsed time:%d ms, reqId:0x%" PRIx64, pRequest->self, - TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), pMsg->contLen, elapsed / 1000, pRequest->requestId); - } } } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 2042ff141c..53efff023d 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -707,7 +707,8 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t SRequestObj *pRequest = pWrapper->pRequest; SQuery *pQuery = pRequest->pQuery; - pRequest->metric.ctgEnd = taosGetTimestampUs(); + int64_t analyseStart = taosGetTimestampUs(); + pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart; qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId); if (code == TSDB_CODE_SUCCESS) { @@ -718,7 +719,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t } } - pRequest->metric.semanticEnd = taosGetTimestampUs(); + pRequest->metric.analyseCostUs = taosGetTimestampUs() - analyseStart; if (code == TSDB_CODE_SUCCESS) { if (pQuery->haveResultSet) { @@ -730,10 +731,6 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t TSWAP(pRequest->tableList, (pQuery)->pTableList); TSWAP(pRequest->targetTableList, (pQuery)->pTargetTableList); - double el = (pRequest->metric.semanticEnd - pRequest->metric.ctgEnd) / 1000.0; - tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, elapsed time:%.2f ms, reqId:0x%" PRIx64, - pRequest->self, el, pRequest->requestId); - launchAsyncQuery(pRequest, pQuery, pResultMeta, pWrapper); } else { destorySqlCallbackWrapper(pWrapper); @@ -798,7 +795,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c SRequestObj *pRequest = pWrapper->pRequest; SQuery 
*pQuery = pRequest->pQuery; - pRequest->metric.ctgEnd = taosGetTimestampUs(); + pRequest->metric.ctgCostUs += taosGetTimestampUs() - pRequest->metric.ctgStart; qDebug("0x%" PRIx64 " start to continue parse, reqId:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId, tstrerror(code)); @@ -910,7 +907,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { } if (TSDB_CODE_SUCCESS == code) { - pRequest->metric.syntaxStart = taosGetTimestampUs(); + int64_t syntaxStart = taosGetTimestampUs(); pWrapper->pCatalogReq = taosMemoryCalloc(1, sizeof(SCatalogReq)); if (pWrapper->pCatalogReq == NULL) { @@ -921,16 +918,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { code = qParseSqlSyntax(pWrapper->pParseCtx, &pRequest->pQuery, pWrapper->pCatalogReq); } - pRequest->metric.syntaxEnd = taosGetTimestampUs(); - } - - if (TSDB_CODE_SUCCESS == code && !updateMetaForce) { - SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; - if (QUERY_NODE_INSERT_STMT == nodeType(pRequest->pQuery->pRoot)) { - atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1); - } else if (QUERY_NODE_SELECT_STMT == nodeType(pRequest->pQuery->pRoot)) { - atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1); - } + pRequest->metric.parseCostUs += taosGetTimestampUs() - syntaxStart; } if (TSDB_CODE_SUCCESS == code) { @@ -961,7 +949,6 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { SRequestObj *pRequest = (SRequestObj *)param; SReqResultInfo *pResultInfo = &pRequest->body.resInfo; - pRequest->metric.resultReady = taosGetTimestampUs(); tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code), pRequest->requestId); From 3bd9af0e6048f6701c8dac82339b3939c576a3d0 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 17 Feb 2023 17:54:28 +0800 Subject: [PATCH 232/267] fix: taosbenchmark data generating refactor for main (#20029) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 5f9a44084c..94ed46e5e2 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 7c641c5 + GIT_TAG 634399d SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 085936e61cdd7cfe46ef69bcf9b109f5f2f4dd1c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 17 Feb 2023 18:54:08 +0800 Subject: [PATCH 233/267] fix: taos shell prompt if not ws error (#20032) --- tools/shell/src/shellWebsocket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index b93cc02964..e848c07b66 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -218,8 +218,8 @@ void shellRunSingleCommandWebsocketImp(char *command) { res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout); int code = ws_errno(res); if (code != 0 && !shell.stop_query) { - // websocket interface masked off first bit from standard error number. 
- if (TSDB_CODE_PAR_SYNTAX_ERROR == (code|0x80000000)) { + // if it's not a ws connection error + if (TSDB_CODE_WS_DSN_ERROR != (code&TSDB_CODE_WS_DSN_ERROR)) { et = taosGetTimestampUs(); fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6); ws_free_result(res); From d51c3c36fb505b95f262875eeec71c304c2abe93 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 17 Feb 2023 14:58:42 +0800 Subject: [PATCH 234/267] fix: not allow to read if sync is restoring --- source/dnode/vnode/src/vnd/vnodeSvr.c | 2 -- source/libs/sync/src/syncMain.c | 49 ++------------------------- 2 files changed, 3 insertions(+), 48 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 67990bc8c9..500e174421 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -351,7 +351,6 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { vTrace("message in vnode query queue is processing"); - // if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsLeader(pVnode)) { if ((pMsg->msgType == TDMT_SCH_QUERY) && !syncIsReadyForRead(pVnode->sync)) { vnodeRedirectRpcMsg(pVnode, pMsg, terrno); return 0; @@ -375,7 +374,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) && !syncIsReadyForRead(pVnode->sync)) { - // !vnodeIsLeader(pVnode)) { vnodeRedirectRpcMsg(pVnode, pMsg, terrno); return 0; } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 7f7a3f113b..3f0432d998 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -436,55 +436,12 @@ bool syncNodeIsReadyForRead(SSyncNode* pSyncNode) { return false; } - if (pSyncNode->restoreFinish) { - return true; - } - - bool ready = false; - if (!pSyncNode->pFsm->FpApplyQueueEmptyCb(pSyncNode->pFsm)) { - // apply queue not empty - ready = false; - - } else { - if (!pSyncNode->pLogStore->syncLogIsEmpty(pSyncNode->pLogStore)) { - SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); - SSyncRaftEntry* pEntry = NULL; - SLRUCache* pCache = pSyncNode->pLogStore->pCache; - LRUHandle* h = taosLRUCacheLookup(pCache, &lastIndex, sizeof(lastIndex)); - int32_t code = 0; - if (h) { - pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h); - code = 0; - - pSyncNode->pLogStore->cacheHit++; - sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", lastIndex, pEntry->bytes, pEntry); - - } else { - pSyncNode->pLogStore->cacheMiss++; - sNTrace(pSyncNode, "miss cache index:%" PRId64, lastIndex); - - code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, lastIndex, &pEntry); - } - - if (code == 0 && pEntry != NULL) { - if (pEntry->originalRpcType == TDMT_SYNC_NOOP && pEntry->term == raftStoreGetTerm(pSyncNode)) { - ready = true; - } - - if (h) { - taosLRUCacheRelease(pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - } - } - } - - if (!ready) { + if (!pSyncNode->restoreFinish) { terrno = TSDB_CODE_SYN_RESTORING; + return false; } - return ready; + return true; } bool syncIsReadyForRead(int64_t rid) { From 597d7d3d9b4f8e2676e2ba7adafbbee4333854a3 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 20:47:38 +0800 Subject: [PATCH 235/267] opt transport --- source/libs/transport/src/transCli.c | 194 
++++++++++++++++++--------- 1 file changed, 133 insertions(+), 61 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index e4fa91170d..3bd764ff8d 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -21,14 +21,28 @@ typedef struct SConnList { } SConnList; typedef struct { - queue wq; - int32_t wLen; - int32_t batchSize; // - int32_t batch; + queue wq; + int32_t len; + + int connMax; + int connCnt; + int batchLenLimit; + char* dst; char* ip; uint16_t port; + +} SCliBatchList; + +typedef struct { + queue wq; + queue listq; + int32_t wLen; + int32_t batchSize; // + int32_t batch; + SCliBatchList* pList; } SCliBatch; + typedef struct SCliConn { T_REF_DECLARE() uv_connect_t connReq; @@ -866,14 +880,21 @@ void cliSendBatch(SCliConn* pConn) { pHead->magicNum = htonl(TRANS_MAGIC_NUM); } pHead->timestamp = taosHton64(taosGetTimestampUs()); - msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); + if (pHead->comp == 0) { + if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { + msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead); + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + } + } else { + msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); + } wb[i++] = uv_buf_init((char*)pHead, msgLen); } uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t)); req->data = pConn; - tDebug("%p conn %p start to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(pConn), pConn, + tDebug("%s conn %p start to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(pConn), pConn, pBatch->wLen, pBatch->batchSize); uv_write(req, (uv_stream_t*)pConn->stream, wb, wLen, cliSendBatchCb); taosMemoryFree(wb); @@ -962,62 +983,37 @@ _RETURN: return; } -static SCliBatch* cliDumpBatch(SCliBatch* pBatch) { - SCliBatch* pNewBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); - - QUEUE_INIT(&pNewBatch->wq); - QUEUE_MOVE(&pBatch->wq, &pNewBatch->wq); - - pNewBatch->batchSize = pBatch->batchSize; - pNewBatch->batch = pBatch->batch; - pNewBatch->wLen = pBatch->wLen; - pNewBatch->dst = strdup(pBatch->dst); - pNewBatch->ip = strdup(pBatch->ip); - pNewBatch->port = pBatch->port; - - QUEUE_INIT(&pBatch->wq); - pBatch->batchSize = 0; - pBatch->batch = 0; - pBatch->wLen = 0; - - return pNewBatch; -} static void cliDestroyBatch(SCliBatch* pBatch) { while (!QUEUE_IS_EMPTY(&pBatch->wq)) { - queue* h = QUEUE_HEAD(&pBatch->wq); - SCliMsg* p = QUEUE_DATA(h, SCliMsg, q); + queue* h = QUEUE_HEAD(&pBatch->wq); + QUEUE_REMOVE(h); - QUEUE_REMOVE(&p->q); + SCliMsg* p = QUEUE_DATA(h, SCliMsg, q); destroyCmsg(p); } - taosMemoryFree(pBatch->ip); - taosMemoryFree(pBatch->dst); taosMemoryFree(pBatch); } static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { if (pBatch->wLen == 0 || QUEUE_IS_EMPTY(&pBatch->wq)) { return; } - STrans* pTransInst = pThrd->pTransInst; + STrans* pTransInst = pThrd->pTransInst; + SCliBatchList* pList = pBatch->pList; - SCliBatch* pNewBatch = cliDumpBatch(pBatch); + SCliConn* conn = getConnFromPool(pThrd->pool, pList->ip, pList->port); - SCliConn* conn = getConnFromPool(pThrd->pool, pNewBatch->ip, pNewBatch->port); - - if (conn == NULL && 0 != cliPreCheckSessionLimit(pThrd, pNewBatch->ip, pNewBatch->port)) { - tError("%s failed to send batch msg, batch size:%d, msgLen: %d", pTransInst->label, pNewBatch->wLen, - pNewBatch->batchSize); - cliDestroyBatch(pNewBatch); + if (conn == NULL && 0 != cliPreCheckSessionLimit(pThrd, pList->ip, pList->port)) { + 
tError("%s failed to send batch msg, batch size:%d, msgLen: %d", pTransInst->label, pBatch->wLen, + pBatch->batchSize); + cliDestroyBatch(pBatch); return; } if (conn == NULL) { conn = cliCreateConn(pThrd); - conn->pBatch = pNewBatch; - conn->ip = strdup(pNewBatch->dst); + conn->pBatch = pBatch; + conn->ip = strdup(pList->dst); - char* ip = pNewBatch->ip; - uint16_t port = pNewBatch->port; - uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, ip); + uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, pList->ip); if (ipaddr == 0xffffffff) { uv_timer_stop(conn->timer); conn->timer->data = NULL; @@ -1030,9 +1026,9 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { struct sockaddr_in addr; addr.sin_family = AF_INET; addr.sin_addr.s_addr = ipaddr; - addr.sin_port = (uint16_t)htons(port); + addr.sin_port = (uint16_t)htons(pList->port); - tTrace("%s conn %p try to connect to %s", pTransInst->label, conn, pBatch->dst); + tTrace("%s conn %p try to connect to %s", pTransInst->label, conn, pList->dst); int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); if (fd == -1) { tError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, @@ -1066,7 +1062,7 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { return; } - conn->pBatch = pNewBatch; + conn->pBatch = pBatch; cliSendBatch(conn); } static void cliSendBatchCb(uv_write_t* req, int status) { @@ -1074,15 +1070,33 @@ static void cliSendBatchCb(uv_write_t* req, int status) { SCliThrd* thrd = conn->hostThrd; SCliBatch* p = conn->pBatch; + SCliBatchList* pBatchList = p->pList; + + int32_t empty = QUEUE_IS_EMPTY(&pBatchList->wq); + pBatchList->connCnt -= 1; + conn->pBatch = NULL; if (status != 0) { - tDebug("%p conn %p failed to send batch msg, batch size:%d, msgLen:%d, reason:%s", CONN_GET_INST_LABEL(conn), conn, + tDebug("%s conn %p failed to send batch msg, batch size:%d, msgLen:%d, reason:%s", CONN_GET_INST_LABEL(conn), conn, p->wLen, p->batchSize, uv_err_name(status)); cliHandleExcept(conn); + } else { - tDebug("%p conn %p succ to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, p->wLen, + tDebug("%s conn %p succ to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, p->wLen, p->batchSize); + + if (empty == false) { + queue* h = QUEUE_HEAD(&pBatchList->wq); + QUEUE_REMOVE(h); + conn->pBatch = QUEUE_DATA(h, SCliBatch, listq); + + pBatchList->connCnt += 1; + pBatchList->len -= 1; + cliSendBatch(conn); + return; + } + addConnToPool(thrd->pool, conn); } @@ -1468,23 +1482,65 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { char key[TSDB_FQDN_LEN + 64] = {0}; CONN_CONSTRUCT_HASH_KEY(key, ip, port); - SCliBatch** ppBatch = taosHashGet(pThrd->batchCache, key, sizeof(key)); - if (ppBatch == NULL || *ppBatch == NULL) { + // SCliBatch** ppBatch = taosHashGet(pThrd->batchCache, key, sizeof(key)); + SCliBatchList** ppBatchList = taosHashGet(pThrd->batchCache, key, sizeof(key)); + if (ppBatchList == NULL || *ppBatchList == NULL) { + SCliBatchList* pBatchList = taosMemoryCalloc(1, sizeof(SCliBatchList)); + QUEUE_INIT(&pBatchList->wq); + pBatchList->connMax = 200; + pBatchList->connCnt = 0; + pBatchList->batchLenLimit = 16 * 1024; + pBatchList->ip = strdup(ip); + pBatchList->dst = strdup(key); + pBatchList->port = port; + SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); QUEUE_INIT(&pBatch->wq); + QUEUE_INIT(&pBatch->listq); + QUEUE_PUSH(&pBatch->wq, h); pBatch->wLen += 1; pBatch->batchSize += 
pMsg->msg.contLen; + pBatch->pList = pBatchList; - pBatch->dst = strdup(key); - pBatch->ip = strdup(ip); - pBatch->port = (uint16_t)port; + QUEUE_PUSH(&pBatchList->wq, &pBatch->listq); - taosHashPut(pThrd->batchCache, key, sizeof(key), &pBatch, sizeof(void*)); + taosHashPut(pThrd->batchCache, key, sizeof(key), &pBatchList, sizeof(void*)); } else { - QUEUE_PUSH(&(*ppBatch)->wq, h); - (*ppBatch)->wLen += 1; - (*ppBatch)->batchSize += pMsg->msg.contLen; + if (QUEUE_IS_EMPTY(&(*ppBatchList)->wq)) { + SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); + QUEUE_INIT(&pBatch->wq); + QUEUE_INIT(&pBatch->listq); + + QUEUE_PUSH(&pBatch->wq, h); + pBatch->wLen += 1; + pBatch->batchSize = pMsg->msg.contLen; + pBatch->pList = *ppBatchList; + + QUEUE_PUSH(&((*ppBatchList)->wq), &pBatch->listq); + (*ppBatchList)->len += 1; + + continue; + } + + queue* hdr = QUEUE_TAIL(&((*ppBatchList)->wq)); + SCliBatch* pBatch = QUEUE_DATA(hdr, SCliBatch, listq); + if ((pBatch->batchSize + pMsg->msg.contLen) < (*ppBatchList)->batchLenLimit) { + QUEUE_PUSH(&pBatch->wq, h); + pBatch->batchSize += pMsg->msg.contLen; + } else { + SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); + QUEUE_INIT(&pBatch->wq); + QUEUE_INIT(&pBatch->listq); + + QUEUE_PUSH(&pBatch->wq, h); + pBatch->wLen += 1; + pBatch->batchSize += pMsg->msg.contLen; + pBatch->pList = *ppBatchList; + + QUEUE_PUSH(&((*ppBatchList)->wq), &pBatch->listq); + (*ppBatchList)->len += 1; + } } continue; } @@ -1494,7 +1550,16 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { void** pIter = taosHashIterate(pThrd->batchCache, NULL); while (pIter != NULL) { - SCliBatch* batch = (SCliBatch*)(*pIter); + SCliBatchList* batchList = (SCliBatchList*)(*pIter); + if (QUEUE_IS_EMPTY(&batchList->wq) || batchList->connCnt >= batchList->connMax) { + continue; + } + queue* hr = QUEUE_HEAD(&batchList->wq); + QUEUE_REMOVE(hr); + + batchList->len -= 1; + + SCliBatch* batch = QUEUE_DATA(hr, SCliBatch, listq); cliHandleBatchReq(batch, pThrd); pIter = (void**)taosHashIterate(pThrd->batchCache, pIter); @@ -1775,8 +1840,15 @@ static void destroyThrdObj(SCliThrd* pThrd) { void** pIter = taosHashIterate(pThrd->batchCache, NULL); while (pIter != NULL) { - SCliBatch* batch = (SCliBatch*)(*pIter); - cliDestroyBatch(batch); + SCliBatchList* pBatchList = (SCliBatchList*)(*pIter); + while (!QUEUE_IS_EMPTY(&pBatchList->wq)) { + queue* h = QUEUE_HEAD(&pBatchList->wq); + QUEUE_REMOVE(h); + + SCliBatch* pBatch = QUEUE_DATA(h, SCliBatch, listq); + cliDestroyBatch(pBatch); + } + taosMemoryFree(pBatchList); pIter = (void**)taosHashIterate(pThrd->batchCache, pIter); } taosHashCleanup(pThrd->batchCache); From b894ba6f37bde7c803976fca9ee71bf7ce5397e0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 21:59:23 +0800 Subject: [PATCH 236/267] opt trans --- source/libs/transport/src/transCli.c | 63 +++++++++++++++------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 3bd764ff8d..f106e07e37 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -159,11 +159,9 @@ static void cliIdleCb(uv_idle_t* handle); static void cliPrepareCb(uv_prepare_t* handle); static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd); -// static void cliConnBatchCb(uv_connect_t* req, int status); static void cliSendBatchCb(uv_write_t* req, int status); -// static void cliConnBatchCb(uv_connect_t* req, int status); -// callback after conn to server -// 
static void cliConnBatchCb(uv_connect_t* req, int status); + +SCliBatch* cliGetHeadFromList(SCliBatchList* pList); static bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead); @@ -847,8 +845,11 @@ void cliSendBatch(SCliConn* pConn) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; - SCliBatch* pBatch = pConn->pBatch; - int32_t wLen = pBatch->wLen; + SCliBatch* pBatch = pConn->pBatch; + SCliBatchList* pList = pBatch->pList; + pList->connCnt += 1; + + int32_t wLen = pBatch->wLen; uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); int i = 0; @@ -994,7 +995,7 @@ static void cliDestroyBatch(SCliBatch* pBatch) { taosMemoryFree(pBatch); } static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { - if (pBatch->wLen == 0 || QUEUE_IS_EMPTY(&pBatch->wq)) { + if (pBatch == NULL || pBatch->wLen == 0 || QUEUE_IS_EMPTY(&pBatch->wq)) { return; } STrans* pTransInst = pThrd->pTransInst; @@ -1071,8 +1072,7 @@ static void cliSendBatchCb(uv_write_t* req, int status) { SCliBatch* p = conn->pBatch; SCliBatchList* pBatchList = p->pList; - - int32_t empty = QUEUE_IS_EMPTY(&pBatchList->wq); + SCliBatch* nxtBatch = cliGetHeadFromList(pBatchList); pBatchList->connCnt -= 1; conn->pBatch = NULL; @@ -1081,23 +1081,17 @@ static void cliSendBatchCb(uv_write_t* req, int status) { tDebug("%s conn %p failed to send batch msg, batch size:%d, msgLen:%d, reason:%s", CONN_GET_INST_LABEL(conn), conn, p->wLen, p->batchSize, uv_err_name(status)); cliHandleExcept(conn); - + cliHandleBatchReq(nxtBatch, thrd); } else { tDebug("%s conn %p succ to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, p->wLen, p->batchSize); - if (empty == false) { - queue* h = QUEUE_HEAD(&pBatchList->wq); - QUEUE_REMOVE(h); - conn->pBatch = QUEUE_DATA(h, SCliBatch, listq); - - pBatchList->connCnt += 1; - pBatchList->len -= 1; + if (nxtBatch != NULL) { + conn->pBatch = nxtBatch; cliSendBatch(conn); - return; + } else { + addConnToPool(thrd->pool, conn); } - - addConnToPool(thrd->pool, conn); } cliDestroyBatch(p); @@ -1466,6 +1460,18 @@ static void cliNoBatchDealReq(queue* wq, SCliThrd* pThrd) { tTrace("cli process batch size:%d", count); } } +SCliBatch* cliGetHeadFromList(SCliBatchList* pList) { + if (QUEUE_IS_EMPTY(&pList->wq) || pList->connCnt >= pList->connMax) { + return NULL; + } + queue* hr = QUEUE_HEAD(&pList->wq); + QUEUE_REMOVE(hr); + + pList->len -= 1; + + SCliBatch* batch = QUEUE_DATA(hr, SCliBatch, listq); + return batch; +} static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { int count = 0; @@ -1528,6 +1534,7 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { if ((pBatch->batchSize + pMsg->msg.contLen) < (*ppBatchList)->batchLenLimit) { QUEUE_PUSH(&pBatch->wq, h); pBatch->batchSize += pMsg->msg.contLen; + pBatch->wLen += 1; } else { SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); QUEUE_INIT(&pBatch->wq); @@ -1551,17 +1558,10 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { void** pIter = taosHashIterate(pThrd->batchCache, NULL); while (pIter != NULL) { SCliBatchList* batchList = (SCliBatchList*)(*pIter); - if (QUEUE_IS_EMPTY(&batchList->wq) || batchList->connCnt >= batchList->connMax) { - continue; + SCliBatch* batch = cliGetHeadFromList(batchList); + if (batch != NULL) { + cliHandleBatchReq(batch, pThrd); } - queue* hr = QUEUE_HEAD(&batchList->wq); - QUEUE_REMOVE(hr); - - batchList->len -= 1; - - SCliBatch* batch = QUEUE_DATA(hr, SCliBatch, listq); - - cliHandleBatchReq(batch, pThrd); pIter = 
(void**)taosHashIterate(pThrd->batchCache, pIter); } @@ -1848,7 +1848,10 @@ static void destroyThrdObj(SCliThrd* pThrd) { SCliBatch* pBatch = QUEUE_DATA(h, SCliBatch, listq); cliDestroyBatch(pBatch); } + taosMemoryFree(pBatchList->ip); + taosMemoryFree(pBatchList->dst); taosMemoryFree(pBatchList); + pIter = (void**)taosHashIterate(pThrd->batchCache, pIter); } taosHashCleanup(pThrd->batchCache); From f19fdaa142f70202854a9835ef1b02c9f1d50232 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 22:40:27 +0800 Subject: [PATCH 237/267] opt trans --- include/libs/transport/trpc.h | 5 +++-- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 3 ++- source/libs/transport/inc/transportInt.h | 1 + source/libs/transport/src/trans.c | 1 + source/libs/transport/src/transCli.c | 13 ++++++++++--- source/libs/transport/test/cliBench.c | 3 ++- 6 files changed, 19 insertions(+), 7 deletions(-) diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index acfd5dfb51..0cc0ab64ef 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -115,8 +115,9 @@ typedef struct SRpcInit { int32_t connLimitNum; int32_t connLimitLock; - int8_t supportBatch; // 0: no batch, 1. batch - void *parent; + int8_t supportBatch; // 0: no batch, 1. batch + int32_t batchSize; + void *parent; } SRpcInit; typedef struct { diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index f35352268f..4e9b7149e4 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -286,11 +286,12 @@ int32_t dmInitClient(SDnode *pDnode) { int32_t connLimitNum = 10000 / (tsNumOfRpcThreads * 3); connLimitNum = TMAX(connLimitNum, 100); - connLimitNum = TMIN(connLimitNum, 600); + connLimitNum = TMIN(connLimitNum, 500); rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; rpcInit.supportBatch = 1; + rpcInit.batchSize = 64 * 1024; pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 13adb4d2b4..1f3c98ad72 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -67,6 +67,7 @@ typedef struct { int32_t connLimitNum; int8_t connLimitLock; // 0: no lock. 1. lock int8_t supportBatch; // 0: no batch, 1: support batch + int32_t batchSize; int index; void* parent; diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 38ec1c7fdc..16ea25a41a 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -70,6 +70,7 @@ void* rpcOpen(const SRpcInit* pInit) { pRpc->connLimitNum = pInit->connLimitNum; pRpc->connLimitLock = pInit->connLimitLock; pRpc->supportBatch = pInit->supportBatch; + pRpc->batchSize = pInit->batchSize; pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? 
TSDB_MAX_RPC_THREADS : pInit->numOfThreads; if (pRpc->numOfThreads <= 0) { diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index f106e07e37..f0635d376c 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -27,6 +27,7 @@ typedef struct { int connMax; int connCnt; int batchLenLimit; + int sending; char* dst; char* ip; @@ -992,6 +993,8 @@ static void cliDestroyBatch(SCliBatch* pBatch) { SCliMsg* p = QUEUE_DATA(h, SCliMsg, q); destroyCmsg(p); } + SCliBatchList* p = pBatch->pList; + p->sending -= 1; taosMemoryFree(pBatch); } static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { @@ -1461,11 +1464,12 @@ static void cliNoBatchDealReq(queue* wq, SCliThrd* pThrd) { } } SCliBatch* cliGetHeadFromList(SCliBatchList* pList) { - if (QUEUE_IS_EMPTY(&pList->wq) || pList->connCnt >= pList->connMax) { + if (QUEUE_IS_EMPTY(&pList->wq) || pList->connCnt > pList->connMax || pList->sending > pList->connMax) { return NULL; } queue* hr = QUEUE_HEAD(&pList->wq); QUEUE_REMOVE(hr); + pList->sending += 1; pList->len -= 1; @@ -1474,6 +1478,8 @@ SCliBatch* cliGetHeadFromList(SCliBatchList* pList) { } static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { + STrans* pInst = pThrd->pTransInst; + int count = 0; while (!QUEUE_IS_EMPTY(wq)) { queue* h = QUEUE_HEAD(wq); @@ -1493,9 +1499,10 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { if (ppBatchList == NULL || *ppBatchList == NULL) { SCliBatchList* pBatchList = taosMemoryCalloc(1, sizeof(SCliBatchList)); QUEUE_INIT(&pBatchList->wq); - pBatchList->connMax = 200; + pBatchList->connMax = pInst->connLimitNum; pBatchList->connCnt = 0; - pBatchList->batchLenLimit = 16 * 1024; + pBatchList->batchLenLimit = pInst->batchSize; + pBatchList->ip = strdup(ip); pBatchList->dst = strdup(key); pBatchList->port = port; diff --git a/source/libs/transport/test/cliBench.c b/source/libs/transport/test/cliBench.c index 5901a71929..aaee162cd7 100644 --- a/source/libs/transport/test/cliBench.c +++ b/source/libs/transport/test/cliBench.c @@ -114,8 +114,9 @@ int main(int argc, char *argv[]) { rpcInit.user = "michael"; rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.connLimitNum = 300; + rpcInit.connLimitNum = 10; rpcInit.connLimitLock = 1; + rpcInit.batchSize = 16 * 1024; rpcInit.supportBatch = 1; rpcDebugFlag = 135; From 0a22b24ed31ac1d5688397fd11e057d75e0934fd Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 23:03:09 +0800 Subject: [PATCH 238/267] opt trans --- source/libs/transport/src/transCli.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index f0635d376c..b4b1bf1f51 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1502,6 +1502,7 @@ static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { pBatchList->connMax = pInst->connLimitNum; pBatchList->connCnt = 0; pBatchList->batchLenLimit = pInst->batchSize; + pBatchList->len += 1; pBatchList->ip = strdup(ip); pBatchList->dst = strdup(key); From 9db11ae559a487c3d7987299760291cd2aff14ab Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 17 Feb 2023 23:05:06 +0800 Subject: [PATCH 239/267] opt trans --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 4e9b7149e4..7f9a261cf2 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ 
b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -291,7 +291,7 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; rpcInit.supportBatch = 1; - rpcInit.batchSize = 64 * 1024; + rpcInit.batchSize = 16 * 1024; pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { From 7d915626c4cfa9f855912b9e5b610328ddb419f2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 17 Feb 2023 23:06:19 +0800 Subject: [PATCH 240/267] refactor: do some internal refactor and fix race condition. --- source/client/src/clientTmq.c | 143 +++++++++++++++++------------ source/client/test/clientTests.cpp | 139 +++++++++++++++++++++------- 2 files changed, 192 insertions(+), 90 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 22739108e2..d33f78d29d 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -32,15 +32,15 @@ sem_post(x) #endif -int32_t tmqAskEp(tmq_t* tmq, bool async); - -typedef struct { +struct SMqMgmt{ int8_t inited; tmr_h timer; int32_t rsetId; -} SMqMgmt; +}; -static SMqMgmt tmqMgmt = {0}; +static TdThreadOnce tmqInit = PTHREAD_ONCE_INIT; // initialize only once +volatile int32_t tmqInitRes = 0; // initialize rsp code +static struct SMqMgmt tmqMgmt = {0}; typedef struct { int8_t tmqRspType; @@ -65,8 +65,7 @@ struct tmq_conf_t { int8_t withTbName; int8_t snapEnable; int32_t snapBatchSize; - - bool hbBgEnable; + bool hbBgEnable; uint16_t port; int32_t autoCommitInterval; @@ -78,18 +77,17 @@ struct tmq_conf_t { }; struct tmq_t { - int64_t refId; + int64_t refId; // conf - char groupId[TSDB_CGROUP_LEN]; - char clientId[256]; - int8_t withTbName; - int8_t useSnapshot; - int8_t autoCommit; - int32_t autoCommitInterval; - int32_t resetOffsetCfg; - int64_t consumerId; - - bool hbBgEnable; + char groupId[TSDB_CGROUP_LEN]; + char clientId[256]; + int8_t withTbName; + int8_t useSnapshot; + int8_t autoCommit; + int32_t autoCommitInterval; + int32_t resetOffsetCfg; + uint64_t consumerId; + bool hbBgEnable; tmq_commit_cb* commitCb; void* commitCbUserParam; @@ -221,13 +219,21 @@ typedef struct { /*int32_t vgId;*/ } SMqCommitCbParam; +static int32_t tmqAskEp(tmq_t* tmq, bool async); + tmq_conf_t* tmq_conf_new() { tmq_conf_t* conf = taosMemoryCalloc(1, sizeof(tmq_conf_t)); + if (conf == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return conf; + } + conf->withTbName = false; conf->autoCommit = true; conf->autoCommitInterval = 5000; conf->resetOffset = TMQ_CONF__RESET_OFFSET__EARLIEAST; conf->hbBgEnable = true; + return conf; } @@ -932,31 +938,37 @@ void tmqFreeImpl(void* handle) { taosMemoryFree(tmq); } +static void tmqMgmtInit(void) { + tmqInitRes = 0; + tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ"); + + if (tmqMgmt.timer == NULL) { + tmqInitRes = TSDB_CODE_OUT_OF_MEMORY; + } + + tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); + if (tmqMgmt.rsetId != 0) { + tmqInitRes = terrno; + } +} + tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { - // init timer - int8_t inited = atomic_val_compare_exchange_8(&tmqMgmt.inited, 0, 1); - if (inited == 0) { - tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ"); - if (tmqMgmt.timer == NULL) { - atomic_store_8(&tmqMgmt.inited, 0); - terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; - } - tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); + taosThreadOnce(&tmqInit, tmqMgmtInit); + if (tmqInitRes != 0) { + terrno = tmqInitRes; + return NULL; } tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t)); if (pTmq == NULL) { 
terrno = TSDB_CODE_OUT_OF_MEMORY; - tscError("setting up new consumer failed since %s, consumer group %s", terrstr(), conf->groupId); + tscError("failed to create consumer, consumer group %s, code:%s", conf->groupId, terrstr()); return NULL; } const char* user = conf->user == NULL ? TSDB_DEFAULT_USER : conf->user; const char* pass = conf->pass == NULL ? TSDB_DEFAULT_PASS : conf->pass; - ASSERT(user); - ASSERT(pass); ASSERT(conf->groupId[0]); pTmq->clientTopics = taosArrayInit(0, sizeof(SMqClientTopic)); @@ -966,7 +978,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (pTmq->clientTopics == NULL || pTmq->mqueue == NULL || pTmq->qall == NULL || pTmq->delayedTask == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), + tscError("consumer:0x%" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); goto FAIL; } @@ -996,7 +1008,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { // init semaphore if (tsem_init(&pTmq->rspSem, 0, 0) != 0) { - tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), + tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); goto FAIL; } @@ -1004,7 +1016,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { // init connection pTmq->pTscObj = taos_connect_internal(conf->ip, user, pass, NULL, NULL, conf->port, CONN_TYPE__TMQ); if (pTmq->pTscObj == NULL) { - tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), + tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); tsem_destroy(&pTmq->rspSem); goto FAIL; @@ -1022,8 +1034,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer); } - tscInfo("consumer %" PRId64 " is setup, consumer group %s", pTmq->consumerId, pTmq->groupId); - + tscInfo("consumer:0x%" PRIx64 " is setup, consumer groupId %s", pTmq->consumerId, pTmq->groupId); return pTmq; FAIL: @@ -1032,6 +1043,7 @@ FAIL: if (pTmq->delayedTask) taosCloseQueue(pTmq->delayedTask); if (pTmq->qall) taosFreeQall(pTmq->qall); taosMemoryFree(pTmq); + return NULL; } @@ -1041,44 +1053,52 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { void* buf = NULL; SMsgSendInfo* sendInfo = NULL; SCMSubscribeReq req = {0}; - int32_t code = -1; + int32_t code = 0; - tscDebug("tmq subscribe, consumer: %" PRId64 ", topic num %d", tmq->consumerId, sz); + tscDebug("consumer:0x%"PRIx64", tmq subscribe start, numOfTopic %d", tmq->consumerId, sz); req.consumerId = tmq->consumerId; tstrncpy(req.clientId, tmq->clientId, 256); tstrncpy(req.cgroup, tmq->groupId, TSDB_CGROUP_LEN); req.topicNames = taosArrayInit(sz, sizeof(void*)); - if (req.topicNames == NULL) goto FAIL; - tscDebug("tmq subscribe, consumer: %" PRId64 ", topic num %d", tmq->consumerId, sz); + if (req.topicNames == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; + } for (int32_t i = 0; i < sz; i++) { char* topic = taosArrayGetP(container, i); SName name = {0}; tNameSetDbName(&name, tmq->pTscObj->acctId, topic, strlen(topic)); - char* topicFName = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); if (topicFName == NULL) { goto FAIL; } - tNameExtractFullName(&name, 
topicFName); - tscDebug("subscribe topic: %s", topicFName); + tNameExtractFullName(&name, topicFName); + tscDebug("consumer:0x%"PRIx64", subscribe topic: %s", tmq->consumerId, topicFName); taosArrayPush(req.topicNames, &topicFName); } int32_t tlen = tSerializeSCMSubscribeReq(NULL, &req); + buf = taosMemoryMalloc(tlen); - if (buf == NULL) goto FAIL; + if (buf == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; + } void* abuf = buf; tSerializeSCMSubscribeReq(&abuf, &req); sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); - if (sendInfo == NULL) goto FAIL; + if (sendInfo == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; + } SMqSubscribeCbParam param = { .rspErr = 0, @@ -1086,7 +1106,9 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { .epoch = tmq->epoch, }; - if (tsem_init(¶m.rspSem, 0, 0) != 0) goto FAIL; + if (tsem_init(¶m.rspSem, 0, 0) != 0) { + goto FAIL; + } sendInfo->msgInfo = (SDataBuf){ .pData = buf, @@ -1112,15 +1134,18 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { tsem_wait(¶m.rspSem); tsem_destroy(¶m.rspSem); - code = param.rspErr; - if (code != 0) goto FAIL; + if (param.rspErr != 0) { + code = param.rspErr; + goto FAIL; + } int32_t retryCnt = 0; while (TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) { if (retryCnt++ > 10) { goto FAIL; } - tscDebug("consumer not ready, retry"); + + tscDebug("consumer:0x%"PRIx64", mnd not ready for subscribe, retry count:%d in 500ms", tmq->consumerId, retryCnt); taosMsleep(500); } @@ -1138,7 +1163,6 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId2, tmqMgmt.timer); } - code = 0; FAIL: taosArrayDestroyP(req.topicNames, taosMemoryFree); taosMemoryFree(buf); @@ -1434,7 +1458,7 @@ END: } int32_t tmqAskEp(tmq_t* tmq, bool async) { - int32_t code = 0; + int32_t code = TSDB_CODE_SUCCESS; #if 0 int8_t epStatus = atomic_val_compare_exchange_8(&tmq->epStatus, 0, 1); if (epStatus == 1) { @@ -1444,6 +1468,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { } atomic_store_32(&tmq->epSkipCnt, 0); #endif + SMqAskEpReq req = {0}; req.consumerId = tmq->consumerId; req.epoch = tmq->epoch; @@ -1451,27 +1476,31 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { int32_t tlen = tSerializeSMqAskEpReq(NULL, 0, &req); if (tlen < 0) { - tscError("tSerializeSMqAskEpReq failed"); + tscError("consumer:0x%"PRIx64", tSerializeSMqAskEpReq failed", tmq->consumerId); return -1; } + void* pReq = taosMemoryCalloc(1, tlen); if (pReq == NULL) { - tscError("failed to malloc askEpReq msg, size:%d", tlen); + tscError("consumer:0x%"PRIx64", failed to malloc askEpReq msg, size:%d", tmq->consumerId, tlen); + terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } + if (tSerializeSMqAskEpReq(pReq, tlen, &req) < 0) { - tscError("tSerializeSMqAskEpReq %d failed", tlen); + tscError("consumer:0x%"PRIx64", tSerializeSMqAskEpReq %d failed", tmq->consumerId, tlen); taosMemoryFree(pReq); return -1; } SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam)); if (pParam == NULL) { - tscError("failed to malloc subscribe param"); + tscError("consumer:0x%"PRIx64", failed to malloc subscribe param", tmq->consumerId); taosMemoryFree(pReq); /*atomic_store_8(&tmq->epStatus, 0);*/ return -1; } + pParam->refId = tmq->refId; pParam->epoch = tmq->epoch; pParam->async = async; @@ -1499,8 +1528,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { sendInfo->msgType = TDMT_MND_TMQ_ASK_EP; SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp); 
- - tscDebug("consumer:%" PRId64 ", ask ep", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 ", ask ep from mnode", tmq->consumerId); int64_t transporterId = 0; asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); @@ -1510,6 +1538,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { code = pParam->code; taosMemoryFree(pParam); } + return code; } diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index a75411a854..9b777f05c0 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -162,6 +162,11 @@ void *queryThread(void *arg) { } static int32_t numOfThreads = 1; + +void tmq_commit_cb_print(tmq_t *pTmq, int32_t code, void *param) { + printf("success, code:%d\n", code); +} + } // namespace int main(int argc, char** argv) { @@ -176,12 +181,12 @@ int main(int argc, char** argv) { return RUN_ALL_TESTS(); } -TEST(testCase, driverInit_Test) { +TEST(clientCase, driverInit_Test) { // taosInitGlobalCfg(); // taos_init(); } -TEST(testCase, connect_Test) { +TEST(clientCase, connect_Test) { taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg"); TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); @@ -190,8 +195,8 @@ TEST(testCase, connect_Test) { } taos_close(pConn); } -#if 0 -TEST(testCase, create_user_Test) { + +TEST(clientCase, create_user_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -204,7 +209,7 @@ TEST(testCase, create_user_Test) { taos_close(pConn); } -TEST(testCase, create_account_Test) { +TEST(clientCase, create_account_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -217,7 +222,7 @@ TEST(testCase, create_account_Test) { taos_close(pConn); } -TEST(testCase, drop_account_Test) { +TEST(clientCase, drop_account_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -230,7 +235,7 @@ TEST(testCase, drop_account_Test) { taos_close(pConn); } -TEST(testCase, show_user_Test) { +TEST(clientCase, show_user_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -250,7 +255,7 @@ TEST(testCase, show_user_Test) { taos_close(pConn); } -TEST(testCase, drop_user_Test) { +TEST(clientCase, drop_user_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -263,7 +268,7 @@ TEST(testCase, drop_user_Test) { taos_close(pConn); } -TEST(testCase, show_db_Test) { +TEST(clientCase, show_db_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -282,7 +287,7 @@ TEST(testCase, show_db_Test) { taos_close(pConn); } -TEST(testCase, create_db_Test) { +TEST(clientCase, create_db_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -306,7 +311,7 @@ TEST(testCase, create_db_Test) { taos_close(pConn); } -TEST(testCase, create_dnode_Test) { +TEST(clientCase, create_dnode_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -325,7 +330,7 @@ TEST(testCase, create_dnode_Test) { taos_close(pConn); } -TEST(testCase, drop_dnode_Test) { +TEST(clientCase, drop_dnode_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -349,7 +354,7 @@ TEST(testCase, drop_dnode_Test) { taos_close(pConn); } -TEST(testCase, use_db_test) { +TEST(clientCase, use_db_test) { TAOS* pConn = 
taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -367,7 +372,7 @@ TEST(testCase, use_db_test) { taos_close(pConn); } -// TEST(testCase, drop_db_test) { +// TEST(clientCase, drop_db_test) { // TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); // assert(pConn != NULL); // @@ -389,7 +394,7 @@ TEST(testCase, use_db_test) { // taos_close(pConn); //} -TEST(testCase, create_stable_Test) { +TEST(clientCase, create_stable_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -428,7 +433,7 @@ TEST(testCase, create_stable_Test) { taos_close(pConn); } -TEST(testCase, create_table_Test) { +TEST(clientCase, create_table_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -447,7 +452,7 @@ TEST(testCase, create_table_Test) { taos_close(pConn); } -TEST(testCase, create_ctable_Test) { +TEST(clientCase, create_ctable_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -472,7 +477,7 @@ TEST(testCase, create_ctable_Test) { taos_close(pConn); } -TEST(testCase, show_stable_Test) { +TEST(clientCase, show_stable_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != nullptr); @@ -497,7 +502,7 @@ TEST(testCase, show_stable_Test) { taos_close(pConn); } -TEST(testCase, show_vgroup_Test) { +TEST(clientCase, show_vgroup_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -529,7 +534,7 @@ TEST(testCase, show_vgroup_Test) { taos_close(pConn); } -TEST(testCase, create_multiple_tables) { +TEST(clientCase, create_multiple_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -600,7 +605,7 @@ TEST(testCase, create_multiple_tables) { taos_close(pConn); } -TEST(testCase, show_table_Test) { +TEST(clientCase, show_table_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -634,7 +639,7 @@ TEST(testCase, show_table_Test) { taos_close(pConn); } -//TEST(testCase, drop_stable_Test) { +//TEST(clientCase, drop_stable_Test) { // TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); // assert(pConn != nullptr); // @@ -659,7 +664,7 @@ TEST(testCase, show_table_Test) { // taos_close(pConn); //} -TEST(testCase, generated_request_id_test) { +TEST(clientCase, generated_request_id_test) { SHashObj* phash = taosHashInit(10000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); for (int32_t i = 0; i < 50000; ++i) { @@ -675,7 +680,7 @@ TEST(testCase, generated_request_id_test) { taosHashCleanup(phash); } -TEST(testCase, insert_test) { +TEST(clientCase, insert_test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -692,9 +697,8 @@ TEST(testCase, insert_test) { taos_free_result(pRes); taos_close(pConn); } -#endif -TEST(testCase, projection_query_tables) { +TEST(clientCase, projection_query_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -752,8 +756,7 @@ TEST(testCase, projection_query_tables) { taos_close(pConn); } -#if 0 -TEST(testCase, tsbs_perf_test) { +TEST(clientCase, tsbs_perf_test) { TdThread qid[20] = {0}; for(int32_t i = 0; i < numOfThreads; ++i) { @@ -762,7 +765,7 @@ TEST(testCase, tsbs_perf_test) { getchar(); } -TEST(testCase, projection_query_stables) { +TEST(clientCase, 
projection_query_stables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -790,7 +793,7 @@ TEST(testCase, projection_query_stables) { taos_close(pConn); } -TEST(testCase, agg_query_tables) { +TEST(clientCase, agg_query_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -825,7 +828,7 @@ create table tm1 using m1 tags(2); insert into tm0 values('2021-1-1 1:1:1.120', 1) ('2021-1-1 1:1:2.9', 2) tm1 values('2021-1-1 1:1:1.120', 11) ('2021-1-1 1:1:2.99', 22); */ -TEST(testCase, async_api_test) { +TEST(clientCase, async_api_test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -859,7 +862,7 @@ TEST(testCase, async_api_test) { taos_close(pConn); } -TEST(testCase, update_test) { +TEST(clientCase, update_test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -895,6 +898,76 @@ TEST(testCase, update_test) { } } -#endif +TEST(clientCase, subscription_test) { + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(pConn, nullptr); + + // TAOS_RES* pRes = taos_query(pConn, "create topic topic_t1 as select * from t1"); + // if (taos_errno(pRes) != TSDB_CODE_SUCCESS) { + // printf("failed to create topic, code:%s", taos_errstr(pRes)); + // taos_free_result(pRes); + // return; + // } + + tmq_conf_t* conf = tmq_conf_new(); + tmq_conf_set(conf, "enable.auto.commit", "true"); + tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); + tmq_conf_set(conf, "group.id", "cgrpName"); + tmq_conf_set(conf, "td.connect.user", "root"); + tmq_conf_set(conf, "td.connect.pass", "taosdata"); + tmq_conf_set(conf, "auto.offset.reset", "earliest"); + tmq_conf_set(conf, "experimental.snapshot.enable", "true"); + tmq_conf_set(conf, "msg.with.table.name", "true"); + tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + + // 创建订阅 topics 列表 + tmq_list_t* topicList = tmq_list_new(); + tmq_list_append(topicList, "topic_t1"); + + // 启动订阅 + tmq_subscribe(tmq, topicList); + tmq_list_destroy(topicList); + + TAOS_FIELD* fields = NULL; + int32_t numOfFields = 0; + int32_t precision = 0; + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t timeout = 5000; + + while (1) { + TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout); + if (pRes) { + char buf[1024]; + int32_t rows = 0; + + const char* topicName = tmq_get_topic_name(pRes); + const char* dbName = tmq_get_db_name(pRes); + int32_t vgroupId = tmq_get_vgroup_id(pRes); + + printf("topic: %s\n", topicName); + printf("db: %s\n", dbName); + printf("vgroup id: %d\n", vgroupId); + + while (1) { + TAOS_ROW row = taos_fetch_row(pRes); + if (row == NULL) break; + + fields = taos_fetch_fields(pRes); + numOfFields = taos_field_count(pRes); + precision = taos_result_precision(pRes); + rows++; + taos_print_row(buf, row, fields, numOfFields); + printf("precision: %d, row content: %s\n", precision, buf); + } + } +// return rows; + } + + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +} #pragma GCC diagnostic pop From 2fced5c88b989ecf06b2275dab1018478318229d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 18 Feb 2023 08:13:21 +0800 Subject: [PATCH 241/267] del case --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 
ee647500cf..16751423b1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -301,7 +301,7 @@ ,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim ,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim ,,y,script,./test.sh -f tsim/vnode/replica3_many.sim -,,y,script,./test.sh -f tsim/vnode/replica3_import.sim +#,,y,script,./test.sh -f tsim/vnode/replica3_import.sim ,,y,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim ,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim ,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim From a9b6d564257a27f79ad7c100ed97568fd1aa994b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 18 Feb 2023 08:45:40 +0800 Subject: [PATCH 242/267] rm macro --- source/libs/transport/src/transCli.c | 2 -- source/libs/transport/src/transSvr.c | 4 ---- 2 files changed, 6 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index b4b1bf1f51..2c862ed45b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -12,7 +12,6 @@ * along with this program. If not, see . */ -#ifdef USE_UV #include "transComm.h" typedef struct SConnList { @@ -2423,4 +2422,3 @@ int64_t transAllocHandle() { return exh->refId; } -#endif diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index eecd260d35..04e094ae9a 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -12,8 +12,6 @@ * along with this program. If not, see . */ -#ifdef USE_UV - #include "transComm.h" static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; @@ -1347,5 +1345,3 @@ _return2: } int transGetConnInfo(void* thandle, STransHandleInfo* pConnInfo) { return -1; } - -#endif From cfc9bb1effdab4339fd3478f822d0a2887f0df66 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 18 Feb 2023 11:37:43 +0800 Subject: [PATCH 243/267] opt transport opt --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 7f9a261cf2..c1ee87657d 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -284,9 +284,9 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; - int32_t connLimitNum = 10000 / (tsNumOfRpcThreads * 3); - connLimitNum = TMAX(connLimitNum, 100); - connLimitNum = TMIN(connLimitNum, 500); + int32_t connLimitNum = 1000 / (tsNumOfRpcThreads * 3); + connLimitNum = TMAX(connLimitNum, 10); + connLimitNum = TMIN(connLimitNum, 100); rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; From 2270fed9f751b2d11985edaf6cbf4abd5aecdaee Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Sat, 18 Feb 2023 11:57:24 +0800 Subject: [PATCH 244/267] fix:add limit for stream batch --- source/libs/stream/src/streamExec.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 786ee9d079..7bf19cec63 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -15,6 +15,8 @@ #include "streamInc.h" +#define STREAM_EXEC_MAX_BATCH_NUM 100 + static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* pRes) { int32_t code; void* exec = pTask->exec.executor; @@ -221,6 +223,9 @@ int32_t streamExecForAll(SStreamTask* pTask) { 
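The dmInitClient() hunk above derives a per-pool connection limit by splitting a global session budget across the rpc threads, assuming each thread owns three connection pools, and clamping the result. A small sketch of that computation under the same assumptions; clamp_i32 and conn_limit_per_pool are illustrative names only:

#include <stdint.h>

/* Equivalent of the TMAX/TMIN pair used in the patch. */
static int32_t clamp_i32(int32_t v, int32_t lo, int32_t hi) {
  return (v < lo) ? lo : (v > hi) ? hi : v;
}

/* Split the session budget across rpc threads, three connection pools per
 * thread, and keep the per-pool limit inside 10..100 as this revision does. */
static int32_t conn_limit_per_pool(int32_t sessionBudget, int32_t numOfRpcThreads) {
  if (numOfRpcThreads < 1) numOfRpcThreads = 1;   /* defensive guard, not in the original */
  return clamp_i32(sessionBudget / (numOfRpcThreads * 3), 10, 100);
}

For example, conn_limit_per_pool(1000, 4) yields 83 connections per pool, while a host with many rpc threads is pulled up to the floor of 10; a later patch in this series replaces the hard-coded 1000 with the configurable tsNumOfRpcSessions.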
batchCnt++; input = newRet; streamQueueProcessSuccess(pTask->inputQueue); + if (batchCnt > STREAM_EXEC_MAX_BATCH_NUM) { + break; + } } } } From 6836dc1c8071cdbc1fb69bd546b936a936110349 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Feb 2023 12:03:11 +0800 Subject: [PATCH 245/267] refactor: do some internal refactor and add some logs. --- source/client/src/clientTmq.c | 127 +++++++++++++++++++--------------- 1 file changed, 72 insertions(+), 55 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index d33f78d29d..2646fe30b3 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -153,11 +153,9 @@ typedef struct { typedef struct { // subscribe info - char topicName[TSDB_TOPIC_FNAME_LEN]; - char db[TSDB_DB_FNAME_LEN]; - - SArray* vgs; // SArray - + char topicName[TSDB_TOPIC_FNAME_LEN]; + char db[TSDB_DB_FNAME_LEN]; + SArray* vgs; // SArray SSchemaWrapper schema; } SMqClientTopic; @@ -511,7 +509,7 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT .handle = NULL, }; - tscDebug("consumer:%" PRId64 ", commit offset of %s on vgId:%d, offset is %" PRId64, tmq->consumerId, pOffset->subKey, + tscDebug("consumer:0x%" PRIx64 ", commit offset of %s on vgId:%d, offset is %" PRId64, tmq->consumerId, pOffset->subKey, pVg->vgId, pOffset->val.version); // TODO: put into cb @@ -642,13 +640,14 @@ static int32_t tmqCommitConsumerImpl(tmq_t* tmq, int8_t automatic, int8_t async, for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); - tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgNum %d", tmq->consumerId, pTopic->topicName, - (int32_t)taosArrayGetSize(pTopic->vgs)); + int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); + tscDebug("consumer:0x%" PRIx64 ", begin commit for topic %s, vgNum %d", tmq->consumerId, pTopic->topicName, + numOfVgroups); - for (int32_t j = 0; j < taosArrayGetSize(pTopic->vgs); j++) { + for (int32_t j = 0; j < numOfVgroups; j++) { SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); - tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgId:%d", tmq->consumerId, pTopic->topicName, + tscDebug("consumer:0x%" PRIx64 ", begin commit for topic %s, vgId:%d", tmq->consumerId, pTopic->topicName, pVg->vgId); if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) { @@ -792,34 +791,38 @@ OVER: taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer); } -int32_t tmqHandleAllDelayedTask(tmq_t* tmq) { +int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { STaosQall* qall = taosAllocateQall(); - taosReadAllQitems(tmq->delayedTask, qall); + taosReadAllQitems(pTmq->delayedTask, qall); + + tscDebug("consumer:0x%"PRIx64" handle delayed %d tasks before poll data", pTmq->consumerId, qall->numOfItems); + while (1) { int8_t* pTaskType = NULL; taosGetQitem(qall, (void**)&pTaskType); if (pTaskType == NULL) break; if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) { - tmqAskEp(tmq, true); + tmqAskEp(pTmq, true); int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); - *pRefId = tmq->refId; + *pRefId = pTmq->refId; - taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &tmq->epTimer); + taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &pTmq->epTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { - tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam); + tmqCommitInner(pTmq, NULL, 1, 1, pTmq->commitCb, pTmq->commitCbUserParam); int64_t* 
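The STREAM_EXEC_MAX_BATCH_NUM cap added just above stops one stream task from merging an unbounded number of queued blocks in a single execution round. A generic sketch of taking a bounded batch from a pending list, with an array cursor standing in for the real input queue (take_one_batch is an illustrative helper, not part of the patch):

#include <stdint.h>

#define MAX_BATCH_NUM 100   /* same cap as STREAM_EXEC_MAX_BATCH_NUM */

/* Take at most MAX_BATCH_NUM items starting at *cursor and advance the cursor;
 * whatever is left stays queued for the next round, so a single busy task
 * cannot monopolize the worker thread. */
static int32_t take_one_batch(void **pending, int32_t total, int32_t *cursor, void **batch) {
  int32_t n = 0;
  while (*cursor < total && n < MAX_BATCH_NUM) {
    batch[n++] = pending[(*cursor)++];
  }
  return n;
}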
pRefId = taosMemoryMalloc(sizeof(int64_t)); - *pRefId = tmq->refId; + *pRefId = pTmq->refId; - taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer); + taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, pRefId, tmqMgmt.timer, &pTmq->commitTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) { } else { ASSERT(0); } taosFreeQitem(pTaskType); } + taosFreeQall(qall); return 0; } @@ -947,7 +950,7 @@ static void tmqMgmtInit(void) { } tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); - if (tmqMgmt.rsetId != 0) { + if (tmqMgmt.rsetId < 0) { tmqInitRes = terrno; } } @@ -1257,7 +1260,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tDecoderClear(&decoder); memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead)); - tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d", + tscDebug("consumer:0x%" PRIx64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d", tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version, rspType); @@ -1280,7 +1283,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { taosMemoryFree(pMsg->pData); taosMemoryFree(pMsg->pEpSet); - tscDebug("consumer:%" PRId64 ", put poll res into mqueue %p", tmq->consumerId, pRspWrapper); + tscDebug("consumer:0x%" PRIx64 ", put poll res into mqueue %p", tmq->consumerId, pRspWrapper); taosWriteQitem(tmq->mqueue, pRspWrapper); tsem_post(&tmq->rspSem); @@ -1297,10 +1300,12 @@ CREATE_MSG_FAIL: bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { bool set = false; + int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); int32_t topicNumGet = taosArrayGetSize(pRsp->topics); + char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; - tscDebug("consumer:%" PRId64 ", update ep epoch %d to epoch %d, topic num:%d", tmq->consumerId, tmq->epoch, epoch, - topicNumGet); + tscDebug("consumer:0x%" PRIx64", update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d", + tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur); SArray* newTopics = taosArrayInit(topicNumGet, sizeof(SMqClientTopic)); if (newTopics == NULL) { @@ -1312,19 +1317,19 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { taosArrayDestroy(newTopics); return false; } - int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); + for (int32_t i = 0; i < topicNumCur; i++) { // find old topic SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i); if (pTopicCur->vgs) { int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs); - tscDebug("consumer:%" PRId64 ", new vg num: %d", tmq->consumerId, vgNumCur); + tscDebug("consumer:0x%" PRIx64 ", new vg num: %d", tmq->consumerId, vgNumCur); for (int32_t j = 0; j < vgNumCur; j++) { SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j); sprintf(vgKey, "%s:%d", pTopicCur->topicName, pVgCur->vgId); char buf[80]; tFormatOffset(buf, 80, &pVgCur->currentOffset); - tscDebug("consumer:%" PRId64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch, + tscDebug("consumer:0x%" PRIx64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch, pVgCur->vgId, vgKey, buf); taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(STqOffsetVal)); } @@ -1340,7 +1345,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN); tstrncpy(topic.db, pTopicEp->db, 
TSDB_DB_FNAME_LEN); - tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName); + tscDebug("consumer:0x%" PRIx64 ", update topic: %s", tmq->consumerId, topic.topicName); int32_t vgNumGet = taosArrayGetSize(pTopicEp->vgs); topic.vgs = taosArrayInit(vgNumGet, sizeof(SMqClientVg)); @@ -1366,6 +1371,8 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { } taosArrayPush(newTopics, &topic); } + + // destroy current buffered existed topics info if (tmq->clientTopics) { int32_t sz = taosArrayGetSize(tmq->clientTopics); for (int32_t i = 0; i < sz; i++) { @@ -1373,17 +1380,21 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema); taosArrayDestroy(pTopic->vgs); } + taosArrayDestroy(tmq->clientTopics); } + taosHashCleanup(pHash); tmq->clientTopics = newTopics; - if (taosArrayGetSize(tmq->clientTopics) == 0) + if (taosArrayGetSize(tmq->clientTopics) == 0) { atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__NO_TOPIC); - else + } else { atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__READY); + } atomic_store_32(&tmq->epoch, epoch); + tscDebug("consumer:0x%" PRIx64 ", update topic info completed", tmq->consumerId); return set; } @@ -1406,8 +1417,8 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { pParam->code = code; if (code != 0) { - tscError("consumer:%" PRId64 ", get topic endpoint error, not ready, wait:%d, code %x", tmq->consumerId, - pParam->async, code); + tscError("consumer:0x%" PRIx64 ", get topic endpoint error, async:%d, code:%s", tmq->consumerId, + pParam->async, tstrerror(code)); goto END; } @@ -1416,7 +1427,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { // Epoch will only increase when received newer epoch ep msg SMqRspHead* head = pMsg->pData; int32_t epoch = atomic_load_32(&tmq->epoch); - tscDebug("consumer:%" PRId64 ", recv ep, msg epoch %d, current epoch %d", tmq->consumerId, head->epoch, epoch); + tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d", tmq->consumerId, head->epoch, epoch); if (head->epoch <= epoch) { goto END; } @@ -1435,6 +1446,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { code = -1; goto END; } + pWrapper->tmqRspType = TMQ_MSG_TYPE__EP_RSP; pWrapper->epoch = head->epoch; memcpy(&pWrapper->msg, pMsg->pData, sizeof(SMqRspHead)); @@ -1463,7 +1475,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { int8_t epStatus = atomic_val_compare_exchange_8(&tmq->epStatus, 0, 1); if (epStatus == 1) { int32_t epSkipCnt = atomic_add_fetch_32(&tmq->epSkipCnt, 1); - tscTrace("consumer:%" PRId64 ", skip ask ep cnt %d", tmq->consumerId, epSkipCnt); + tscTrace("consumer:0x%" PRIx64 ", skip ask ep cnt %d", tmq->consumerId, epSkipCnt); if (epSkipCnt < 5000) return 0; } atomic_store_32(&tmq->epSkipCnt, 0); @@ -1521,7 +1533,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { .handle = NULL, }; - sendInfo->requestId = generateRequestId(); + sendInfo->requestId = tmq->consumerId; sendInfo->requestObjRefId = 0; sendInfo->param = pParam; sendInfo->fp = tmqAskEpCb; @@ -1611,6 +1623,7 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) { return pRspObj; } +// broadcast the poll request to all related vnodes int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); @@ -1619,7 +1632,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { int32_t 
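tmqUpdateEp() above snapshots each existing vgroup's current offset under a composite "topicName:vgId" key before the topic list is rebuilt, so vgroups that survive the epoch change can resume from where they were. A simplified standalone sketch of that bookkeeping, using a linear table instead of taosHash; SOffsetEntry, saveOffset and lookupOffset are illustrative names, and since the restore side is not shown in this hunk the lookup below is only an assumption about how the map is consumed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* "topicName:vgId" -> last known offset, rebuilt on every epoch update. */
typedef struct {
  char    key[192];
  int64_t offset;
} SOffsetEntry;

static void saveOffset(SOffsetEntry *tab, int32_t *n, const char *topic, int32_t vgId, int64_t offset) {
  snprintf(tab[*n].key, sizeof(tab[*n].key), "%s:%d", topic, vgId);   /* same key format as vgKey */
  tab[*n].offset = offset;
  (*n)++;
}

static int64_t lookupOffset(const SOffsetEntry *tab, int32_t n, const char *topic, int32_t vgId, int64_t notFound) {
  char key[192];
  snprintf(key, sizeof(key), "%s:%d", topic, vgId);
  for (int32_t i = 0; i < n; ++i) {
    if (strcmp(tab[i].key, key) == 0) {
      return tab[i].offset;            /* vgroup survived the epoch change */
    }
  }
  return notFound;                     /* new vgroup: fall back to the reset policy */
}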
vgStatus = atomic_val_compare_exchange_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE, TMQ_VG_STATUS__WAIT); if (vgStatus != TMQ_VG_STATUS__IDLE) { int32_t vgSkipCnt = atomic_add_fetch_32(&pVg->vgSkipCnt, 1); - tscTrace("consumer:%" PRId64 ", epoch %d skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch, pVg->vgId, + tscTrace("consumer:0x%" PRIx64 ", epoch %d skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch, pVg->vgId, vgSkipCnt); continue; /*if (vgSkipCnt < 10000) continue;*/ @@ -1627,7 +1640,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { if (skipCnt < 30000) { continue; } else { - tscDebug("consumer:%" PRId64 ",skip vgId:%d skip too much reset", tmq->consumerId, pVg->vgId); + tscDebug("consumer:0x%" PRIx64 ",skip vgId:%d skip too much reset", tmq->consumerId, pVg->vgId); } #endif } @@ -1683,6 +1696,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { .len = msgSize, .handle = NULL, }; + sendInfo->requestId = req.reqId; sendInfo->requestObjRefId = 0; sendInfo->param = pParam; @@ -1690,18 +1704,19 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { sendInfo->msgType = TDMT_VND_TMQ_CONSUME; int64_t transporterId = 0; - /*printf("send poll\n");*/ char offsetFormatBuf[80]; tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffset); - tscDebug("consumer:%" PRId64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:%" PRIu64, + + tscDebug("consumer:0x%" PRIx64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:0x%" PRIx64, tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId); - /*printf("send vgId:%d %" PRId64 "\n", pVg->vgId, pVg->currentOffset);*/ asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo); + pVg->pollCnt++; tmq->pollCnt++; } } + return 0; } @@ -1739,7 +1754,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { } } - tscDebug("consumer:%" PRId64 " handle rsp %p", tmq->consumerId, rspWrapper); + tscDebug("consumer:0x%" PRIx64 " handle rsp %p", tmq->consumerId, rspWrapper); if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__END_RSP) { taosFreeQitem(rspWrapper); @@ -1747,7 +1762,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { return NULL; } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) { SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper; - tscDebug("consumer %" PRId64 " actual process poll rsp", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 " actual process poll rsp", tmq->consumerId); /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/ int32_t consumerEpoch = atomic_load_32(&tmq->epoch); if (pollRspWrapper->dataRsp.head.epoch == consumerEpoch) { @@ -1766,8 +1781,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", - pollRspWrapper->dataRsp.head.epoch, consumerEpoch); + tscDebug("consumer:0x%"PRIx64", msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tmq->consumerId, pollRspWrapper->dataRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); } @@ -1785,8 +1800,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", - pollRspWrapper->metaRsp.head.epoch, consumerEpoch); + tscDebug("consumer:0x%"PRIx64", msg discard since epoch mismatch: msg epoch %d, 
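tmqPollImpl() claims each vgroup by atomically swapping its status from IDLE to WAIT, so at most one poll request is in flight per vgroup and every other pass simply skips it. The same claim step expressed with portable C11 atomics, as a sketch only; the real code uses the atomic_val_compare_exchange_32 wrapper shown above:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

enum { VG_STATUS_IDLE = 0, VG_STATUS_WAIT = 1 };

/* Returns true only for the caller that moves the status from IDLE to WAIT;
 * losers see WAIT already set and leave the vgroup alone this round. */
static bool try_claim_vgroup(_Atomic int32_t *vgStatus) {
  int32_t expected = VG_STATUS_IDLE;
  return atomic_compare_exchange_strong(vgStatus, &expected, VG_STATUS_WAIT);
}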
consumer epoch %d", + tmq->consumerId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); } @@ -1816,8 +1831,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", - pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); + tscDebug("consumer:0x%"PRIx64", msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tmq->consumerId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); } @@ -1827,7 +1842,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { tmqHandleNoPollRsp(tmq, rspWrapper, &reset); taosFreeQitem(rspWrapper); if (pollIfReset && reset) { - tscDebug("consumer:%" PRId64 ", reset and repoll", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 ", reset and repoll", tmq->consumerId); tmqPollImpl(tmq, timeout); } } @@ -1838,7 +1853,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { void* rspObj; int64_t startTime = taosGetTimestampMs(); - tscDebug("consumer:%" PRId64 ", start poll at %" PRId64, tmq->consumerId, startTime); + tscDebug("consumer:0x%" PRIx64 ", start poll at %" PRId64, tmq->consumerId, startTime); #if 0 tmqHandleAllDelayedTask(tmq); @@ -1851,7 +1866,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { // in no topic status, delayed task also need to be processed if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) { - tscDebug("consumer:%" PRId64 ", poll return since consumer status is init", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 ", poll return since consumer status is init", tmq->consumerId); return NULL; } @@ -1868,28 +1883,30 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { while (1) { tmqHandleAllDelayedTask(tmq); + if (tmqPollImpl(tmq, timeout) < 0) { - tscDebug("consumer:%" PRId64 " return since poll err", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId); /*return NULL;*/ } rspObj = tmqHandleAllRsp(tmq, timeout, false); if (rspObj) { - tscDebug("consumer:%" PRId64 ", return rsp %p", tmq->consumerId, rspObj); + tscDebug("consumer:0x%" PRIx64 ", return rsp %p", tmq->consumerId, rspObj); return (TAOS_RES*)rspObj; } else if (terrno == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) { - tscDebug("consumer:%" PRId64 ", return null since no committed offset", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 ", return null since no committed offset", tmq->consumerId); return NULL; } + if (timeout != -1) { int64_t currentTime = taosGetTimestampMs(); int64_t passedTime = currentTime - startTime; if (passedTime > timeout) { - tscDebug("consumer:%" PRId64 ", (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64, + tscDebug("consumer:0x%" PRIx64 ", (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64, tmq->consumerId, tmq->epoch, startTime, currentTime); return NULL; } - /*tscInfo("consumer:%" PRId64 ", (epoch %d) wait, start time %" PRId64 ", current time %" PRId64*/ + /*tscInfo("consumer:0x%" PRIx64 ", (epoch %d) wait, start time %" PRId64 ", current time %" PRId64*/ /*", left time %" PRId64,*/ /*tmq->consumerId, tmq->epoch, startTime, currentTime, (timeout - passedTime));*/ tsem_timewait(&tmq->rspSem, (timeout - passedTime)); From abe37a16e3756717e62f2f9c849b6a498e82f50c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Feb 
2023 12:40:09 +0800 Subject: [PATCH 246/267] refactor: do some internal refactor and add some logs. --- source/client/src/clientTmq.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 2646fe30b3..a17ad97756 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -795,19 +795,23 @@ int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { STaosQall* qall = taosAllocateQall(); taosReadAllQitems(pTmq->delayedTask, qall); + if (qall->numOfItems == 0) { + taosFreeQall(qall); + return TSDB_CODE_SUCCESS; + } + tscDebug("consumer:0x%"PRIx64" handle delayed %d tasks before poll data", pTmq->consumerId, qall->numOfItems); + int8_t* pTaskType = NULL; + taosGetQitem(qall, (void**)&pTaskType); - while (1) { - int8_t* pTaskType = NULL; - taosGetQitem(qall, (void**)&pTaskType); - if (pTaskType == NULL) break; - + while (pTaskType != NULL) { if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) { tmqAskEp(pTmq, true); int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); *pRefId = pTmq->refId; + tscDebug("consumer:0x%"PRIx64" retrieve ep from mnode in 1s", pTmq->consumerId); taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &pTmq->epTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { tmqCommitInner(pTmq, NULL, 1, 1, pTmq->commitCb, pTmq->commitCbUserParam); @@ -815,12 +819,16 @@ int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); *pRefId = pTmq->refId; + tscDebug("consumer:0x%"PRIx64" commit to mnode in %.2f s", pTmq->consumerId, pTmq->autoCommitInterval/1000.0); taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, pRefId, tmqMgmt.timer, &pTmq->commitTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) { + // do nothing } else { ASSERT(0); } + taosFreeQitem(pTaskType); + taosGetQitem(qall, (void**)&pTaskType); } taosFreeQall(qall); From a2a7dffb5d726b21fde8c3a171f787a7f4e2a6e7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Feb 2023 13:52:07 +0800 Subject: [PATCH 247/267] refactor: do some internal refactor and add some logs. 
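The tmq_consumer_poll() loop in the preceding patch bounds every blocking wait by whatever is left of the caller's timeout: it returns NULL once the budget is spent, otherwise it sleeps on the response semaphore for the remaining time only. A sketch of that remaining-budget computation; poll_budget_left is an illustrative helper, not part of the patch:

#include <stdbool.h>
#include <stdint.h>

/* Given the poll start time, the current time and the caller's timeout
 * (-1 means wait forever), report whether the budget is exhausted and, if
 * not, how long the next wait may last. */
static bool poll_budget_left(int64_t startMs, int64_t nowMs, int64_t timeoutMs, int64_t *waitMs) {
  if (timeoutMs == -1) {
    *waitMs = -1;                       /* infinite wait requested */
    return true;
  }
  int64_t passed = nowMs - startMs;
  if (passed > timeoutMs) {
    *waitMs = 0;                        /* budget spent: poll returns NULL */
    return false;
  }
  *waitMs = timeoutMs - passed;         /* sleep at most this long on rspSem */
  return true;
}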
--- source/client/src/clientTmq.c | 25 +++++++++++++------------ source/client/test/clientTests.cpp | 2 +- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index a17ad97756..5aef065ce0 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -641,17 +641,14 @@ static int32_t tmqCommitConsumerImpl(tmq_t* tmq, int8_t automatic, int8_t async, SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); - tscDebug("consumer:0x%" PRIx64 ", begin commit for topic %s, vgNum %d", tmq->consumerId, pTopic->topicName, - numOfVgroups); - for (int32_t j = 0; j < numOfVgroups; j++) { SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); - tscDebug("consumer:0x%" PRIx64 ", begin commit for topic %s, vgId:%d", tmq->consumerId, pTopic->topicName, - pVg->vgId); + tscDebug("consumer:0x%" PRIx64 " begin commit for topic %s, vgId:%d, ordinal:%d/%d", tmq->consumerId, pTopic->topicName, + pVg->vgId, j + 1, numOfVgroups); if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) { - tscDebug("consumer: %" PRId64 ", vg:%d, current %" PRId64 ", committed %" PRId64 "", tmq->consumerId, pVg->vgId, + tscDebug("consumer:0x%" PRId64 " vg:%d, current %" PRId64 ", committed %" PRId64 "", tmq->consumerId, pVg->vgId, pVg->currentOffset.version, pVg->committedOffset.version); if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) { continue; @@ -811,7 +808,7 @@ int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); *pRefId = pTmq->refId; - tscDebug("consumer:0x%"PRIx64" retrieve ep from mnode in 1s", pTmq->consumerId); + tscDebug("consumer:0x%"PRIx64" will retrieve ep from mnode in 1s", pTmq->consumerId); taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &pTmq->epTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { tmqCommitInner(pTmq, NULL, 1, 1, pTmq->commitCb, pTmq->commitCbUserParam); @@ -819,7 +816,7 @@ int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); *pRefId = pTmq->refId; - tscDebug("consumer:0x%"PRIx64" commit to mnode in %.2f s", pTmq->consumerId, pTmq->autoCommitInterval/1000.0); + tscDebug("consumer:0x%"PRIx64" will commit to mnode in %.2fs", pTmq->consumerId, pTmq->autoCommitInterval/1000.0); taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, pRefId, tmqMgmt.timer, &pTmq->commitTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) { // do nothing @@ -1066,7 +1063,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { SCMSubscribeReq req = {0}; int32_t code = 0; - tscDebug("consumer:0x%"PRIx64", tmq subscribe start, numOfTopic %d", tmq->consumerId, sz); + tscDebug("consumer:0x%"PRIx64" tmq subscribe start, numOfTopic %d", tmq->consumerId, sz); req.consumerId = tmq->consumerId; tstrncpy(req.clientId, tmq->clientId, 256); @@ -1156,7 +1153,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { goto FAIL; } - tscDebug("consumer:0x%"PRIx64", mnd not ready for subscribe, retry count:%d in 500ms", tmq->consumerId, retryCnt); + tscDebug("consumer:0x%"PRIx64", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt); taosMsleep(500); } @@ -1435,11 +1432,15 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { // Epoch will only increase when received newer epoch ep msg SMqRspHead* head = pMsg->pData; int32_t epoch = 
atomic_load_32(&tmq->epoch); - tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d", tmq->consumerId, head->epoch, epoch); if (head->epoch <= epoch) { + tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, no need to update local ep", + tmq->consumerId, head->epoch, epoch); goto END; } + tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId, + head->epoch, epoch); + if (!async) { SMqAskEpRsp rsp; tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &rsp); @@ -1548,7 +1549,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { sendInfo->msgType = TDMT_MND_TMQ_ASK_EP; SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp); - tscDebug("consumer:0x%" PRIx64 ", ask ep from mnode", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 " ask ep from mnode, async:%d", tmq->consumerId, async); int64_t transporterId = 0; asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index 9b777f05c0..59c931d9aa 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -671,7 +671,7 @@ TEST(clientCase, generated_request_id_test) { uint64_t v = generateRequestId(); void* result = taosHashGet(phash, &v, sizeof(v)); if (result != nullptr) { - printf("0x%lx, index:%d\n", v, i); +// printf("0x%llx, index:%d\n", v, i); } assert(result == nullptr); taosHashPut(phash, &v, sizeof(v), NULL, 0); From 50ae5e7427ce3703253696f465d6d7ec6556f0a7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Feb 2023 15:51:02 +0800 Subject: [PATCH 248/267] refactor: do some internal refactor and add some logs. --- source/client/src/clientTmq.c | 41 ++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 5aef065ce0..b9b66e1222 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -509,7 +509,7 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT .handle = NULL, }; - tscDebug("consumer:0x%" PRIx64 ", commit offset of %s on vgId:%d, offset is %" PRId64, tmq->consumerId, pOffset->subKey, + tscDebug("consumer:0x%" PRIx64 " topic:%s on vgId:%d offset:%" PRId64, tmq->consumerId, pOffset->subKey, pVg->vgId, pOffset->val.version); // TODO: put into cb @@ -643,16 +643,15 @@ static int32_t tmqCommitConsumerImpl(tmq_t* tmq, int8_t automatic, int8_t async, int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); for (int32_t j = 0; j < numOfVgroups; j++) { SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); - - tscDebug("consumer:0x%" PRIx64 " begin commit for topic %s, vgId:%d, ordinal:%d/%d", tmq->consumerId, pTopic->topicName, - pVg->vgId, j + 1, numOfVgroups); - if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) { - tscDebug("consumer:0x%" PRId64 " vg:%d, current %" PRId64 ", committed %" PRId64 "", tmq->consumerId, pVg->vgId, - pVg->currentOffset.version, pVg->committedOffset.version); + tscDebug("consumer:0x%" PRIx64 " topic:%s vgId:%d, current %" PRId64 ", committed %" PRId64, tmq->consumerId, + pTopic->topicName, pVg->vgId, pVg->currentOffset.version, pVg->committedOffset.version); if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) { continue; } + } else { + tscDebug("consumer:0x%" PRIx64 " topic:%s vgId:%d, not commit, current:%" PRId64 ", ordinal:%d/%d", + 
tmq->consumerId, pTopic->topicName, pVg->vgId, pVg->currentOffset.version, j + 1, numOfVgroups); } } } @@ -808,7 +807,7 @@ int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); *pRefId = pTmq->refId; - tscDebug("consumer:0x%"PRIx64" will retrieve ep from mnode in 1s", pTmq->consumerId); + tscDebug("consumer:0x%"PRIx64" next retrieve ep from mnode in 1s", pTmq->consumerId); taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &pTmq->epTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { tmqCommitInner(pTmq, NULL, 1, 1, pTmq->commitCb, pTmq->commitCbUserParam); @@ -816,7 +815,7 @@ int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); *pRefId = pTmq->refId; - tscDebug("consumer:0x%"PRIx64" will commit to mnode in %.2fs", pTmq->consumerId, pTmq->autoCommitInterval/1000.0); + tscDebug("consumer:0x%"PRIx64" next commit to mnode in %.2fs", pTmq->consumerId, pTmq->autoCommitInterval/1000.0); taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, pRefId, tmqMgmt.timer, &pTmq->commitTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) { // do nothing @@ -1309,7 +1308,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { int32_t topicNumGet = taosArrayGetSize(pRsp->topics); char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; - tscDebug("consumer:0x%" PRIx64", update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d", + tscDebug("consumer:0x%" PRIx64" update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d", tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur); SArray* newTopics = taosArrayInit(topicNumGet, sizeof(SMqClientTopic)); @@ -1634,14 +1633,18 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) { // broadcast the poll request to all related vnodes int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { - for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { + int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics); + tscDebug("consumer:0x%" PRIx64" start to poll data, numOfTopics:%d", tmq->consumerId, numOfTopics); + + for (int i = 0; i < numOfTopics; i++) { + SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); for (int j = 0; j < taosArrayGetSize(pTopic->vgs); j++) { SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); int32_t vgStatus = atomic_val_compare_exchange_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE, TMQ_VG_STATUS__WAIT); - if (vgStatus != TMQ_VG_STATUS__IDLE) { + if (vgStatus == TMQ_VG_STATUS__WAIT) { int32_t vgSkipCnt = atomic_add_fetch_32(&pVg->vgSkipCnt, 1); - tscTrace("consumer:0x%" PRIx64 ", epoch %d skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch, pVg->vgId, + tscDebug("consumer:0x%" PRIx64 " epoch %d wait poll-rsp, skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch, pVg->vgId, vgSkipCnt); continue; /*if (vgSkipCnt < 10000) continue;*/ @@ -1653,6 +1656,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { } #endif } + atomic_store_32(&pVg->vgSkipCnt, 0); SMqPollReq req = {0}; @@ -1663,6 +1667,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { tsem_post(&tmq->rspSem); return -1; } + char* msg = taosMemoryCalloc(1, msgSize); if (NULL == msg) { atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); @@ -1684,6 +1689,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { tsem_post(&tmq->rspSem); return -1; } + pParam->refId = tmq->refId; pParam->epoch = tmq->epoch; @@ -1790,7 +1796,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) 
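The commit loop above only issues a request for a vgroup whose current offset is valid and differs from what was committed last time; unchanged vgroups are merely logged. A small predicate capturing that decision, where SOffset is a trimmed-down stand-in for STqOffsetVal and the real tOffsetEqual compares the full offset value rather than just the version:

#include <stdbool.h>
#include <stdint.h>

/* Trimmed-down offset: just the fields the decision needs. */
typedef struct {
  int32_t type;      /* <= 0 means nothing has been consumed yet */
  int64_t version;
} SOffset;

/* Commit only when there is real progress to record; skipping identical
 * offsets avoids useless round trips to the vnode on every auto-commit tick. */
static bool need_commit(const SOffset *current, const SOffset *committed) {
  if (current->type <= 0) {
    return false;
  }
  return current->version != committed->version;
}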
{ taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("consumer:0x%"PRIx64", msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tscDebug("consumer:0x%"PRIx64" msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->dataRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); @@ -1809,7 +1815,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("consumer:0x%"PRIx64", msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tscDebug("consumer:0x%"PRIx64" msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); @@ -1840,7 +1846,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("consumer:0x%"PRIx64", msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tscDebug("consumer:0x%"PRIx64" msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", tmq->consumerId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); @@ -1885,7 +1891,8 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { if (retryCnt++ > 10) { return NULL; } - tscDebug("consumer not ready, retry"); + + tscDebug("consumer:0x%"PRIx64" not ready, retry:%d/10 in 500ms", tmq->consumerId, retryCnt); taosMsleep(500); } } From 06a3c1c9c0b4cf2b7334fbf5cafb9da453d406d4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Feb 2023 17:32:43 +0800 Subject: [PATCH 249/267] refactor: do some internal refactor and add some logs. 
--- source/client/test/clientTests.cpp | 2 +- source/dnode/mnode/impl/src/mndDef.c | 26 ++++++++++++++------------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index 59c931d9aa..cb3c2f8c68 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -912,7 +912,7 @@ TEST(clientCase, subscription_test) { tmq_conf_t* conf = tmq_conf_new(); tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); - tmq_conf_set(conf, "group.id", "cgrpName"); + tmq_conf_set(conf, "group.id", "newabcdefgjhijlm__"); tmq_conf_set(conf, "td.connect.user", "root"); tmq_conf_set(conf, "td.connect.pass", "taosdata"); tmq_conf_set(conf, "auto.offset.reset", "earliest"); diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 75177f4158..6e651c1954 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -411,19 +411,21 @@ void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp) { return (void *)buf; } -SMqSubscribeObj *tNewSubscribeObj(const char key[TSDB_SUBSCRIBE_KEY_LEN]) { - SMqSubscribeObj *pSubNew = taosMemoryCalloc(1, sizeof(SMqSubscribeObj)); - if (pSubNew == NULL) return NULL; - memcpy(pSubNew->key, key, TSDB_SUBSCRIBE_KEY_LEN); - taosInitRWLatch(&pSubNew->lock); - pSubNew->vgNum = 0; - pSubNew->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); +SMqSubscribeObj *tNewSubscribeObj(const char* key) { + SMqSubscribeObj *pSubObj = taosMemoryCalloc(1, sizeof(SMqSubscribeObj)); + if (pSubObj == NULL) { + return NULL; + } + + memcpy(pSubObj->key, key, TSDB_SUBSCRIBE_KEY_LEN); + taosInitRWLatch(&pSubObj->lock); + pSubObj->vgNum = 0; + pSubObj->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + // TODO set hash free fp - /*taosHashSetFreeFp(pSubNew->consumerHash, tDeleteSMqConsumerEp);*/ - - pSubNew->unassignedVgs = taosArrayInit(0, sizeof(void *)); - - return pSubNew; + /*taosHashSetFreeFp(pSubObj->consumerHash, tDeleteSMqConsumerEp);*/ + pSubObj->unassignedVgs = taosArrayInit(0, POINTER_BYTES); + return pSubObj; } SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) { From edb3d6f8f3a10c6cd0eefef14296bb6031bb2dcf Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 18 Feb 2023 19:19:44 +0800 Subject: [PATCH 250/267] change transport param --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index c1ee87657d..659dae142e 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -286,7 +286,7 @@ int32_t dmInitClient(SDnode *pDnode) { int32_t connLimitNum = 1000 / (tsNumOfRpcThreads * 3); connLimitNum = TMAX(connLimitNum, 10); - connLimitNum = TMIN(connLimitNum, 100); + connLimitNum = TMIN(connLimitNum, 50); rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; From 4853522e1186af1b30a7593d27387227f9bf007f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Feb 2023 19:46:37 +0800 Subject: [PATCH 251/267] fix(query): set the correct rows in data block. 
--- source/dnode/mnode/impl/src/mndSubscribe.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 153bb8bd04..ea1e16ef8b 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -1075,6 +1075,9 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock } #endif + + pBlock->info.rows = numOfRows; + taosRUnLockLatch(&pSub->lock); sdbRelease(pSdb, pSub); } From 59c72beb8d7b4927c1253bee196d472655939830 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 18 Feb 2023 19:47:18 +0800 Subject: [PATCH 252/267] fix(query): set the correct rows in data block. --- source/dnode/mnode/impl/src/mndSubscribe.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index ea1e16ef8b..d127ceacf5 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -966,7 +966,9 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock while (numOfRows < rowsCapacity) { pShow->pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pShow->pIter, (void **)&pSub); - if (pShow->pIter == NULL) break; + if (pShow->pIter == NULL) { + break; + } taosRLockLatch(&pSub->lock); From 098949b528a59c5af225bc23b7ab47f438bbd142 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 19 Feb 2023 18:10:42 +0800 Subject: [PATCH 253/267] add trans param --- include/common/tglobal.h | 9 +++++---- source/common/src/tglobal.c | 18 ++++++++++++++++-- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 4 ++-- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 5a0c0e0777..e92afc2222 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -49,6 +49,7 @@ extern int32_t tsTagFilterResCacheSize; // queue & threads extern int32_t tsNumOfRpcThreads; +extern int32_t tsNumOfRpcSessions; extern int32_t tsNumOfCommitThreads; extern int32_t tsNumOfTaskQueueThreads; extern int32_t tsNumOfMnodeQueryThreads; @@ -86,9 +87,9 @@ extern int32_t tsTelemInterval; extern char tsTelemServer[]; extern uint16_t tsTelemPort; extern bool tsEnableCrashReport; -extern char* tsTelemUri; -extern char* tsClientCrashReportUri; -extern char* tsSvrCrashReportUri; +extern char *tsTelemUri; +extern char *tsClientCrashReportUri; +extern char *tsSvrCrashReportUri; // query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing @@ -159,7 +160,7 @@ extern int32_t tsUptimeInterval; extern int32_t tsRpcRetryLimit; extern int32_t tsRpcRetryInterval; -extern bool tsDisableStream; +extern bool tsDisableStream; // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1e5291b7cb..e636dffdd9 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -41,6 +41,7 @@ bool tsPrintAuth = false; // queue & threads int32_t tsNumOfRpcThreads = 1; +int32_t tsNumOfRpcSessions = 2000; int32_t tsNumOfCommitThreads = 2; int32_t tsNumOfTaskQueueThreads = 4; int32_t tsNumOfMnodeQueryThreads = 4; @@ -54,7 +55,6 @@ int32_t tsNumOfQnodeQueryThreads = 4; int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeStreamThreads = 4; int32_t tsNumOfSnodeWriteThreads = 1; - // sync raft int32_t 
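The mndSubscribe.c fixes above set pBlock->info.rows after the retrieve loop; without that line the block is filled but reports zero rows to the caller. A generic sketch of the fill-then-record pattern, with fetch_row_fn and fill_block as illustrative stand-ins for the sdbFetch loop:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical row producer: returns false when nothing is left to fetch. */
typedef bool (*fetch_row_fn)(void *iter, void *rowBuf);

/* Fill a result block up to its capacity and record how many rows were
 * actually written; forgetting the last step is the bug fixed above. */
static int32_t fill_block(void *iter, fetch_row_fn next, char *rows, size_t rowSize,
                          int32_t capacity, int32_t *blockRows) {
  int32_t numOfRows = 0;
  while (numOfRows < capacity && next(iter, rows + (size_t)numOfRows * rowSize)) {
    ++numOfRows;
  }
  *blockRows = numOfRows;   /* equivalent of pBlock->info.rows = numOfRows */
  return numOfRows;
}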
tsElectInterval = 25 * 1000; int32_t tsHeartbeatInterval = 1000; @@ -392,6 +392,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS); if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1; + tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000); + if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 1024, 0) != 0) return -1; + tsNumOfCommitThreads = tsNumOfCores / 2; tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4); if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1; @@ -504,6 +507,14 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem->stype = stype; } + pItem = cfgGetItem(tsCfg, "numOfRpcSessions"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfRpcSessions = 2000; + tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000); + pItem->i32 = tsNumOfRpcSessions; + pItem->stype = stype; + } + pItem = cfgGetItem(tsCfg, "numOfCommitThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfCommitThreads = numOfCores / 2; @@ -721,6 +732,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval; tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32; + tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32; tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32; tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32; tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32; @@ -771,7 +783,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { if (tsQueryBufferSize >= 0) { tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL; } - + tsDisableStream = cfgGetItem(pCfg, "disableStream")->bval; GRANT_CFG_GET; @@ -980,6 +992,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32; } else if (strcasecmp("numOfRpcThreads", name) == 0) { tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32; + } else if (strcasecmp("numOfRpcSessions", name) == 0) { + tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32; } else if (strcasecmp("numOfCommitThreads", name) == 0) { tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32; } else if (strcasecmp("numOfMnodeReadThreads", name) == 0) { diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 659dae142e..3992dbc3e1 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -284,9 +284,9 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; - int32_t connLimitNum = 1000 / (tsNumOfRpcThreads * 3); + int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3); connLimitNum = TMAX(connLimitNum, 10); - connLimitNum = TMIN(connLimitNum, 50); + connLimitNum = TMIN(connLimitNum, 500); rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; From dd30b3577cbc67616d768e4899e8d3e66210c500 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 19 Feb 2023 18:29:11 +0800 Subject: [PATCH 254/267] add trans param --- source/common/src/tglobal.c | 2 +- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index e636dffdd9..e3f08e912a 100644 --- 
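taosUpdateServerCfg() above recomputes derived defaults such as numOfRpcSessions only while the item still carries its compiled-in default (stype == CFG_STYPE_DEFAULT), so an explicit setting in the config file is never overwritten. A sketch of that rule; SCfgItem and update_derived_default are illustrative, not the real config structures:

#include <stdbool.h>
#include <stdint.h>

/* Minimal view of a config item: its value plus whether the user ever set it. */
typedef struct {
  int32_t value;
  bool    isDefault;   /* true while the compiled-in default is still in effect */
} SCfgItem;

/* Overwrite the value with a freshly derived default (e.g. based on core count
 * or the session budget) only when the user never configured it explicitly,
 * and keep the derived value inside the supported range. */
static void update_derived_default(SCfgItem *item, int32_t derived, int32_t lo, int32_t hi) {
  if (!item->isDefault) {
    return;                      /* explicit user setting wins */
  }
  if (derived < lo) derived = lo;
  if (derived > hi) derived = hi;
  item->value = derived;
}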
a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -393,7 +393,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1; tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000); - if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 1024, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, 0) != 0) return -1; tsNumOfCommitThreads = tsNumOfCores / 2; tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 3992dbc3e1..3a1ca161a9 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -291,7 +291,7 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.connLimitNum = connLimitNum; rpcInit.connLimitLock = 1; rpcInit.supportBatch = 1; - rpcInit.batchSize = 16 * 1024; + rpcInit.batchSize = 8 * 1024; pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { From 1971ec0adb0ff52cc3b2a07267f765f1dad2059d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 19 Feb 2023 22:17:44 +0800 Subject: [PATCH 255/267] add trans param --- source/libs/transport/src/transCli.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 2c862ed45b..7e1aeafaad 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -2285,7 +2285,7 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return TSDB_CODE_RPC_BROKEN_LINK; } - if (pTransInst->connLimitNum > 0 && REQUEST_NO_RESP(pReq)) { + /*if (pTransInst->connLimitNum > 0 && REQUEST_NO_RESP(pReq)) { char key[TSDB_FQDN_LEN + 64] = {0}; char* ip = EPSET_GET_INUSE_IP((SEpSet*)pEpSet); uint16_t port = EPSET_GET_INUSE_PORT((SEpSet*)pEpSet); @@ -2297,7 +2297,7 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return TSDB_CODE_RPC_MAX_SESSIONS; } - } + }*/ TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); From 4ff86d954aa3cb0ad45a6bfc1c817a22a08704d5 Mon Sep 17 00:00:00 2001 From: lispQin Date: Mon, 20 Feb 2023 10:24:45 +0800 Subject: [PATCH 256/267] doc: update immigrate OpenTSDB data by DataX to TDengine3.0 links --- docs/en/25-application/03-immigrate.md | 2 +- docs/zh/25-application/_03-immigrate.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md index 30d069e4e2..5f4a86937e 100644 --- a/docs/en/25-application/03-immigrate.md +++ b/docs/en/25-application/03-immigrate.md @@ -184,7 +184,7 @@ TDengine supports the standard JDBC 3.0 interface for manipulating databases, bu To facilitate historical data migration, we provide a plug-in for the data synchronization tool DataX, which can automatically write data into TDengine.The automatic data migration of DataX can only support the data migration process of a single value model. -For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html). 
+For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/engineering/16401.html). After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. We provide these as a reference for application migration. diff --git a/docs/zh/25-application/_03-immigrate.md b/docs/zh/25-application/_03-immigrate.md index d1c9caea09..7da23876d2 100644 --- a/docs/zh/25-application/_03-immigrate.md +++ b/docs/zh/25-application/_03-immigrate.md @@ -172,7 +172,7 @@ TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其 为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。 -DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。 +DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/engineering/16401.html)。 在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 From 46ca6c9466deb451d09109e57f9644bc481fbaca Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 20 Feb 2023 19:06:07 +0800 Subject: [PATCH 257/267] fix: tag filter --- source/libs/executor/src/executil.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index d9b0210ed5..639adb7ec6 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -965,9 +965,9 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN goto end; } else { if ((condType == FILTER_NO_LOGIC || condType == FILTER_AND) && status != SFLT_NOT_INDEX) { - code = metaGetTableTagsByUids(metaHandle, pListInfo->suid, pUidList); + code = metaGetTableTagsByUids(metaHandle, pListInfo->suid, pUidTagList); } else { - code = metaGetTableTags(metaHandle, pListInfo->suid, pUidList); + code = metaGetTableTags(metaHandle, pListInfo->suid, pUidTagList); } if (code != TSDB_CODE_SUCCESS) { qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), pListInfo->suid); From 9370d67e431388ebe98c2f7926007e52bdec0ed4 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 21 Feb 2023 09:43:56 +0800 Subject: [PATCH 258/267] fix: tsdb reader resume --- source/dnode/vnode/src/tsdb/tsdbRead.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 8b785500f9..64a450a7b1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -4303,12 +4303,14 @@ int32_t tsdbReaderResume(STsdbReader* pReader) { // we need only one row pPrevReader->capacity = 1; pPrevReader->status.pTableMap = pReader->status.pTableMap; + pPrevReader->status.uidList = pReader->status.uidList; pPrevReader->pSchema = pReader->pSchema; pPrevReader->pMemSchema = pReader->pMemSchema; pPrevReader->pReadSnap = pReader->pReadSnap; pNextReader->capacity = 1; pNextReader->status.pTableMap = pReader->status.pTableMap; + pNextReader->status.uidList = pReader->status.uidList; pNextReader->pSchema = pReader->pSchema; pNextReader->pMemSchema = pReader->pMemSchema; pNextReader->pReadSnap = pReader->pReadSnap; From b304456feffc3b8fe52f1f5627ed95a06153d64e Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang 
Date: Tue, 21 Feb 2023 11:17:59 +0800 Subject: [PATCH 259/267] fix: rsma error --- source/dnode/vnode/src/sma/smaFS.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/sma/smaFS.c b/source/dnode/vnode/src/sma/smaFS.c index 23d2528283..5dbe91f836 100644 --- a/source/dnode/vnode/src/sma/smaFS.c +++ b/source/dnode/vnode/src/sma/smaFS.c @@ -639,8 +639,7 @@ int32_t tdRSmaFSCopy(SSma *pSma, SRSmaFS *pFS) { code = tdRSmaFSCreate(pFS, size); TSDB_CHECK_CODE(code, lino, _exit); - taosArrayClear(pFS->aQTaskInf->pData); - taosArrayAddBatch(pFS->aQTaskInf->pData, qFS->aQTaskInf->pData, size); + taosArrayAddBatch(pFS->aQTaskInf, qFS->aQTaskInf->pData, size); _exit: if (code) { From acfa05b5fadcf4d4cdc61aa4b0317dac10b0376c Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 21 Feb 2023 13:14:44 +0800 Subject: [PATCH 260/267] fix: table count scan --- tests/develop-test/2-query/table_count_scan.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py index ad0cb0bbc4..758d28948d 100644 --- a/tests/develop-test/2-query/table_count_scan.py +++ b/tests/develop-test/2-query/table_count_scan.py @@ -14,7 +14,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVer=1): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self._conn = conn def restartTaosd(self, index=1, dbname="db"): @@ -93,7 +93,7 @@ class TDTestCase: tdSql.checkData(1, 1, 'performance_schema') tdSql.checkData(0, 0, 3) tdSql.checkData(0, 1, 'tbl_count') - tdSql.checkData(2, 0, 23) + tdSql.checkData(2, 0, 24) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -189,7 +189,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 23) + tdSql.checkData(3, 0, 24) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -204,7 +204,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 23) + tdSql.checkData(3, 0, 24) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -215,7 +215,7 @@ class TDTestCase: tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(2, 0, 23) + tdSql.checkData(2, 0, 24) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") From f06fd054110dd0f349903c339f72158db2d87035 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 21 Feb 2023 14:58:29 +0800 Subject: [PATCH 261/267] fix(dnd/ep): failed with invalid ep.json --- source/dnode/mgmt/node_util/src/dmEps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index a7a5b8b999..e9ab8a0460 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -402,7 +402,7 @@ static int32_t dmDecodeEpPairs(SJson *pJson, SDnodeData *pData) { int32_t code = 0; SJson *dnodes = tjsonGetObjectItem(pJson, "dnodes"); - if (dnodes == NULL) return 0; + if (dnodes == NULL) return -1; int32_t numOfDnodes = tjsonGetArraySize(dnodes); for (int32_t i = 0; i < numOfDnodes; ++i) { From 
5d46b63ed058ee0e160932732a5e1b77fe9007aa Mon Sep 17 00:00:00 2001 From: lispQin Date: Tue, 21 Feb 2023 17:31:40 +0800 Subject: [PATCH 262/267] docs: update application docs --- .../{03-immigrate.md => _03-immigrate.md} | 0 .../{_03-immigrate.md => 03-immigrate.md} | 105 +++++++++++++++++- 2 files changed, 102 insertions(+), 3 deletions(-) rename docs/en/25-application/{03-immigrate.md => _03-immigrate.md} (100%) rename docs/zh/25-application/{_03-immigrate.md => 03-immigrate.md} (82%) diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/_03-immigrate.md similarity index 100% rename from docs/en/25-application/03-immigrate.md rename to docs/en/25-application/_03-immigrate.md diff --git a/docs/zh/25-application/_03-immigrate.md b/docs/zh/25-application/03-immigrate.md similarity index 82% rename from docs/zh/25-application/_03-immigrate.md rename to docs/zh/25-application/03-immigrate.md index 7da23876d2..beda3002c9 100644 --- a/docs/zh/25-application/_03-immigrate.md +++ b/docs/zh/25-application/03-immigrate.md @@ -170,11 +170,110 @@ TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其 ### 1、使用工具自动迁移数据 -为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。 +为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了适配 TDengine 3.0 的插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。 -DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/engineering/16401.html)。 +#### 1.0 插件功能介绍 +1. TDengine30Reader 提供的功能: + 1. 支持通过 SQL 进行数据筛选; + 2. 根据时间间隔进行任务切分; + 3. 支持 TDengine 的全部数据类型; + 4. 支持批量读取,通过 batchSize 参数控制批量拉取结果集的大小,提高读取性能。 +2. TDengine30Writer 支持的功能: + 1. 支持 OpenTSDB 的 json 格式的行协议,使用 TDengine 的 schemaless 方式写入 TDengine。 + 2. 支持批量写入,通过 batchSize 参数控制批量写入的数量,提高写入性能。 -在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 +#### 1.1 DataX 安装环境准备 +1. 需要安装 TDengine 客户端 +2. 需要安装 JDK 1.8 环境(运行 DataX) +3. 需要安装 Python 环境(运行 DataX) +4. 需要 maven 编译环境(如果不编译 DataX 则可以不安装 maven) + +#### 1.2 安装 +1. 下载源码 +~~~ +git clone https://github.com/taosdata/DataX.git +~~~ +2. 编译打包 +~~~ +cd DataX +mvn -U clean package assembly:assembly -Dmaven.test.skip=true +~~~ +3. 安装 +~~~ +cp target/datax.tar.gz your_install_dir +cd your_install_dir +tar -zxvf dataX.tar.gz +~~~ + +#### 1.3 数据迁移 Job 的配置 +以一个从 OpenTSDB 到 TDengine 3.0 版本的数据迁移任务为例,配置文件 opentsdb2tdengine.json 如下: +~~~ +{ + "job":{ + "content":[{ + "reader": { + "name": "opentsdbreader", + "parameter": { + "endpoint": "http://192.168.1.180:4242", + "column": ["weather_temperature"], + "beginDateTime": "2021-01-01 00:00:00", + "endDateTime": "2021-01-01 01:00:00" + } + }, + "writer": { + "name": "tdengine30writer", + "parameter": { + "username": "root", + "password": "taosdata", + "connection": [ + { + "table": [ + "matric1" + ], + "jdbcUrl": "jdbc:TAOS://192.168.1.101:6030/test?timestampFormat=TIMESTAMP" + } + ], + "batchSize": 1000, + "ignoreTagsUnmatched": true + } + } + }], + "setting": { + "speed": { + "channel": 1 + } + } + } + } +~~~ +配置说明: +1. 上面的配置表示,从 192.168.1.180 的 OpenTSDB,到 192.168.1.101 的 TDengine 的迁移。迁移 metric 为 weather_temperature,时间从 2021-01-01 00:00:00 开始,到 2021-01-01 01:00:00 结束的数据。 +2. reader 使用 datax 的 opentsdbreader,parameter 的配置请参考:[opentsdbreader.md#配置参数](https://github.com/taosdata/DataX/blob/master/opentsdbreader/doc/opentsdbreader.md) +3. 
tdengine30writer 的 parameter 中,user,password 为必须项,没有默认值。batchSize 不是必须项,默认值为 1。详细参考:[tdengine30writer.md#配置参数](https://github.com/taosdata/DataX/blob/master/tdengine30writer/doc/tdengine30writer-CN.md) +4. TDengine 中,如果 dbname 指定的 database 不存在,则需要在迁移前创建数据库。 + +#### 1.4 执行迁移任务 +~~~ +python bin/datax.py job/opentsdb2tdengine.json +~~~ + +#### 1.5 限制条件 +1. 目前,DataX 自带的 opentsdbreader 仅支持 OpenTSDB-2.3.X 版本。详细参考:[opentsdbreader#约束限制](https://github.com/alibaba/DataX/blob/master/opentsdbreader/doc/opentsdbreader.md#5-%E7%BA%A6%E6%9D%9F%E9%99%90%E5%88%B6) +2. 数据迁移工具依赖 TDengine 客户端中的 `libtaos.so/taos.dll/libtaos.dylib`,需要与服务端对应版本的 TDengine-client。 + +#### 1.6 其他 +1. FAQ + 1. 如何估算一个数据迁移任务所需要的资源 + DataX 的每个 reader 按照自己的 task 切分策略进行任务划分,具体请参考 DataX 的任务调度规则。在估算资源是,需要按照数据迁移的数据量,任务切分规则和网络带宽限制等综合考虑,最好以实际数据迁移测试结果为准。 + 2. TDengine30Writer 的 batchSize 设置多大效率最高? + batchSize 是控制批量写入的参数,在获取 batchSize 行纪录后,TDengineWriter 会向 TDengine 发送一次写入请求,这减少了与 TDengine 交互次数,从而提高了性能。从测试结果来看,batchSize 在 500-1000 范围内效率最高。 + 3. job 的配置中 channel 数为多少合适? + job 中的 channel 数为流量控制的参数,每个 channel 都需要开辟一块内存,用来缓存数据。如果 channel 设置过大,会引起 OOM,所以 channel 数并不是越大越好。增加 channel 数后,需要提高 JVM 内存大小。从测试结果来看,channel 在 1~6 的范围内都是合适,能够保证 DataX 的流量最大化即可。 + 4. java.sql.SQLException: TDengine ERROR (8000060b): Invalid client value + 配置文件中 column 中没有配置 tbname,此时会触发行协议数据写入(行协议写入只会自动创建子表名,但需要提前创建好超级表),行协议写入的情况下不支持 TAG 数据类型为非 NCHAR,所以此种情况有两种解决方案:1.将 TAG 全部修改为 NCHAR 类型;2.在 Column 中配置好表名称这样不会触发行协议写入。 + 5. java.sql.SQLException: TDengine ERROR (8000060b): Timestamp data out of range + 配置文件中 column 中没有配置 tbname,此时会触发行协议数据写入,且 TAG 全部为 NCHAR 类型,此时需要保证时间戳的一列名称为 _ts,而不能是其他名称(行协议写入下,默认将最后的时间戳写入到 _ts 一列,且不能随意命名)。若想避免请使用 tbname 指定表名以避免触发行协议写入。 +2. 在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 | DataX 实例个数 (并发进程个数) | 迁移记录速度 (条/秒) | | ----------------------------- | --------------------- | From bdb0c12d4bfecdac278ac045aa7941e98a050d22 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Tue, 21 Feb 2023 17:57:48 +0800 Subject: [PATCH 263/267] docs: refine dataX page --- docs/zh/25-application/03-immigrate.md | 159 +++++++------------------ 1 file changed, 43 insertions(+), 116 deletions(-) diff --git a/docs/zh/25-application/03-immigrate.md b/docs/zh/25-application/03-immigrate.md index beda3002c9..26ef712957 100644 --- a/docs/zh/25-application/03-immigrate.md +++ b/docs/zh/25-application/03-immigrate.md @@ -18,82 +18,15 @@ title: OpenTSDB 应用迁移到 TDengine 的最佳实践 如果我们将原本运行在 OpenTSDB 上的应用迁移到 TDengine 上,不仅可以有效地降低计算和存储资源的占用、减少部署服务器的规模,还能够极大减少运行维护的成本的输出,让运维管理工作更简单、更轻松,大幅降低总拥有成本。与 OpenTSDB 一样,TDengine 也已经进行了开源,不同的是,除了单机版,后者还实现了集群版开源,被厂商绑定的顾虑一扫而空。 -在下文中我们将就“使用最典型并广泛应用的运维监控(DevOps)场景”来说明,如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。后续的章节会做更深度的介绍,以便于进行非 DevOps 场景的迁移。 +在下文中我们将说明如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。 -## DevOps 应用快速迁移 +## TDengine 与 OpenTSDB 的差异 -### 1、典型应用场景 - -一个典型的 DevOps 应用场景的系统整体的架构如下图(图 1) 所示。 - -**图 1. DevOps 场景中典型架构** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "图1. 
DevOps 场景中典型架构") - -在该应用场景中,包含了部署在应用环境中负责收集机器度量(Metrics)、网络度量(Metrics)以及应用度量(Metrics)的 Agent 工具、汇聚 Agent 收集信息的数据收集器,数据持久化存储和管理的系统以及监控数据可视化工具(例如:Grafana 等)。 - -其中,部署在应用节点的 Agents 负责向 collectd/Statsd 提供不同来源的运行指标,collectd/StatsD 则负责将汇聚的数据推送到 OpenTSDB 集群系统,然后使用可视化看板 Grafana 将数据可视化呈现出来。 - -### 2、迁移服务 - -- **TDengine 安装部署** - -首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 - -注意,安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后再启动。 - -- **调整数据收集器配置** - -在 TDengine 2.4 版本中,包含一个组件 taosAdapter。taosAdapter 是一个无状态、可快速弹性伸缩的组件,它可以兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议规范,提供了丰富的数据接入能力,有效的节省用户迁移成本,降低用户应用迁移的难度。 - -用户可以根据需求弹性部署 taosAdapter 实例,结合场景的需要,快速提升数据写入的吞吐量,为不同应用场景下的数据写入提供保障。 - -通过 taosAdapter,用户可以将 collectd 或 StatsD 收集的数据直接推送到 TDengine ,实现应用场景的无缝迁移,非常的轻松便捷。taosAdapter 还支持 Telegraf、Icinga、TCollector 、node_exporter 的数据接入,使用详情参考[taosAdapter](/reference/taosadapter/)。 - -如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为 192.168.1.130,端口为 6046,配置如下: - -```html -LoadPlugin write_tsdb - - - Host "192.168.1.130" Port "6046" HostTags "status=production" StoreRates - false AlwaysAppendDS false - - -``` - -即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosAdapter, taosAdapter 将调用 API 将数据写入到 TDengine 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。 - -- **调整看板(Dashboard)系统** - -在数据能够正常写入 TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。获取和使用 TDengine 提供的 Grafana 插件请参考[与其他工具的连接](/third-party/grafana)。 - -TDengine 提供了默认的两套 Dashboard 模板,用户只需要将 Grafana 目录下的模板导入到 Grafana 中即可激活使用。 - -**图 2. 导入 Grafana 模板** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "图2. 导入 Grafana 模板") - -操作完以上步骤后,就完成了将 OpenTSDB 替换成为 TDengine 的迁移工作。可以看到整个流程非常简单,不需要写代码,只需要对某些配置文件进行调整即可完成全部的迁移工作。 - -### 3、迁移后架构 - -完成迁移以后,此时的系统整体的架构如下图(图 3)所示,而整个过程中采集端、数据写入端、以及监控呈现端均保持了稳定,除了极少的配置调整外,不涉及任何重要的更改和变动。OpenTSDB 大量的应用场景均为 DevOps ,这种场景下,简单的参数设置即可完成 OpenTSDB 到 TDengine 迁移动作,使用上 TDengine 更加强大的处理能力和查询性能。 - -在绝大多数的 DevOps 场景中,如果你拥有一个小规模的 OpenTSDB 集群(3 台及以下的节点)作为 DevOps 的存储端,依赖于 OpenTSDB 为系统持久化层提供数据存储和查询功能,那么你可以安全地将其替换为 TDengine,并节约更多的计算和存储资源。在同等计算资源配置情况下,单台 TDengine 即可满足 3 ~ 5 台 OpenTSDB 节点提供的服务能力。如果规模比较大,那便需要采用 TDengine 集群。 - -如果你的应用特别复杂,或者应用领域并不是 DevOps 场景,你可以继续阅读后续的章节,更加全面深入地了解将 OpenTSDB 的应用迁移到 TDengine 的高级话题。 - -**图 3. 迁移完成后的系统架构** -![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "图 3. 
迁移完成后的系统架构") - -## 其他场景的迁移评估与策略 - -### 1、TDengine 与 OpenTSDB 的差异 - -本章将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本章的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。 +本节将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本节的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。 TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine,需要将前端看板重新适配到 Grafana 才可以正常运行。 -在 2.3.0.x 版本中,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 +如果您的收集端使用了像 collectd 和 StatsD 这样的数据采集工具,要重新配置这些数据采集工具将数据写入到 TDengine。TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项: @@ -104,11 +37,11 @@ TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的 通过上面的介绍,相信你应该能够了解 OpenTSDB 迁移到 TDengine 带来的变化,这些信息也有助于你正确地判断是否可以接受将应用 迁移到 TDengine 之上,体验 TDengine 提供的强大的时序数据处理能力和便捷的使用体验。 -### 2、迁移策略 +## 迁移策略 首先将基于 OpenTSDB 的系统进行迁移涉及到的数据模式设计、系统规模估算、数据写入端改造,进行数据分流、应用适配工作;之后将两个系统并行运行一段时间,再将历史数据迁移到 TDengine 中。当然如果你的应用中有部分功能强依赖于上述 OpenTSDB 特性,同时又不希望停止使用,可以考虑保持原有的 OpenTSDB 系统运行,同时启动 TDengine 来提供主要的服务。 -## 数据模型设计 +### 数据模型设计 一方面,TDengine 要求其入库的数据具有严格的模式定义。另一方面,TDengine 的数据模型相对于 OpenTSDB 来说又更加丰富,多值模型能够兼容全部的单值模型的建立需求。 @@ -150,7 +83,7 @@ insert into memory_vm130_memory_buffered_collectd using memory tags(‘vm130’ 如果你想要利用 TDengine 的多值模型能力,需要首先满足以下要求:不同的采集量具有相同的采集频率,且能够通过消息队列**同时到达**数据写入端,从而确保使用 SQL 语句将多个指标一次性写入。将度量的名称作为超级表的名称,建立具有相同采集频率且能够同时到达的数据多列模型。子表的表名采用具有固定规则的方式进行命名。上述每个度量均只包含一个测量值,因此无法将其转化为多值模型。 -## 数据分流与应用适配 +### 数据分流与应用适配 从消息队列中订阅数据,并启动调整后的写入程序写入数据。 @@ -166,13 +99,19 @@ TDengine 不支持采用 OpenTSDB 的查询语法进行查询或数据获取处 TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。 -## 历史数据迁移 -### 1、使用工具自动迁移数据 +## 使用 DataX 迁移数据 为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了适配 TDengine 3.0 的插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。 -#### 1.0 插件功能介绍 +### 安装和部署 TDengine + +在进行数据迁移之前,要有一个正确运行的 TDengine 集群。首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参考 [安装指南](../../get-started/package) + +安装完成后,请根据 [部署指南](../../deployment/deploy) 配置集群。 + +### 1.0 插件功能介绍 + 1. TDengine30Reader 提供的功能: 1. 支持通过 SQL 进行数据筛选; 2. 根据时间间隔进行任务切分; @@ -182,13 +121,15 @@ TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其 1. 支持 OpenTSDB 的 json 格式的行协议,使用 TDengine 的 schemaless 方式写入 TDengine。 2. 支持批量写入,通过 batchSize 参数控制批量写入的数量,提高写入性能。 -#### 1.1 DataX 安装环境准备 +### 1.1 DataX 安装环境准备 + 1. 需要安装 TDengine 客户端 2. 需要安装 JDK 1.8 环境(运行 DataX) 3. 需要安装 Python 环境(运行 DataX) 4. 需要 maven 编译环境(如果不编译 DataX 则可以不安装 maven) -#### 1.2 安装 +### 安装 + 1. 下载源码 ~~~ git clone https://github.com/taosdata/DataX.git @@ -205,7 +146,8 @@ cd your_install_dir tar -zxvf dataX.tar.gz ~~~ -#### 1.3 数据迁移 Job 的配置 +### 数据迁移 Job 的配置 + 以一个从 OpenTSDB 到 TDengine 3.0 版本的数据迁移任务为例,配置文件 opentsdb2tdengine.json 如下: ~~~ { @@ -252,28 +194,33 @@ tar -zxvf dataX.tar.gz 3. tdengine30writer 的 parameter 中,user,password 为必须项,没有默认值。batchSize 不是必须项,默认值为 1。详细参考:[tdengine30writer.md#配置参数](https://github.com/taosdata/DataX/blob/master/tdengine30writer/doc/tdengine30writer-CN.md) 4. TDengine 中,如果 dbname 指定的 database 不存在,则需要在迁移前创建数据库。 -#### 1.4 执行迁移任务 +### 执行迁移任务 + ~~~ python bin/datax.py job/opentsdb2tdengine.json ~~~ -#### 1.5 限制条件 +### 限制条件 + 1. 
目前,DataX 自带的 opentsdbreader 仅支持 OpenTSDB-2.3.X 版本。详细参考:[opentsdbreader#约束限制](https://github.com/alibaba/DataX/blob/master/opentsdbreader/doc/opentsdbreader.md#5-%E7%BA%A6%E6%9D%9F%E9%99%90%E5%88%B6) 2. 数据迁移工具依赖 TDengine 客户端中的 `libtaos.so/taos.dll/libtaos.dylib`,需要与服务端对应版本的 TDengine-client。 -#### 1.6 其他 -1. FAQ - 1. 如何估算一个数据迁移任务所需要的资源 +### FAQ + +1. 如何估算一个数据迁移任务所需要的资源 DataX 的每个 reader 按照自己的 task 切分策略进行任务划分,具体请参考 DataX 的任务调度规则。在估算资源是,需要按照数据迁移的数据量,任务切分规则和网络带宽限制等综合考虑,最好以实际数据迁移测试结果为准。 - 2. TDengine30Writer 的 batchSize 设置多大效率最高? +2. TDengine30Writer 的 batchSize 设置多大效率最高? batchSize 是控制批量写入的参数,在获取 batchSize 行纪录后,TDengineWriter 会向 TDengine 发送一次写入请求,这减少了与 TDengine 交互次数,从而提高了性能。从测试结果来看,batchSize 在 500-1000 范围内效率最高。 - 3. job 的配置中 channel 数为多少合适? +3. job 的配置中 channel 数为多少合适? job 中的 channel 数为流量控制的参数,每个 channel 都需要开辟一块内存,用来缓存数据。如果 channel 设置过大,会引起 OOM,所以 channel 数并不是越大越好。增加 channel 数后,需要提高 JVM 内存大小。从测试结果来看,channel 在 1~6 的范围内都是合适,能够保证 DataX 的流量最大化即可。 - 4. java.sql.SQLException: TDengine ERROR (8000060b): Invalid client value +4. java.sql.SQLException: TDengine ERROR (8000060b): Invalid client value 配置文件中 column 中没有配置 tbname,此时会触发行协议数据写入(行协议写入只会自动创建子表名,但需要提前创建好超级表),行协议写入的情况下不支持 TAG 数据类型为非 NCHAR,所以此种情况有两种解决方案:1.将 TAG 全部修改为 NCHAR 类型;2.在 Column 中配置好表名称这样不会触发行协议写入。 - 5. java.sql.SQLException: TDengine ERROR (8000060b): Timestamp data out of range +5. java.sql.SQLException: TDengine ERROR (8000060b): Timestamp data out of range 配置文件中 column 中没有配置 tbname,此时会触发行协议数据写入,且 TAG 全部为 NCHAR 类型,此时需要保证时间戳的一列名称为 _ts,而不能是其他名称(行协议写入下,默认将最后的时间戳写入到 _ts 一列,且不能随意命名)。若想避免请使用 tbname 指定表名以避免触发行协议写入。 -2. 在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 + +### 提升性能 + + 在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 | DataX 实例个数 (并发进程个数) | 迁移记录速度 (条/秒) | | ----------------------------- | --------------------- | @@ -283,9 +230,9 @@ python bin/datax.py job/opentsdb2tdengine.json | 5 | 约 29.5 万 | | 10 | 约 33 万 | -
(注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16 核 64G 硬件设备,channel 和 batchSize 分别为 8 和 1000,每条记录包含 10 个 tag) +(注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16 核 64G 硬件设备,channel 和 batchSize 分别为 8 和 1000,每条记录包含 10 个 tag) -### 2、手动迁移数据 +## 手动迁移数据 如果你需要使用多值模型进行数据写入,就需要自行开发一个将数据从 OpenTSDB 导出的工具,然后确认哪些时间线能够合并导入到同一个时间线,再将可以同时导入的时间通过 SQL 语句的写入到数据库中。 @@ -492,31 +439,11 @@ WHERE ts>=1510560000 AND ts<=1515000009 综上所述,可使用单台 16 核 32GB 的机器,或者使用 2 台 8 核 16GB 机器构成的集群。 -## 附录 3: 集群部署及启动 - -TDengine 提供了丰富的帮助文档说明集群安装、部署的诸多方面的内容,这里提供相应的文档列表,供你参考。 - -### 集群部署 - -首先是安装 TDengine,从官网上下载 TDengine 最新稳定版,解压缩后运行 install.sh 进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 - -注意安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后才启动 `taosd` 服务。 - -### 设置运行参数并启动服务 - -为确保系统能够正常获取运行的必要信息。请在服务端正确设置以下关键参数: - -FQDN、firstEp、secondEP、dataDir、logDir、tmpDir、serverPort。各参数的具体含义及设置的要求,可参见文档《[TDengine 集群安装、管理](/cluster/)》 - -按照相同的步骤,在需要运行的节点上设置参数,并启动 `taosd` 服务,然后添加 Dnode 到集群中。 - -最后启动 `taos` 命令行程序,执行命令 `show dnodes`,如果能看到所有的加入集群的节点,那么集群顺利搭建完成。具体的操作流程及注意事项,请参阅文档《[TDengine 集群安装、管理](/cluster/)》 - -## 附录 4: 超级表名称 +## 附录 3: 超级表名称 由于 OpenTSDB 的 metric 名称中带有点号(“.”),例如“cpu.usage_user”这种名称的 metric。但是点号在 TDengine 中具有特殊含义,是用来分隔数据库和表名称的分隔符。TDengine 也提供转义符,以允许用户在(超级)表名称中使用关键词或特殊分隔符(如:点号)。为了使用特殊字符,需要采用转义字符将表的名称括起来,例如:`cpu.usage_user`这样就是合法的(超级)表名称。 -## 附录 5:参考文章 +## 附录 4:参考文章 -1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](/application/collectd/) -2. [通过 collectd 将采集数据直接写入 TDengine](/third-party/collectd/) +1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](../collectd/) +2. [通过 collectd 将采集数据直接写入 TDengine](../../third-party/collectd/) From 8030188f4095af53ceabd8f4f2aedef651c1be92 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Tue, 21 Feb 2023 18:05:05 +0800 Subject: [PATCH 264/267] Update 03-immigrate.md --- docs/zh/25-application/03-immigrate.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/25-application/03-immigrate.md b/docs/zh/25-application/03-immigrate.md index 26ef712957..75788c0cc7 100644 --- a/docs/zh/25-application/03-immigrate.md +++ b/docs/zh/25-application/03-immigrate.md @@ -110,7 +110,7 @@ TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其 安装完成后,请根据 [部署指南](../../deployment/deploy) 配置集群。 -### 1.0 插件功能介绍 +### 插件功能介绍 1. TDengine30Reader 提供的功能: 1. 支持通过 SQL 进行数据筛选; @@ -121,7 +121,7 @@ TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其 1. 支持 OpenTSDB 的 json 格式的行协议,使用 TDengine 的 schemaless 方式写入 TDengine。 2. 支持批量写入,通过 batchSize 参数控制批量写入的数量,提高写入性能。 -### 1.1 DataX 安装环境准备 +### DataX 安装环境准备 1. 需要安装 TDengine 客户端 2. 
需要安装 JDK 1.8 环境(运行 DataX) From 7e8823d594e88a9197ef5a82a6b2b6891cc60d08 Mon Sep 17 00:00:00 2001 From: sunpeng Date: Tue, 21 Feb 2023 19:00:50 +0800 Subject: [PATCH 265/267] build: delete TaosConsumer and TaosTmq from taospy (#20076) --- tests/pytest/crash_gen/crash_gen_main.py | 1153 +++++++++++---------- tests/system-test/0-others/user_manage.py | 112 +- 2 files changed, 645 insertions(+), 620 deletions(-) diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 3c39b05e69..ec588659e9 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -18,7 +18,8 @@ from __future__ import annotations from typing import Any, Set, Tuple from typing import Dict from typing import List -from typing import Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none +from typing import \ + Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none import textwrap import time @@ -39,7 +40,6 @@ import gc import taos from taos.tmq import * - from .shared.types import TdColumns, TdTags # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess @@ -65,10 +65,11 @@ if sys.version_info[0] < 3: # Command-line/Environment Configurations, will set a bit later # ConfigNameSpace = argparse.Namespace # gConfig: argparse.Namespace -gSvcMgr: Optional[ServiceManager] # TODO: refactor this hack, use dep injection +gSvcMgr: Optional[ServiceManager] # TODO: refactor this hack, use dep injection # logger: logging.Logger gContainer: Container + # def runThread(wt: WorkerThread): # wt.run() @@ -77,7 +78,7 @@ class WorkerThread: def __init__(self, pool: ThreadPool, tid, tc: ThreadCoordinator): """ Note: this runs in the main thread context - """ + """ # self._curStep = -1 self._pool = pool self._tid = tid @@ -91,15 +92,15 @@ class WorkerThread: if (Config.getConfig().per_thread_db_connection): # type: ignore # print("connector_type = {}".format(gConfig.connector_type)) tInst = gContainer.defTdeInstance - if Config.getConfig().connector_type == 'native': - self._dbConn = DbConn.createNative(tInst.getDbTarget()) + if Config.getConfig().connector_type == 'native': + self._dbConn = DbConn.createNative(tInst.getDbTarget()) elif Config.getConfig().connector_type == 'rest': - self._dbConn = DbConn.createRest(tInst.getDbTarget()) + self._dbConn = DbConn.createRest(tInst.getDbTarget()) elif Config.getConfig().connector_type == 'mixed': - if Dice.throw(2) == 0: # 1/2 chance - self._dbConn = DbConn.createNative(tInst.getDbTarget()) + if Dice.throw(2) == 0: # 1/2 chance + self._dbConn = DbConn.createNative(tInst.getDbTarget()) else: - self._dbConn = DbConn.createRest(tInst.getDbTarget()) + self._dbConn = DbConn.createRest(tInst.getDbTarget()) else: raise RuntimeError("Unexpected connector type: {}".format(Config.getConfig().connector_type)) @@ -138,7 +139,7 @@ class WorkerThread: # clean up if (Config.getConfig().per_thread_db_connection): # type: ignore - if self._dbConn.isOpen: #sometimes it is not open + if self._dbConn.isOpen: # sometimes it is not open self._dbConn.close() else: Logging.warning("Cleaning up worker thread, dbConn already closed") @@ -150,20 +151,19 @@ class WorkerThread: tc = self._tc # Thread Coordinator, the overall master try: tc.crossStepBarrier() # shared barrier first, INCLUDING the last one - except threading.BrokenBarrierError as err: # main thread timed out + except threading.BrokenBarrierError as err: # main thread 
timed out print("_bto", end="") Logging.debug("[TRD] Worker thread exiting due to main thread barrier time-out") break Logging.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid)) - self.crossStepGate() # then per-thread gate, after being tapped + self.crossStepGate() # then per-thread gate, after being tapped Logging.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid)) if not self._tc.isRunning(): print("_wts", end="") Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") break - # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more) try: if (Config.getConfig().per_thread_db_connection): # most likely TRUE @@ -172,7 +172,8 @@ class WorkerThread: # self.useDb() # might encounter exceptions. TODO: catch except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [0x383, 0x386, 0x00B, 0x014] : # invalid database, dropping, Unable to establish connection, Database not ready + if errno in [0x383, 0x386, 0x00B, + 0x014]: # invalid database, dropping, Unable to establish connection, Database not ready # ignore dummy = 0 else: @@ -180,12 +181,12 @@ class WorkerThread: raise # Fetch a task from the Thread Coordinator - Logging.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] about to fetch task".format(self._tid)) task = tc.fetchTask() # Execute such a task Logging.debug("[TRD] Worker thread [{}] about to execute task: {}".format( - self._tid, task.__class__.__name__)) + self._tid, task.__class__.__name__)) task.execute(self) tc.saveExecutedTask(task) Logging.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) @@ -228,7 +229,7 @@ class WorkerThread: self._stepGate.set() # wake up! 
time.sleep(0) # let the released thread run a bit else: - print("_tad", end="") # Thread already dead + print("_tad", end="") # Thread already dead def execSql(self, sql): # TODO: expose DbConn directly return self.getDbConn().execute(sql) @@ -239,7 +240,7 @@ class WorkerThread: def getQueryResult(self): return self.getDbConn().getQueryResult() - def getDbConn(self) -> DbConn : + def getDbConn(self) -> DbConn: if (Config.getConfig().per_thread_db_connection): return self._dbConn else: @@ -251,6 +252,7 @@ class WorkerThread: # else: # return self._tc.getDbState().getDbConn().query(sql) + # The coordinator of all worker threads, mostly running in main thread @@ -262,7 +264,7 @@ class ThreadCoordinator: self._pool = pool # self._wd = wd self._te = None # prepare for every new step - self._dbManager = dbManager # type: Optional[DbManager] # may be freed + self._dbManager = dbManager # type: Optional[DbManager] # may be freed self._executedTasks: List[Task] = [] # in a given step self._lock = threading.RLock() # sync access for a few things @@ -284,7 +286,7 @@ class ThreadCoordinator: return self._dbManager def crossStepBarrier(self, timeout=None): - self._stepBarrier.wait(timeout) + self._stepBarrier.wait(timeout) def requestToStop(self): self._runStatus = Status.STATUS_STOPPING @@ -292,7 +294,7 @@ class ThreadCoordinator: def _runShouldEnd(self, transitionFailed, hasAbortedTask, workerTimeout): maxSteps = Config.getConfig().max_steps # type: ignore - if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9 + if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9 return True if self._runStatus != Status.STATUS_RUNNING: return True @@ -304,7 +306,7 @@ class ThreadCoordinator: return True return False - def _hasAbortedTask(self): # from execution of previous step + def _hasAbortedTask(self): # from execution of previous step for task in self._executedTasks: if task.isAborted(): # print("Task aborted: {}".format(task)) @@ -319,17 +321,17 @@ class ThreadCoordinator: "--\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # A new TE for the new step - self._te = None # set to empty first, to signal worker thread to stop + self._te = None # set to empty first, to signal worker thread to stop if not transitionFailed: # only if not failed self._te = TaskExecutor(self._curStep) Logging.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format( - self._curStep)) # Now not all threads had time to go to sleep + self._curStep)) # Now not all threads had time to go to sleep # Worker threads will wake up at this point, and each execute it's own task - self.tapAllThreads() # release all worker thread from their "gates" + self.tapAllThreads() # release all worker thread from their "gates" def _syncAtBarrier(self): - # Now main thread (that's us) is ready to enter a step + # Now main thread (that's us) is ready to enter a step # let other threads go past the pool barrier, but wait at the # thread gate Logging.debug("[TRD] Main thread about to cross the barrier") @@ -341,7 +343,7 @@ class ThreadCoordinator: transitionFailed = False try: for x in self._dbs: - db = x # type: Database + db = x # type: Database sm = db.getStateMachine() Logging.debug("[STT] starting transitions for DB: {}".format(db.getName())) # at end of step, transiton the DB state @@ -357,8 +359,8 @@ class ThreadCoordinator: # for t in self._pool.threadList: # Logging.debug("[DB] use db for all worker threads") # t.useDb() - # t.execSql("use db") # main thread 
executing "use - # db" on behalf of every worker thread + # t.execSql("use db") # main thread executing "use + # db" on behalf of every worker thread except taos.error.ProgrammingError as err: if (err.msg == 'network unavailable'): # broken DB connection @@ -369,12 +371,13 @@ class ThreadCoordinator: self._execStats.registerFailure("Broken DB Connection") # continue # don't do that, need to tap all threads at # end, and maybe signal them to stop - if isinstance(err, CrashGenError): # our own transition failure + if isinstance(err, CrashGenError): # our own transition failure Logging.info("State transition error") # TODO: saw an error here once, let's print out stack info for err? - traceback.print_stack() # Stack frame to here. + traceback.print_stack() # Stack frame to here. Logging.info("Caused by:") - traceback.print_exception(*sys.exc_info()) # Ref: https://www.geeksforgeeks.org/how-to-print-exception-stack-trace-in-python/ + traceback.print_exception( + *sys.exc_info()) # Ref: https://www.geeksforgeeks.org/how-to-print-exception-stack-trace-in-python/ transitionFailed = True self._te = None # Not running any more self._execStats.registerFailure("State transition error: {}".format(err)) @@ -392,14 +395,14 @@ class ThreadCoordinator: # Coordinate all threads step by step self._curStep = -1 # not started yet - + self._execStats.startExec() # start the stop watch transitionFailed = False hasAbortedTask = False workerTimeout = False while not self._runShouldEnd(transitionFailed, hasAbortedTask, workerTimeout): - if not Config.getConfig().debug: # print this only if we are not in debug mode - Progress.emit(Progress.STEP_BOUNDARY) + if not Config.getConfig().debug: # print this only if we are not in debug mode + Progress.emit(Progress.STEP_BOUNDARY) # print(".", end="", flush=True) # if (self._curStep % 2) == 0: # print memory usage once every 10 steps # memUsage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss @@ -408,15 +411,14 @@ class ThreadCoordinator: # h = hpy() # print("\n") # print(h.heap()) - - + try: - self._syncAtBarrier() # For now just cross the barrier + self._syncAtBarrier() # For now just cross the barrier Progress.emit(Progress.END_THREAD_STEP) - if self._stepStartTime : + if self._stepStartTime: stepExecTime = time.time() - self._stepStartTime Progress.emitStr('{:.3f}s/{}'.format(stepExecTime, DbConnNative.totalRequests)) - DbConnNative.resetTotalRequests() # reset to zero + DbConnNative.resetTotalRequests() # reset to zero except threading.BrokenBarrierError as err: self._execStats.registerFailure("Aborted due to worker thread timeout") Logging.error("\n") @@ -439,15 +441,15 @@ class ThreadCoordinator: # At this point, all threads should be pass the overall "barrier" and before the per-thread "gate" # We use this period to do house keeping work, when all worker # threads are QUIET. 
- hasAbortedTask = self._hasAbortedTask() # from previous step - if hasAbortedTask: + hasAbortedTask = self._hasAbortedTask() # from previous step + if hasAbortedTask: Logging.info("Aborted task encountered, exiting test program") self._execStats.registerFailure("Aborted Task Encountered") - break # do transition only if tasks are error free + break # do transition only if tasks are error free # Ending previous step try: - transitionFailed = self._doTransition() # To start, we end step -1 first + transitionFailed = self._doTransition() # To start, we end step -1 first except taos.error.ProgrammingError as err: transitionFailed = True errno2 = Helper.convertErrno(err.errno) # correct error scheme @@ -459,32 +461,32 @@ class ThreadCoordinator: # Then we move on to the next step Progress.emit(Progress.BEGIN_THREAD_STEP) self._stepStartTime = time.time() - self._releaseAllWorkerThreads(transitionFailed) + self._releaseAllWorkerThreads(transitionFailed) - if hasAbortedTask or transitionFailed : # abnormal ending, workers waiting at "gate" + if hasAbortedTask or transitionFailed: # abnormal ending, workers waiting at "gate" Logging.debug("Abnormal ending of main thraed") elif workerTimeout: Logging.debug("Abnormal ending of main thread, due to worker timeout") - else: # regular ending, workers waiting at "barrier" + else: # regular ending, workers waiting at "barrier" Logging.debug("Regular ending, main thread waiting for all worker threads to stop...") self._syncAtBarrier() self._te = None # No more executor, time to end Logging.debug("Main thread tapping all threads one last time...") self.tapAllThreads() # Let the threads run one last time - #TODO: looks like we are not capturing the failures for the last step yet (i.e. calling registerFailure if neccessary) + # TODO: looks like we are not capturing the failures for the last step yet (i.e. calling registerFailure if neccessary) Logging.debug("\r\n\n--> Main thread ready to finish up...") Logging.debug("Main thread joining all threads") self._pool.joinAll() # Get all threads to finish - Logging.info(". . . All worker threads finished") # No CR/LF before + Logging.info(". . . 
All worker threads finished") # No CR/LF before self._execStats.endExec() - def cleanup(self): # free resources + def cleanup(self): # free resources self._pool.cleanup() self._pool = None - self._te = None + self._te = None self._dbManager = None self._executedTasks = [] self._lock = None @@ -492,7 +494,6 @@ class ThreadCoordinator: self._execStats = None self._runStatus = None - def printStats(self): self._execStats.printStats() @@ -523,21 +524,21 @@ class ThreadCoordinator: def _initDbs(self): ''' Initialize multiple databases, invoked at __ini__() time ''' - self._dbs = [] # type: List[Database] + self._dbs = [] # type: List[Database] dbc = self.getDbManager().getDbConn() if Config.getConfig().max_dbs == 0: self._dbs.append(Database(0, dbc)) - else: - baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic - )*333) % 888 if Config.getConfig().dynamic_db_table_names else 0 + else: + baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic + ) * 333) % 888 if Config.getConfig().dynamic_db_table_names else 0 for i in range(Config.getConfig().max_dbs): self._dbs.append(Database(baseDbNumber + i, dbc)) def pickDatabase(self): idxDb = 0 - if Config.getConfig().max_dbs != 0 : - idxDb = Dice.throw(Config.getConfig().max_dbs) # 0 to N-1 - db = self._dbs[idxDb] # type: Database + if Config.getConfig().max_dbs != 0: + idxDb = Dice.throw(Config.getConfig().max_dbs) # 0 to N-1 + db = self._dbs[idxDb] # type: Database return db def fetchTask(self) -> Task: @@ -549,12 +550,12 @@ class ThreadCoordinator: # pick a task type for current state db = self.pickDatabase() - if Dice.throw(2)==1: - taskType = db.getStateMachine().pickTaskType() # dynamic name of class + if Dice.throw(2) == 1: + taskType = db.getStateMachine().pickTaskType() # dynamic name of class else: - taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types + taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types pass - + return taskType(self._execStats, db) # create a task from it def resetExecutedTasks(self): @@ -564,6 +565,7 @@ class ThreadCoordinator: with self._lock: self._executedTasks.append(task) + class ThreadPool: def __init__(self, numThreads, maxSteps): self.numThreads = numThreads @@ -585,7 +587,8 @@ class ThreadPool: workerThread._thread.join() def cleanup(self): - self.threadList = [] # maybe clean up each? + self.threadList = [] # maybe clean up each? 
+ # A queue of continguous POSITIVE integers, used by DbManager to generate continuous numbers # for new table names @@ -680,11 +683,11 @@ class AnyState: CAN_CREATE_DB = 1 # For below, if we can "drop the DB", but strictly speaking # only "under normal circumstances", as we may override it with the -b option - CAN_DROP_DB = 2 + CAN_DROP_DB = 2 CAN_CREATE_FIXED_SUPER_TABLE = 3 CAN_CREATE_STREAM = 3 # super table must exists CAN_CREATE_TOPIC = 3 # super table must exists - CAN_CREATE_CONSUMERS = 3 + CAN_CREATE_CONSUMERS = 3 CAN_DROP_FIXED_SUPER_TABLE = 4 CAN_DROP_TOPIC = 4 CAN_DROP_STREAM = 4 @@ -729,7 +732,7 @@ class AnyState: def canDropDb(self): # If user requests to run up to a number of DBs, # we'd then not do drop_db operations any more - if Config.getConfig().max_dbs > 0 or Config.getConfig().use_shadow_db : + if Config.getConfig().max_dbs > 0 or Config.getConfig().use_shadow_db: return False return self._info[self.CAN_DROP_DB] @@ -737,19 +740,19 @@ class AnyState: return self._info[self.CAN_CREATE_FIXED_SUPER_TABLE] def canDropFixedSuperTable(self): - if Config.getConfig().use_shadow_db: # duplicate writes to shaddow DB, in which case let's disable dropping s-table + if Config.getConfig().use_shadow_db: # duplicate writes to shaddow DB, in which case let's disable dropping s-table return False return self._info[self.CAN_DROP_FIXED_SUPER_TABLE] def canCreateTopic(self): return self._info[self.CAN_CREATE_TOPIC] - + def canDropTopic(self): return self._info[self.CAN_DROP_TOPIC] def canCreateConsumers(self): return self._info[self.CAN_CREATE_CONSUMERS] - + def canCreateStreams(self): return self._info[self.CAN_CREATE_STREAM] @@ -777,7 +780,7 @@ class AnyState: raise CrashGenError( "Unexpected more than 1 success at state: {}, with task: {}, in task set: {}".format( self.__class__.__name__, - cls.__name__, # verified just now that isinstance(task, cls) + cls.__name__, # verified just now that isinstance(task, cls) [c.__class__.__name__ for c in tasks] )) @@ -792,16 +795,17 @@ class AnyState: sCnt += 1 if (exists and sCnt <= 0): raise CrashGenError("Unexpected zero success at state: {}, with task: {}, in task set: {}".format( - self.__class__.__name__, - cls.__name__, # verified just now that isinstance(task, cls) - [c.__class__.__name__ for c in tasks] - )) + self.__class__.__name__, + cls.__name__, # verified just now that isinstance(task, cls) + [c.__class__.__name__ for c in tasks] + )) def assertNoTask(self, tasks, cls): for task in tasks: if isinstance(task, cls): raise CrashGenError( - "This task: {}, is not expected to be present, given the success/failure of others".format(cls.__name__)) + "This task: {}, is not expected to be present, given the success/failure of others".format( + cls.__name__)) def assertNoSuccess(self, tasks, cls): for task in tasks: @@ -848,7 +852,7 @@ class StateEmpty(AnyState): def verifyTasksToState(self, tasks, newState): if (self.hasSuccess(tasks, TaskCreateDb) - ): # at EMPTY, if there's succes in creating DB + ): # at EMPTY, if there's succes in creating DB if (not self.hasTask(tasks, TaskDropDb)): # and no drop_db tasks # we must have at most one. 
TODO: compare numbers self.assertAtMostOneSuccess(tasks, TaskCreateDb) @@ -885,19 +889,19 @@ class StateSuperTableOnly(AnyState): def verifyTasksToState(self, tasks, newState): if (self.hasSuccess(tasks, TaskDropSuperTable) - ): # we are able to drop the table - #self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) + ): # we are able to drop the table + # self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) # we must have had recreted it self.hasSuccess(tasks, TaskCreateSuperTable) # self._state = self.STATE_DB_ONLY # elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # no success dropping the table, but added data # self.assertNoTask(tasks, DropFixedTableTask) # not true in massively parrallel cases - # self._state = self.STATE_HAS_DATA + # self._state = self.STATE_HAS_DATA # elif ( self.hasSuccess(tasks, ReadFixedDataTask) ): # no success in prev cases, but was able to read data - # self.assertNoTask(tasks, DropFixedTableTask) - # self.assertNoTask(tasks, AddFixedDataTask) - # self._state = self.STATE_TABLE_ONLY # no change + # self.assertNoTask(tasks, DropFixedTableTask) + # self.assertNoTask(tasks, AddFixedDataTask) + # self._state = self.STATE_TABLE_ONLY # no change # else: # did not drop table, did not insert data, did not read successfully, that is impossible # raise RuntimeError("Unexpected no-success scenarios") # TODO: need to revamp!! @@ -919,41 +923,41 @@ class StateHasData(AnyState): self.assertAtMostOneSuccess(tasks, TaskDropDb) # TODO: dicy elif (newState.equals(AnyState.STATE_DB_ONLY)): # in DB only if (not self.hasTask(tasks, TaskCreateDb) - ): # without a create_db task + ): # without a create_db task # we must have drop_db task self.assertNoTask(tasks, TaskDropDb) self.hasSuccess(tasks, TaskDropSuperTable) # self.assertAtMostOneSuccess(tasks, DropFixedSuperTableTask) # TODO: dicy # elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted - # self.assertNoTask(tasks, TaskDropDb) - # self.assertNoTask(tasks, TaskDropSuperTable) - # self.assertNoTask(tasks, TaskAddData) - # self.hasSuccess(tasks, DeleteDataTasks) + # self.assertNoTask(tasks, TaskDropDb) + # self.assertNoTask(tasks, TaskDropSuperTable) + # self.assertNoTask(tasks, TaskAddData) + # self.hasSuccess(tasks, DeleteDataTasks) else: # should be STATE_HAS_DATA if (not self.hasTask(tasks, TaskCreateDb) - ): # only if we didn't create one + ): # only if we didn't create one # we shouldn't have dropped it self.assertNoTask(tasks, TaskDropDb) - if not( self.hasTask(tasks, TaskCreateSuperTable) - ): # if we didn't create the table + if not (self.hasTask(tasks, TaskCreateSuperTable) + ): # if we didn't create the table # we should not have a task that drops it self.assertNoTask(tasks, TaskDropSuperTable) # self.assertIfExistThenSuccess(tasks, ReadFixedDataTask) class StateMechine: - def __init__(self, db: Database): + def __init__(self, db: Database): self._db = db # transitition target probabilities, indexed with value of STATE_EMPTY, STATE_DB_ONLY, etc. self._stateWeights = [1, 2, 10, 40] - def init(self, dbc: DbConn): # late initailization, don't save the dbConn + def init(self, dbc: DbConn): # late initailization, don't save the dbConn try: self._curState = self._findCurrentState(dbc) # starting state - except taos.error.ProgrammingError as err: + except taos.error.ProgrammingError as err: Logging.error("Failed to initialized state machine, cannot find current state: {}".format(err)) traceback.print_stack() - raise # re-throw + raise # re-throw # TODO: seems no lnoger used, remove? 
def getCurrentState(self): @@ -999,28 +1003,27 @@ class StateMechine: def _findCurrentState(self, dbc: DbConn): ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state - dbName =self._db.getName() - if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?! - Logging.debug( "[STT] empty database found, between {} and {}".format(ts, time.time())) + dbName = self._db.getName() + if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?! + Logging.debug("[STT] empty database found, between {} and {}".format(ts, time.time())) return StateEmpty() # did not do this when openning connection, and this is NOT the worker # thread, which does this on their own dbc.use(dbName) - + if not dbc.hasTables(): # no tables - + Logging.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) return StateDbOnly() # For sure we have tables, which means we must have the super table. # TODO: are we sure? - + sTable = self._db.getFixedSuperTable() - if sTable.hasRegTables(dbc): # no regular tables # print("debug=====*\n"*100) Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) - + return StateSuperTableOnly() else: # has actual tables Logging.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) @@ -1029,7 +1032,7 @@ class StateMechine: # We transition the system to a new state by examining the current state itself def transition(self, tasks, dbc: DbConn): global gSvcMgr - + if (len(tasks) == 0): # before 1st step, or otherwise empty Logging.debug("[STT] Starting State: {}".format(self._curState)) return # do nothing @@ -1038,39 +1041,39 @@ class StateMechine: dbc.execute("select * from information_schema.ins_dnodes") # Generic Checks, first based on the start state - if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. + if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. 
if self._curState.canCreateDb(): self._curState.assertIfExistThenSuccess(tasks, TaskCreateDb) # self.assertAtMostOneSuccess(tasks, CreateDbTask) # not really, in # case of multiple creation and drops if self._curState.canDropDb(): - if gSvcMgr == None: # only if we are running as client-only + if gSvcMgr == None: # only if we are running as client-only self._curState.assertIfExistThenSuccess(tasks, TaskDropDb) # self.assertAtMostOneSuccess(tasks, DropDbTask) # not really in # case of drop-create-drop # if self._state.canCreateFixedTable(): - # self.assertIfExistThenSuccess(tasks, CreateFixedTableTask) # Not true, DB may be dropped - # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not - # really, in case of create-drop-create + # self.assertIfExistThenSuccess(tasks, CreateFixedTableTask) # Not true, DB may be dropped + # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not + # really, in case of create-drop-create # if self._state.canDropFixedTable(): - # self.assertIfExistThenSuccess(tasks, DropFixedTableTask) # Not True, the whole DB may be dropped - # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not - # really in case of drop-create-drop + # self.assertIfExistThenSuccess(tasks, DropFixedTableTask) # Not True, the whole DB may be dropped + # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not + # really in case of drop-create-drop # if self._state.canAddData(): # self.assertIfExistThenSuccess(tasks, AddFixedDataTask) # not true # actually # if self._state.canReadData(): - # Nothing for sure + # Nothing for sure newState = self._findCurrentState(dbc) Logging.debug("[STT] New DB state determined: {}".format(newState)) # can old state move to new state through the tasks? - if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. + if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. 
self._curState.verifyTasksToState(tasks, newState) self._curState = newState @@ -1096,22 +1099,24 @@ class StateMechine: weightsTypes = BasicTypes.copy() # this matrixs can balance the Frequency of TaskTypes - balance_TaskType_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 , - 'TaskReadData':50 , 'TaskDropSuperTable':5 , 'TaskAlterTags':3 , 'TaskAddData':10, - 'TaskDeleteData':10 , 'TaskCreateDb':10 , 'TaskCreateStream': 3, 'TaskCreateTopic' :3, - 'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # TaskType : balance_matrixs of task - - for task , weights in balance_TaskType_matrixs.items(): - + balance_TaskType_matrixs = {'TaskDropDb': 5, 'TaskDropTopics': 20, 'TaskDropStreams': 10, + 'TaskDropStreamTables': 10, + 'TaskReadData': 50, 'TaskDropSuperTable': 5, 'TaskAlterTags': 3, 'TaskAddData': 10, + 'TaskDeleteData': 10, 'TaskCreateDb': 10, 'TaskCreateStream': 3, + 'TaskCreateTopic': 3, + 'TaskCreateConsumers': 10, + 'TaskCreateSuperTable': 10} # TaskType : balance_matrixs of task + + for task, weights in balance_TaskType_matrixs.items(): + for basicType in BasicTypes: if basicType.__name__ == task: for _ in range(weights): weightsTypes.append(basicType) - task = random.sample(weightsTypes,1) + task = random.sample(weightsTypes, 1) return task[0] - # ref: # https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ def _weighted_choice_sub(self, weights) -> int: @@ -1123,6 +1128,7 @@ class StateMechine: return i raise CrashGenError("Unexpected no choice") + class Database: ''' We use this to represent an actual TDengine database inside a service instance, possibly in a cluster environment. @@ -1131,16 +1137,16 @@ class Database: TODO: consider moving, but keep in mind it contains "StateMachine" ''' - _clsLock = threading.Lock() # class wide lock + _clsLock = threading.Lock() # class wide lock _lastInt = 101 # next one is initial integer - _lastTick = None # Optional[datetime] - _lastLaggingTick = None # Optional[datetime] # lagging tick, for out-of-sequence (oos) data insertions + _lastTick = None # Optional[datetime] + _lastLaggingTick = None # Optional[datetime] # lagging tick, for out-of-sequence (oos) data insertions - def __init__(self, dbNum: int, dbc: DbConn): # TODO: remove dbc - self._dbNum = dbNum # we assign a number to databases, for our testing purpose + def __init__(self, dbNum: int, dbc: DbConn): # TODO: remove dbc + self._dbNum = dbNum # we assign a number to databases, for our testing purpose self._stateMachine = StateMechine(self) self._stateMachine.init(dbc) - + self._lock = threading.RLock() def getStateMachine(self) -> StateMechine: @@ -1152,7 +1158,7 @@ class Database: def getName(self): return "db_{}".format(self._dbNum) - def filterTasks(self, inTasks: List[Task]): # Pick out those belonging to us + def filterTasks(self, inTasks: List[Task]): # Pick out those belonging to us outTasks = [] for task in inTasks: if task.getDb().isSame(self): @@ -1184,38 +1190,42 @@ class Database: # start time will be auto generated , start at 10 years ago local time local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16] local_epoch_time = [int(i) for i in local_time.split("-")] - #local_epoch_time will be such as : [2022, 7, 18] + # local_epoch_time will be such as : [2022, 7, 18] - t1 = datetime.datetime(local_epoch_time[0]-5, local_epoch_time[1], local_epoch_time[2]) + t1 = datetime.datetime(local_epoch_time[0] - 5, local_epoch_time[1], local_epoch_time[2]) t2 = 
datetime.datetime.now() # maybe a very large number, takes 69 years to exceed Python int range elSec = int(t2.timestamp() - t1.timestamp()) elSec2 = (elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500)) * \ - 500 # a number representing seconds within 10 years + 500 # a number representing seconds within 10 years # print("elSec = {}".format(elSec)) - t3 = datetime.datetime(local_epoch_time[0]-10, local_epoch_time[1], local_epoch_time[2]) # default "keep" is 10 years + t3 = datetime.datetime(local_epoch_time[0] - 10, local_epoch_time[1], + local_epoch_time[2]) # default "keep" is 10 years t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above Logging.debug("Setting up TICKS to start from: {}".format(t4)) return t4 @classmethod - def getNextTick(cls): + def getNextTick(cls): ''' Fetch a timestamp tick, with some random factor, may not be unique. - ''' + ''' with cls._clsLock: # prevent duplicate tick - if cls._lastLaggingTick is None or cls._lastTick is None : # not initialized + if cls._lastLaggingTick is None or cls._lastTick is None: # not initialized # 10k at 1/20 chance, should be enough to avoid overlaps tick = cls.setupLastTick() cls._lastTick = tick - cls._lastLaggingTick = tick + datetime.timedelta(0, -60*2) # lagging behind 2 minutes, should catch up fast + cls._lastLaggingTick = tick + datetime.timedelta(0, + -60 * 2) # lagging behind 2 minutes, should catch up fast # if : # should be quite a bit into the future - if Config.isSet('mix_oos_data') and Dice.throw(20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick - cls._lastLaggingTick += datetime.timedelta(0, 1) # pick the next sequence from the lagging tick sequence - return cls._lastLaggingTick + if Config.isSet('mix_oos_data') and Dice.throw( + 20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick + cls._lastLaggingTick += datetime.timedelta(0, + 1) # pick the next sequence from the lagging tick sequence + return cls._lastLaggingTick else: # regular # add one second to it cls._lastTick += datetime.timedelta(0, 1) @@ -1332,9 +1342,7 @@ class Task(): # Logging.debug("Creating new task {}...".format(self._taskNum)) self._execStats = execStats - self._db = db # A task is always associated/for a specific DB - - + self._db = db # A task is always associated/for a specific DB def isSuccess(self): return self._err is None @@ -1367,82 +1375,78 @@ class Task(): def _isServiceStable(self): if not gSvcMgr: return True # we don't run service, so let's assume it's stable - return gSvcMgr.isStable() # otherwise let's examine the service + return gSvcMgr.isStable() # otherwise let's examine the service def _isErrAcceptable(self, errno, msg): if errno in [ - # TDengine 2.x Error Codes: - 0x05, # TSDB_CODE_RPC_NOT_READY - 0x0B, # Unable to establish connection, more details in TD-1648 - # 0x200, # invalid SQL, TODO: re-examine with TD-934 - 0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776 - 0x213, # "Disconnected from service", result of "kill connection ???" - 0x217, # "db not selected", client side defined error code - # 0x218, # "Table does not exist" client side defined error code - 0x360, # Table already exists - 0x362, - # 0x369, # tag already exists - 0x36A, 0x36B, 0x36D, - 0x381, - 0x380, # "db not selected" - 0x383, - 0x386, # DB is being dropped?! - 0x503, - 0x510, # vnode not in ready state - 0x14, # db not ready, errno changed - 0x600, # Invalid table ID, why? 
- 0x218, # Table does not exist + # TDengine 2.x Error Codes: + 0x05, # TSDB_CODE_RPC_NOT_READY + 0x0B, # Unable to establish connection, more details in TD-1648 + # 0x200, # invalid SQL, TODO: re-examine with TD-934 + 0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776 + 0x213, # "Disconnected from service", result of "kill connection ???" + 0x217, # "db not selected", client side defined error code + # 0x218, # "Table does not exist" client side defined error code + 0x360, # Table already exists + 0x362, + # 0x369, # tag already exists + 0x36A, 0x36B, 0x36D, + 0x381, + 0x380, # "db not selected" + 0x383, + 0x386, # DB is being dropped?! + 0x503, + 0x510, # vnode not in ready state + 0x14, # db not ready, errno changed + 0x600, # Invalid table ID, why? + 0x218, # Table does not exist - # TDengine 3.0 Error Codes: - 0x0333, # Object is creating # TODO: this really is NOT an acceptable error - 0x0369, # Tag already exists - 0x0388, # Database not exist - 0x03A0, # STable already exists - 0x03A1, # STable [does] not exist - 0x03AA, # Tag already exists - 0x0603, # Table already exists - 0x2603, # Table does not exist, replaced by 2662 below - 0x260d, # Tags number not matched - 0x2662, # Table does not exist #TODO: what about 2603 above? - 0x2600, # database not specified, SQL: show stables , database droped , and show tables - 0x032C, # Object is creating - 0x032D, # Object is dropping - 0x03D3, # Conflict transaction not completed - 0x0707, # Query not ready , it always occur at replica 3 - 0x707, # Query not ready - 0x396, # Database in creating status - 0x386, # Database in droping status - 0x03E1, # failed on tmq_subscribe ,topic not exist - 0x03ed , # Topic must be dropped first, SQL: drop database db_0 - 0x0203 , # Invalid value - 0x03f0 , # Stream already exist , topic already exists + # TDengine 3.0 Error Codes: + 0x0333, # Object is creating # TODO: this really is NOT an acceptable error + 0x0369, # Tag already exists + 0x0388, # Database not exist + 0x03A0, # STable already exists + 0x03A1, # STable [does] not exist + 0x03AA, # Tag already exists + 0x0603, # Table already exists + 0x2603, # Table does not exist, replaced by 2662 below + 0x260d, # Tags number not matched + 0x2662, # Table does not exist #TODO: what about 2603 above? + 0x2600, # database not specified, SQL: show stables , database droped , and show tables + 0x032C, # Object is creating + 0x032D, # Object is dropping + 0x03D3, # Conflict transaction not completed + 0x0707, # Query not ready , it always occur at replica 3 + 0x707, # Query not ready + 0x396, # Database in creating status + 0x386, # Database in droping status + 0x03E1, # failed on tmq_subscribe ,topic not exist + 0x03ed, # Topic must be dropped first, SQL: drop database db_0 + 0x0203, # Invalid value + 0x03f0, # Stream already exist , topic already exists - - - - 1000 # REST catch-all error - ]: - return True # These are the ALWAYS-ACCEPTABLE ones + 1000 # REST catch-all error + ]: + return True # These are the ALWAYS-ACCEPTABLE ones # This case handled below already. 
# elif (errno in [ 0x0B ]) and Settings.getConfig().auto_start_service: # return True # We may get "network unavilable" when restarting service - elif Config.getConfig().ignore_errors: # something is specified on command line + elif Config.getConfig().ignore_errors: # something is specified on command line moreErrnos = [int(v, 0) for v in Config.getConfig().ignore_errors.split(',')] if errno in moreErrnos: return True - elif errno == 0x200 : # invalid SQL, we need to div in a bit more + elif errno == 0x200: # invalid SQL, we need to div in a bit more if msg.find("invalid column name") != -1: - return True - elif msg.find("tags number not matched") != -1: # mismatched tags after modification return True - elif msg.find("duplicated column names") != -1: # also alter table tag issues + elif msg.find("tags number not matched") != -1: # mismatched tags after modification return True - elif not self._isServiceStable(): # We are managing service, and ... + elif msg.find("duplicated column names") != -1: # also alter table tag issues + return True + elif not self._isServiceStable(): # We are managing service, and ... Logging.info("Ignoring error when service starting/stopping: errno = {}, msg = {}".format(errno, msg)) return True - - return False # Not an acceptable error + return False # Not an acceptable error def execute(self, wt: WorkerThread): wt.verifyThreadSelf() @@ -1453,7 +1457,7 @@ class Task(): self.logDebug( "[-] executing task {}...".format(self.__class__.__name__)) - self._err = None # TODO: type hint mess up? + self._err = None # TODO: type hint mess up? self._execStats.beginTaskType(self.__class__.__name__) # mark beginning errno2 = None @@ -1465,19 +1469,19 @@ class Task(): errno2 = Helper.convertErrno(err.errno) if (Config.getConfig().continue_on_exception): # user choose to continue self.logDebug("[=] Continue after TAOS exception: errno=0x{:X}, msg: {}, SQL: {}".format( - errno2, err, wt.getDbConn().getLastSql())) + errno2, err, wt.getDbConn().getLastSql())) self._err = err elif self._isErrAcceptable(errno2, err.__str__()): self.logDebug("[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format( - errno2, err, wt.getDbConn().getLastSql())) + errno2, err, wt.getDbConn().getLastSql())) # print("_", end="", flush=True) Progress.emit(Progress.ACCEPTABLE_ERROR) self._err = err - else: # not an acceptable error + else: # not an acceptable error shortTid = threading.get_ident() % 10000 errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format( self.__class__.__name__, - errno2, + errno2, shortTid, err, wt.getDbConn().getLastSql()) self.logDebug(errMsg) @@ -1485,7 +1489,8 @@ class Task(): # raise # so that we see full stack traceback.print_exc() print( - "\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(errMsg) + + "\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format( + errMsg) + "----------------------------\n") # sys.exit(-1) self._err = err @@ -1502,10 +1507,10 @@ class Task(): traceback.print_exc() # except BaseException: # TODO: what is this again??!! 
        # raise RuntimeError("Punt")
-        # self.logDebug(
-        #     "[=] Unexpected exception, SQL: {}".format(
-        #         wt.getDbConn().getLastSql()))
-        # raise
+        # self.logDebug(
+        #     "[=] Unexpected exception, SQL: {}".format(
+        #         wt.getDbConn().getLastSql()))
+        # raise
        self._execStats.endTaskType(self.__class__.__name__, self.isSuccess())
        self.logDebug("[X] task execution completed, {}, status: {}".format(
@@ -1524,12 +1529,12 @@ class Task():
    def getQueryResult(self, wt: WorkerThread):  # execute an SQL on the worker thread
        return wt.getQueryResult()
-    def lockTable(self, ftName): # full table name
+    def lockTable(self, ftName):  # full table name
        # print(" <<" + ftName + '_', end="", flush=True)
-        with Task._lock: # SHORT lock! so we only protect lock creation
-            if not ftName in Task._tableLocks: # Create new lock and add to list, if needed
+        with Task._lock:  # SHORT lock! so we only protect lock creation
+            if not ftName in Task._tableLocks:  # Create new lock and add to list, if needed
                Task._tableLocks[ftName] = threading.Lock()
-
+
        # No lock protection, anybody can do this any time
        lock = Task._tableLocks[ftName]
        # Logging.info("Acquiring lock: {}, {}".format(ftName, lock))
@@ -1538,7 +1543,7 @@ class Task():
    def unlockTable(self, ftName):
        # print('_' + ftName + ">> ", end="", flush=True)
-        with Task._lock:
+        with Task._lock:
            if not ftName in self._tableLocks:
                raise RuntimeError("Corrupt state, no such lock")
            lock = Task._tableLocks[ftName]
@@ -1588,11 +1593,11 @@ class ExecutionStats:
            t[0] += 1  # index 0 has the "total" execution times
            if isSuccess:
                t[1] += 1  # index 1 has the "success" execution times
-            if eno != None:
+            if eno != None:
                if klassName not in self._errors:
                    self._errors[klassName] = {}
                errors = self._errors[klassName]
-                errors[eno] = errors[eno]+1 if eno in errors else 1
+                errors[eno] = errors[eno] + 1 if eno in errors else 1
    def beginTaskType(self, klassName):
        with self._lock:
@@ -1615,7 +1620,7 @@ class ExecutionStats:
        Logging.info(
            "----------------------------------------------------------------------")
        Logging.info(
-            "| Crash_Gen test {}, with the following stats:". format(
+            "| Crash_Gen test {}, with the following stats:".format(
                "FAILED (reason: {})".format(
                    self._failureReason) if self._failed else "SUCCEEDED"))
        Logging.info("| Task Execution Times (success/total):")
@@ -1628,7 +1633,7 @@
                # print("errors = {}".format(errors))
                errStrs = ["0x{:X}:{}".format(eno, n) for (eno, n) in errors.items()]
                # print("error strings = {}".format(errStrs))
-                errStr = ", ".join(errStrs)
+                errStr = ", ".join(errStrs)
                Logging.info("| {0:<24}: {1}/{2} (Errors: {3})".format(k, n[1], n[0], errStr))
        Logging.info(
@@ -1647,8 +1652,8 @@ class ExecutionStats:
        Logging.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList()))
        Logging.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections))
        Logging.info("| Longest native query time: {:.3f} seconds, started: {}".
-                     format(MyTDSql.longestQueryTime,
-                            time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime))) )
+                     format(MyTDSql.longestQueryTime,
+                            time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime))))
        Logging.info("| Longest native query: {}".format(MyTDSql.longestQuery))
        Logging.info(
            "----------------------------------------------------------------------")
@@ -1662,12 +1667,12 @@ class StateTransitionTask(Task):
    _baseTableNumber = None
-    _endState = None # TODO: no longter used?
+    _endState = None  # TODO: no longer used?
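# --- Editor's note (illustration only, not part of the patch): lockTable()/unlockTable()
# above keep a shared dict of per-table locks guarded by one short class-level lock, so a
# lock object is created exactly once while the (possibly long) acquire happens outside
# that critical section. A minimal standalone sketch of the same pattern, with
# hypothetical names:
import threading

_registry_lock = threading.Lock()
_table_locks = {}                          # full table name -> threading.Lock

def lock_table(name: str):
    with _registry_lock:                   # SHORT lock: only protects lock creation
        if name not in _table_locks:
            _table_locks[name] = threading.Lock()
    _table_locks[name].acquire()           # the real wait happens outside the registry lock

def unlock_table(name: str):
    _table_locks[name].release()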
@classmethod def getInfo(cls): # each sub class should supply their own information raise RuntimeError("Overriding method expected") - + @classmethod def getEndState(cls): # TODO: optimize by calling it fewer times raise RuntimeError("Overriding method expected") @@ -1687,7 +1692,7 @@ class StateTransitionTask(Task): @classmethod def getRegTableName(cls, i): - if ( StateTransitionTask._baseTableNumber is None): # Set it one time + if (StateTransitionTask._baseTableNumber is None): # Set it one time StateTransitionTask._baseTableNumber = Dice.throw( 999) if Config.getConfig().dynamic_db_table_names else 0 return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i) @@ -1711,16 +1716,21 @@ class TaskCreateDb(StateTransitionTask): repStr = "" if Config.getConfig().num_replicas != 1: # numReplica = Dice.throw(Settings.getConfig().max_replicas) + 1 # 1,2 ... N - numReplica = Config.getConfig().num_replicas # fixed, always + numReplica = Config.getConfig().num_replicas # fixed, always repStr = "replica {}".format(numReplica) - updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1 - vg_nums = random.randint(1,8) - cache_model = Dice.choice(['none' , 'last_row' , 'last_value' , 'both']) - buffer = random.randint(3,128) + updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1 + vg_nums = random.randint(1, 8) + cache_model = Dice.choice(['none', 'last_row', 'last_value', 'both']) + buffer = random.randint(3, 128) dbName = self._db.getName() - self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, updatePostfix, vg_nums, cache_model,buffer ) ) + self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, + updatePostfix, + vg_nums, + cache_model, + buffer)) if dbName == "db_0" and Config.getConfig().use_shadow_db: - self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix ) ) + self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix)) + class TaskDropDb(StateTransitionTask): @classmethod @@ -1732,19 +1742,20 @@ class TaskDropDb(StateTransitionTask): return state.canDropDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - + try: - self.queryWtSql(wt, "drop database {}".format(self._db.getName())) # drop database maybe failed ,because topic exists + self.queryWtSql(wt, "drop database {}".format( + self._db.getName())) # drop database maybe failed ,because topic exists except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [0x0203]: # drop maybe failed + if errno in [0x0203]: # drop maybe failed pass Logging.debug("[OPS] database dropped at {}".format(time.time())) class TaskCreateStream(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1755,39 +1766,40 @@ class TaskCreateStream(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): dbname = self._db.getName() - - sub_stream_name = dbname+ '_sub_stream' + + sub_stream_name = dbname + '_sub_stream' sub_stream_tb_name = 'stream_tb_sub' - super_stream_name = dbname+ '_super_stream' + super_stream_name = dbname + '_super_stream' super_stream_tb_name = 'stream_tb_super' if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() 
# type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - stbname =sTable.getName() + stbname = sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) - aggExpr = Dice.choice([ - 'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)','min(speed)', 'max(speed)', 'first(speed)', 'last(speed)', - 'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)']) - - stream_sql = '' # set default value + aggExpr = Dice.choice([ + 'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)', 'min(speed)', 'max(speed)', 'first(speed)', + 'last(speed)', + 'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)']) + + stream_sql = '' # set default value if sub_tables: sub_tbname = sub_tables[0] # create stream with query above sub_table - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ - format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '. \ + format(sub_stream_name, dbname, sub_stream_tb_name, aggExpr, dbname, sub_tbname) else: - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ - format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '. \ + format(super_stream_name, dbname, super_stream_tb_name, aggExpr, dbname, stbname) self.execWtSql(wt, stream_sql) Logging.debug("[OPS] stream is creating at {}".format(time.time())) class TaskCreateTopic(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1798,40 +1810,46 @@ class TaskCreateTopic(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): dbname = self._db.getName() - - sub_topic_name = dbname+ '_sub_topic' - super_topic_name = dbname+ '_super_topic' - stable_topic = dbname+ '_stable_topic' - db_topic = 'database_' + dbname+ '_topics' + + sub_topic_name = dbname + '_sub_topic' + super_topic_name = dbname + '_super_topic' + stable_topic = dbname + '_stable_topic' + db_topic = 'database_' + dbname + '_topics' if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place # create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1; - stbname =sTable.getName() + stbname = sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) - scalarExpr = Dice.choice([ '*','speed','color','abs(speed)','acos(speed)','asin(speed)','atan(speed)','ceil(speed)','cos(speed)','cos(speed)', - 'floor(speed)','log(speed,2)','pow(speed,2)','round(speed)','sin(speed)','sqrt(speed)','char_length(color)','concat(color,color)', - 'concat_ws(" ", color,color," ")','length(color)', 'lower(color)', 'ltrim(color)','substr(color , 2)','upper(color)','cast(speed as double)', - 'cast(ts as bigint)']) + scalarExpr = Dice.choice( + ['*', 'speed', 'color', 'abs(speed)', 'acos(speed)', 'asin(speed)', 'atan(speed)', 'ceil(speed)', + 'cos(speed)', 'cos(speed)', + 'floor(speed)', 'log(speed,2)', 'pow(speed,2)', 'round(speed)', 'sin(speed)', 'sqrt(speed)', + 'char_length(color)', 
'concat(color,color)', + 'concat_ws(" ", color,color," ")', 'length(color)', 'lower(color)', 'ltrim(color)', 'substr(color , 2)', + 'upper(color)', 'cast(speed as double)', + 'cast(ts as bigint)']) topic_sql = '' # set default value - if Dice.throw(3)==0: # create topic : source data from sub query - if sub_tables: # if not empty + if Dice.throw(3) == 0: # create topic : source data from sub query + if sub_tables: # if not empty sub_tbname = sub_tables[0] # create topic : source data from sub query of sub stable - topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname) - + topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name, scalarExpr, dbname, + sub_tbname) + else: # create topic : source data from sub query of stable - topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name,scalarExpr, dbname,stbname) - elif Dice.throw(3)==1: # create topic : source data from super table - topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname) - - elif Dice.throw(3)==2: # create topic : source data from whole database - topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname) + topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name, scalarExpr, dbname, + stbname) + elif Dice.throw(3) == 1: # create topic : source data from super table + topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic, dbname, stbname) + + elif Dice.throw(3) == 2: # create topic : source data from whole database + topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic, dbname) else: pass @@ -1840,8 +1858,9 @@ class TaskCreateTopic(StateTransitionTask): self.execWtSql(wt, topic_sql) Logging.debug("[OPS] db topic is creating at {}".format(time.time())) + class TaskDropTopics(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1852,21 +1871,21 @@ class TaskDropTopics(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): dbname = self._db.getName() - if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place tblName = sTable.getName() if sTable.hasTopics(wt.getDbConn()): - sTable.dropTopics(wt.getDbConn(),dbname,None) # drop topics of database - sTable.dropTopics(wt.getDbConn(),dbname,tblName) # drop topics of stable + sTable.dropTopics(wt.getDbConn(), dbname, None) # drop topics of database + sTable.dropTopics(wt.getDbConn(), dbname, tblName) # drop topics of stable + class TaskDropStreams(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1877,20 +1896,20 @@ class TaskDropStreams(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # dbname = self._db.getName() - if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place # tblName = sTable.getName() if sTable.hasStreams(wt.getDbConn()): sTable.dropStreams(wt.getDbConn()) # drop stream of database + class TaskDropStreamTables(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1901,42 +1920,42 @@ class 
TaskDropStreamTables(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # dbname = self._db.getName() - if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable - wt.execSql("use db") # should always be in place + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + wt.execSql("use db") # should always be in place # tblName = sTable.getName() if sTable.hasStreamTables(wt.getDbConn()): - sTable.dropStreamTables(wt.getDbConn()) # drop stream tables + sTable.dropStreamTables(wt.getDbConn()) # drop stream tables + class TaskCreateConsumers(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @classmethod def canBeginFrom(cls, state: AnyState): - return state.canCreateConsumers() + return state.canCreateConsumers() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - if Config.getConfig().connector_type == 'native': - - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + if Config.getConfig().connector_type == 'native': + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place if sTable.hasTopics(wt.getDbConn()): - sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) + sTable.createConsumer(wt.getDbConn(), random.randint(1, 10)) pass else: print(" restful not support tmq consumers") - return + return + - class TaskCreateSuperTable(StateTransitionTask): @classmethod def getEndState(cls): @@ -1951,9 +1970,9 @@ class TaskCreateSuperTable(StateTransitionTask): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - + sTable.create(wt.getDbConn(), {'ts': TdDataType.TIMESTAMP, 'speed': TdDataType.INT, 'color': TdDataType.BINARY16}, { 'b': TdDataType.BINARY200, 'f': TdDataType.FLOAT}, @@ -1974,11 +1993,10 @@ class TdSuperTable: def getName(self): return self._stName - - def drop(self, dbc, skipCheck = False): + def drop(self, dbc, skipCheck=False): dbName = self._dbName - if self.exists(dbc) : # if myself exists - fullTableName = dbName + '.' + self._stName + if self.exists(dbc): # if myself exists + fullTableName = dbName + '.' + self._stName dbc.execute("DROP TABLE {}".format(fullTableName)) else: if not skipCheck: @@ -1989,64 +2007,55 @@ class TdSuperTable: return dbc.existsSuperTable(self._stName) # TODO: odd semantic, create() method is usually static? - def create(self, dbc, cols: TdColumns, tags: TdTags, dropIfExists = False): + def create(self, dbc, cols: TdColumns, tags: TdTags, dropIfExists=False): '''Creating a super table''' dbName = self._dbName dbc.execute("USE " + dbName) - fullTableName = dbName + '.' + self._stName + fullTableName = dbName + '.' 
+ self._stName if dbc.existsSuperTable(self._stName): - if dropIfExists: - dbc.execute("DROP TABLE {}".format(fullTableName)) - - else: # error + if dropIfExists: + dbc.execute("DROP TABLE {}".format(fullTableName)) + + else: # error raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName)) # Now let's create sql = "CREATE TABLE {} ({})".format( fullTableName, - ",".join(['%s %s'%(k,v.value) for (k,v) in cols.items()])) - if tags : + ",".join(['%s %s' % (k, v.value) for (k, v) in cols.items()])) + if tags: sql += " TAGS ({})".format( - ",".join(['%s %s'%(k,v.value) for (k,v) in tags.items()]) - ) + ",".join(['%s %s' % (k, v.value) for (k, v) in tags.items()]) + ) else: sql += " TAGS (dummy int) " - dbc.execute(sql) + dbc.execute(sql) + + def createConsumer(self, dbc, Consumer_nums): - def createConsumer(self, dbc,Consumer_nums): - def generateConsumer(current_topic_list): - conf = TaosTmqConf() - conf.set("group.id", "tg2") - conf.set("td.connect.user", "root") - conf.set("td.connect.pass", "taosdata") -# conf.set("enable.auto.commit", "true") -# def tmq_commit_cb_print(tmq, resp, offset, param=None): -# print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") -# conf.set_auto_commit_cb(tmq_commit_cb_print, None) - consumer = conf.new_consumer() - topic_list = TaosTmqList() + consumer = Consumer({"group.id": "tg2", "td.connect.user": "root", "td.connect.pass": "taosdata"}) + topic_list = [] for topic in current_topic_list: topic_list.append(topic) - try: - consumer.subscribe(topic_list) - except TmqError as e : - pass + + consumer.subscribe(topic_list) # consumer with random work life time_start = time.time() while 1: - res = consumer.poll(1000) - if time.time() - time_start >random.randint(5,50) : + res = consumer.poll(1) + consumer.commit(res) + if time.time() - time_start > random.randint(5, 50): break try: consumer.unsubscribe() - except TmqError as e : + except TmqError as e: pass return - + # mulit Consumer current_topic_list = self.getTopicLists(dbc) for i in range(Consumer_nums): @@ -2067,84 +2076,86 @@ class TdSuperTable: def getRegTables(self, dbc: DbConn): dbName = self._dbName try: - dbc.query("select distinct TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later - except taos.error.ProgrammingError as err: - errno2 = Helper.convertErrno(err.errno) + dbc.query("select distinct TBNAME from {}.{}".format(dbName, + self._stName)) # TODO: analyze result set later + except taos.error.ProgrammingError as err: + errno2 = Helper.convertErrno(err.errno) Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err)) raise qr = dbc.getQueryResult() - return [v[0] for v in qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation + return [v[0] for v in + qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation def hasRegTables(self, dbc: DbConn): - + if dbc.existsSuperTable(self._stName): return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0 else: return False - def hasStreamTables(self,dbc: DbConn): - + def hasStreamTables(self, dbc: DbConn): + return dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) > 0 - def hasStreams(self,dbc: DbConn): + def hasStreams(self, dbc: DbConn): return dbc.query("show streams") > 0 - def hasTopics(self,dbc: DbConn): - + def hasTopics(self, dbc: DbConn): + return dbc.query("show topics") > 0 - def dropTopics(self,dbc: 
DbConn , dbname=None,stb_name=None): + def dropTopics(self, dbc: DbConn, dbname=None, stb_name=None): dbc.query("show topics ") topics = dbc.getQueryResult() - if dbname !=None and stb_name == None : - + if dbname != None and stb_name == None: + for topic in topics: if dbname in topic[0] and topic[0].startswith("database"): try: dbc.execute('drop topic {}'.format(topic[0])) - Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) - except taos.error.ProgrammingError as err: + Logging.debug("[OPS] topic {} is droping at {}".format(topic, time.time())) + except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [0x03EB]: # Topic subscribed cannot be dropped - pass + if errno in [0x03EB]: # Topic subscribed cannot be dropped + pass # for subsript in subscriptions: - + else: pass pass return True - elif dbname !=None and stb_name!= None: + elif dbname != None and stb_name != None: for topic in topics: if topic[0].startswith(self._dbName) and topic[0].endswith('topic'): dbc.execute('drop topic {}'.format(topic[0])) - Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) + Logging.debug("[OPS] topic {} is droping at {}".format(topic, time.time())) return True else: return True pass - def dropStreams(self,dbc:DbConn): + def dropStreams(self, dbc: DbConn): dbc.query("show streams ") Streams = dbc.getQueryResult() for Stream in Streams: if Stream[0].startswith(self._dbName): dbc.execute('drop stream {}'.format(Stream[0])) - + return not dbc.query("show streams ") > 0 def dropStreamTables(self, dbc: DbConn): dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) - + StreamTables = dbc.getQueryResult() - for StreamTable in StreamTables: + for StreamTable in StreamTables: if self.dropStreams(dbc): - dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0])) - + dbc.execute('drop table {}.{}'.format(self._dbName, StreamTable[0])) + return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str): @@ -2155,16 +2166,16 @@ class TdSuperTable: ''' dbName = self._dbName sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName) - if dbc.query(sql) >= 1 : # reg table exists already + if dbc.query(sql) >= 1: # reg table exists already return # acquire a lock first, so as to be able to *verify*. More details in TD-1471 - fullTableName = dbName + '.' + regTableName + fullTableName = dbName + '.' 
+ regTableName if task is not None: # Somethime thie operation is requested on behalf of a "task" # Logging.info("Locking table for creation: {}".format(fullTableName)) - task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access + task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access # Logging.info("Table locked for creation".format(fullTableName)) - Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table + Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table # print("(" + fullTableName[-3:] + ")", end="", flush=True) try: sql = "CREATE TABLE {} USING {}.{} tags ({})".format( @@ -2176,17 +2187,17 @@ class TdSuperTable: finally: if task is not None: # Logging.info("Unlocking table after creation: {}".format(fullTableName)) - task.unlockTable(fullTableName) # no matter what + task.unlockTable(fullTableName) # no matter what # Logging.info("Table unlocked after creation: {}".format(fullTableName)) - def _getTagStrForSql(self, dbc) : + def _getTagStrForSql(self, dbc): tags = self._getTags(dbc) tagStrs = [] - for tagName in tags: + for tagName in tags: tagType = tags[tagName] if tagType == 'BINARY': tagStrs.append("'Beijing-Shanghai-LosAngeles'") - elif tagType== 'VARCHAR': + elif tagType == 'VARCHAR': tagStrs.append("'London-Paris-Berlin'") elif tagType == 'FLOAT': tagStrs.append('9.9') @@ -2200,12 +2211,12 @@ class TdSuperTable: dbc.query("DESCRIBE {}.{}".format(self._dbName, self._stName)) stCols = dbc.getQueryResult() # print(stCols) - ret = {row[0]:row[1] for row in stCols if row[3]=='TAG'} # name:type + ret = {row[0]: row[1] for row in stCols if row[3] == 'TAG'} # name:type # print("Tags retrieved: {}".format(ret)) return ret def addTag(self, dbc, tagName, tagType): - if tagName in self._getTags(dbc): # already + if tagName in self._getTags(dbc): # already return # sTable.addTag("extraTag", "int") sql = "alter table {}.{} add tag {} {}".format( @@ -2213,33 +2224,33 @@ class TdSuperTable: dbc.execute(sql) def dropTag(self, dbc, tagName): - if not tagName in self._getTags(dbc): # don't have this tag + if not tagName in self._getTags(dbc): # don't have this tag return sql = "alter table {}.{} drop tag {}".format(self._dbName, self._stName, tagName) dbc.execute(sql) def changeTag(self, dbc, oldTag, newTag): tags = self._getTags(dbc) - if not oldTag in tags: # don't have this tag + if not oldTag in tags: # don't have this tag return - if newTag in tags: # already have this tag + if newTag in tags: # already have this tag return sql = "alter table {}.{} change tag {} {}".format(self._dbName, self._stName, oldTag, newTag) dbc.execute(sql) def generateQueries(self, dbc: DbConn) -> List[SqlQuery]: ''' Generate queries to test/exercise this super table ''' - ret = [] # type: List[SqlQuery] + ret = [] # type: List[SqlQuery] for rTbName in self.getRegTables(dbc): # regular tables - - filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions + + filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions None ]) # Run the query against the regular table first - doAggr = (Dice.throw(2) == 0) # 1 in 2 chance - if not doAggr: # don't do aggregate query, just simple one + doAggr = (Dice.throw(2) == 0) # 1 in 2 chance + if not doAggr: # don't do aggregate query, just simple one commonExpr = Dice.choice([ '*', 'abs(speed)', @@ -2256,7 +2267,7 @@ class TdSuperTable: 'sin(speed)', 'sqrt(speed)', 'char_length(color)', - 'concat(color,color)', + 
'concat(color,color)', 'concat_ws(" ", color,color," ")', 'length(color)', 'lower(color)', @@ -2276,26 +2287,26 @@ class TdSuperTable: 'distinct(color)' ] ) - ret.append(SqlQuery( # reg table + ret.append(SqlQuery( # reg table "select {} from {}.{}".format(commonExpr, self._dbName, rTbName))) - ret.append(SqlQuery( # super table + ret.append(SqlQuery( # super table "select {} from {}.{}".format(commonExpr, self._dbName, self.getName()))) - else: # Aggregate query - aggExpr = Dice.choice([ + else: # Aggregate query + aggExpr = Dice.choice([ 'count(*)', 'avg(speed)', # 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable - 'sum(speed)', - 'stddev(speed)', + 'sum(speed)', + 'stddev(speed)', # SELECTOR functions - 'min(speed)', - 'max(speed)', - 'first(speed)', + 'min(speed)', + 'max(speed)', + 'first(speed)', 'last(speed)', - 'top(speed, 50)', # TODO: not supported? - 'bottom(speed, 50)', # TODO: not supported? - 'apercentile(speed, 10)', # TODO: TD-1316 - 'last_row(*)', # TODO: commented out per TD-3231, we should re-create + 'top(speed, 50)', # TODO: not supported? + 'bottom(speed, 50)', # TODO: not supported? + 'apercentile(speed, 10)', # TODO: TD-1316 + 'last_row(*)', # TODO: commented out per TD-3231, we should re-create # Transformation Functions # 'diff(speed)', # TODO: no supported?! 'spread(speed)', @@ -2313,21 +2324,21 @@ class TdSuperTable: 'sample(speed,5)', 'STATECOUNT(speed,"LT",1)', 'STATEDURATION(speed,"LT",1)', - 'twa(speed)' - - ]) # TODO: add more from 'top' + 'twa(speed)' + + ]) # TODO: add more from 'top' - # if aggExpr not in ['stddev(speed)']: # STDDEV not valid for super tables?! (Done in TD-1049) sql = "select {} from {}.{}".format(aggExpr, self._dbName, self.getName()) - if Dice.throw(3) == 0: # 1 in X chance - partion_expr = Dice.choice(['color','tbname']) + if Dice.throw(3) == 0: # 1 in X chance + partion_expr = Dice.choice(['color', 'tbname']) sql = sql + ' partition BY ' + partion_expr + ' order by ' + partion_expr Progress.emit(Progress.QUERY_GROUP_BY) # Logging.info("Executing GROUP-BY query: " + sql) ret.append(SqlQuery(sql)) - return ret + return ret + class TaskReadData(StateTransitionTask): @classmethod @@ -2345,60 +2356,61 @@ class TaskReadData(StateTransitionTask): def _reconnectIfNeeded(self, wt): # 1 in 20 chance, simulate a broken connection, only if service stable (not restarting) - if random.randrange(20)==0: # and self._canRestartService(): # TODO: break connection in all situations + if random.randrange(20) == 0: # and self._canRestartService(): # TODO: break connection in all situations # Logging.info("Attempting to reconnect to server") # TODO: change to DEBUG - Progress.emit(Progress.SERVICE_RECONNECT_START) + Progress.emit(Progress.SERVICE_RECONNECT_START) try: wt.getDbConn().close() wt.getDbConn().open() - except ConnectionError as err: # may fail + except ConnectionError as err: # may fail if not gSvcMgr: Logging.error("Failed to reconnect in client-only mode") - raise # Not OK if we are running in client-only mode - if gSvcMgr.isRunning(): # may have race conditon, but low prob, due to + raise # Not OK if we are running in client-only mode + if gSvcMgr.isRunning(): # may have race conditon, but low prob, due to Logging.error("Failed to reconnect when managed server is running") - raise # Not OK if we are running normally + raise # Not OK if we are running normally - Progress.emit(Progress.SERVICE_RECONNECT_FAILURE) + Progress.emit(Progress.SERVICE_RECONNECT_FAILURE) # Logging.info("Ignoring DB reconnect error") # 
print("_r", end="", flush=True) - Progress.emit(Progress.SERVICE_RECONNECT_SUCCESS) + Progress.emit(Progress.SERVICE_RECONNECT_SUCCESS) # The above might have taken a lot of time, service might be running # by now, causing error below to be incorrectly handled due to timing issue - return # TODO: fix server restart status race condtion - + return # TODO: fix server restart status race condtion def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): self._reconnectIfNeeded(wt) dbc = wt.getDbConn() sTable = self._db.getFixedSuperTable() - + for q in sTable.generateQueries(dbc): # regular tables try: sql = q.getSql() # if 'GROUP BY' in sql: # Logging.info("Executing GROUP-BY query: " + sql) dbc.execute(sql) - except taos.error.ProgrammingError as err: + except taos.error.ProgrammingError as err: errno2 = Helper.convertErrno(err.errno) Logging.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql())) raise + class SqlQuery: @classmethod def buildRandom(cls, db: Database): '''Build a random query against a certain database''' - + dbName = db.getName() - def __init__(self, sql:str = None): + def __init__(self, sql: str = None): self._sql = sql def getSql(self): return self._sql - + + class TaskDropSuperTable(StateTransitionTask): @classmethod def getEndState(cls): @@ -2421,7 +2433,7 @@ class TaskDropSuperTable(StateTransitionTask): regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) try: self.execWtSql(wt, "drop table {}.{}". - format(self._db.getName(), regTableName)) # nRows always 0, like MySQL + format(self._db.getName(), regTableName)) # nRows always 0, like MySQL except taos.error.ProgrammingError as err: # correcting for strange error number scheme errno2 = Helper.convertErrno(err.errno) @@ -2429,7 +2441,6 @@ class TaskDropSuperTable(StateTransitionTask): isSuccess = False Logging.debug("[DB] Acceptable error when dropping a table") continue # try to delete next regular table - if (not tickOutput): tickOutput = True # Print only one time @@ -2441,8 +2452,6 @@ class TaskDropSuperTable(StateTransitionTask): # Drop the super table itself tblName = self._db.getFixedSuperTableName() self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) - - class TaskAlterTags(StateTransitionTask): @@ -2472,6 +2481,7 @@ class TaskAlterTags(StateTransitionTask): sTable.changeTag(dbc, "extraTag", "newTag") # sql = "alter table db.{} change tag extraTag newTag".format(tblName) + class TaskRestartService(StateTransitionTask): _isRunning = False _classLock = threading.Lock() @@ -2484,11 +2494,12 @@ class TaskRestartService(StateTransitionTask): def canBeginFrom(cls, state: AnyState): if Config.getConfig().auto_start_service: return state.canDropFixedSuperTable() # Basicallly when we have the super table - return False # don't run this otherwise + return False # don't run this otherwise CHANCE_TO_RESTART_SERVICE = 200 + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - if not Config.getConfig().auto_start_service: # only execute when we are in -a mode + if not Config.getConfig().auto_start_service: # only execute when we are in -a mode print("_a", end="", flush=True) return @@ -2498,20 +2509,22 @@ class TaskRestartService(StateTransitionTask): return self._isRunning = True - if Dice.throw(self.CHANCE_TO_RESTART_SERVICE) == 0: # 1 in N chance + if Dice.throw(self.CHANCE_TO_RESTART_SERVICE) == 0: # 1 in N chance dbc = wt.getDbConn() - dbc.execute("select * from information_schema.ins_databases") # simple delay, align 
timing with other workers + dbc.execute( + "select * from information_schema.ins_databases") # simple delay, align timing with other workers gSvcMgr.restart() self._isRunning = False + class TaskAddData(StateTransitionTask): # Track which table is being actively worked on activeTable: Set[int] = set() # We use these two files to record operations to DB, useful for power-off tests - fAddLogReady = None # type: Optional[io.TextIOWrapper] - fAddLogDone = None # type: Optional[io.TextIOWrapper] + fAddLogReady = None # type: Optional[io.TextIOWrapper] + fAddLogDone = None # type: Optional[io.TextIOWrapper] @classmethod def prepToRecordOps(cls): @@ -2532,12 +2545,12 @@ class TaskAddData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canAddData() - def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): + def _lockTableIfNeeded(self, fullTableName, extraMsg=''): if Config.getConfig().verify_data: # Logging.info("Locking table: {}".format(fullTableName)) - self.lockTable(fullTableName) + self.lockTable(fullTableName) # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName)) - # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written else: # Logging.info("Skipping locking table") pass @@ -2545,15 +2558,15 @@ class TaskAddData(StateTransitionTask): def _unlockTableIfNeeded(self, fullTableName): if Config.getConfig().verify_data: # Logging.info("Unlocking table: {}".format(fullTableName)) - self.unlockTable(fullTableName) + self.unlockTable(fullTableName) # Logging.info("Table unlocked: {}".format(fullTableName)) else: pass # Logging.info("Skipping unlocking table") - def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): - numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - + def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): + numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + fullTableName = db.getName() + '.' + regTableName self._lockTableIfNeeded(fullTableName, 'batch') @@ -2571,10 +2584,8 @@ class TaskAddData(StateTransitionTask): # Logging.info("Data added in batch: {}".format(sql)) self._unlockTableIfNeeded(fullTableName) - - - def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches - numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches + numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS for j in range(numRecords): # number of records per table intToWrite = db.getNextInt() @@ -2587,13 +2598,14 @@ class TaskAddData(StateTransitionTask): self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) - + # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock - + self._lockTableIfNeeded( + fullTableName) # so that we are verify read-back. 
TODO: deal with exceptions before unlock + try: - sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {}) + sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {}) fullTableName, # ds.getFixedSuperTableName(), # ds.getNextBinary(), ds.getNextFloat(), @@ -2604,55 +2616,56 @@ class TaskAddData(StateTransitionTask): intWrote = intToWrite # Quick hack, attach an update statement here. TODO: create an "update" task - if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - intToUpdate = db.getNextInt() # Updated, but should not succeed + if (not Config.getConfig().use_shadow_db) and Dice.throw( + 5) == 0: # 1 in N chance, plus not using shaddow DB + intToUpdate = db.getNextInt() # Updated, but should not succeed nextColor = db.getNextColor() - sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here - fullTableName, - nextTick, intToUpdate, nextColor) + sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here + fullTableName, + nextTick, intToUpdate, nextColor) # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format( # fullTableName, db.getNextInt(), db.getNextColor(), nextTick) dbc.execute(sql) - intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. + intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. - except: # Any exception at all - self._unlockTableIfNeeded(fullTableName) + except: # Any exception at all + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped - if Config.getConfig().verify_data: # only if command line asks for it + if Config.getConfig().verify_data: # only if command line asks for it try: readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'". 
- format(db.getName(), regTableName, nextTick)) - if readBack != intWrote : + format(db.getName(), regTableName, nextTick)) + if readBack != intWrote: raise taos.error.ProgrammingError( "Failed to read back same data, wrote: {}, read: {}" .format(intWrote, readBack), 0x999) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result + if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result raise taos.error.ProgrammingError( "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY" .format(nextTick, intWrote), errno) - elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results + elif errno == CrashGenError.INVALID_MULTIPLE_RESULT: # multiple results raise taos.error.ProgrammingError( "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS" .format(nextTick, intWrote), errno) - elif errno in [0x218, 0x362]: # table doesn't exist + elif errno in [0x218, 0x362]: # table doesn't exist # do nothing pass else: # Re-throw otherwise raise finally: - self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock # Done with read-back verification, unlock the table now else: - self._unlockTableIfNeeded(fullTableName) + self._unlockTableIfNeeded(fullTableName) - # Successfully wrote the data into the DB, let's record it somehow + # Successfully wrote the data into the DB, let's record it somehow te.recordDataMark(intWrote) if Config.getConfig().record_ops: @@ -2666,17 +2679,17 @@ class TaskAddData(StateTransitionTask): # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access db = self._db dbc = wt.getDbConn() - numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES + numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - tblSeq = list(range(numTables )) - random.shuffle(tblSeq) # now we have random sequence + tblSeq = list(range(numTables)) + random.shuffle(tblSeq) # now we have random sequence for i in tblSeq: if (i in self.activeTable): # wow already active # print("x", end="", flush=True) # concurrent insertion Progress.emit(Progress.CONCURRENT_INSERTION) else: self.activeTable.add(i) # marking it active - + dbName = db.getName() sTable = db.getFixedSuperTable() regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) @@ -2684,21 +2697,22 @@ class TaskAddData(StateTransitionTask): # self._lockTable(fullTableName) # "create table" below. 
Stop it if the table is "locked" sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists # self._unlockTable(fullTableName) - - if Dice.throw(1) == 0: # 1 in 2 chance + + if Dice.throw(1) == 0: # 1 in 2 chance self._addData(db, dbc, regTableName, te) else: self._addDataInBatch(db, dbc, regTableName, te) self.activeTable.discard(i) # not raising an error, unlike remove + class TaskDeleteData(StateTransitionTask): # Track which table is being actively worked on activeTable: Set[int] = set() # We use these two files to record operations to DB, useful for power-off tests - fAddLogReady = None # type: Optional[io.TextIOWrapper] - fAddLogDone = None # type: Optional[io.TextIOWrapper] + fAddLogReady = None # type: Optional[io.TextIOWrapper] + fAddLogDone = None # type: Optional[io.TextIOWrapper] @classmethod def prepToRecordOps(cls): @@ -2719,12 +2733,12 @@ class TaskDeleteData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canDeleteData() - def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): + def _lockTableIfNeeded(self, fullTableName, extraMsg=''): if Config.getConfig().verify_data: # Logging.info("Locking table: {}".format(fullTableName)) - self.lockTable(fullTableName) + self.lockTable(fullTableName) # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName)) - # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written else: # Logging.info("Skipping locking table") pass @@ -2732,15 +2746,15 @@ class TaskDeleteData(StateTransitionTask): def _unlockTableIfNeeded(self, fullTableName): if Config.getConfig().verify_data: # Logging.info("Unlocking table: {}".format(fullTableName)) - self.unlockTable(fullTableName) + self.unlockTable(fullTableName) # Logging.info("Table unlocked: {}".format(fullTableName)) else: pass # Logging.info("Skipping unlocking table") - def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches - numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - del_Records = int(numRecords/5) + def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches + numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + del_Records = int(numRecords / 5) if Dice.throw(2) == 0: for j in range(del_Records): # number of records per table intToWrite = db.getNextInt() @@ -2753,13 +2767,14 @@ class TaskDeleteData(StateTransitionTask): self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) - + # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock - + self._lockTableIfNeeded( + fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock + try: - sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {}) + sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {}) fullTableName, # ds.getFixedSuperTableName(), # ds.getNextBinary(), ds.getNextFloat(), @@ -2772,45 +2787,46 @@ class TaskDeleteData(StateTransitionTask): intWrote = intToWrite # Quick hack, attach an update statement here. 
TODO: create an "update" task - if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - intToUpdate = db.getNextInt() # Updated, but should not succeed + if (not Config.getConfig().use_shadow_db) and Dice.throw( + 5) == 0: # 1 in N chance, plus not using shaddow DB + intToUpdate = db.getNextInt() # Updated, but should not succeed # nextColor = db.getNextColor() - sql = "delete from {} where ts = '{}' ;".format( # "INSERt" means "update" here - fullTableName, - nextTick) + sql = "delete from {} where ts = '{}' ;".format( # "INSERt" means "update" here + fullTableName, + nextTick) # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format( # fullTableName, db.getNextInt(), db.getNextColor(), nextTick) dbc.execute(sql) - intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. + intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. - except: # Any exception at all - self._unlockTableIfNeeded(fullTableName) + except: # Any exception at all + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped - if Config.getConfig().verify_data: # only if command line asks for it + if Config.getConfig().verify_data: # only if command line asks for it try: dbc.query("SELECT * from {}.{} WHERE ts='{}'". - format(db.getName(), regTableName, nextTick)) + format(db.getName(), regTableName, nextTick)) result = dbc.getQueryResult() - if len(result)==0: + if len(result) == 0: # means data has been delete - print("D1",end="") # DF means delete failed + print("D1", end="") # DF means delete failed else: - print("DF",end="") # DF means delete failed + print("DF", end="") # DF means delete failed except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result # print("D1",end="") # D1 means delete data success and only 1 record - if errno in [0x218, 0x362,0x2662]: # table doesn't exist + if errno in [0x218, 0x362, 0x2662]: # table doesn't exist # do nothing pass else: # Re-throw otherwise raise finally: - self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock # Done with read-back verification, unlock the table now # Successfully wrote the data into the DB, let's record it somehow te.recordDataMark(intWrote) @@ -2824,52 +2840,54 @@ class TaskDeleteData(StateTransitionTask): self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) - + # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock - + self._lockTableIfNeeded( + fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock + try: - sql = "delete from {} ;".format( # removed: tags ('{}', {}) + sql = "delete from {} ;".format( # removed: tags ('{}', {}) fullTableName) # Logging.info("Adding data: {}".format(sql)) dbc.execute(sql) # Logging.info("Data added: {}".format(sql)) - + # Quick hack, attach an update statement here. 
TODO: create an "update" task - if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - sql = "delete from {} ;".format( # "INSERt" means "update" here - fullTableName) + if (not Config.getConfig().use_shadow_db) and Dice.throw( + 5) == 0: # 1 in N chance, plus not using shaddow DB + sql = "delete from {} ;".format( # "INSERt" means "update" here + fullTableName) dbc.execute(sql) - except: # Any exception at all - self._unlockTableIfNeeded(fullTableName) + except: # Any exception at all + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped - if Config.getConfig().verify_data: # only if command line asks for it + if Config.getConfig().verify_data: # only if command line asks for it try: dbc.query("SELECT * from {}.{} WHERE ts='{}'". - format(db.getName(), regTableName, nextTick)) + format(db.getName(), regTableName, nextTick)) result = dbc.getQueryResult() - if len(result)==0: + if len(result) == 0: # means data has been delete - print("DA",end="") + print("DA", end="") else: - print("DF",end="") # DF means delete failed + print("DF", end="") # DF means delete failed except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result # print("Da",end="") # Da means delete data success and for all datas - if errno in [0x218, 0x362,0x2662]: # table doesn't exist + if errno in [0x218, 0x362, 0x2662]: # table doesn't exist # do nothing pass else: # Re-throw otherwise raise finally: - self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock # Done with read-back verification, unlock the table now if Config.getConfig().record_ops: @@ -2883,17 +2901,17 @@ class TaskDeleteData(StateTransitionTask): # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access db = self._db dbc = wt.getDbConn() - numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES + numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - tblSeq = list(range(numTables )) - random.shuffle(tblSeq) # now we have random sequence + tblSeq = list(range(numTables)) + random.shuffle(tblSeq) # now we have random sequence for i in tblSeq: if (i in self.activeTable): # wow already active # print("x", end="", flush=True) # concurrent insertion Progress.emit(Progress.CONCURRENT_INSERTION) else: self.activeTable.add(i) # marking it active - + dbName = db.getName() sTable = db.getFixedSuperTable() regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) @@ -2901,54 +2919,57 @@ class TaskDeleteData(StateTransitionTask): # self._lockTable(fullTableName) # "create table" below. 
Stop it if the table is "locked" sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists # self._unlockTable(fullTableName) - + self._deleteData(db, dbc, regTableName, te) - + self.activeTable.discard(i) # not raising an error, unlike remove -class ThreadStacks: # stack info for all threads +class ThreadStacks: # stack info for all threads def __init__(self): self._allStacks = {} - allFrames = sys._current_frames() # All current stack frames, keyed with "ident" + allFrames = sys._current_frames() # All current stack frames, keyed with "ident" for th in threading.enumerate(): # For each thread - stack = traceback.extract_stack(allFrames[th.ident]) #type: ignore # Get stack for a thread - shortTid = th.native_id % 10000 #type: ignore - self._allStacks[shortTid] = stack # Was using th.native_id + stack = traceback.extract_stack(allFrames[th.ident]) # type: ignore # Get stack for a thread + shortTid = th.native_id % 10000 # type: ignore + self._allStacks[shortTid] = stack # Was using th.native_id - def record_current_time(self,current_time): + def record_current_time(self, current_time): self.current_time = current_time - def print(self, filteredEndName = None, filterInternal = False): - for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom + def print(self, filteredEndName=None, filterInternal=False): + for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom lastFrame = stack[-1] - if filteredEndName: # we need to filter out stacks that match this name - if lastFrame.name == filteredEndName : # end did not match + if filteredEndName: # we need to filter out stacks that match this name + if lastFrame.name == filteredEndName: # end did not match continue if filterInternal: - if lastFrame.name in ['wait', 'invoke_excepthook', - '_wait', # The Barrier exception - 'svcOutputReader', # the svcMgr thread - '__init__']: # the thread that extracted the stack - continue # ignore + if lastFrame.name in ['wait', 'invoke_excepthook', + '_wait', # The Barrier exception + 'svcOutputReader', # the svcMgr thread + '__init__']: # the thread that extracted the stack + continue # ignore # Now print print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(shortTid)) - + lastSqlForThread = DbConn.fetchSqlForThread(shortTid) last_sql_commit_time = DbConn.get_save_sql_time(shortTid) # time_cost = DbConn.get_time_cost() - print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, self.current_time-last_sql_commit_time ,lastSqlForThread)) + print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, + self.current_time - last_sql_commit_time, + lastSqlForThread)) stackFrame = 0 - for frame in stack: # was using: reversed(stack) + for frame in stack: # was using: reversed(stack) # print(frame) print("[{sf}] File {filename}, line {lineno}, in {name}".format( sf=stackFrame, filename=frame.filename, lineno=frame.lineno, name=frame.name)) print(" {}".format(frame.line)) stackFrame += 1 print("-----> End of Thread Info ----->\n") - if self.current_time-last_sql_commit_time >100: # dead lock occured + if self.current_time - last_sql_commit_time > 100: # dead lock occured print("maybe dead locked of thread {} ".format(shortTid)) + class ClientManager: def __init__(self): Logging.info("Starting service manager") @@ -3041,36 +3062,35 @@ class ClientManager: # time.sleep(2.0) # dbManager = None # release? 
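# Editorial sketch of the lock -> delete -> read-back -> unlock flow that
# TaskDeleteData uses above.  `dbc`, `lock_table` and `unlock_table` are
# hypothetical stand-ins for the crash_gen DbConn and table-lock helpers;
# the errno set mirrors the 0x218/0x362/0x2662 "table doesn't exist" codes
# checked in the hunk.  A minimal sketch, not the tool's actual code path.
TABLE_GONE_ERRNOS = {0x218, 0x362, 0x2662}

def delete_and_verify(dbc, full_table_name, ts, lock_table, unlock_table, verify=True):
    lock_table(full_table_name)                      # serialize with concurrent writers
    try:
        dbc.execute("delete from {} where ts = '{}' ;".format(full_table_name, ts))
    except Exception:
        unlock_table(full_table_name)                # never leave the table locked
        raise
    try:
        if not verify:
            return True
        try:
            dbc.query("SELECT * from {} WHERE ts='{}'".format(full_table_name, ts))
            return len(dbc.getQueryResult()) == 0    # empty result => delete took effect
        except Exception as err:
            if getattr(err, "errno", None) in TABLE_GONE_ERRNOS:
                return True                          # table was dropped concurrently
            raise
    finally:
        unlock_table(full_table_name)                # mirrors the finally block above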
- def run(self, svcMgr): + def run(self, svcMgr): # self._printLastNumbers() # global gConfig # Prepare Tde Instance global gContainer - tInst = gContainer.defTdeInstance = TdeInstance() # "subdir to hold the instance" + tInst = gContainer.defTdeInstance = TdeInstance() # "subdir to hold the instance" cfg = Config.getConfig() dbManager = DbManager(cfg.connector_type, tInst.getDbTarget()) # Regular function thPool = ThreadPool(cfg.num_threads, cfg.max_steps) self.tc = ThreadCoordinator(thPool, dbManager) - + Logging.info("Starting client instance: {}".format(tInst)) self.tc.run() # print("exec stats: {}".format(self.tc.getExecStats())) # print("TC failed = {}".format(self.tc.isFailed())) - if svcMgr: # gConfig.auto_start_service: + if svcMgr: # gConfig.auto_start_service: svcMgr.stopTaosServices() svcMgr = None - # Release global variables # gConfig = None Config.clearConfig() gSvcMgr = None logger = None - + thPool = None - dbManager.cleanUp() # destructor wouldn't run in time + dbManager.cleanUp() # destructor wouldn't run in time dbManager = None # Print exec status, etc., AFTER showing messages from the server @@ -3082,7 +3102,7 @@ class ClientManager: # Release variables here self.tc = None - gc.collect() # force garbage collection + gc.collect() # force garbage collection # h = hpy() # print("\n----- Final Python Heap -----\n") # print(h.heap()) @@ -3093,37 +3113,38 @@ class ClientManager: # self.tc.getDbManager().cleanUp() # clean up first, so we can show ZERO db connections self.tc.printStats() + class MainExec: - def __init__(self): + def __init__(self): self._clientMgr = None - self._svcMgr = None # type: Optional[ServiceManager] + self._svcMgr = None # type: Optional[ServiceManager] signal.signal(signal.SIGTERM, self.sigIntHandler) - signal.signal(signal.SIGINT, self.sigIntHandler) + signal.signal(signal.SIGINT, self.sigIntHandler) signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! 
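# Editorial sketch of the per-thread stack-dump technique used by the
# ThreadStacks class above: snapshot every live thread's frames with
# sys._current_frames() and print them most-recent-call-last.  Assumes
# Python 3.8+ for threading.Thread.native_id; the 4-digit short id follows
# the same convention as that class.
import sys
import threading
import traceback

def dump_all_thread_stacks():
    frames = sys._current_frames()                   # {thread ident: top frame}
    for th in threading.enumerate():
        frame = frames.get(th.ident)
        if frame is None:                            # thread exited between calls
            continue
        stack = traceback.extract_stack(frame)
        short_tid = (th.native_id or 0) % 10000
        print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(short_tid))
        for i, f in enumerate(stack):
            print("[{}] File {}, line {}, in {}".format(i, f.filename, f.lineno, f.name))
            print("    {}".format(f.line))
        print("-----> End of Thread Info ----->")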
def sigUsrHandler(self, signalNumber, frame): if self._clientMgr: self._clientMgr.sigUsrHandler(signalNumber, frame) - elif self._svcMgr: # Only if no client mgr, we are running alone + elif self._svcMgr: # Only if no client mgr, we are running alone self._svcMgr.sigUsrHandler(signalNumber, frame) - + def sigIntHandler(self, signalNumber, frame): - if self._svcMgr: + if self._svcMgr: self._svcMgr.sigIntHandler(signalNumber, frame) - if self._clientMgr: + if self._clientMgr: self._clientMgr.sigIntHandler(signalNumber, frame) def runClient(self): global gSvcMgr if Config.getConfig().auto_start_service: - gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert - gSvcMgr.startTaosServices() # we start, don't run - + gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert + gSvcMgr.startTaosServices() # we start, don't run + self._clientMgr = ClientManager() ret = None - try: - ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside + try: + ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside except requests.exceptions.ConnectionError as err: Logging.warning("Failed to open REST connection to DB: {}".format(err)) # don't raise @@ -3131,10 +3152,11 @@ class MainExec: def runService(self): global gSvcMgr - gSvcMgr = self._svcMgr = ServiceManager(Config.getConfig().num_dnodes) # save it in a global variable TODO: hack alert + gSvcMgr = self._svcMgr = ServiceManager( + Config.getConfig().num_dnodes) # save it in a global variable TODO: hack alert - gSvcMgr.run() # run to some end state - gSvcMgr = self._svcMgr = None + gSvcMgr.run() # run to some end state + gSvcMgr = self._svcMgr = None def _buildCmdLineParser(self): parser = argparse.ArgumentParser( @@ -3145,7 +3167,7 @@ class MainExec: 1. You build TDengine in the top level ./build directory, as described in offical docs 2. 
You run the server there before this script: ./build/bin/taosd -c test/cfg - ''')) + ''')) parser.add_argument( '-a', @@ -3209,7 +3231,7 @@ class MainExec: '-n', '--dynamic-db-table-names', action='store_true', - help='Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false)') + help='Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false)') parser.add_argument( '-o', '--num-dnodes', @@ -3259,19 +3281,18 @@ class MainExec: return parser - - def init(self): # TODO: refactor + def init(self): # TODO: refactor global gContainer - gContainer = Container() # micky-mouse DI + gContainer = Container() # micky-mouse DI - global gSvcMgr # TODO: refactor away + global gSvcMgr # TODO: refactor away gSvcMgr = None parser = self._buildCmdLineParser() Config.init(parser) # Sanity check for arguments - if Config.getConfig().use_shadow_db and Config.getConfig().max_dbs>1 : + if Config.getConfig().use_shadow_db and Config.getConfig().max_dbs > 1: raise CrashGenError("Cannot combine use-shadow-db with max-dbs of more than 1") Logging.clsInit(Config.getConfig().debug) @@ -3282,10 +3303,10 @@ class MainExec: if Config.getConfig().run_tdengine: # run server try: self.runService() - return 0 # success + return 0 # success except ConnectionError as err: Logging.error("Failed to make DB connection, please check DB instance manually") - return -1 # failure + return -1 # failure else: return self.runClient() @@ -3294,7 +3315,7 @@ class Container(): _propertyList = {'defTdeInstance'} def __init__(self): - self._cargo = {} # No cargo at the beginning + self._cargo = {} # No cargo at the beginning def _verifyValidProperty(self, name): if not name in self._propertyList: @@ -3303,10 +3324,10 @@ class Container(): # Called for an attribute, when other mechanisms fail (compare to __getattribute__) def __getattr__(self, name): self._verifyValidProperty(name) - return self._cargo[name] # just a simple lookup + return self._cargo[name] # just a simple lookup def __setattr__(self, name, value): - if name == '_cargo' : # reserved vars + if name == '_cargo': # reserved vars super().__setattr__(name, value) return self._verifyValidProperty(name) diff --git a/tests/system-test/0-others/user_manage.py b/tests/system-test/0-others/user_manage.py index 5148e26b39..6f90a2873a 100644 --- a/tests/system-test/0-others/user_manage.py +++ b/tests/system-test/0-others/user_manage.py @@ -12,12 +12,13 @@ # -*- coding: utf-8 -*- import taos -from util.log import * -from util.cases import * -from util.sql import * -from util.common import * -from util.sqlset import * from taos.tmq import * +from util.cases import * +from util.common import * +from util.log import * +from util.sql import * +from util.sqlset import * + class TDTestCase: def init(self, conn, logSql, replicaVar=1): @@ -26,10 +27,10 @@ class TDTestCase: tdSql.init(conn.cursor()) self.setsql = TDSetSql() self.stbname = 'stb' - self.binary_length = 20 # the length of binary for column_dict + self.binary_length = 20 # the length of binary for column_dict self.nchar_length = 20 # the length of nchar for column_dict self.column_dict = { - 'ts' : 'timestamp', + 'ts': 'timestamp', 'col1': 'tinyint', 'col2': 'smallint', 'col3': 'int', @@ -45,7 +46,7 @@ class TDTestCase: 'col13': f'nchar({self.nchar_length})' } self.tag_dict = { - 'ts_tag' : 'timestamp', + 'ts_tag': 'timestamp', 't1': 'tinyint', 't2': 'smallint', 't3': 'int', @@ -67,25 +68,28 @@ class TDTestCase: 
f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"' ] self.tbnum = 1 + def prepare_data(self): - tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict)) for i in range(self.tbnum): tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})') for j in self.values_list: tdSql.execute(f'insert into {self.stbname}_{i} values({j})') + def create_user(self): - for user_name in ['jiacy1_all','jiacy1_read','jiacy1_write','jiacy1_none','jiacy0_all','jiacy0_read','jiacy0_write','jiacy0_none']: + for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy1_write', 'jiacy1_none', 'jiacy0_all', 'jiacy0_read', + 'jiacy0_write', 'jiacy0_none']: if 'jiacy1' in user_name.lower(): tdSql.execute(f'create user {user_name} pass "123" sysinfo 1') elif 'jiacy0' in user_name.lower(): tdSql.execute(f'create user {user_name} pass "123" sysinfo 0') - for user_name in ['jiacy1_all','jiacy1_read','jiacy0_all','jiacy0_read']: + for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy0_all', 'jiacy0_read']: tdSql.execute(f'grant read on db to {user_name}') - for user_name in ['jiacy1_all','jiacy1_write','jiacy0_all','jiacy0_write']: + for user_name in ['jiacy1_all', 'jiacy1_write', 'jiacy0_all', 'jiacy0_write']: tdSql.execute(f'grant write on db to {user_name}') def user_privilege_check(self): - jiacy1_read_conn = taos.connect(user='jiacy1_read',password='123') + jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123') sql = "create table ntb (ts timestamp,c0 int)" expectErrNotOccured = True try: @@ -94,32 +98,34 @@ class TDTestCase: expectErrNotOccured = False if expectErrNotOccured: caller = inspect.getframeinfo(inspect.stack()[1][0]) - tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured" ) + tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured") else: self.queryRows = 0 self.queryCols = 0 self.queryResult = None tdLog.info(f"sql:{sql}, expect error occured") pass + def drop_topic(self): - jiacy1_all_conn = taos.connect(user='jiacy1_all',password='123') - jiacy1_read_conn = taos.connect(user='jiacy1_read',password='123') - jiacy1_write_conn = taos.connect(user='jiacy1_write',password='123') - jiacy1_none_conn = taos.connect(user='jiacy1_none',password='123') - jiacy0_all_conn = taos.connect(user='jiacy0_all',password='123') - jiacy0_read_conn = taos.connect(user='jiacy0_read',password='123') - jiacy0_write_conn = taos.connect(user='jiacy0_write',password='123') - jiacy0_none_conn = taos.connect(user='jiacy0_none',password='123') + jiacy1_all_conn = taos.connect(user='jiacy1_all', password='123') + jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123') + jiacy1_write_conn = taos.connect(user='jiacy1_write', password='123') + jiacy1_none_conn = taos.connect(user='jiacy1_none', password='123') + jiacy0_all_conn = taos.connect(user='jiacy0_all', password='123') + jiacy0_read_conn = taos.connect(user='jiacy0_read', password='123') + jiacy0_write_conn = taos.connect(user='jiacy0_write', password='123') + jiacy0_none_conn = taos.connect(user='jiacy0_none', password='123') tdSql.execute('create topic root_db as select * from db.stb') - for user in [jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: + for user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: user.execute(f'create topic db_jiacy as select * from db.stb') 
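# Editorial sketch of the "this SQL must fail" check that user_privilege_check()
# above repeats (and drop_topic() continues below): run the statement, and treat
# success as the test failure.  `execute` is a hypothetical stand-in for a
# taos connection's execute() or tdSql.execute().
def assert_sql_fails(execute, sql):
    try:
        execute(sql)
    except Exception:
        return True                                  # error occurred, as expected
    raise AssertionError("sql succeeded but an error was expected: {}".format(sql))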
user.execute('drop topic db_jiacy') - for user in [jiacy1_write_conn,jiacy1_none_conn,jiacy0_write_conn,jiacy0_none_conn,jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: + for user in [jiacy1_write_conn, jiacy1_none_conn, jiacy0_write_conn, jiacy0_none_conn, jiacy1_all_conn, + jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: sql_list = [] - if user in [jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: + if user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: sql_list = ['drop topic root_db'] - elif user in [jiacy1_write_conn,jiacy1_none_conn,jiacy0_write_conn,jiacy0_none_conn]: - sql_list = ['drop topic root_db','create topic db_jiacy as select * from db.stb'] + elif user in [jiacy1_write_conn, jiacy1_none_conn, jiacy0_write_conn, jiacy0_none_conn]: + sql_list = ['drop topic root_db', 'create topic db_jiacy as select * from db.stb'] for sql in sql_list: expectErrNotOccured = True try: @@ -128,33 +134,26 @@ class TDTestCase: expectErrNotOccured = False if expectErrNotOccured: caller = inspect.getframeinfo(inspect.stack()[1][0]) - tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured" ) + tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured") else: self.queryRows = 0 self.queryCols = 0 self.queryResult = None tdLog.info(f"sql:{sql}, expect error occured") + def tmq_commit_cb_print(tmq, resp, param=None): print(f"commit: {resp}, tmq: {tmq}, param: {param}") + def subscribe_topic(self): print("create topic") tdSql.execute('create topic db_topic as select * from db.stb') tdSql.execute('grant subscribe on db_topic to jiacy1_all') print("build consumer") - conf = TaosTmqConf() - conf.set("group.id", "tg2") - conf.set("td.connect.user", "jiacy1_all") - conf.set("td.connect.pass", "123") - conf.set("enable.auto.commit", "true") - conf.set_auto_commit_cb(self.tmq_commit_cb_print, None) - tmq = conf.new_consumer() + tmq = Consumer({"group.id": "tg2", "td.connect.user": "jiacy1_all", "td.connect.pass": "123", + "enable.auto.commit": "true"}) print("build topic list") - topic_list = TaosTmqList() - topic_list.append("db_topic") + tmq.subscribe(["db_topic"]) print("basic consume loop") - tmq.subscribe(topic_list) - sub_list = tmq.subscription() - print("subscribed topics: ", sub_list) c = 0 l = 0 for i in range(10): @@ -163,20 +162,23 @@ class TDTestCase: res = tmq.poll(10) print(f"loop {l}") l += 1 - if res: - c += 1 - topic = res.get_topic_name() - vg = res.get_vgroup_id() - db = res.get_db_name() - print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}") - for row in res: - print(row) - print("* committed") - tmq.commit(res) - else: + if not res: print(f"received empty message at loop {l} (committed {c})") - pass - + continue + if res.error(): + print(f"consumer error at loop {l} (committed {c}) {res.error()}") + continue + + c += 1 + topic = res.topic() + db = res.database() + print(f"topic: {topic}\ndb: {db}") + + for row in res: + print(row.fetchall()) + print("* committed") + tmq.commit(res) + def run(self): tdSql.prepare() self.create_user() @@ -184,9 +186,11 @@ class TDTestCase: self.drop_topic() self.user_privilege_check() self.subscribe_topic() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) + tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) From 4736636fb2ee7f728a69a68f6c01a0558941fdf5 Mon Sep 17 00:00:00 2001 
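# Editorial sketch of the dict-configured TMQ consumer loop that the rewritten
# subscribe_topic() above switches to (taos.tmq.Consumer in place of the old
# TaosTmqConf/TaosTmqList objects).  Connection values are illustrative only;
# the calls mirror those used in the hunk above.
from taos.tmq import Consumer

def consume_once(topic="db_topic", user="jiacy1_all", password="123"):
    tmq = Consumer({
        "group.id": "tg2",
        "td.connect.user": user,
        "td.connect.pass": password,
        "enable.auto.commit": "true",
    })
    tmq.subscribe([topic])
    res = tmq.poll(10)                               # None when nothing arrives in time
    if not res:
        return None
    if res.error():
        raise RuntimeError(res.error())
    rows = [row.fetchall() for row in res]           # same iteration shape as above
    tmq.commit(res)
    return res.topic(), rows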
From: Shuduo Sang Date: Wed, 22 Feb 2023 00:06:55 +0800 Subject: [PATCH 266/267] fix: taosbenchmark data gen refactor (#20059) * fix: taosbenchmark data gen refactor * fix: update taos-tools e22e5e2 * fix: update taos-tools cccc353 * fix: update taos-tools 10a211f * fix: update taos-tools e54a926 * fix: update taos-tools 340b0f4 * fix: update taos-tools 6afed7c * fix: update taos-tools cc6db40 * fix: update taos-tools 634399d * fix: update taos-tools e0104dc * fix: update taos-tools bc11ff3 * fix: update taos-tools b23e170 * fix: update taos-tools a62f774 * fix: clean up and print * fix: update taos-tools a880c81 * test: update 5-taos-tools/taosdump/taosdumpTestInspect.py for 3.0 * fix: add sml_tags_json_array to accelerate sml json test * fix: taosbenchmark json data generating refine 611dc09 * fix: update taos-tools ad2f7c8 * fix: coverity scan issues * fix: udpate taos-tools d259385 * fix: update taos-tools be929ab * fix: update taos-tools 63e63d6 * fix: update taos-tools 2a38de1 * fix: update taos-tools 0d4c001 * fix: update taos-tools dce7de5 * fix: update taos-tools 82b96df * fix: update taos-tools 401cf6a * fix: update taos-tools 6f692b0:wq * fix: update taos-tools 2942ca0 * fix: update taos-tools 61cbfd2 --- cmake/taostools_CMakeLists.txt.in | 2 +- .../taosdump/taosdumpTestInspect.py | 128 ++++++++++++------ 2 files changed, 88 insertions(+), 42 deletions(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 94ed46e5e2..ae3b626f88 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 634399d + GIT_TAG 61cbfd2 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py index a6c2062d6c..1ccbb1f7d6 100644 --- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py @@ -11,24 +11,20 @@ # -*- coding: utf-8 -*- -import sys import os from util.log import * from util.cases import * from util.sql import * from util.dnodes import * -import subprocess class TDTestCase: def caseDescription(self): - ''' + """ case1: [TD-14544] taosdump data inspect - ''' - return + """ def init(self, conn, logSql, replicaVar=1): - self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) self.tmpdir = "tmp" @@ -36,44 +32,56 @@ class TDTestCase: def getPath(self, tool="taosdump"): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] + elif "src" in selfPath: + projPath = selfPath[: selfPath.find("src")] + elif "/tools/" in selfPath: + projPath = selfPath[: selfPath.find("/tools/")] + elif "/tests/" in selfPath: + projPath = selfPath[: selfPath.find("/tests/")] else: - projPath = selfPath[:selfPath.find("tests")] + tdLog.info("cannot found %s in path: %s, use system's" % (tool, selfPath)) + projPath = "/usr/local/taos/bin" paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files): + for root, dummy, files in os.walk(projPath): + if (tool) in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in 
rootRealPath): + if "packaging" not in rootRealPath: paths.append(os.path.join(root, tool)) break - if (len(paths) == 0): + if len(paths) == 0: return "" return paths[0] def run(self): - tdSql.prepare(replica=f"{self.replicaVar}") + tdSql.prepare() tdSql.execute("drop database if exists db") - tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + tdSql.execute("create database db keep 3649 ") tdSql.execute("use db") tdSql.execute( - "create table st(ts timestamp, c1 INT, c2 BOOL, c3 TINYINT, c4 SMALLINT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 TIMESTAMP, c9 BINARY(10), c10 NCHAR(10), c11 TINYINT UNSIGNED, c12 SMALLINT UNSIGNED, c13 INT UNSIGNED, c14 BIGINT UNSIGNED) tags(n1 INT, w2 BOOL, t3 TINYINT, t4 SMALLINT, t5 BIGINT, t6 FLOAT, t7 DOUBLE, t8 TIMESTAMP, t9 BINARY(10), t10 NCHAR(10), t11 TINYINT UNSIGNED, t12 SMALLINT UNSIGNED, t13 INT UNSIGNED, t14 BIGINT UNSIGNED)") + "create table st(ts timestamp, c1 INT, c2 BOOL, c3 TINYINT, c4 SMALLINT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 TIMESTAMP, c9 BINARY(10), c10 NCHAR(10), c11 TINYINT UNSIGNED, c12 SMALLINT UNSIGNED, c13 INT UNSIGNED, c14 BIGINT UNSIGNED) tags(n1 INT, w2 BOOL, t3 TINYINT, t4 SMALLINT, t5 BIGINT, t6 FLOAT, t7 DOUBLE, t8 TIMESTAMP, t9 BINARY(10), t10 NCHAR(10), t11 TINYINT UNSIGNED, t12 SMALLINT UNSIGNED, t13 INT UNSIGNED, t14 BIGINT UNSIGNED)" + ) tdSql.execute( - "create table t1 using st tags(1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)") + "create table t1 using st tags(1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)" + ) tdSql.execute( - "insert into t1 values(1640000000000, 1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)") + "insert into t1 values(1640000000000, 1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)" + ) tdSql.execute( - "create table t2 using st tags(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)") + "create table t2 using st tags(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" + ) tdSql.execute( - "insert into t2 values(1640000000000, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)") + "insert into t2 values(1640000000000, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" + ) -# sys.exit(1) + # sys.exit(1) - binPath = self.getPath("taosdump") - if (binPath == ""): + binPath = self.getPath() + if binPath == "": tdLog.exit("taosdump not found!") else: tdLog.info("taosdump found in %s" % binPath) @@ -85,35 +93,73 @@ class TDTestCase: os.system("rm -rf %s" % self.tmpdir) os.makedirs(self.tmpdir) - os.system( - "%s --databases db -o %s -T 1" % - (binPath, self.tmpdir)) + os.system("%s --databases db -o %s -T 1" % (binPath, self.tmpdir)) -# sys.exit(1) + # sys.exit(1) - taosdumpInspectCmd = "%s -I %s/*.avro* -s | grep 'Schema:'|wc -l" % ( - binPath, self.tmpdir) - schemaTimes = subprocess.check_output( - taosdumpInspectCmd, shell=True).decode("utf-8") + taosdumpInspectCmd = "%s -I %s/taosdump.*/*.avro* -s | grep 'Schema:'|wc -l" % ( + binPath, + self.tmpdir, + ) + schemaTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) print("schema found times: %d" % int(schemaTimes)) - if (int(schemaTimes) != 3): + if int(schemaTimes) != 1: caller = inspect.getframeinfo(inspect.stack()[0][0]) tdLog.exit( - "%s(%d) failed: expected schema found times 3, actual %d" % - (caller.filename, caller.lineno, int(schemaTimes))) + "%s(%d) failed: expected schema found times 1, actual %d" + % (caller.filename, caller.lineno, 
int(schemaTimes)) + ) - taosdumpInspectCmd = "%s -I %s/*.avro* | grep '=== Records:'|wc -l" % ( - binPath, self.tmpdir) - recordsTimes = subprocess.check_output( - taosdumpInspectCmd, shell=True).decode("utf-8") + taosdumpInspectCmd = ( + "%s -I %s/taosdump*/data*/*.avro* -s | grep 'Schema:'|wc -l" + % (binPath, self.tmpdir) + ) + schemaTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) + print("schema found times: %d" % int(schemaTimes)) + + if int(schemaTimes) != 2: + caller = inspect.getframeinfo(inspect.stack()[0][0]) + tdLog.exit( + "%s(%d) failed: expected schema found times 2, actual %d" + % (caller.filename, caller.lineno, int(schemaTimes)) + ) + + taosdumpInspectCmd = ( + "%s -I %s/taosdump*/*.avro* | grep '=== Records:'|wc -l" + % (binPath, self.tmpdir) + ) + recordsTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) print("records found times: %d" % int(recordsTimes)) - if (int(recordsTimes) != 3): + if int(recordsTimes) != 1: caller = inspect.getframeinfo(inspect.stack()[0][0]) tdLog.exit( - "%s(%d) failed: expected records found times 3, actual %d" % - (caller.filename, caller.lineno, int(recordsTimes))) + "%s(%d) failed: expected records found times 1, actual %d" + % (caller.filename, caller.lineno, int(recordsTimes)) + ) + + taosdumpInspectCmd = ( + "%s -I %s/taosdump*/data*/*.avro* | grep '=== Records:'|wc -l" + % (binPath, self.tmpdir) + ) + recordsTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) + print("records found times: %d" % int(recordsTimes)) + + if int(recordsTimes) != 2: + caller = inspect.getframeinfo(inspect.stack()[0][0]) + tdLog.exit( + "%s(%d) failed: expected records found times 2, actual %d" + % (caller.filename, caller.lineno, int(recordsTimes)) + ) def stop(self): tdSql.close() From 8a925342cce2a7ddd5c2a46d77eabf1a2a7eaac8 Mon Sep 17 00:00:00 2001 From: xinsheng Ren <285808407@qq.com> Date: Wed, 22 Feb 2023 09:05:21 +0800 Subject: [PATCH 267/267] enh: check if the directory exists before installing on mac (#20047) * enh: check if the directory exists before installing on mac * fix: check directory on different platforms --------- Co-authored-by: facetosea <25808407@qq.com> --- packaging/tools/post.sh | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 6755ed40e5..78eb7f7587 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -627,9 +627,16 @@ function install_app() { fi } -function install_TDengine() { - echo -e "${GREEN}Start to install TDengine...${NC}" - log_print "start to install TDengine" +function checkDirectory() { + if [ ! -d "${bin_link_dir}" ]; then + ${csudo}mkdir -p ${bin_link_dir} + log_print "${bin_link_dir} directory created" + fi + + if [ ! -d "${lib_link_dir}" ]; then + ${csudo}mkdir -p ${lib_link_dir} + log_print "${lib_link_dir} directory created" + fi #install log and data dir , then ln to /usr/local/taos ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} @@ -640,6 +647,13 @@ function install_TDengine() { ${csudo}ln -s ${log_dir} ${log_link_dir} || : ${csudo}ln -s ${data_dir} ${data_link_dir} || : +} + +function install_TDengine() { + echo -e "${GREEN}Start to install TDengine...${NC}" + log_print "start to install TDengine" + + checkDirectory # Install include, lib, binary and service install_include &&