Merge branch 'fix/TD-22457' of github.com:taosdata/TDengine into fix/TD-22457

commit a7fd33d7ad
Author: dapan1121
Date: 2023-02-16 10:07:07 +08:00
77 changed files with 2884 additions and 995 deletions

View File

@ -387,7 +387,7 @@ pipeline {
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 55, unit: 'MINUTES'){
timeout(time: 75, unit: 'MINUTES'){
pre_test_win()
pre_test_build_win()
run_win_ctest()

View File

@ -276,7 +276,7 @@ sudo make install
After a successful installation, you can double-click the TDengine icon in Applications to start the service, or start the TDengine service from a terminal:
```bash
launchctl start com.tdengine.taosd
sudo launchctl start com.tdengine.taosd
```
You can then use the TDengine CLI to connect to the TDengine service. In a terminal, enter:

View File

@ -286,7 +286,7 @@ Installing from source code will also configure service management for TDengine.
To start the service after installation, double-click /Applications/TDengine to start the program, or run the following in a terminal:
```bash
launchctl start com.tdengine.taosd
sudo launchctl start com.tdengine.taosd
```
Users can then use the TDengine CLI to connect to the TDengine server. In a terminal, use:

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 22627d7
GIT_TAG 7c641c5
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -367,6 +367,12 @@ typedef struct SSortExecInfo {
int32_t readBytes; // read io bytes
} SSortExecInfo;
typedef struct STUidTagInfo {
char* name;
uint64_t uid;
void* pTagVal;
} STUidTagInfo;
// stream special block column
#define START_TS_COLUMN_INDEX 0

View File

@ -132,14 +132,16 @@ typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
SResultDataInfo resDataInfo;
uint32_t order; // data block scanner order: asc|desc
uint8_t isPseudoFunc;  // denote whether the current function is a pseudo function [added for perf reasons]
uint8_t isNotNullFunc; // denote whether the function never returns null values
uint8_t scanFlag; // record current running step, default: 0
int16_t functionId; // function id
char *pOutput; // final result output buffer, point to sdata->data
int32_t numOfParams;
// input parameter, e.g., top(k, 20), the number of results of top query is kept in param
SFunctParam *param;
// corresponding output buffer for timestamp of each result, e.g., diff/csum
SColumnInfoData *pTsOutput;
int32_t numOfParams;
int32_t offset;
SResultRowEntryInfo *resultInfo;
SSubsidiaryResInfo subsidiaries;
@ -152,7 +154,7 @@ typedef struct SqlFunctionCtx {
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
SSerializeDataHandle saveHandle;
int32_t exprIdx;
char udfName[TSDB_FUNC_NAME_LEN];
char *udfName;
} SqlFunctionCtx;
typedef struct tExprNode {

View File

@ -41,6 +41,7 @@ extern char tsSSE42Enable;
extern char tsAVXEnable;
extern char tsAVX2Enable;
extern char tsFMAEnable;
extern char tsTagFilterCache;
extern char configDir[];
extern char tsDataDir[];

View File

@ -53,6 +53,7 @@ typedef struct SArray {
* @return
*/
SArray* taosArrayInit(size_t size, size_t elemSize);
SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize);
/**
*
@ -149,14 +150,6 @@ void* taosArrayGetLast(const SArray* pArray);
*/
size_t taosArrayGetSize(const SArray* pArray);
/**
* set the size of array
* @param pArray
* @param size size of the array
* @return
*/
void taosArraySetSize(SArray* pArray, size_t size);
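taosArraySetSize is removed in this commit; callers that used to allocate an array and then force its logical size now pre-size it at creation time. A minimal, hypothetical sketch of the migration (createColumnList, SColumnInfoData, and numOfCols are illustrative, not part of this header):
```c
// hypothetical migration sketch for the removed taosArraySetSize()
static SArray* createColumnList(int32_t numOfCols) {
  // before: taosArrayInit(numOfCols, sizeof(SColumnInfoData)) followed by
  //         taosArraySetSize(pCols, numOfCols);
  // after:  allocate with an initial logical size in a single call
  SArray* pCols = taosArrayInit_s(numOfCols, sizeof(SColumnInfoData), numOfCols);
  if (pCols == NULL) {
    return NULL;  // out of memory
  }
  // elements can now be filled in place with taosArrayGet(), as the
  // blockDecode() and mndCreateStbForStream() changes later in this commit do
  return pCols;
}
```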
/**
* insert data into array
* @param pArray

View File

@ -89,7 +89,7 @@ bool taosAssertRelease(bool condition);
// Disable all asserts that may compromise the performance.
#if defined DISABLE_ASSERT
#define ASSERT(condition)
#define ASSERTS(condition, ...)
#define ASSERTS(condition, ...) (0)
#else
#define ASSERTS(condition, ...) taosAssertDebug(condition, __FILE__, __LINE__, __VA_ARGS__)
#ifdef NDEBUG
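Presumably the empty expansion became `(0)` so that ASSERTS can still appear in expression context when asserts are compiled out. A hypothetical sketch (checkKeyLen and the non-zero-on-failure convention of taosAssertDebug are assumptions, not taken from this commit):
```c
// with DISABLE_ASSERT, ASSERTS(...) expands to (0), so this still compiles and
// the failure branch is simply never taken
static int32_t checkKeyLen(int32_t keyLen) {
  if (ASSERTS(keyLen > 0, "invalid key length")) {
    return -1;  // assumption: taosAssertDebug reports and returns non-zero on failure
  }
  return 0;
}
```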

View File

@ -116,6 +116,7 @@ typedef struct SHNode {
struct SHNode *next;
uint32_t keyLen : 20;
uint32_t dataLen : 12;
uint32_t hashVal;
char data[];
} SHNode;
#pragma pack(pop)

View File

@ -45,11 +45,25 @@ typedef struct STraceId {
#define TRACE_GET_MSGID(traceId) (traceId)->msgId
#define TRACE_TO_STR(traceId, buf) \
do { \
int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \
int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \
sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \
//#define TRACE_TO_STR(traceId, buf) \
// do { \
// int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \
// int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \
// sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \
// } while (0)
#define TRACE_TO_STR(_traceId, _buf) \
do { \
int64_t rootId = (_traceId) != NULL ? (_traceId)->rootId : 0; \
int64_t msgId = (_traceId) != NULL ? (_traceId)->msgId : 0; \
char* _t = _buf; \
_t[0] = '0'; \
_t[1] = 'x'; \
_t += titoa(rootId, 16, &_t[2]); \
_t[0] = ':'; \
_t[1] = '0'; \
_t[2] = 'x'; \
_t += titoa(msgId, 16, &_t[3]); \
} while (0)
#ifdef __cplusplus

View File

@ -46,6 +46,9 @@ char *paGetToken(char *src, char **token, int32_t *tokenLen);
int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]);
int32_t taosHexStrToByteArray(char hexstr[], char bytes[]);
int32_t tintToHex(uint64_t val, char hex[]);
int32_t titoa(uint64_t val, size_t radix, char str[]);
char *taosIpStr(uint32_t ipInt);
uint32_t ip2uint(const char *const ip_addr);
void taosIp2String(uint32_t ip, char *str);
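The titoa helper declared above is what the rewritten TRACE_TO_STR macro and the tsdb file-name helpers later in this commit use instead of sprintf/snprintf. A minimal sketch of the intended usage, assuming titoa writes val in the given radix at str and returns the number of characters written (buildVgFidPrefix, vgId, and fid are illustrative names):
```c
// sketch: building a "v<vgId>f<fid>" fragment without snprintf, mirroring the
// getFileNamePrefix() helper added in this commit
static void buildVgFidPrefix(int32_t vgId, int32_t fid, char* name) {
  char* p = name;
  *(p++) = 'v';
  p += titoa(vgId, 10, p);  // e.g. "v3"
  *(p++) = 'f';
  p += titoa(fid, 10, p);   // e.g. "v3f1742"
  *p = '\0';
}
```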

include/util/xxhash.h (new file, 328 lines)
View File

@ -0,0 +1,328 @@
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/* Notice extracted from xxHash homepage :
xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.
Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
Name Speed Q.Score Author
xxHash 5.4 GB/s 10
CrapWow 3.2 GB/s 2 Andrew
MumurHash 3a 2.7 GB/s 10 Austin Appleby
SpookyHash 2.0 GB/s 10 Bob Jenkins
SBox 1.4 GB/s 9 Bret Mulvey
Lookup3 1.2 GB/s 9 Bob Jenkins
SuperFastHash 1.2 GB/s 1 Paul Hsieh
CityHash64 1.05 GB/s 10 Pike & Alakuijala
FNV 0.55 GB/s 5 Fowler, Noll, Vo
CRC32 0.43 GB/s 9
MD5-32 0.33 GB/s 10 Ronald L. Rivest
SHA1-32 0.28 GB/s 10
Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.
A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name Speed on 64 bits Speed on 32 bits
XXH64 13.8 GB/s 1.9 GB/s
XXH32 6.8 GB/s 6.0 GB/s
*/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************
* Definitions
******************************/
#include <stddef.h> /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
/* ****************************
* API modifier
******************************/
/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
* This is useful to include xxhash functions in `static` mode
* in order to inline them, and remove their symbol from the public list.
* Inlining can offer dramatic performance improvement on small keys.
* Methodology :
* #define XXH_INLINE_ALL
* #include "xxhash.h"
* `xxhash.c` is automatically included.
* It's not useful to compile and link it as a separate module.
*/
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY
# endif
# if defined(__GNUC__)
# define XXH_PUBLIC_API static __inline __attribute__((unused))
# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define XXH_PUBLIC_API static inline
# elif defined(_MSC_VER)
# define XXH_PUBLIC_API static __inline
# else
/* this version may generate warnings for unused static functions */
# define XXH_PUBLIC_API static
# endif
#else
# define XXH_PUBLIC_API /* do nothing */
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
/*! XXH_NAMESPACE, aka Namespace Emulation :
*
* If you want to include _and expose_ xxHash functions from within your own library,
* but also want to avoid symbol collisions with other libraries which may also include xxHash,
*
* you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
* with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
*
* Note that no change is required within the calling program as long as it includes `xxhash.h` :
* regular symbol name will be automatically translated by this header.
*/
#ifdef XXH_NAMESPACE
# define XXH_CAT(A,B) A##B
# define XXH_NAME2(A,B) XXH_CAT(A,B)
# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
#endif
/* *************************************
* Version
***************************************/
#define XXH_VERSION_MAJOR 0
#define XXH_VERSION_MINOR 6
#define XXH_VERSION_RELEASE 5
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
/*-**********************************************************************
* 32-bit hash
************************************************************************/
typedef unsigned int XXH32_hash_t;
/*! XXH32() :
Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input".
The memory between input & input+length must be valid (allocated and read-accessible).
"seed" can be used to alter the result predictably.
Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
/*====== Streaming ======*/
typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
/*
* Streaming functions generate the xxHash of an input provided in multiple segments.
* Note that, for small input, they are slower than single-call functions, due to state management.
* For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
*
* XXH state must first be allocated, using XXH*_createState() .
*
* Start a new hash by initializing state with a seed, using XXH*_reset().
*
* Then, feed the hash state by calling XXH*_update() as many times as necessary.
* The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
*
* Finally, a hash value can be produced anytime, by using XXH*_digest().
* This function returns the nn-bits hash as an int or long long.
*
* It's still possible to continue inserting input into the hash state after a digest,
* and generate some new hashes later on, by calling again XXH*_digest().
*
* When done, free XXH state space if it was allocated dynamically.
*/
/*====== Canonical representation ======*/
typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
* The canonical representation uses human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
* 64-bit hash
************************************************************************/
typedef unsigned long long XXH64_hash_t;
/*! XXH64() :
Calculate the 64-bit hash of sequence of length "len" stored at memory address "input".
"seed" can be used to alter the result predictably.
This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
/*====== Streaming ======*/
typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
/*====== Canonical representation ======*/
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
#endif /* XXH_NO_LONG_LONG */
#ifdef XXH_STATIC_LINKING_ONLY
/* ================================================================================================
This section contains declarations which are not guaranteed to remain stable.
They may change in future versions, becoming incompatible with a different version of the library.
These declarations should only be used with static linking.
Never use them in association with dynamic linking !
=================================================================================================== */
/* These definitions are only present to allow
* static allocation of XXH state, on stack or in a struct for example.
* Never **ever** use members directly. */
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
struct XXH32_state_s {
uint32_t total_len_32;
uint32_t large_len;
uint32_t v1;
uint32_t v2;
uint32_t v3;
uint32_t v4;
uint32_t mem32[4];
uint32_t memsize;
uint32_t reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
struct XXH64_state_s {
uint64_t total_len;
uint64_t v1;
uint64_t v2;
uint64_t v3;
uint64_t v4;
uint64_t mem64[4];
uint32_t memsize;
uint32_t reserved[2]; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
# else
struct XXH32_state_s {
unsigned total_len_32;
unsigned large_len;
unsigned v1;
unsigned v2;
unsigned v3;
unsigned v4;
unsigned mem32[4];
unsigned memsize;
unsigned reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
struct XXH64_state_s {
unsigned long long total_len;
unsigned long long v1;
unsigned long long v2;
unsigned long long v3;
unsigned long long v4;
unsigned long long mem64[4];
unsigned memsize;
unsigned reserved[2]; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
# endif
# endif
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
#endif
#endif /* XXH_STATIC_LINKING_ONLY */
#if defined (__cplusplus)
}
#endif
#endif /* XXHASH_H_5627135585666179 */
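As a usage illustration, the streaming interface declared above follows the usual createState/reset/update/digest/freeState sequence. A minimal sketch (hashInTwoParts is an illustrative name) that hashes a buffer in two segments and produces the same value as a single XXH64() call:
```c
#include "xxhash.h"

// hash a buffer in two segments with the streaming API declared above
static unsigned long long hashInTwoParts(const void* buf, size_t len, unsigned long long seed) {
  XXH64_state_t* st = XXH64_createState();
  if (st == NULL) return 0;

  XXH64_reset(st, seed);
  XXH64_update(st, buf, len / 2);                               // first segment
  XXH64_update(st, (const char*)buf + len / 2, len - len / 2);  // remaining bytes
  unsigned long long h = XXH64_digest(st);                      // equals XXH64(buf, len, seed)

  XXH64_freeState(st);
  return h;
}
```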

View File

@ -531,13 +531,13 @@ function install_taosadapter_service() {
}
function install_service_on_launchctl() {
${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist
${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist
${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
}
function install_service() {

View File

@ -512,14 +512,14 @@ function install_service_on_systemd() {
function install_service_on_launchctl() {
if [ -f ${install_main_dir}/service/com.taosdata.taosd.plist ]; then
${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}cp ${install_main_dir}/service/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist || :
${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist || :
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosd.plist || :
fi
if [ -f ${install_main_dir}/service/com.taosdata.taosadapter.plist ]; then
${csudouser}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}cp ${install_main_dir}/service/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist || :
${csudouser}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist || :
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist || :
fi
}

View File

@ -934,6 +934,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
if (TSDB_CODE_SUCCESS == code) {
pRequest->stmtType = pRequest->pQuery->pRoot->type;
phaseAsyncQuery(pWrapper);
} else {
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),

View File

@ -1546,7 +1546,10 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
}
void colDataDestroy(SColumnInfoData* pColData) {
if (!pColData) return;
if (!pColData) {
return;
}
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
taosMemoryFreeClear(pColData->varmeta.offset);
} else {
@ -2353,8 +2356,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
pStart += sizeof(uint64_t);
if (pBlock->pDataBlock == NULL) {
pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
taosArraySetSize(pBlock->pDataBlock, numOfCols);
pBlock->pDataBlock = taosArrayInit_s(numOfCols, sizeof(SColumnInfoData), numOfCols);
}
for (int32_t i = 0; i < numOfCols; ++i) {

View File

@ -140,6 +140,7 @@ int32_t tsMaxMemUsedByInsert = 1024;
float tsSelectivityRatio = 1.0;
int32_t tsTagFilterResCacheSize = 1024 * 10;
char tsTagFilterCache = 0;
// the maximum allowed query buffer size during query processing for each data node.
// -1 no limit (default)
@ -351,6 +352,7 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "AVX2", tsAVX2Enable, 0) != 0) return -1;
if (cfgAddBool(pCfg, "FMA", tsFMAEnable, 0) != 0) return -1;
if (cfgAddBool(pCfg, "SIMD-builtins", tsSIMDBuiltins, 0) != 0) return -1;
if (cfgAddBool(pCfg, "tagFilterCache", tsTagFilterCache, 0) != 0) return -1;
if (cfgAddInt64(pCfg, "openMax", tsOpenMax, 0, INT64_MAX, 1) != 0) return -1;
if (cfgAddInt64(pCfg, "streamMax", tsStreamMax, 0, INT64_MAX, 1) != 0) return -1;
@ -731,6 +733,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
tsSIMDBuiltins = (bool)cfgGetItem(pCfg, "SIMD-builtins")->bval;
tsTagFilterCache = (bool)cfgGetItem(pCfg, "tagFilterCache")->bval;
tsEnableMonitor = cfgGetItem(pCfg, "monitor")->bval;
tsMonitorInterval = cfgGetItem(pCfg, "monitorInterval")->i32;
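Taken together, the tagFilterCache hunks above show the three places a new boolean option is wired: the global default, the cfgAddBool registration, and the cfgGetItem read-back. A hypothetical sketch of the same pattern for an illustrative option (myNewOption and tsMyNewOption are not part of the commit):
```c
// global default; the matching header would carry "extern char tsMyNewOption;"
char tsMyNewOption = 0;

// register the option so it can be set through the configuration file
static int32_t addMyNewOptionCfg(SConfig* pCfg) {
  if (cfgAddBool(pCfg, "myNewOption", tsMyNewOption, 0) != 0) return -1;
  return 0;
}

// read the parsed value back when the server configuration is applied
static void applyMyNewOptionCfg(SConfig* pCfg) {
  tsMyNewOption = (bool)cfgGetItem(pCfg, "myNewOption")->bval;
}
```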

View File

@ -880,6 +880,12 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (strcasecmp(cfgReq.config, "resetlog") == 0) {
strcpy(dcfgReq.config, "resetlog");
} else if (strncasecmp(cfgReq.config, "monitor", 7) == 0) {
if (' ' != cfgReq.config[7] && 0 != cfgReq.config[7]) {
mError("dnode:%d, failed to config monitor since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
terrno = TSDB_CODE_INVALID_CFG;
return -1;
}
const char *value = cfgReq.value;
int32_t flag = atoi(value);
if (flag <= 0) {
@ -900,12 +906,18 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
int32_t optLen = strlen(optName);
if (strncasecmp(cfgReq.config, optName, optLen) != 0) continue;
if (' ' != cfgReq.config[optLen] && 0 != cfgReq.config[optLen]) {
mError("dnode:%d, failed to config since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
terrno = TSDB_CODE_INVALID_CFG;
return -1;
}
const char *value = cfgReq.value;
int32_t flag = atoi(value);
if (flag <= 0) {
flag = atoi(cfgReq.config + optLen + 1);
}
if (flag <= 0 || flag > 255) {
if (flag < 0 || flag > 255) {
mError("dnode:%d, failed to config %s since value:%d", cfgReq.dnodeId, optName, flag);
terrno = TSDB_CODE_INVALID_CFG;
return -1;
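The added delimiter checks ensure an option name only matches when it is followed by a space or the end of the string, so a config string that merely shares a prefix (for example "monitorfoo") is rejected with TSDB_CODE_INVALID_CFG. A hypothetical helper illustrating the rule (matchesOption is not part of the commit):
```c
#include <stdbool.h>
#include <string.h>
#include <strings.h>  // strncasecmp

// an option only matches when the character right after its name is a space
// or the string terminator
static bool matchesOption(const char* config, const char* optName) {
  size_t optLen = strlen(optName);
  if (strncasecmp(config, optName, optLen) != 0) return false;
  return config[optLen] == ' ' || config[optLen] == '\0';
}

// matchesOption("monitor 1", "monitor")   -> true
// matchesOption("monitor",   "monitor")   -> true  (value taken from cfgReq.value)
// matchesOption("monitorfoo", "monitor")  -> false (rejected as invalid config)
```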

View File

@ -479,9 +479,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
tstrncpy(createReq.name, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
createReq.numOfColumns = pStream->outputSchema.nCols;
createReq.numOfTags = 1; // group id
createReq.pColumns = taosArrayInit(createReq.numOfColumns, sizeof(SField));
createReq.pColumns = taosArrayInit_s(createReq.numOfColumns, sizeof(SField), createReq.numOfColumns);
// build fields
taosArraySetSize(createReq.pColumns, createReq.numOfColumns);
for (int32_t i = 0; i < createReq.numOfColumns; i++) {
SField *pField = taosArrayGet(createReq.pColumns, i);
tstrncpy(pField->name, pStream->outputSchema.pSchema[i].name, TSDB_COL_NAME_LEN);
@ -489,8 +488,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
pField->type = pStream->outputSchema.pSchema[i].type;
pField->bytes = pStream->outputSchema.pSchema[i].bytes;
}
createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField));
taosArraySetSize(createReq.pTags, 1);
createReq.pTags = taosArrayInit_s(createReq.numOfTags, sizeof(SField), 1);
// build tags
SField *pField = taosArrayGet(createReq.pTags, 0);
strcpy(pField->name, "group_id");

View File

@ -104,8 +104,8 @@ void metaReaderClear(SMetaReader *pReader);
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int32_t metaGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid);
int metaGetTableEntryByName(SMetaReader *pReader, const char *name);
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags);
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags);
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList);
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList);
int32_t metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
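With the SHashObj parameter dropped, callers now pass an SArray of STUidTagInfo (the struct added earlier in this commit) and read the tag value back from pTagVal. A hypothetical caller sketch, assuming the callee leaves a heap copy in pTagVal that the caller must free (fetchOneTableTag is an illustrative name):
```c
// hypothetical caller of the revised tag-retrieval API
static int32_t fetchOneTableTag(SMeta* pMeta, int64_t suid, uint64_t uid) {
  SArray* pList = taosArrayInit(1, sizeof(STUidTagInfo));
  if (pList == NULL) return -1;

  STUidTagInfo info = {.uid = uid, .pTagVal = NULL};
  taosArrayPush(pList, &info);

  int32_t code = metaGetTableTagsByUids(pMeta, suid, pList);
  if (code == 0) {
    STUidTagInfo* p = taosArrayGet(pList, 0);
    // p->pTagVal now points to the serialized tag value, or stays NULL if the lookup failed
    taosMemoryFree(p->pTagVal);
  }

  taosArrayDestroy(pList);
  return code;
}
```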

View File

@ -236,6 +236,7 @@ void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF,
void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]);
void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]);
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]);
// SDelFile
void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]);
// tsdbFS.c ==============================================================================================
@ -646,6 +647,7 @@ typedef struct SSttBlockLoadInfo {
int16_t *colIds;
int32_t numOfCols;
bool sttBlockLoaded;
int32_t numOfStt;
// keep the last access position, this position may be used to reduce the binary times for
// starting last block data for a new table
@ -711,7 +713,7 @@ bool tMergeTreeNext(SMergeTree *pMTree);
TSDBROW tMergeTreeGetRow(SMergeTree *pMTree);
void tMergeTreeClose(SMergeTree *pMTree);
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols);
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfStt);
void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el);
void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);

View File

@ -107,7 +107,7 @@ typedef struct STbUidStore STbUidStore;
#define META_BEGIN_HEAP_NIL 2
int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback);
int metaClose(SMeta* pMeta);
int metaClose(SMeta** pMeta);
int metaBegin(SMeta* pMeta, int8_t fromSys);
TXN* metaGetTxn(SMeta* pMeta);
int metaCommit(SMeta* pMeta, TXN* txn);

View File

@ -32,9 +32,9 @@ typedef struct SMetaStbStatsEntry {
} SMetaStbStatsEntry;
typedef struct STagFilterResEntry {
uint64_t suid; // uid for super table
SList list; // the linked list of md5 digest, extracted from the serialized tag query condition
uint32_t qTimes; // queried times for current super table
uint32_t hitTimes; // queried times for current super table
uint32_t accTime;
} STagFilterResEntry;
struct SMetaCache {
@ -55,6 +55,7 @@ struct SMetaCache {
// query cache
struct STagFilterResCache {
TdThreadMutex lock;
uint32_t accTimes;
SHashObj* pTableEntry;
SLRUCache* pUidResCache;
} sTagFilterResCache;
@ -132,6 +133,7 @@ int32_t metaCacheOpen(SMeta* pMeta) {
goto _err2;
}
pCache->sTagFilterResCache.accTimes = 0;
pCache->sTagFilterResCache.pTableEntry =
taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK);
if (pCache->sTagFilterResCache.pTableEntry == NULL) {
@ -159,9 +161,9 @@ void metaCacheClose(SMeta* pMeta) {
entryCacheClose(pMeta);
statsCacheClose(pMeta);
taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry);
taosLRUCacheCleanup(pMeta->pCache->sTagFilterResCache.pUidResCache);
taosThreadMutexDestroy(&pMeta->pCache->sTagFilterResCache.lock);
taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry);
taosMemoryFree(pMeta->pCache);
pMeta->pCache = NULL;
@ -424,6 +426,31 @@ int32_t metaStatsCacheGet(SMeta* pMeta, int64_t uid, SMetaStbStats* pInfo) {
return code;
}
static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInvalidRes, int32_t keyLen, SLRUCache* pCache, uint64_t suid) {
SListIter iter = {0};
tdListInitIter((SList*)&(pEntry->list), &iter, TD_LIST_FORWARD);
SListNode* pNode = NULL;
uint64_t buf[3];
buf[0] = suid;
int32_t len = sizeof(uint64_t) * tListLen(buf);
while ((pNode = tdListNext(&iter)) != NULL) {
memcpy(&buf[1], pNode->data, keyLen);
// check whether it still exists in the LRU cache; if not, record it for removal from the linked list.
LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len);
if (pRes == NULL) { // remove the item in the linked list
taosArrayPush(pInvalidRes, &pNode);
} else {
taosLRUCacheRelease(pCache, pRes, false);
}
}
return 0;
}
int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
bool* acquireRes) {
// generate the composed key for LRU cache
@ -431,16 +458,18 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
SHashObj* pTableMap = pMeta->pCache->sTagFilterResCache.pTableEntry;
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
uint64_t buf[3] = {0};
uint32_t times = 0;
uint64_t buf[4];
*acquireRes = 0;
buf[0] = suid;
memcpy(&buf[1], pKey, keyLen);
buf[0] = (uint64_t) pTableMap;
buf[1] = suid;
memcpy(&buf[2], pKey, keyLen);
taosThreadMutexLock(pLock);
pMeta->pCache->sTagFilterResCache.accTimes += 1;
int32_t len = keyLen + sizeof(uint64_t);
int32_t len = keyLen + sizeof(uint64_t) * 2;
LRUHandle* pHandle = taosLRUCacheLookup(pCache, buf, len);
if (pHandle == NULL) {
taosThreadMutexUnlock(pLock);
@ -458,48 +487,17 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
// set the result into the buffer
taosArrayAddBatch(pList1, p + sizeof(int32_t), size);
times = atomic_add_fetch_32(&(*pEntry)->qTimes, 1);
(*pEntry)->hitTimes += 1;
uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes;
if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) {
metaInfo("cache hit:%d, total acc:%d, rate:%.2f", (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc);
}
taosLRUCacheRelease(pCache, pHandle, false);
// unlock meta
taosThreadMutexUnlock(pLock);
// check if scanning all items are necessary or not
if (times >= 5000 && TD_DLIST_NELES(&(*pEntry)->list) > 10) {
taosThreadMutexLock(pLock);
SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES);
SListIter iter = {0};
tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD);
SListNode* pNode = NULL;
while ((pNode = tdListNext(&iter)) != NULL) {
memcpy(&buf[1], pNode->data, keyLen);
// check whether it is existed in LRU cache, and remove it from linked list if not.
LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len);
if (pRes == NULL) { // remove the item in the linked list
taosArrayPush(pInvalidRes, &pNode);
} else {
taosLRUCacheRelease(pCache, pRes, false);
}
}
// remove the keys, of which query uid lists have been replaced already.
size_t s = taosArrayGetSize(pInvalidRes);
for (int32_t i = 0; i < s; ++i) {
SListNode** p1 = taosArrayGet(pInvalidRes, i);
tdListPopNode(&(*pEntry)->list, *p1);
taosMemoryFree(*p1);
}
atomic_store_32(&(*pEntry)->qTimes, 0); // reset the query times
taosArrayDestroy(pInvalidRes);
taosThreadMutexUnlock(pLock);
}
return TSDB_CODE_SUCCESS;
}
@ -507,9 +505,52 @@ static void freePayload(const void* key, size_t keyLen, void* value) {
if (value == NULL) {
return;
}
const uint64_t* p = key;
if (keyLen != sizeof(int64_t) * 4) {
metaError("key length is invalid, length:%d, expect:%d", (int32_t) keyLen, (int32_t) sizeof(uint64_t)*2);
return;
}
SHashObj* pHashObj = (SHashObj*)p[0];
STagFilterResEntry** pEntry = taosHashGet(pHashObj, &p[1], sizeof(uint64_t));
{
int64_t st = taosGetTimestampUs();
SListIter iter = {0};
tdListInitIter((SList*)&((*pEntry)->list), &iter, TD_LIST_FORWARD);
SListNode* pNode = NULL;
while ((pNode = tdListNext(&iter)) != NULL) {
uint64_t* digest = (uint64_t*)pNode->data;
if (digest[0] == p[2] && digest[1] == p[3]) {
tdListPopNode(&((*pEntry)->list), pNode);
int64_t et = taosGetTimestampUs();
metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
(et - st)/1000.0);
return;
}
}
}
taosMemoryFree(value);
}
static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyLen, uint64_t suid) {
STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry));
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
p->hitTimes = 0;
tdListInit(&p->list, keyLen);
taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES);
tdListAppend(&p->list, pKey);
return 0;
}
// check both the payload size and selectivity ratio
int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio) {
@ -533,42 +574,61 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int
SHashObj* pTableEntry = pMeta->pCache->sTagFilterResCache.pTableEntry;
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
// the format of key:
// hash table address(8bytes) + suid(8bytes) + MD5 digest(16bytes)
uint64_t buf[4] = {0};
buf[0] = (uint64_t) pTableEntry;
buf[1] = suid;
memcpy(&buf[2], pKey, keyLen);
ASSERT(keyLen == 16);
int32_t code = 0;
taosThreadMutexLock(pLock);
STagFilterResEntry** pEntry = taosHashGet(pTableEntry, &suid, sizeof(uint64_t));
if (pEntry == NULL) {
STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry));
p->qTimes = 0;
tdListInit(&p->list, keyLen);
taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES);
tdListAppend(&p->list, pKey);
code = addNewEntry(pTableEntry, pKey, keyLen, suid);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
} else {
tdListAppend(&(*pEntry)->list, pKey);
// check if it exists or not
size_t size = listNEles(&(*pEntry)->list);
if (size == 0) {
tdListAppend(&(*pEntry)->list, pKey);
} else {
SListNode* pNode = listHead(&(*pEntry)->list);
uint64_t* p = (uint64_t*)pNode->data;
if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) {
// the item already exists in the list, no need to add it to the cache again.
taosThreadMutexUnlock(pLock);
return TSDB_CODE_SUCCESS;
} else { // not equal, append it
tdListAppend(&(*pEntry)->list, pKey);
}
}
}
uint64_t buf[3] = {0};
buf[0] = suid;
memcpy(&buf[1], pKey, keyLen);
ASSERT(sizeof(uint64_t) + keyLen == 24);
// add to cache.
taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL,
taosLRUCacheInsert(pCache, buf, sizeof(uint64_t)*2 + keyLen, pPayload, payloadLen, freePayload, NULL,
TAOS_LRU_PRIORITY_LOW);
_end:
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid,
(int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry));
return TSDB_CODE_SUCCESS;
return code;
}
// remove the lru cache that are expired due to the tags value update, or creating, or dropping, of child tables
int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
int32_t keyLen = sizeof(uint64_t) * 3;
uint64_t p[3] = {0};
p[0] = suid;
uint64_t p[4] = {0};
p[0] = (uint64_t) pMeta->pCache->sTagFilterResCache.pTableEntry;
p[1] = suid;
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
@ -584,11 +644,11 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
SListNode* pNode = NULL;
while ((pNode = tdListNext(&iter)) != NULL) {
memcpy(&p[1], pNode->data, 16);
memcpy(&p[2], pNode->data, 16);
taosLRUCacheErase(pMeta->pCache->sTagFilterResCache.pUidResCache, p, keyLen);
}
(*pEntry)->qTimes = 0;
(*pEntry)->hitTimes = 0;
tdListEmpty(&(*pEntry)->list);
taosThreadMutexUnlock(pLock);

View File

@ -201,7 +201,8 @@ _err:
return -1;
}
int metaClose(SMeta *pMeta) {
int metaClose(SMeta **ppMeta) {
SMeta *pMeta = *ppMeta;
if (pMeta) {
if (pMeta->pEnv) metaAbort(pMeta);
if (pMeta->pCache) metaCacheClose(pMeta);
@ -221,7 +222,8 @@ int metaClose(SMeta *pMeta) {
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
metaDestroyLock(pMeta);
taosMemoryFree(pMeta);
taosMemoryFreeClear(*ppMeta);
}
return 0;
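The switch to a double pointer lets metaClose clear the caller's handle as well as free it. A hypothetical caller sketch (openAndCloseMeta is an illustrative name):
```c
static int32_t openAndCloseMeta(SVnode* pVnode, int8_t rollback) {
  SMeta* pMeta = NULL;
  if (metaOpen(pVnode, &pMeta, rollback) < 0) {
    return -1;
  }
  // ... use pMeta ...
  metaClose(&pMeta);  // frees the object and resets pMeta to NULL via taosMemoryFreeClear
  return 0;           // a later "if (pMeta)" check now safely sees NULL instead of a dangling pointer
}
```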

View File

@ -1371,13 +1371,14 @@ static int32_t metaGetTableTagByUid(SMeta *pMeta, int64_t suid, int64_t uid, voi
return ret;
}
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags) {
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) {
const int32_t LIMIT = 128;
int32_t isLock = false;
int32_t sz = uidList ? taosArrayGetSize(uidList) : 0;
for (int i = 0; i < sz; i++) {
tb_uid_t *id = taosArrayGet(uidList, i);
STUidTagInfo *p = taosArrayGet(uidList, i);
if (i % LIMIT == 0) {
if (isLock) metaULock(pMeta);
@ -1386,51 +1387,72 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHas
isLock = true;
}
if (taosHashGet(tags, id, sizeof(tb_uid_t)) == NULL) {
// if (taosHashGet(tags, &p->uid, sizeof(tb_uid_t)) == NULL) {
void *val = NULL;
int32_t len = 0;
if (metaGetTableTagByUid(pMeta, suid, *id, &val, &len, false) == 0) {
taosHashPut(tags, id, sizeof(tb_uid_t), val, len);
if (metaGetTableTagByUid(pMeta, suid, p->uid, &val, &len, false) == 0) {
p->pTagVal = taosMemoryMalloc(len);
memcpy(p->pTagVal, val, len);
tdbFree(val);
} else {
metaError("vgId:%d, failed to table IDs, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid,
*id);
metaError("vgId:%d, failed to table tags, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid,
p->uid);
}
}
}
// }
if (isLock) metaULock(pMeta);
return 0;
}
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) {
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *pUidTagInfo) {
SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid, 1);
SHashObj *uHash = NULL;
size_t len = taosArrayGetSize(uidList); // len > 0 means there already have uids
if (len > 0) {
uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < len; i++) {
int64_t *uid = taosArrayGet(uidList, i);
taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
// If numOfElems > 0, the caller has already specified the uids and we only want the
// tags of those tables. Otherwise, the tags of all child tables are retrieved and kept
// in the pUidTagInfo list, which may require a lot of memory.
SHashObj *pSepecifiedUidMap = NULL;
size_t numOfElems = taosArrayGetSize(pUidTagInfo);
if (numOfElems > 0) {
pSepecifiedUidMap = taosHashInit(numOfElems / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < numOfElems; i++) {
STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, i);
taosHashPut(pSepecifiedUidMap, &pTagInfo->uid, sizeof(uint64_t), &i, sizeof(int32_t));
}
}
while (1) {
tb_uid_t id = metaCtbCursorNext(pCur);
if (id == 0) {
break;
}
if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) {
continue;
} else if (len == 0) {
taosArrayPush(uidList, &id);
}
if (numOfElems == 0) { // all data needs to be added into the pUidTagInfo list
while (1) {
tb_uid_t uid = metaCtbCursorNext(pCur);
if (uid == 0) {
break;
}
taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen);
STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal};
info.pTagVal = taosMemoryMalloc(pCur->vLen);
memcpy(info.pTagVal, pCur->pVal, pCur->vLen);
taosArrayPush(pUidTagInfo, &info);
}
} else { // only the specified tables need to be added
while (1) {
tb_uid_t uid = metaCtbCursorNext(pCur);
if (uid == 0) {
break;
}
int32_t *index = taosHashGet(pSepecifiedUidMap, &uid, sizeof(uint64_t));
if (index == NULL) {
continue;
}
STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, *index);
if (pTagInfo->pTagVal == NULL) {
pTagInfo->pTagVal = taosMemoryMalloc(pCur->vLen);
memcpy(pTagInfo->pTagVal, pCur->pVal, pCur->vLen);
}
}
}
taosHashCleanup(uHash);
taosHashCleanup(pSepecifiedUidMap);
metaCloseCtbCursor(pCur, 1);
return TSDB_CODE_SUCCESS;
}

View File

@ -816,7 +816,6 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
// TODO version should be assigned and refed during preprocess
SWalRef* pRef = walRefCommittedVer(pTq->pVnode->pWal);
if (pRef == NULL) {
ASSERT(0);
return -1;
}
int64_t ver = pRef->refVer;
@ -837,12 +836,9 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
pHandle->execHandle.task =
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols, NULL);
ASSERT(pHandle->execHandle.task);
void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.task, &scanner);
ASSERT(scanner);
pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(pHandle->execHandle.pExecReader);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
@ -875,8 +871,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
ASSERT(0);
return -1;
}
} else {
/*ASSERT(pExec->consumerId == req.oldConsumerId);*/
@ -886,8 +881,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
atomic_add_fetch_32(&pHandle->epoch, 1);
taosMemoryFree(req.qmsg);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
ASSERT(0);
return -1;
}
// close handle
}

View File

@ -71,17 +71,14 @@ int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
int32_t tqMetaOpen(STQ* pTq) {
if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaDB, 0) < 0) {
ASSERT(0);
return -1;
}
if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pExecStore, 0) < 0) {
ASSERT(0);
return -1;
}
if (tdbTbOpen("tq.check.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pCheckStore, 0) < 0) {
ASSERT(0);
return -1;
}
@ -197,40 +194,49 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
int32_t code;
int32_t vlen;
tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
ASSERT(code == 0);
tqDebug("tq save %s(%d) consumer %" PRId64 " vgId:%d", pHandle->subKey, (int32_t)strlen(pHandle->subKey),
pHandle->consumerId, TD_VID(pTq->pVnode));
void* buf = taosMemoryCalloc(1, vlen);
if (buf == NULL) {
ASSERT(0);
return -1;
}
SEncoder encoder;
tEncoderInit(&encoder, buf, vlen);
if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
ASSERT(0);
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
TXN* txn;
if (tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
0) {
ASSERT(0);
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, txn) < 0) {
ASSERT(0);
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
if (tdbCommit(pTq->pMetaDB, txn) < 0) {
ASSERT(0);
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
if (tdbPostCommit(pTq->pMetaDB, txn) < 0) {
ASSERT(0);
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
tEncoderClear(&encoder);

View File

@ -244,6 +244,11 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST
int16_t nCol = taosArrayGetSize(pLast);
int16_t iCol = 0;
if (nCol != pTSchema->numOfCols) {
invalidate = true;
goto _invalidate;
}
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (keyTs > tTsVal->ts) {
STColumn *pTColumn = &pTSchema->columns[0];
@ -259,6 +264,12 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST
SColVal colVal = {0};
tTSRowGetVal(row, pTSchema, iCol, &colVal);
if (colVal.cid != tColVal->cid) {
invalidate = true;
goto _invalidate;
}
if (!COL_VAL_IS_NONE(&colVal)) {
if (keyTs == tTsVal1->ts && !COL_VAL_IS_NONE(tColVal)) {
invalidate = true;
@ -268,7 +279,8 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST
SLastCol lastCol = {.ts = keyTs, .colVal = colVal};
if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData)
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
@ -315,6 +327,11 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
int16_t nCol = taosArrayGetSize(pLast);
int16_t iCol = 0;
if (nCol != pTSchema->numOfCols) {
invalidate = true;
goto _invalidate;
}
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (keyTs > tTsVal->ts) {
STColumn *pTColumn = &pTSchema->columns[0];
@ -330,6 +347,12 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
SColVal colVal = {0};
tTSRowGetVal(row, pTSchema, iCol, &colVal);
if (colVal.cid != tColVal->cid) {
invalidate = true;
goto _invalidate;
}
if (COL_VAL_IS_VALUE(&colVal)) {
if (keyTs == tTsVal1->ts && COL_VAL_IS_VALUE(tColVal)) {
invalidate = true;
@ -339,7 +362,8 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
SLastCol lastCol = {.ts = keyTs, .colVal = colVal};
if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData)
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {

View File

@ -181,7 +181,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
}
}
p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0);
int32_t numOfStt = ((SVnode*)pVnode)->config.sttTrigger;
p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0, numOfStt);
if (p->pLoadInfo == NULL) {
tsdbCacherowsReaderClose(p);
return TSDB_CODE_OUT_OF_MEMORY;

View File

@ -92,24 +92,56 @@ static int32_t tGetSmaFile(uint8_t *p, SSmaFile *pSmaFile) {
}
// EXPOSED APIS ==================================================
static char* getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t commitId, char fname[]) {
const char* p1 = tfsGetDiskPath(pTsdb->pVnode->pTfs, did);
int32_t len = strlen(p1);
char* p = memcpy(fname, p1, len);
p += len;
*(p++) = TD_DIRSEP[0];
len = strlen(pTsdb->path);
memcpy(p, pTsdb->path, len);
p += len;
*(p++) = TD_DIRSEP[0];
*(p++) = 'v';
p += titoa(TD_VID(pTsdb->pVnode), 10, p);
*(p++) = 'f';
p += titoa(fid, 10, p);
memcpy(p, "ver", 3);
p += 3;
p += titoa(commitId, 10, p);
return p;
}
void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pHeadF->commitID, ".head");
char* p = getFileNamePrefix(pTsdb, did, fid, pHeadF->commitID, fname);
memcpy(p, ".head", 5);
p[5] = 0;
}
void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pDataF->commitID, ".data");
char* p = getFileNamePrefix(pTsdb, did, fid, pDataF->commitID, fname);
memcpy(p, ".data", 5);
p[5] = 0;
}
void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSttF->commitID, ".stt");
char* p = getFileNamePrefix(pTsdb, did, fid, pSttF->commitID, fname);
memcpy(p, ".stt", 4);
p[4] = 0;
}
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSmaF->commitID, ".sma");
char* p = getFileNamePrefix(pTsdb, did, fid, pSmaF->commitID, fname);
memcpy(p, ".sma", 4);
p[4] = 0;
}
bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2) { return pDelFile1->commitID == pDelFile2->commitID; }

View File

@ -31,14 +31,16 @@ struct SLDataIter {
SSttBlockLoadInfo *pBlockLoadInfo;
};
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols) {
SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(TSDB_MAX_STT_TRIGGER, sizeof(SSttBlockLoadInfo));
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfSttTrigger) {
SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(numOfSttTrigger, sizeof(SSttBlockLoadInfo));
if (pLoadInfo == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
pLoadInfo->numOfStt = numOfSttTrigger;
for (int32_t i = 0; i < numOfSttTrigger; ++i) {
pLoadInfo[i].blockIndex[0] = -1;
pLoadInfo[i].blockIndex[1] = -1;
pLoadInfo[i].currentLoadBlockIndex = 1;
@ -63,7 +65,7 @@ SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList,
}
void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) {
pLoadInfo[i].currentLoadBlockIndex = 1;
pLoadInfo[i].blockIndex[0] = -1;
pLoadInfo[i].blockIndex[1] = -1;
@ -77,14 +79,14 @@ void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
}
void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el) {
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) {
*el += pLoadInfo[i].elapsedTime;
*blocks += pLoadInfo[i].loadBlocks;
}
}
void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) {
pLoadInfo[i].currentLoadBlockIndex = 1;
pLoadInfo[i].blockIndex[0] = -1;
pLoadInfo[i].blockIndex[1] = -1;

View File

@ -133,17 +133,17 @@ typedef struct SFileBlockDumpInfo {
bool allDumped;
} SFileBlockDumpInfo;
typedef struct SUidOrderCheckInfo {
typedef struct STableUidList {
uint64_t* tableUidList; // access table uid list in uid ascending order list
int32_t currentIndex; // index in table uid list
} SUidOrderCheckInfo;
} STableUidList;
typedef struct SReaderStatus {
bool loadFromFile; // check file stage
bool composedDataBlock; // the returned data block is a composed block or not
SHashObj* pTableMap; // SHash<STableBlockScanInfo>
STableBlockScanInfo** pTableIter; // table iterator used in building in-memory buffer data blocks.
SUidOrderCheckInfo uidCheckInfo; // check all table in uid order
STableUidList uidList; // check tables in uid order, to avoid the repeatly load of blocks in STT.
SFileBlockDumpInfo fBlockDumpInfo;
SDFileSet* pCurrentFileset; // current opened file set
SBlockData fileBlockData;
@ -317,9 +317,19 @@ static void* getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index) {
return (*pBucket) + (index % pBuf->numPerBucket) * sizeof(STableBlockScanInfo);
}
static int32_t uidComparFunc(const void* p1, const void* p2) {
uint64_t pu1 = *(uint64_t*)p1;
uint64_t pu2 = *(uint64_t*)p2;
if (pu1 == pu2) {
return 0;
} else {
return (pu1 < pu2) ? -1 : 1;
}
}
// NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model
static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList,
int32_t numOfTables) {
STableUidList *pUidList, int32_t numOfTables) {
// allocate buffer in order to load data blocks from file
// todo use simple hash instead, optimize the memory consumption
SHashObj* pTableMap =
@ -331,9 +341,18 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf
int64_t st = taosGetTimestampUs();
initBlockScanInfoBuf(pBuf, numOfTables);
pUidList->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t));
if (pUidList->tableUidList == NULL) {
return NULL;
}
pUidList->currentIndex = 0;
for (int32_t j = 0; j < numOfTables; ++j) {
STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(pBuf, j);
pScanInfo->uid = idList[j].uid;
pUidList->tableUidList[j] = idList[j].uid;
if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
int64_t skey = pTsdbReader->window.skey;
pScanInfo->lastKey = (skey > INT64_MIN) ? (skey - 1) : skey;
@ -347,6 +366,8 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf
pScanInfo->lastKey, pTsdbReader->idStr);
}
taosSort(pUidList->tableUidList, numOfTables, sizeof(uint64_t), uidComparFunc);
pTsdbReader->cost.createScanInfoList = (taosGetTimestampUs() - st) / 1000.0;
tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, elapsed time:%.2f ms, %s", pTsdbReader, numOfTables,
(sizeof(STableBlockScanInfo) * numOfTables) / 1024.0, pTsdbReader->cost.createScanInfoList,
@ -451,8 +472,11 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb
if (pLReader->pInfo == NULL) {
// here we ignore the first column, which is always be the primary timestamp column
SBlockLoadSuppInfo* pInfo = &pReader->suppInfo;
int32_t numOfStt = pReader->pTsdb->pVnode->config.sttTrigger;
pLReader->pInfo =
tCreateLastBlockLoadInfo(pReader->pSchema, &pReader->suppInfo.colId[1], pReader->suppInfo.numOfCols - 1);
tCreateLastBlockLoadInfo(pReader->pSchema, &pInfo->colId[1], pInfo->numOfCols - 1, numOfStt);
if (pLReader->pInfo == NULL) {
tsdbDebug("init fileset iterator failed, code:%s %s", tstrerror(terrno), pReader->idStr);
return terrno;
@ -621,7 +645,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
goto _end;
}
setColumnIdSlotList(&pReader->suppInfo, pCond->colList, pCond->pSlotList, pCond->numOfCols);
setColumnIdSlotList(pSup, pCond->colList, pCond->pSlotList, pCond->numOfCols);
*ppReader = pReader;
return code;
@ -633,57 +657,74 @@ _end:
}
static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) {
// SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx));
int64_t st = taosGetTimestampUs();
// int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx);
LRUHandle* handle = NULL;
int32_t code = tsdbCacheGetBlockIdx(pFileReader->pTsdb->biCache, pFileReader, &handle);
if (code != TSDB_CODE_SUCCESS || handle == NULL) {
goto _end;
}
int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
SArray* aBlockIdx = (SArray*)taosLRUCacheValue(pFileReader->pTsdb->biCache, handle);
size_t num = taosArrayGetSize(aBlockIdx);
if (num == 0) {
tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
// taosArrayDestroy(aBlockIdx);
return TSDB_CODE_SUCCESS;
}
// todo binary search to the start position
int64_t et1 = taosGetTimestampUs();
SBlockIdx* pBlockIdx = NULL;
for (int32_t i = 0; i < num; ++i) {
STableUidList* pList = &pReader->status.uidList;
int32_t i = 0, j = 0;
while(i < num && j < numOfTables) {
pBlockIdx = (SBlockIdx*)taosArrayGet(aBlockIdx, i);
// uid check
if (pBlockIdx->suid != pReader->suid) {
i += 1;
continue;
}
// this block belongs to a table that is not queried.
void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t));
if (p == NULL) {
if (pBlockIdx->uid < pList->tableUidList[j]) {
i += 1;
continue;
}
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
if (pScanInfo->pBlockList == NULL) {
pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex));
if (pBlockIdx->uid > pList->tableUidList[j]) {
j += 1;
continue;
}
taosArrayPush(pIndexList, pBlockIdx);
if (pBlockIdx->uid == pList->tableUidList[j]) {
// the scan info of this queried table must already exist in the map
void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t));
if (p == NULL) {
tsdbError("failed to locate the tableBlockScan Info in hashmap, uid:%"PRIu64", %s", pBlockIdx->uid, pReader->idStr);
return TSDB_CODE_APP_ERROR;
}
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
if (pScanInfo->pBlockList == NULL) {
pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex));
}
taosArrayPush(pIndexList, pBlockIdx);
i += 1;
j += 1;
}
}
int64_t et2 = taosGetTimestampUs();
tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s",
(int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, pReader->idStr);
tsdbDebug("load block index for %d/%d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s",
numOfTables, (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0,
pReader->idStr);
pReader->cost.headFileLoadTime += (et1 - st) / 1000.0;
_end:
// taosArrayDestroy(aBlockIdx);
tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
return code;
}
@ -2725,74 +2766,15 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
return TSDB_CODE_SUCCESS;
}
static int32_t uidComparFunc(const void* p1, const void* p2) {
uint64_t pu1 = *(uint64_t*)p1;
uint64_t pu2 = *(uint64_t*)p2;
if (pu1 == pu2) {
return 0;
} else {
return (pu1 < pu2) ? -1 : 1;
}
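// rewind the ordered uid list and point the table iterator at the first queried table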
static void resetTableListIndex(SReaderStatus *pStatus) {
STableUidList* pList = &pStatus->uidList;
pList->currentIndex = 0;
uint64_t uid = pList->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
}
static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) {
int32_t index = 0;
int32_t total = taosHashGetSize(pStatus->pTableMap);
void* p = taosHashIterate(pStatus->pTableMap, NULL);
while (p != NULL) {
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid;
p = taosHashIterate(pStatus->pTableMap, p);
}
taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc);
}
static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
int32_t total = taosHashGetSize(pStatus->pTableMap);
if (total == 0) {
return TSDB_CODE_SUCCESS;
}
if (pOrderCheckInfo->tableUidList == NULL) {
pOrderCheckInfo->currentIndex = 0;
pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t));
if (pOrderCheckInfo->tableUidList == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);
uint64_t uid = pOrderCheckInfo->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
} else {
if (pStatus->pTableIter == NULL) { // it is the last block of a new file
pOrderCheckInfo->currentIndex = 0;
uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
// the tableMap has already updated
if (pStatus->pTableIter == NULL) {
void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t));
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pOrderCheckInfo->tableUidList = p;
extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);
uid = pOrderCheckInfo->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
}
}
}
return TSDB_CODE_SUCCESS;
}
static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus* pStatus) {
static bool moveToNextTable(STableUidList* pOrderedCheckInfo, SReaderStatus* pStatus) {
pOrderedCheckInfo->currentIndex += 1;
if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) {
pStatus->pTableIter = NULL;
@ -2807,11 +2789,10 @@ static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus
static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
STableUidList* pUidList = &pStatus->uidList;
SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo;
int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pReader);
if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) {
return code;
if (taosHashGetSize(pStatus->pTableMap) == 0) {
return TSDB_CODE_SUCCESS;
}
SSDataBlock* pResBlock = pReader->pResBlock;
@ -2822,7 +2803,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
if (!hasVal) {
bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
@ -2857,7 +2838,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
}
// current table is exhausted, let's try the next table
bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
@ -2961,14 +2942,15 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
STableUidList* pUidList = &pStatus->uidList;
while (1) {
if (pStatus->pTableIter == NULL) {
pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
if (pStatus->pTableIter == NULL) {
return TSDB_CODE_SUCCESS;
}
}
// if (pStatus->pTableIter == NULL) {
// pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
// if (pStatus->pTableIter == NULL) {
// return TSDB_CODE_SUCCESS;
// }
// }
STableBlockScanInfo** pBlockScanInfo = pStatus->pTableIter;
initMemDataIterator(*pBlockScanInfo, pReader);
@ -2983,9 +2965,9 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
return TSDB_CODE_SUCCESS;
}
// current table is exhausted, let's try the next table
pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
if (pStatus->pTableIter == NULL) {
// current table is exhausted, let's try the next table
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
}
@ -3006,7 +2988,6 @@ void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
SBlockNumber num = {0};
int32_t code = moveToNextFile(pReader, &num);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -3024,6 +3005,7 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
} else { // no block data, only last block exists
tBlockDataReset(&pReader->status.fileBlockData);
resetDataBlockIterator(pBlockIter, pReader->order);
resetTableListIndex(&pReader->status);
}
// set the correct start position according to the query time window
@ -3064,6 +3046,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
// this file does not have data files, let's start checking the last block file if it exists
if (pBlockIter->numOfBlocks == 0) {
resetTableListIndex(&pReader->status);
goto _begin;
}
}
@ -3095,6 +3078,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
// data blocks in current file are exhausted, let's try the next file now
tBlockDataReset(&pReader->status.fileBlockData);
resetDataBlockIterator(pBlockIter, pReader->order);
resetTableListIndex(&pReader->status);
goto _begin;
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
@ -3106,6 +3090,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
// this file does not have blocks, let's start checking the last block file
if (pBlockIter->numOfBlocks == 0) {
resetTableListIndex(&pReader->status);
goto _begin;
}
}
@ -3768,11 +3753,15 @@ int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t n
ASSERT(size >= num);
taosHashClear(pReader->status.pTableMap);
STableUidList* pUidList = &pReader->status.uidList;
pUidList->currentIndex = 0;
STableKeyInfo* pList = (STableKeyInfo*)pTableList;
for (int32_t i = 0; i < num; ++i) {
STableBlockScanInfo* pInfo = getPosInBlockInfoBuf(&pReader->blockInfoBuf, i);
pInfo->uid = pList[i].uid;
pUidList->tableUidList[i] = pList[i].uid;
taosHashPut(pReader->status.pTableMap, &pInfo->uid, sizeof(uint64_t), &pInfo, POINTER_BYTES);
}
@ -3796,18 +3785,24 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
static int32_t doOpenReaderImpl(STsdbReader* pReader) {
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(&pStatus->blockIter, pReader->order);
// no data in files, let's try buffer in memory
if (pReader->status.fileIter.numOfFiles == 0) {
pReader->status.loadFromFile = false;
return TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
if (pStatus->fileIter.numOfFiles == 0) {
pStatus->loadFromFile = false;
} else {
return initForFirstBlockInFile(pReader, pBlockIter);
code = initForFirstBlockInFile(pReader, pBlockIter);
}
if (!pStatus->loadFromFile) {
resetTableListIndex(pStatus);
}
return code;
}
// ====================================== EXPOSED APIs ======================================
@ -3894,7 +3889,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
}
STsdbReader* p = (pReader->innerReader[0] != NULL) ? pReader->innerReader[0] : pReader;
pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, numOfTables);
pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, &pReader->status.uidList, numOfTables);
if (pReader->status.pTableMap == NULL) {
*ppReader = NULL;
code = TSDB_CODE_OUT_OF_MEMORY;
@ -3919,12 +3914,14 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
// we need only one row
pPrevReader->capacity = 1;
pPrevReader->status.pTableMap = pReader->status.pTableMap;
pPrevReader->status.uidList = pReader->status.uidList;
pPrevReader->pSchema = pReader->pSchema;
pPrevReader->pMemSchema = pReader->pMemSchema;
pPrevReader->pReadSnap = pReader->pReadSnap;
pNextReader->capacity = 1;
pNextReader->status.pTableMap = pReader->status.pTableMap;
pNextReader->status.uidList = pReader->status.uidList;
pNextReader->pSchema = pReader->pSchema;
pNextReader->pMemSchema = pReader->pMemSchema;
pNextReader->pReadSnap = pReader->pReadSnap;
@ -3956,6 +3953,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
STsdbReader* p = pReader->innerReader[0];
p->status.pTableMap = NULL;
p->status.uidList.tableUidList = NULL;
p->pReadSnap = NULL;
p->pSchema = NULL;
p->pMemSchema = NULL;
@ -3963,6 +3961,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
p = pReader->innerReader[1];
p->status.pTableMap = NULL;
p->status.uidList.tableUidList = NULL;
p->pReadSnap = NULL;
p->pSchema = NULL;
p->pMemSchema = NULL;
@ -4010,7 +4009,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap, pReader->idStr);
taosMemoryFree(pReader->status.uidCheckInfo.tableUidList);
taosMemoryFree(pReader->status.uidList.tableUidList);
SIOCostSummary* pCost = &pReader->cost;
SFilesetIter* pFilesetIter = &pReader->status.fileIter;
@ -4064,6 +4063,7 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) {
if (pBlock->info.rows > 0) {
return true;
} else {
resetTableListIndex(&pReader->status);
buildBlockFromBufferSequentially(pReader);
return pBlock->info.rows > 0;
}
@ -4074,11 +4074,11 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) {
}
bool tsdbNextDataBlock(STsdbReader* pReader) {
if (isEmptyQueryTimeWindow(&pReader->window)) {
if (isEmptyQueryTimeWindow(&pReader->window) || pReader->step == EXTERNAL_ROWS_NEXT) {
return false;
}
if (pReader->innerReader[0] != NULL && pReader->step == 0) {
if (pReader->step == 0 && pReader->innerReader[0] != NULL) {
bool ret = doTsdbNextDataBlock(pReader->innerReader[0]);
pReader->step = EXTERNAL_ROWS_PREV;
if (ret) {
@ -4103,7 +4103,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
return ret;
}
if (pReader->innerReader[1] != NULL && pReader->step == EXTERNAL_ROWS_MAIN) {
if (pReader->step == EXTERNAL_ROWS_MAIN && pReader->innerReader[1] != NULL) {
// prepare for the next row scan
int32_t code = doOpenReaderImpl(pReader->innerReader[1]);
resetAllDataBlockScanInfo(pReader->innerReader[1]->status.pTableMap, pReader->window.ekey);
@ -4111,10 +4111,10 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
return code;
}
bool ret1 = doTsdbNextDataBlock(pReader->innerReader[1]);
ret = doTsdbNextDataBlock(pReader->innerReader[1]);
pReader->step = EXTERNAL_ROWS_NEXT;
if (ret1) {
return ret1;
if (ret) {
return ret;
}
}
@ -4277,12 +4277,14 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
return TSDB_CODE_SUCCESS;
}
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
pReader->order = pCond->order;
pReader->type = TIMEWINDOW_RANGE_CONTAINED;
pReader->status.loadFromFile = true;
pReader->status.pTableIter = NULL;
pStatus->loadFromFile = true;
pStatus->pTableIter = NULL;
pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows);
// allocate buffer in order to load data blocks from file
@ -4291,19 +4293,21 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
tsdbDataFReaderClose(&pReader->pFileReader);
int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
int32_t numOfTables = taosHashGetSize(pStatus->pTableMap);
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(pBlockIter, pReader->order);
resetTableListIndex(&pReader->status);
int64_t ts = ASCENDING_TRAVERSE(pReader->order) ? pReader->window.skey - 1 : pReader->window.ekey + 1;
resetAllDataBlockScanInfo(pReader->status.pTableMap, ts);
resetAllDataBlockScanInfo(pStatus->pTableMap, ts);
int32_t code = 0;
// no data in files, let's try buffer in memory
if (pReader->status.fileIter.numOfFiles == 0) {
pReader->status.loadFromFile = false;
if (pStatus->fileIter.numOfFiles == 0) {
pStatus->loadFromFile = false;
resetTableListIndex(pStatus);
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
if (code != TSDB_CODE_SUCCESS) {
@ -4381,7 +4385,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
hasNext = blockIteratorNext(&pStatus->blockIter, pReader->idStr);
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
if ((code != TSDB_CODE_SUCCESS) || (pStatus->loadFromFile == false)) {
break;
}

View File

@ -47,15 +47,21 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd
taosMemoryFree(pFD);
goto _exit;
}
if (taosStatFile(path, &pFD->szFile, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD->pBuf);
taosCloseFile(&pFD->pFD);
taosMemoryFree(pFD);
goto _exit;
// do not check the file size when reading data files.
if (flag != TD_FILE_READ) {
if (taosStatFile(path, &pFD->szFile, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD->pBuf);
taosCloseFile(&pFD->pFD);
taosMemoryFree(pFD);
goto _exit;
}
ASSERT(pFD->szFile % szPage == 0);
pFD->szFile = pFD->szFile / szPage;
}
ASSERT(pFD->szFile % szPage == 0);
pFD->szFile = pFD->szFile / szPage;
*ppFD = pFD;
_exit:
@ -103,7 +109,7 @@ _exit:
static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno) {
int32_t code = 0;
ASSERT(pgno <= pFD->szFile);
// ASSERT(pgno <= pFD->szFile);
// seek
int64_t offset = PAGE_OFFSET(pgno, pFD->szPage);
@ -175,7 +181,7 @@ static int32_t tsdbReadFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t
int32_t szPgCont = PAGE_CONTENT_SIZE(pFD->szPage);
int64_t bOffset = fOffset % pFD->szPage;
ASSERT(pgno && pgno <= pFD->szFile);
// ASSERT(pgno && pgno <= pFD->szFile);
ASSERT(bOffset < szPgCont);
while (n < size) {

View File

@ -1054,9 +1054,7 @@ static int32_t tsdbMergeSkyline(SArray *pSkyline1, SArray *pSkyline2, SArray *pS
i2++;
}
taosArraySetSize(pSkyline, TARRAY_ELEM_IDX(pSkyline, pItem));
_exit:
pSkyline->size = TARRAY_ELEM_IDX(pSkyline, pItem);
return code;
}

View File

@ -239,7 +239,7 @@ _err:
if (pVnode->pWal) walClose(pVnode->pWal);
if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
if (pVnode->pSma) smaClose(pVnode->pSma);
if (pVnode->pMeta) metaClose(pVnode->pMeta);
if (pVnode->pMeta) metaClose(&pVnode->pMeta);
if (pVnode->pPool) vnodeCloseBufPool(pVnode);
tsem_destroy(&(pVnode->canCommit));
@ -263,7 +263,7 @@ void vnodeClose(SVnode *pVnode) {
tqClose(pVnode->pTq);
if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
smaClose(pVnode->pSma);
metaClose(pVnode->pMeta);
if (pVnode->pMeta) metaClose(&pVnode->pMeta);
vnodeCloseBufPool(pVnode);
tsem_post(&pVnode->canCommit);

View File

@ -283,7 +283,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
metaRemoveSmaFromDb(pMeta, indexUid2);
tDestroyTSma(&tSma);
metaClose(pMeta);
metaClose(&pMeta);
}
#endif
@ -577,9 +577,9 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
tDestroyTSma(&tSma);
tfsClose(pTsdb->pTfs);
tsdbClose(pTsdb);
metaClose(pMeta);
metaClose(&pMeta);
}
#endif
#pragma GCC diagnostic pop
#pragma GCC diagnostic pop

View File

@ -300,7 +300,7 @@ typedef struct SCtgSubRes {
ctgSubTaskCbFp fp;
} SCtgSubRes;
typedef struct SCtgTask {
struct SCtgTask {
CTG_TASK_TYPE type;
int32_t taskId;
SCtgJob* pJob;
@ -313,7 +313,7 @@ typedef struct SCtgTask {
SRWLatch lock;
SArray* pParents;
SCtgSubRes subRes;
} SCtgTask;
};
typedef struct SCtgTaskReq {
SCtgTask* pTask;

View File

@ -1707,9 +1707,7 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum);
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
SName* pName = ctgGetFetchName(pCtx->pNames, pFetch);
@ -1844,7 +1842,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
ctgAddFetch(&pCtx->pFetchs, i, -1, &fetchIdx, baseResIdx, 0);
baseResIdx += taosArrayGetSize(pReq->pTables);
taosArraySetSize(pCtx->pResList, baseResIdx);
int32_t inc = baseResIdx - taosArrayGetSize(pCtx->pResList);
for(int32_t j = 0; j < inc; ++j) {
taosArrayPush(pCtx->pResList, &(SMetaRes){0});
}
}
}
@ -1856,8 +1857,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum);
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);

View File

@ -2480,20 +2480,20 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
ctgDebug("db %s not in cache", dbFName);
for (int32_t i = 0; i < tbNum; ++i) {
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaData){0});
}
return TSDB_CODE_SUCCESS;
}
for (int32_t i = 0; i < tbNum; ++i) {
SName *pName = taosArrayGet(pList, i);
pName = taosArrayGet(pList, i);
pCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname));
if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
continue;
}
@ -2503,7 +2503,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
ctgDebug("tb %s meta not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
continue;
}
@ -2576,7 +2576,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", pTableMeta->suid, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);
continue;
@ -2588,7 +2588,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
taosHashRelease(dbCache->stbCache, stName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);
continue;
@ -2603,7 +2603,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
taosHashRelease(dbCache->tbCache, pCache);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);
@ -2619,7 +2619,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
nctx.tbInfo.suid);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);

View File

@ -44,6 +44,8 @@
typedef struct SGroupResInfo {
int32_t index;
SArray* pRows; // SArray<SResKeyPos>
char* pBuf;
bool freeItem;
} SGroupResInfo;
typedef struct SResultRow {
@ -115,10 +117,6 @@ struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t i
static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos, bool forUpdate) {
SFilePage* bufPage = (SFilePage*)getBufPage(pBuf, pos->pageId);
if (NULL == bufPage) {
return NULL;
}
if (forUpdate) {
setBufPageDirty(bufPage, true);
}

View File

@ -43,11 +43,12 @@ typedef struct tagFilterAssist {
SArray* cInfoList;
} tagFilterAssist;
static int32_t removeInvalidTable(SArray* uids, SHashObj* tags);
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SHashObj* tags);
static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond);
static int32_t removeInvalidUid(SArray* uids, SHashObj* tags);
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* pTagCond);
static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond);
static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond,
SNode* pTagIndexCond, STableListInfo* pListInfo);
SNode* pTagIndexCond, STableListInfo* pListInfo, const char* idstr);
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* metaHandle);
static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }
@ -88,15 +89,20 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
return rowSize;
}
static void freeEx(void* p) {
taosMemoryFree(*(void**)p);
}
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) {
assert(pGroupResInfo != NULL);
for (int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
SResKeyPos* pRes = taosArrayGetP(pGroupResInfo->pRows, i);
taosMemoryFree(pRes);
taosMemoryFreeClear(pGroupResInfo->pBuf);
if (pGroupResInfo->freeItem) {
// taosArrayDestroy(pGroupResInfo->pRows);
taosArrayDestroyEx(pGroupResInfo->pRows, freeEx);
pGroupResInfo->freeItem = false;
pGroupResInfo->pRows = NULL;
} else {
pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows);
}
pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows);
pGroupResInfo->index = 0;
}
@ -126,26 +132,40 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in
}
// extract the result rows information from the hash map
void* pData = NULL;
pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES);
int32_t size = tSimpleHashGetSize(pHashmap);
void* pData = NULL;
pGroupResInfo->pRows = taosArrayInit(size, POINTER_BYTES);
// todo avoid repeated malloc memory
size_t keyLen = 0;
int32_t iter = 0;
int32_t bufLen = 0, offset = 0;
// todo move away and record this during create window
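// first pass: sum up the key sizes so that all SResKeyPos entries can be carved out of a single pBuf allocation instead of one malloc per result row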
while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
/*void* key = */tSimpleHashGetKey(pData, &keyLen);
bufLen += keyLen + sizeof(SResultRowPosition);
}
pGroupResInfo->pBuf = taosMemoryMalloc(bufLen);
iter = 0;
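// second pass: lay the SResKeyPos entries out back to back inside pBuf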
while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
void* key = tSimpleHashGetKey(pData, &keyLen);
SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition));
SResKeyPos* p = (SResKeyPos*) (pGroupResInfo->pBuf + offset);
p->groupId = *(uint64_t*)key;
p->pos = *(SResultRowPosition*)pData;
memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t));
taosArrayPush(pGroupResInfo->pRows, &p);
offset += keyLen + sizeof(struct SResultRowPosition);
}
if (order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC) {
__compar_fn_t fn = (order == TSDB_ORDER_ASC) ? resultrowComparAsc : resultrowComparDesc;
int32_t size = POINTER_BYTES;
size = POINTER_BYTES;
taosSort(pGroupResInfo->pRows->pData, taosArrayGetSize(pGroupResInfo->pRows), size, fn);
}
@ -158,6 +178,7 @@ void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree);
}
pGroupResInfo->freeItem = true;
pGroupResInfo->pRows = pArrayList;
pGroupResInfo->index = 0;
ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo));
@ -172,7 +193,6 @@ bool hasRemainResults(SGroupResInfo* pGroupResInfo) {
}
int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
assert(pGroupResInfo != NULL);
if (pGroupResInfo->pRows == 0) {
return 0;
}
@ -392,150 +412,6 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara
return TSDB_CODE_SUCCESS;
}
static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* uidList, SNode* pTagCond) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
SHashObj* tags = NULL;
SScalarParam output = {0};
tagFilterAssist ctx = {0};
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
if (ctx.colHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
if (ctx.cInfoList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx);
pResBlock = createDataBlock();
if (pResBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
SColumnInfoData colInfo = {0};
colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
// int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
int32_t filter = optimizeTbnameInCond(metaHandle, suid, uidList, pTagCond, tags);
if (filter == -1) {
code = metaGetTableTags(metaHandle, suid, uidList, tags);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid);
terrno = code;
goto end;
}
}
if (suid != 0) {
removeInvalidTable(uidList, tags);
}
int32_t rows = taosArrayGetSize(uidList);
if (rows == 0) {
goto end;
}
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
goto end;
}
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
} else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
if (tag == NULL) {
continue;
}
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
} else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
}
}
}
pResBlock->info.rows = rows;
// int64_t st1 = taosGetTimestampUs();
// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
code = createResultData(&type, rows, &output);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
qError("failed to create result, reason:%s", tstrerror(code));
goto end;
}
code = scalarCalculate(pTagCond, pBlockList, &output);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to calculate scalar, reason:%s", tstrerror(code));
terrno = code;
goto end;
}
// int64_t st2 = taosGetTimestampUs();
// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
end:
taosHashCleanup(tags);
taosHashCleanup(ctx.colHash);
taosArrayDestroy(ctx.cInfoList);
blockDataDestroy(pResBlock);
taosArrayDestroy(pBlockList);
return output.columnData;
}
static void releaseColInfoData(void* pCol) {
if (pCol) {
SColumnInfoData* col = (SColumnInfoData*)pCol;
@ -544,12 +420,17 @@ static void releaseColInfoData(void* pCol) {
}
}
void freeItem(void* p) {
STUidTagInfo *pInfo = p;
if (pInfo->pTagVal != NULL) {
taosMemoryFree(pInfo->pTagVal);
}
}
int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
SHashObj* tags = NULL;
SArray* uidList = NULL;
void* keyBuf = NULL;
SArray* groupData = NULL;
@ -578,89 +459,26 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
REPLACE_NODE(pNode);
}
pResBlock = createDataBlock();
if (pResBlock == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
SColumnInfoData colInfo = {0};
colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
uidList = taosArrayInit(rows, sizeof(uint64_t));
SArray* pUidTagList = taosArrayInit(8, sizeof(STUidTagInfo));
for (int32_t i = 0; i < rows; ++i) {
STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
taosArrayPush(uidList, &pkeyInfo->uid);
STUidTagInfo info = {.uid = pkeyInfo->uid};
taosArrayPush(pUidTagList, &info);
}
// int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags);
code = metaGetTableTags(metaHandle, pTableListInfo->suid, pUidTagList);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
// int64_t stt1 = taosGetTimestampUs();
// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
int32_t numOfTables = taosArrayGetSize(pUidTagList);
pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle);
if (pResBlock == NULL) {
code = terrno;
goto end;
}
// int64_t st = taosGetTimestampUs();
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
} else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
ASSERT(tag);
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
} else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
}
}
}
pResBlock->info.rows = rows;
// int64_t st1 = taosGetTimestampUs();
// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
@ -768,12 +586,11 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
end:
taosMemoryFreeClear(keyBuf);
taosHashCleanup(tags);
taosHashCleanup(ctx.colHash);
taosArrayDestroy(ctx.cInfoList);
blockDataDestroy(pResBlock);
taosArrayDestroy(pBlockList);
taosArrayDestroy(uidList);
taosArrayDestroyEx(pUidTagList, freeItem);
taosArrayDestroyP(groupData, releaseColInfoData);
return code;
}
@ -842,17 +659,26 @@ static int tableUidCompare(const void* a, const void* b) {
return u1 < u2 ? -1 : 1;
}
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* cond, SHashObj* tags) {
int32_t ret = -1;
if (nodeType(cond) == QUERY_NODE_OPERATOR) {
ret = optimizeTbnameInCondImpl(metaHandle, suid, list, cond);
if (ret != -1) {
metaGetTableTagsByUids(metaHandle, suid, list, tags);
removeInvalidTable(list, tags);
}
static int32_t filterTableInfoCompare(const void* a, const void* b) {
STUidTagInfo* p1 = (STUidTagInfo*) a;
STUidTagInfo* p2 = (STUidTagInfo*) b;
if (p1->uid == p2->uid) {
return 0;
}
if (nodeType(cond) != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) {
return p1->uid < p2->uid? -1:1;
}
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* pRes, SNode* cond) {
int32_t ret = -1;
int32_t ntype = nodeType(cond);
if (ntype == QUERY_NODE_OPERATOR) {
ret = optimizeTbnameInCondImpl(metaHandle, pRes, cond);
}
if (ntype != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) {
return ret;
}
@ -868,36 +694,40 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list
SListCell* cell = pList->pHead;
for (int i = 0; i < len; i++) {
if (cell == NULL) break;
if (optimizeTbnameInCondImpl(metaHandle, suid, list, cell->pNode) == 0) {
if (optimizeTbnameInCondImpl(metaHandle, pRes, cell->pNode) == 0) {
hasTbnameCond = true;
break;
}
cell = cell->pNext;
}
taosArraySort(list, tableUidCompare);
taosArrayRemoveDuplicate(list, tableUidCompare, NULL);
taosArraySort(pRes, filterTableInfoCompare);
taosArrayRemoveDuplicate(pRes, filterTableInfoCompare, NULL);
if (hasTbnameCond) {
ret = metaGetTableTagsByUids(metaHandle, suid, list, tags);
removeInvalidTable(list, tags);
ret = metaGetTableTagsByUids(metaHandle, suid, pRes);
// removeInvalidUid(pRes, tags);
}
return ret;
}
#if 0
/*
* handle invalid uid
*/
static int32_t removeInvalidTable(SArray* uids, SHashObj* tags) {
if (taosArrayGetSize(uids) <= 0) return 0;
static int32_t removeInvalidUid(SArray* uids, SHashObj* tags) {
int32_t size = taosArrayGetSize(uids);
if (size <= 0) {
return 0;
}
SArray* validUid = taosArrayInit(taosArrayGetSize(uids), sizeof(int64_t));
SArray* validUid = taosArrayInit(size, sizeof(STUidTagInfo));
for (int32_t i = 0; i < taosArrayGetSize(uids); i++) {
int64_t* uid = taosArrayGet(uids, i);
if (taosHashGet(tags, uid, sizeof(int64_t)) != NULL) {
taosArrayPush(validUid, uid);
for (int32_t i = 0; i < size; i++) {
STUidTagInfo* p = taosArrayGet(uids, i);
if (taosHashGet(tags, &p->uid, sizeof(int64_t)) != NULL) {
taosArrayPush(validUid, p);
}
}
@ -906,7 +736,10 @@ static int32_t removeInvalidTable(SArray* uids, SHashObj* tags) {
return 0;
}
static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond) {
#endif
// only return uids that are not already contained in pExistedUidList
static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond) {
if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) {
return -1;
}
@ -929,12 +762,13 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray*
SArray* pTbList = getTableNameList(pList);
int32_t numOfTables = taosArrayGetSize(pTbList);
SHashObj* uHash = NULL;
size_t listlen = taosArrayGetSize(list); // len > 0 means there already have uids
if (listlen > 0) {
uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < listlen; i++) {
int64_t* uid = taosArrayGet(list, i);
taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
size_t numOfExisted = taosArrayGetSize(pExistedUidList); // a non-empty list means uids have already been collected
if (numOfExisted > 0) {
uHash = taosHashInit(numOfExisted / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < numOfExisted; i++) {
STUidTagInfo* pTInfo = taosArrayGet(pExistedUidList, i);
taosHashPut(uHash, &pTInfo->uid, sizeof(uint64_t), &i, sizeof(i));
}
}
@ -946,7 +780,8 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray*
ETableType tbType = TSDB_TABLE_MAX;
if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) {
if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) {
taosArrayPush(list, &uid);
STUidTagInfo s = {.uid = uid, .name = name, .pTagVal = NULL};
taosArrayPush(pExistedUidList, &s);
}
} else {
taosArrayDestroy(pTbList);
@ -983,129 +818,299 @@ static void genTagFilterDigest(const SNode* pTagCond, T_MD5_CTX* pContext) {
taosMemoryFree(payload);
}
static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* res, SNode* pTagCond, void* metaHandle) {
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* metaHandle) {
SSDataBlock* pResBlock = createDataBlock();
if (pResBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
for (int32_t i = 0; i < taosArrayGetSize(pColList); ++i) {
SColumnInfoData colInfo = {0};
colInfo.info = *(SColumnInfo*)taosArrayGet(pColList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
int32_t code = blockDataEnsureCapacity(pResBlock, numOfTables);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}
pResBlock->info.rows = numOfTables;
int32_t numOfCols = taosArrayGetSize(pResBlock->pDataBlock);
for (int32_t i = 0; i < numOfTables; i++) {
STUidTagInfo* p1 = taosArrayGet(pUidTagList, i);
for (int32_t j = 0; j < numOfCols; j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
if (p1->name != NULL) {
STR_TO_VARSTR(str, p1->name);
} else { // the table name was not retrieved during filtering
metaGetTableNameByUid(metaHandle, p1->uid, str);
}
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
} else {
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
if (p1->pTagVal == NULL) {
colDataAppendNULL(pColInfo, i);
continue;  // no tag value for this table; skip the tag extraction below
}
const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppendNULL(pColInfo, i);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
char* tmp = alloca(tagVal.nData + VARSTR_HEADER_SIZE + 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter varch:%s", tmp + 2);
#endif
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
} else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
}
}
}
return pResBlock;
}
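// keep only the uids whose tag-filter result evaluated to true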
static void doSetQualifiedUid(SArray* pUidList, const SArray* pUidTagList, bool* pResultList) {
taosArrayClear(pUidList);
int32_t numOfTables = taosArrayGetSize(pUidTagList);
for(int32_t i = 0; i < numOfTables; ++i) {
uint64_t uid = ((STUidTagInfo*)taosArrayGet(pUidTagList, i))->uid;
qDebug("tagfilter get uid:%" PRId64 ", res:%d", uid, pResultList[i]);
if (pResultList[i]) {
taosArrayPush(pUidList, &uid);
}
}
}
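// seed the uid/tag list with uids collected earlier (e.g. by the tag index filter) so that their tag values are loaded as well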
static void copyExistedUids(SArray* pUidTagList, const SArray* pUidList) {
int32_t numOfExisted = taosArrayGetSize(pUidList);
if (numOfExisted == 0) {
return;
}
for(int32_t i = 0; i < numOfExisted; ++i) {
uint64_t* uid = taosArrayGet(pUidList, i);
STUidTagInfo info = {.uid = *uid};
taosArrayPush(pUidTagList, &info);
}
}
static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SNode* pTagCond, void* metaHandle) {
if (pTagCond == NULL) {
return TSDB_CODE_SUCCESS;
}
terrno = TDB_CODE_SUCCESS;
SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond);
if (terrno != TDB_CODE_SUCCESS) {
colDataDestroy(pColInfoData);
taosMemoryFreeClear(pColInfoData);
taosArrayDestroy(res);
qError("failed to getColInfoResult, code: %s", tstrerror(terrno));
return terrno;
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
SScalarParam output = {0};
tagFilterAssist ctx = {0};
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
if (ctx.colHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
int32_t i = 0;
int32_t len = taosArrayGetSize(res);
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
if (ctx.cInfoList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
if (pColInfoData != NULL) {
bool* pResult = (bool*)pColInfoData->pData;
SArray* p = taosArrayInit(taosArrayGetSize(res), sizeof(uint64_t));
nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx);
while (i < len && pColInfoData) {
int64_t* uid = taosArrayGet(res, i);
qDebug("tagfilter get uid:%" PRId64 ", res:%d", *uid, pResult[i]);
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
if (pResult[i]) {
taosArrayPush(p, uid);
}
i += 1;
// int64_t stt = taosGetTimestampUs();
SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo));
copyExistedUids(pUidTagList, pUidList);
int32_t filter = optimizeTbnameInCond(metaHandle, pListInfo->suid, pUidTagList, pTagCond);
if (filter == 0) { // the tbname-in condition has been applied; no further tag filtering is needed, just collect the uids and return
taosArrayClear(pUidList);
int32_t numOfRows = taosArrayGetSize(pUidTagList);
taosArrayEnsureCap(pUidList, numOfRows);
for(int32_t i = 0; i < numOfRows; ++i) {
STUidTagInfo* pInfo = taosArrayGet(pUidTagList, i);
taosArrayPush(pUidList, &pInfo->uid);
}
taosArraySwap(res, p);
taosArrayDestroy(p);
terrno = 0;
goto end;
} else {
// here we retrieve all tags from the vnode table-meta store
code = metaGetTableTags(metaHandle, pListInfo->suid, pUidTagList);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), pListInfo->suid);
terrno = code;
goto end;
}
}
colDataDestroy(pColInfoData);
taosMemoryFreeClear(pColInfoData);
int32_t numOfTables = taosArrayGetSize(pUidTagList);
if (numOfTables == 0) {
goto end;
}
return TSDB_CODE_SUCCESS;
pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle);
if (pResBlock == NULL) {
code = terrno;
goto end;
}
// int64_t st1 = taosGetTimestampUs();
// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
code = createResultData(&type, numOfTables, &output);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
goto end;
}
code = scalarCalculate(pTagCond, pBlockList, &output);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to calculate scalar, reason:%s", tstrerror(code));
terrno = code;
goto end;
}
doSetQualifiedUid(pUidList, pUidTagList, (bool*) output.columnData->pData);
end:
taosHashCleanup(ctx.colHash);
taosArrayDestroy(ctx.cInfoList);
blockDataDestroy(pResBlock);
taosArrayDestroy(pBlockList);
taosArrayDestroyEx(pUidTagList, freeItem);
colDataDestroy(output.columnData);
taosMemoryFreeClear(output.columnData);
return code;
}
int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
STableListInfo* pListInfo) {
STableListInfo* pListInfo, const char* idstr) {
int32_t code = TSDB_CODE_SUCCESS;
size_t numOfTables = 0;
uint64_t tableUid = pScanNode->uid;
pListInfo->suid = pScanNode->suid;
SArray* res = taosArrayInit(8, sizeof(uint64_t));
SArray* pUidList = taosArrayInit(8, sizeof(uint64_t));
if (pScanNode->tableType != TSDB_SUPER_TABLE) {
if (metaIsTableExist(metaHandle, tableUid)) {
taosArrayPush(res, &tableUid);
if (metaIsTableExist(metaHandle, pScanNode->uid)) {
taosArrayPush(pUidList, &pScanNode->uid);
}
code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle);
code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
// try to retrieve the result from meta cache
T_MD5_CTX context = {0};
genTagFilterDigest(pTagCond, &context);
bool acquired = false;
metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), res, &acquired);
if (acquired) {
qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(res));
goto _end;
}
} else {
T_MD5_CTX context = {0};
if (!pTagCond) { // no tag condition exists, let's fetch all tables of this super table
if (tsTagFilterCache) {
// try to retrieve the result from meta cache
genTagFilterDigest(pTagCond, &context);
bool acquired = false;
metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pUidList, &acquired);
if (acquired) {
qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(pUidList));
goto _end;
}
}
if (!pTagCond) { // no tag filter condition exists, let's fetch all tables of this super table
ASSERT(pTagIndexCond == NULL);
vnodeGetCtbIdList(pVnode, pScanNode->suid, res);
vnodeGetCtbIdList(pVnode, pScanNode->suid, pUidList);
} else {
// failed to find the result in the cache, let's try to calculate the results
if (pTagIndexCond) {
void* pIndex = tsdbGetIvtIdx(metaHandle);
SIndexMetaArg metaArg = {
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = pIndex, .suid = pScanNode->uid};
SIdxFltStatus status = SFLT_NOT_INDEX;
code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {
qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status);
if (code != 0 || status == SFLT_NOT_INDEX) { // the error log is temporarily disabled for performance's sake
// qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
code = TDB_CODE_SUCCESS;
}
}
}
code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle);
code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle);
if (code != TSDB_CODE_SUCCESS) {
return code;
goto _end;
}
// let's add the filter results into meta-cache
numOfTables = taosArrayGetSize(res);
size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t);
char* pPayload = taosMemoryMalloc(size);
*(int32_t*)pPayload = numOfTables;
numOfTables = taosArrayGetSize(pUidList);
if (numOfTables > 0) {
memcpy(pPayload + sizeof(int32_t), taosArrayGet(res, 0), numOfTables * sizeof(uint64_t));
if (tsTagFilterCache) {
size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t);
char* pPayload = taosMemoryMalloc(size);
if (numOfTables > 0) {
*(int32_t*)pPayload = numOfTables;
memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t));
}
metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
taosMemoryFree(pPayload);
}
metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
}
_end:
numOfTables = taosArrayGetSize(res);
numOfTables = taosArrayGetSize(pUidList);
for (int i = 0; i < numOfTables; i++) {
STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(pUidList, i), .groupId = 0};
void* p = taosArrayPush(pListInfo->pTableList, &info);
if (p == NULL) {
taosArrayDestroy(res);
taosArrayDestroy(pUidList);
return TSDB_CODE_OUT_OF_MEMORY;
}
qTrace("tagfilter get uid:%" PRIu64 "", info.uid);
qTrace("tagfilter get uid:%" PRIu64", %s", info.uid, idstr);
}
taosArrayDestroy(res);
taosArrayDestroy(pUidList);
return code;
}
@ -1506,6 +1511,8 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
SFuncExecEnv env = {0};
pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId;
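// cache these per-function properties once, so the per-row paths (applyAggFunctionOnPartialTuples, doUpdateNumOfRows) avoid repeated fmIs* lookups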
pCtx->isPseudoFunc = fmIsWindowPseudoColumnFunc(pCtx->functionId);
pCtx->isNotNullFunc = fmIsNotNullOutputFunc(pCtx->functionId);
if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) {
bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId);
@ -1513,7 +1520,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
tstrncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN);
pCtx->udfName = strdup(udfName);
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
@ -2002,7 +2009,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return TSDB_CODE_INVALID_PARA;
}
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, idStr);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to getTableList, code: %s", tstrerror(code));
return code;

View File

@ -176,10 +176,12 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
// set the number of rows in current disk page
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
memset((char*) pResultRow, 0, interBufSize);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
*currentPageId = pageId;
*currentPageId = pageId;
pData->num += interBufSize;
return pResultRow;
}
@ -363,7 +365,7 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC
pCtx[k].input.colDataSMAIsSet = false;
}
if (fmIsWindowPseudoColumnFunc(pCtx[k].functionId)) {
if (pCtx[k].isPseudoFunc) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[k]);
char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
@ -817,7 +819,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
continue;
}
if (fmIsWindowPseudoColumnFunc(pCtx[i].functionId)) {
if (pCtx[i].isPseudoFunc) {
continue;
}
@ -1063,7 +1065,7 @@ static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t nu
pRow->numOfRows = pResInfo->numOfRes;
}
if (fmIsNotNullOutputFunc(pCtx[j].functionId)) {
if (pCtx[j].isNotNullFunc) {
returnNotNull = true;
}
}
@ -1187,9 +1189,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
}
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
ASSERT(pBlock->info.rows > 0);
releaseBufPage(pBuf, page);
break;
if (pBlock->info.rows <= 0 || pRow->numOfRows > pBlock->info.capacity) {
qError("error in copy data to ssdatablock, existed rows in block:%d, rows in pRow:%d, capacity:%d, %s",
pBlock->info.rows, pRow->numOfRows, pBlock->info.capacity, GET_TASKID(pTaskInfo));
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR);
} else {
break;
}
}
pGroupResInfo->index += 1;
@ -1730,12 +1738,12 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
// _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
pAggSup->pResultRowHashTable = tSimpleHashInit(100, hashFn);
pAggSup->pResultRowHashTable = tSimpleHashInit(100, taosFastHash);
if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -1820,6 +1828,10 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
if (pCtx[i].udfName != NULL) {
taosMemoryFree(pCtx[i].udfName);
}
}
taosMemoryFreeClear(pCtx);
@ -1950,6 +1962,22 @@ void destroyAggOperatorInfo(void* param) {
taosMemoryFreeClear(param);
}
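// assemble the id string "TID:0x<taskId> QID:0x<queryId>" by hand via tintToHex, replacing the previous snprintf-based formatting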
static char* buildTaskId(uint64_t taskId, uint64_t queryId) {
char* p = taosMemoryMalloc(64);
int32_t offset = 6;
memcpy(p, "TID:0x", offset);
offset += tintToHex(taskId, &p[offset]);
memcpy(&p[offset], " QID:0x", 7);
offset += 7;
offset += tintToHex(queryId, &p[offset]);
p[offset] = 0;
return p;
}
static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPTR_EXEC_MODEL model, char* dbFName) {
SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
if (pTaskInfo == NULL) {
@ -1960,16 +1988,13 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTaskInfo->schemaInfo.dbname = strdup(dbFName);
pTaskInfo->id.queryId = queryId;
pTaskInfo->execModel = model;
pTaskInfo->pTableInfoList = tableListCreate();
pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo));
pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES);
char* p = taosMemoryCalloc(1, 128);
snprintf(p, 128, "TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, queryId);
pTaskInfo->id.str = p;
pTaskInfo->id.queryId = queryId;
pTaskInfo->id.str = buildTaskId(taskId, queryId);
return pTaskInfo;
}
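
A minimal standalone sketch (not part of this change) of the string buildTaskId() is expected to produce; the task and query ids below are made up for illustration, and the output should match the snprintf formatting it replaces:

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  uint64_t taskId = 0x1fULL, queryId = 0xabcULL;  // illustrative ids only
  char expected[64];
  // the formatting that buildTaskId() re-implements without snprintf for speed
  snprintf(expected, sizeof(expected), "TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, queryId);
  assert(strcmp(expected, "TID:0x1f QID:0xabc") == 0);
  printf("%s\n", expected);
  return 0;
}
```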

View File

@ -2297,13 +2297,14 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (pHandle->initTableReader) {
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
pTSInfo->base.dataReader = NULL;
code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, pTSInfo->pResBlock,
&pTSInfo->base.dataReader, NULL);
if (code != 0) {
terrno = code;
destroyTableScanOperatorInfo(pTableScanOp);
goto _error;
}
pTaskInfo->streamInfo.lastStatus.uid = -1;
// code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, pTSInfo->pResBlock,
// &pTSInfo->base.dataReader, NULL);
// if (code != 0) {
// terrno = code;
// destroyTableScanOperatorInfo(pTableScanOp);
// goto _error;
// }
}
if (pHandle->initTqReader) {

View File

@ -2001,7 +2001,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
}
pInfo->readHandle = *readHandle;
pInfo->uid = pBlockScanNode->suid;
pInfo->uid = (pBlockScanNode->suid != 0)? pBlockScanNode->suid:pBlockScanNode->uid;
int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols);

View File

@ -847,6 +847,7 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_
if (newPos == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
newPos->groupId = groupId;
newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset};
*(int64_t*)newPos->key = ts;
@ -854,6 +855,7 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_
if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
taosMemoryFree(newPos);
}
return TSDB_CODE_SUCCESS;
}
@ -2567,6 +2569,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
while (1) {
@ -2877,6 +2880,8 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t nums) {
for (int i = 0; i < nums; i++) {
pDummy[i].functionId = pCtx[i].functionId;
pDummy[i].isNotNullFunc = pCtx[i].isNotNullFunc;
pDummy[i].isPseudoFunc = pCtx[i].isPseudoFunc;
}
}
@ -3404,9 +3409,11 @@ static void copyDeleteWindowInfo(SArray* pResWins, SSHashObj* pStDeleted) {
}
}
// the allocated memory comes from the outer function.
void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) {
pGroupResInfo->pRows = pArrayList;
pGroupResInfo->index = 0;
pGroupResInfo->pBuf = NULL;
}
void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroupResInfo* pGroupResInfo,
@ -3417,8 +3424,7 @@ void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroup
blockDataCleanup(pBlock);
if (!hasRemainResults(pGroupResInfo)) {
taosArrayDestroy(pGroupResInfo->pRows);
pGroupResInfo->pRows = NULL;
cleanupGroupResInfo(pGroupResInfo);
return;
}
@ -4753,6 +4759,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
@ -4812,6 +4819,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
taosArraySort(pUpdated, resultrowComparAsc);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
taosHashCleanup(pUpdatedMap);

View File

@ -123,8 +123,6 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
}
static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket* pBucket) {
ASSERT(pPage != NULL && pNode != NULL && pBucket->size >= 1);
int32_t len = GET_LHASH_NODE_LEN(pNode);
char* p = (char*)pNode + len;
@ -301,8 +299,6 @@ void* tHashCleanup(SLHashObj* pHashObj) {
}
int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data, size_t size) {
ASSERT(pHashObj != NULL && key != NULL);
if (pHashObj->bits == 0) {
SLHashBucket* pBucket = pHashObj->pBucket[0];
doAddToBucket(pHashObj, pBucket, 0, key, keyLen, data, size);
@ -363,14 +359,12 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data
if (v1 != splitBucketId) { // place it into the new bucket
ASSERT(v1 == newBucketId);
// printf("move key:%d to 0x%x bucket, remain items:%d\n", *(int32_t*)k, v1, pBucket->size - 1);
SLHashBucket* pNewBucket = pHashObj->pBucket[newBucketId];
doAddToBucket(pHashObj, pNewBucket, newBucketId, (void*)GET_LHASH_NODE_KEY(pNode), pNode->keyLen,
GET_LHASH_NODE_KEY(pNode), pNode->dataLen);
doRemoveFromBucket(p, pNode, pBucket);
} else {
// printf("check key:%d, located into: %d, skip it\n", *(int*) k, v1);
int32_t nodeSize = GET_LHASH_NODE_LEN(pStart);
pStart += nodeSize;
}
@ -385,7 +379,6 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data
}
char* tHashGet(SLHashObj* pHashObj, const void* key, size_t keyLen) {
ASSERT(pHashObj != NULL && key != NULL && keyLen > 0);
int32_t hashv = pHashObj->hashFn(key, keyLen);
int32_t bucketId = doGetBucketIdFromHashVal(hashv, pHashObj->bits);

View File

@ -789,17 +789,46 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0) ? 1 : 0;
if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = GET_FLOAT_VAL(&pRes->v);
colDataAppend(pCol, currentRow, (const char*)&v, pEntryInfo->isNullRes);
// NOTE: do not change this; it is written this way for performance reasons
if (!pEntryInfo->isNullRes) {
switch (pCol->info.type) {
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BIGINT:
((int64_t*)pCol->pData)[currentRow] = pRes->v;
// colDataAppendInt64(pCol, currentRow, &pRes->v);
break;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
colDataAppendInt32(pCol, currentRow, (int32_t*)&pRes->v);
break;
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_SMALLINT:
colDataAppendInt16(pCol, currentRow, (int16_t*)&pRes->v);
break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
colDataAppendInt8(pCol, currentRow, (int8_t*)&pRes->v);
break;
case TSDB_DATA_TYPE_DOUBLE:
colDataAppendDouble(pCol, currentRow, (double*)&pRes->v);
break;
case TSDB_DATA_TYPE_FLOAT: {
float v = GET_FLOAT_VAL(&pRes->v);
colDataAppendFloat(pCol, currentRow, &v);
break;
}
}
} else {
colDataAppend(pCol, currentRow, (const char*)&pRes->v, pEntryInfo->isNullRes);
colDataAppendNULL(pCol, currentRow);
}
if (pEntryInfo->numOfRes > 0) {
code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow);
} else {
code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow);
if (pCtx->subsidiaries.num > 0) {
if (pEntryInfo->numOfRes > 0) {
code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow);
} else {
code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow);
}
}
return code;

View File

@ -61,6 +61,8 @@
} \
}
#define GET_INVOKE_INTRINSIC_THRESHOLD(_bits, _bytes) ((_bits) / ((_bytes) << 3u))
static void calculateRounds(int32_t numOfRows, int32_t bytes, int32_t* remainder, int32_t* rounds, int32_t* width) {
const int32_t bitWidth = 256;
@ -700,8 +702,29 @@ static void doExtractVal(SColumnInfoData* pCol, int32_t i, int32_t end, SqlFunct
}
}
static int32_t saveRelatedTuple(SqlFunctionCtx* pCtx, SInputColumnInfoData* pInput, int32_t index, void* tval) {
SColumnInfoData* pCol = pInput->pData[0];
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SMinmaxResInfo* pBuf = GET_ROWCELL_INTERBUF(pResInfo);
int32_t code = 0;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
return code;
}
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) {
int32_t numOfElems = 0;
int32_t code = TSDB_CODE_SUCCESS;
SInputColumnInfoData* pInput = &pCtx->input;
SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0];
@ -719,6 +742,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
// data in current data block are qualified to the query
if (pInput->colDataSMAIsSet) {
numOfElems = pInput->numOfRows - pAgg->numOfNull;
if (numOfElems == 0) {
goto _over;
@ -734,15 +758,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
pBuf->v = GET_INT64_VAL(tval);
}
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
} else {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
int64_t prev = 0;
@ -751,15 +767,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
int64_t val = GET_INT64_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_INT64_VAL(&pBuf->v) = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t prev = 0;
@ -768,15 +776,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
uint64_t val = GET_UINT64_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_UINT64_VAL(&pBuf->v) = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
double prev = 0;
@ -785,15 +785,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
double val = GET_DOUBLE_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_DOUBLE_VAL(&pBuf->v) = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
float prev = 0;
@ -802,16 +794,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
float val = GET_DOUBLE_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_FLOAT_VAL(&pBuf->v) = val;
}
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
}
}
@ -825,14 +808,51 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
int32_t numOfRows = pInput->numOfRows;
int32_t end = start + numOfRows;
if (pCol->hasNull || numOfRows < 32 || pCtx->subsidiaries.num > 0) {
// clang-format off
int32_t threshold[] = {
//NULL, BOOL, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, VARCHAR, TIMESTAMP, NCHAR,
INT32_MAX, INT32_MAX, 32, 16, 8, 4, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX,
// UTINYINT,USMALLINT, UINT, UBIGINT, JSON, VARBINARY, DECIMAL, BLOB, MEDIUMBLOB, BINARY
32, 16, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX,
};
// clang-format on
if (pCol->hasNull || numOfRows < threshold[pCol->info.type] || pCtx->subsidiaries.num > 0) {
int32_t i = findFirstValPosition(pCol, start, numOfRows);
if ((i < end) && (!pBuf->assign)) {
memcpy(&pBuf->v, pCol->pData + (pCol->info.bytes * i), pCol->info.bytes);
char* p = pCol->pData + pCol->info.bytes * i;
switch (type) {
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BIGINT:
pBuf->v = *(int64_t*)p;
break;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
pBuf->v = *(int32_t*)p;
break;
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_SMALLINT:
pBuf->v = *(int16_t*)p;
break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
pBuf->v = *(int8_t*)p;
break;
case TSDB_DATA_TYPE_FLOAT: {
*(float*)&pBuf->v = *(float*)p;
break;
}
default:
memcpy(&pBuf->v, p, pCol->info.bytes);
break;
}
if (pCtx->subsidiaries.num > 0) {
int32_t code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -849,7 +869,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
} else {
numOfElems = numOfRows;
switch (pCol->info.type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
handleInt8Col(pCol->pData, start, numOfRows, pBuf, isMinFunc, true);
@ -898,13 +918,14 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
_over:
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) {
int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pBuf->nullTupleSaved = true;
}
*nElems = numOfElems;
return TSDB_CODE_SUCCESS;
return code;
}
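
A minimal standalone sketch (not part of this change): for the fixed-width numeric types, the thresholds in the table above appear to match GET_INVOKE_INTRINSIC_THRESHOLD(256, bytes) added earlier in this file, i.e. the number of elements of that width that fit into one 256-bit SIMD register:

```c
#include <assert.h>

#define GET_INVOKE_INTRINSIC_THRESHOLD(_bits, _bytes) ((_bits) / ((_bytes) << 3u))

int main(void) {
  assert(GET_INVOKE_INTRINSIC_THRESHOLD(256, 1) == 32);  // TINYINT / UTINYINT
  assert(GET_INVOKE_INTRINSIC_THRESHOLD(256, 2) == 16);  // SMALLINT / USMALLINT
  assert(GET_INVOKE_INTRINSIC_THRESHOLD(256, 4) == 8);   // INT / UINT / FLOAT
  assert(GET_INVOKE_INTRINSIC_THRESHOLD(256, 8) == 4);   // BIGINT / UBIGINT / DOUBLE
  return 0;
}
```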

View File

@ -347,8 +347,6 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT
* in memory bucket, we only accept data array list
*/
int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
ASSERT(pBucket != NULL && data != NULL && size > 0);
int32_t count = 0;
int32_t bytes = pBucket->bytes;
for (int32_t i = 0; i < size; ++i) {

View File

@ -812,7 +812,7 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
block->info.hasVarCol = IS_VAR_DATA_TYPE(udfCol->colMeta.type);
block->pDataBlock = taosArrayInit(1, sizeof(SColumnInfoData));
taosArraySetSize(block->pDataBlock, 1);
taosArrayPush(block->pDataBlock, &(SColumnInfoData){0});
SColumnInfoData *col = taosArrayGet(block->pDataBlock, 0);
SUdfColumnMeta *meta = &udfCol->colMeta;
col->info.precision = meta->precision;

View File

@ -1467,6 +1467,15 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
SNode* pTable = pSelect->pFromTable;
if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) ||
(TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc || pSelect->hasIndefiniteRowsFunc) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}

View File

@ -1057,7 +1057,7 @@ static FORCE_INLINE int32_t filterAddColFieldFromField(SFilterInfo *info, SFilte
int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *fid) {
if (node == NULL) {
fltError("empty node");
fltDebug("empty node");
FLT_ERR_RET(TSDB_CODE_APP_ERROR);
}

View File

@ -17,11 +17,10 @@
int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData) {
int32_t blockNum = pReq->blockNum;
SArray* pArray = taosArrayInit(blockNum, sizeof(SSDataBlock));
SArray* pArray = taosArrayInit_s(blockNum, sizeof(SSDataBlock), blockNum);
if (pArray == NULL) {
return -1;
}
taosArraySetSize(pArray, blockNum);
ASSERT(pReq->blockNum == taosArrayGetSize(pReq->data));
ASSERT(pReq->blockNum == taosArrayGetSize(pReq->dataLen));
@ -49,7 +48,7 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock
if (pArray == NULL) {
return -1;
}
taosArraySetSize(pArray, 1);
taosArrayPush(pArray, &(SSDataBlock){0});
SRetrieveTableRsp* pRetrieve = pReq->pRetrieve;
SSDataBlock* pDataBlock = taosArrayGet(pArray, 0);
blockDecode(pDataBlock, pRetrieve->data);

View File

@ -759,28 +759,30 @@ int walMetaDeserialize(SWal* pWal, const char* bytes) {
// deserialize
SArray* pArray = pWal->fileInfoSet;
taosArrayEnsureCap(pArray, sz);
SWalFileInfo* pData = pArray->pData;
for (int i = 0; i < sz; i++) {
cJSON* pInfoJson = cJSON_GetArrayItem(pFiles, i);
pInfoJson = cJSON_GetArrayItem(pFiles, i);
if (!pInfoJson) goto _err;
SWalFileInfo* pInfo = &pData[i];
SWalFileInfo info = {0};
pField = cJSON_GetObjectItem(pInfoJson, "firstVer");
if (!pField) goto _err;
pInfo->firstVer = atoll(cJSON_GetStringValue(pField));
info.firstVer = atoll(cJSON_GetStringValue(pField));
pField = cJSON_GetObjectItem(pInfoJson, "lastVer");
if (!pField) goto _err;
pInfo->lastVer = atoll(cJSON_GetStringValue(pField));
info.lastVer = atoll(cJSON_GetStringValue(pField));
pField = cJSON_GetObjectItem(pInfoJson, "createTs");
if (!pField) goto _err;
pInfo->createTs = atoll(cJSON_GetStringValue(pField));
info.createTs = atoll(cJSON_GetStringValue(pField));
pField = cJSON_GetObjectItem(pInfoJson, "closeTs");
if (!pField) goto _err;
pInfo->closeTs = atoll(cJSON_GetStringValue(pField));
info.closeTs = atoll(cJSON_GetStringValue(pField));
pField = cJSON_GetObjectItem(pInfoJson, "fileSize");
if (!pField) goto _err;
pInfo->fileSize = atoll(cJSON_GetStringValue(pField));
info.fileSize = atoll(cJSON_GetStringValue(pField));
taosArrayPush(pArray, &info);
}
taosArraySetSize(pArray, sz);
pWal->fileInfoSet = pArray;
pWal->writeCur = sz - 1;
cJSON_Delete(pRoot);

View File

@ -87,8 +87,6 @@ int32_t walApplyVer(SWal *pWal, int64_t ver) {
}
int32_t walCommit(SWal *pWal, int64_t ver) {
ASSERT(pWal->vers.commitVer >= pWal->vers.snapshotVer);
ASSERT(pWal->vers.commitVer <= pWal->vers.lastVer);
if (ver < pWal->vers.commitVer) {
return 0;
}
@ -122,41 +120,37 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
// delete files in descending order
int fileSetSize = taosArrayGetSize(pWal->fileInfoSet);
for (int i = fileSetSize - 1; i >= pWal->writeCur + 1; i--) {
walBuildLogName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr);
for (int i = pWal->writeCur + 1; i < fileSetSize; i++) {
SWalFileInfo* pInfo = taosArrayPop(pWal->fileInfoSet);
walBuildLogName(pWal, pInfo->firstVer, fnameStr);
wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr);
taosRemoveFile(fnameStr);
walBuildIdxName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr);
walBuildIdxName(pWal, pInfo->firstVer, fnameStr);
wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr);
taosRemoveFile(fnameStr);
}
// pop from fileInfoSet
taosArraySetSize(pWal->fileInfoSet, pWal->writeCur + 1);
}
walBuildIdxName(pWal, walGetCurFileFirstVer(pWal), fnameStr);
TdFilePtr pIdxFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND);
if (pIdxFile == NULL) {
ASSERT(0);
taosThreadMutexUnlock(&pWal->mutex);
return -1;
}
int64_t idxOff = walGetVerIdxOffset(pWal, ver);
code = taosLSeekFile(pIdxFile, idxOff, SEEK_SET);
if (code < 0) {
ASSERT(0);
taosThreadMutexUnlock(&pWal->mutex);
return -1;
}
// read idx file and get log file pos
SWalIdxEntry entry;
if (taosReadFile(pIdxFile, &entry, sizeof(SWalIdxEntry)) != sizeof(SWalIdxEntry)) {
ASSERT(0);
taosThreadMutexUnlock(&pWal->mutex);
return -1;
}
ASSERT(entry.ver == ver);
walBuildLogName(pWal, walGetCurFileFirstVer(pWal), fnameStr);
TdFilePtr pLogFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND);
@ -176,24 +170,19 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
}
// validate offset
SWalCkHead head;
ASSERT(taosValidFile(pLogFile));
int64_t size = taosReadFile(pLogFile, &head, sizeof(SWalCkHead));
int64_t size = taosReadFile(pLogFile, &head, sizeof(SWalCkHead));
if (size != sizeof(SWalCkHead)) {
ASSERT(0);
taosThreadMutexUnlock(&pWal->mutex);
return -1;
}
code = walValidHeadCksum(&head);
ASSERT(code == 0);
if (code != 0) {
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
ASSERT(0);
taosThreadMutexUnlock(&pWal->mutex);
return -1;
}
if (head.head.version != ver) {
ASSERT(0);
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
taosThreadMutexUnlock(&pWal->mutex);
return -1;
@ -202,22 +191,17 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
// truncate old files
code = taosFtruncateFile(pLogFile, entry.offset);
if (code < 0) {
ASSERT(0);
terrno = TAOS_SYSTEM_ERROR(errno);
taosThreadMutexUnlock(&pWal->mutex);
return -1;
}
code = taosFtruncateFile(pIdxFile, idxOff);
if (code < 0) {
ASSERT(0);
terrno = TAOS_SYSTEM_ERROR(errno);
taosThreadMutexUnlock(&pWal->mutex);
return -1;
}
pWal->vers.lastVer = ver - 1;
if (pWal->vers.lastVer < pWal->vers.firstVer) {
ASSERT(pWal->vers.lastVer == pWal->vers.firstVer - 1);
}
((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->lastVer = ver - 1;
((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize = entry.offset;
taosCloseFile(&pIdxFile);
@ -386,7 +370,7 @@ int32_t walEndSnapshot(SWal *pWal) {
walBuildIdxName(pWal, pInfo->firstVer, fnameStr);
wDebug("vgId:%d, wal remove file %s", pWal->cfg.vgId, fnameStr);
if (taosRemoveFile(fnameStr) < 0 && errno != ENOENT) {
ASSERT(0);
goto END;
}
}
taosArrayClear(pWal->toDeleteFiles);
@ -441,7 +425,6 @@ int32_t walRollImpl(SWal *pWal) {
pWal->pIdxFile = pIdxFile;
pWal->pLogFile = pLogFile;
pWal->writeCur = taosArrayGetSize(pWal->fileInfoSet) - 1;
ASSERT(pWal->writeCur >= 0);
pWal->lastRollSeq = walGetSeq();
@ -458,9 +441,7 @@ END:
static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) {
SWalIdxEntry entry = {.ver = ver, .offset = offset};
SWalFileInfo *pFileInfo = walGetCurFileInfo(pWal);
ASSERT(pFileInfo != NULL);
ASSERT(pFileInfo->firstVer >= 0);
int64_t idxOffset = (entry.ver - pFileInfo->firstVer) * sizeof(SWalIdxEntry);
int64_t idxOffset = (entry.ver - pFileInfo->firstVer) * sizeof(SWalIdxEntry);
wDebug("vgId:%d, write index, index:%" PRId64 ", offset:%" PRId64 ", at %" PRId64, pWal->cfg.vgId, ver, offset,
idxOffset);
@ -476,7 +457,6 @@ static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) {
if (endOffset < 0) {
wFatal("vgId:%d, failed to seek end of idxfile due to %s. ver:%" PRId64 "", pWal->cfg.vgId, strerror(errno), ver);
}
ASSERT(endOffset == idxOffset + sizeof(SWalIdxEntry) && "Offset of idx entries misaligned");
return 0;
}
@ -486,9 +466,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy
int64_t offset = walGetCurFileOffset(pWal);
SWalFileInfo *pFileInfo = walGetCurFileInfo(pWal);
ASSERT(pFileInfo != NULL);
ASSERT(pFileInfo->firstVer != -1);
pWal->writeHead.head.version = index;
pWal->writeHead.head.bodyLen = bodyLen;
pWal->writeHead.head.msgType = msgType;
@ -525,7 +503,6 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy
// set status
if (pWal->vers.firstVer == -1) {
ASSERT(index == 0);
pWal->vers.firstVer = 0;
}
pWal->vers.lastVer = index;
@ -541,7 +518,6 @@ END:
wFatal("vgId:%d, failed to ftruncate logfile to offset:%" PRId64 " during recovery due to %s", pWal->cfg.vgId,
offset, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
ASSERT(0 && "failed to recover from error");
}
int64_t idxOffset = (index - pFileInfo->firstVer) * sizeof(SWalIdxEntry);
@ -549,7 +525,6 @@ END:
wFatal("vgId:%d, failed to ftruncate idxfile to offset:%" PRId64 "during recovery due to %s", pWal->cfg.vgId,
idxOffset, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
ASSERT(0 && "failed to recover from error");
}
return -1;
}
@ -576,8 +551,6 @@ int64_t walAppendLog(SWal *pWal, int64_t index, tmsg_t msgType, SWalSyncInfo syn
}
}
ASSERT(pWal->pLogFile != NULL && pWal->pIdxFile != NULL && pWal->writeCur >= 0);
if (walWriteImpl(pWal, index, msgType, syncMeta, body, bodyLen) < 0) {
taosThreadMutexUnlock(&pWal->mutex);
return -1;
@ -614,8 +587,6 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SWalSync
}
}
ASSERT(pWal->pIdxFile != NULL && pWal->pLogFile != NULL && pWal->writeCur >= 0);
if (walWriteImpl(pWal, index, msgType, syncMeta, body, bodyLen) < 0) {
taosThreadMutexUnlock(&pWal->mutex);
return -1;

View File

@ -48,6 +48,26 @@ SArray* taosArrayInit(size_t size, size_t elemSize) {
return pArray;
}
SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize) {
SArray* pArray = taosMemoryMalloc(sizeof(SArray));
if (pArray == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pArray->size = initialSize;
pArray->pData = taosMemoryCalloc(initialSize, elemSize);
if (pArray->pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pArray);
return NULL;
}
pArray->capacity = initialSize;
pArray->elemSize = elemSize;
return pArray;
}
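
A minimal usage sketch (not part of this change), assuming tarray.h is available: taosArrayInit_s replaces the removed taosArrayInit + taosArraySetSize pattern with a presized, zero-filled array:

```c
#include "tarray.h"   // TDengine util header (assumed available)

static int32_t demoPresizedArray(void) {
  // new: 4 zero-filled int32_t slots exist immediately
  SArray* pArray = taosArrayInit_s(4, sizeof(int32_t), 4);
  if (pArray == NULL) {
    return -1;  // terrno is set to TSDB_CODE_OUT_OF_MEMORY
  }
  int32_t v = *(int32_t*)taosArrayGet(pArray, 3);  // valid: size == 4, v == 0
  // the old equivalent needed the now-removed taosArraySetSize():
  //   SArray* pOld = taosArrayInit(4, sizeof(int32_t));
  //   taosArraySetSize(pOld, 4);
  taosArrayDestroy(pArray);
  return v;  // 0
}
```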
static int32_t taosArrayResize(SArray* pArray) {
assert(pArray->size >= pArray->capacity);
@ -223,7 +243,13 @@ void* taosArrayGetP(const SArray* pArray, size_t index) {
return *p;
}
void* taosArrayGetLast(const SArray* pArray) { return TARRAY_GET_ELEM(pArray, pArray->size - 1); }
void* taosArrayGetLast(const SArray* pArray) {
if (pArray->size == 0) {
return NULL;
}
return TARRAY_GET_ELEM(pArray, pArray->size - 1);
}
size_t taosArrayGetSize(const SArray* pArray) {
if (pArray == NULL) {
@ -232,11 +258,6 @@ size_t taosArrayGetSize(const SArray* pArray) {
return pArray->size;
}
void taosArraySetSize(SArray* pArray, size_t size) {
assert(size <= pArray->capacity);
pArray->size = size;
}
void* taosArrayInsert(SArray* pArray, size_t index, void* pData) {
if (pArray == NULL || pData == NULL) {
return NULL;

View File

@ -228,6 +228,7 @@ int32_t tsCompressINTImp(const char *const input, const int32_t nelements, char
}
int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, char *const output, const char type) {
int32_t word_length = 0;
switch (type) {
case TSDB_DATA_TYPE_BIGINT:
@ -263,6 +264,177 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha
int32_t _pos = 0;
int64_t prev_value = 0;
#if __AVX2__
while (1) {
if (_pos == nelements) break;
uint64_t w = 0;
memcpy(&w, ip, LONG_BYTES);
char selector = (char)(w & INT64MASK(4)); // selector = 4
char bit = bit_per_integer[(int32_t)selector]; // bit = 3
int32_t elems = selector_to_elems[(int32_t)selector];
// Optimize performance by removing the per-element switch operation.
int32_t v = 4;
uint64_t zigzag_value = 0;
uint64_t mask = INT64MASK(bit);
switch (type) {
case TSDB_DATA_TYPE_BIGINT: {
int64_t* p = (int64_t*) output;
int32_t gRemainder = (nelements - _pos);
int32_t num = (gRemainder > elems)? elems:gRemainder;
int32_t batch = num >> 2;
int32_t remain = num & 0x03;
if (selector == 0 || selector == 1) {
if (tsAVX2Enable && tsSIMDBuiltins) {
for (int32_t i = 0; i < batch; ++i) {
__m256i prev = _mm256_set1_epi64x(prev_value);
_mm256_storeu_si256((__m256i *)&p[_pos], prev);
_pos += 4;
}
for (int32_t i = 0; i < remain; ++i) {
p[_pos++] = prev_value;
}
} else {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
p[_pos++] = prev_value;
v += bit;
}
}
} else {
if (tsAVX2Enable && tsSIMDBuiltins) {
__m256i base = _mm256_set1_epi64x(w);
__m256i maskVal = _mm256_set1_epi64x(mask);
__m256i shiftBits = _mm256_set_epi64x(bit * 3 + 4, bit * 2 + 4, bit + 4, 4);
__m256i inc = _mm256_set1_epi64x(bit << 2);
for (int32_t i = 0; i < batch; ++i) {
__m256i after = _mm256_srlv_epi64(base, shiftBits);
__m256i zigzagVal = _mm256_and_si256(after, maskVal);
// ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1)))
__m256i signmask = _mm256_and_si256(_mm256_set1_epi64x(1), zigzagVal);
signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask);
// get the four zigzag values here
__m256i delta = _mm256_xor_si256(_mm256_srli_epi64(zigzagVal, 1), signmask);
// calculate the cumulative sum (prefix sum) for each number
// decode[0] = prev_value + final[0]
// decode[1] = decode[0] + final[1] -----> prev_value + final[0] + final[1]
// decode[2] = decode[1] + final[2] -----> prev_value + final[0] + final[1] + final[2]
// decode[3] = decode[2] + final[3] -----> prev_value + final[0] + final[1] + final[2] + final[3]
// 1, 2, 3, 4
//+ 0, 1, 2, 3
// 1, 3, 5, 7
// shift and add for the first round
__m128i prev = _mm_set1_epi64x(prev_value);
delta = _mm256_add_epi64(delta, _mm256_slli_si256(delta, 8));
_mm256_storeu_si256((__m256i *)&p[_pos], delta);
// 1, 3, 5, 7
//+ 0, 0, 1, 3
// 1, 3, 6, 10
// shift and add operation for the second round
__m128i firstPart = _mm_loadu_si128((__m128i *)&p[_pos]);
__m128i secPart = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), firstPart);
firstPart = _mm_add_epi64(firstPart, prev);
secPart = _mm_add_epi64(secPart, prev);
// save it in the memory
_mm_storeu_si128((__m128i *)&p[_pos], firstPart);
_mm_storeu_si128((__m128i *)&p[_pos + 2], secPart);
shiftBits = _mm256_add_epi64(shiftBits, inc);
prev_value = p[_pos + 3];
_pos += 4;
}
// handle the remaining values
for (int32_t i = 0; i < remain; i++) {
zigzag_value = ((w >> (v + (batch * bit))) & mask);
prev_value += ZIGZAG_DECODE(int64_t, zigzag_value);
p[_pos++] = prev_value;
v += bit;
}
} else {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
zigzag_value = ((w >> v) & mask);
prev_value += ZIGZAG_DECODE(int64_t, zigzag_value);
p[_pos++] = prev_value;
v += bit;
}
}
}
} break;
case TSDB_DATA_TYPE_INT: {
int32_t* p = (int32_t*) output;
if (selector == 0 || selector == 1) {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
p[_pos++] = (int32_t)prev_value;
}
} else {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
zigzag_value = ((w >> v) & mask);
prev_value += ZIGZAG_DECODE(int64_t, zigzag_value);
p[_pos++] = (int32_t)prev_value;
v += bit;
}
}
} break;
case TSDB_DATA_TYPE_SMALLINT: {
int16_t* p = (int16_t*) output;
if (selector == 0 || selector == 1) {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
p[_pos++] = (int16_t)prev_value;
}
} else {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
zigzag_value = ((w >> v) & mask);
prev_value += ZIGZAG_DECODE(int64_t, zigzag_value);
p[_pos++] = (int16_t)prev_value;
v += bit;
}
}
} break;
case TSDB_DATA_TYPE_TINYINT: {
int8_t *p = (int8_t *)output;
if (selector == 0 || selector == 1) {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
p[_pos++] = (int8_t)prev_value;
}
} else {
for (int32_t i = 0; i < elems && count < nelements; i++, count++) {
zigzag_value = ((w >> v) & mask);
prev_value += ZIGZAG_DECODE(int64_t, zigzag_value);
p[_pos++] = (int8_t)prev_value;
v += bit;
}
}
} break;
}
ip += LONG_BYTES;
}
return nelements * word_length;
#else
while (1) {
if (count == nelements) break;
@ -273,94 +445,47 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha
char bit = bit_per_integer[(int32_t)selector]; // bit = 3
int32_t elems = selector_to_elems[(int32_t)selector];
// Optimize performance by removing the per-element switch operation.
int32_t v = 0;
uint64_t zigzag_value;
for (int32_t i = 0; i < elems; i++) {
uint64_t zigzag_value;
switch (type) {
case TSDB_DATA_TYPE_BIGINT: {
for (int32_t i = 0; i < elems; i++) {
if (selector == 0 || selector == 1) {
zigzag_value = 0;
} else {
zigzag_value = ((w >> (4 + v)) & INT64MASK(bit));
}
int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value);
int64_t curr_value = diff + prev_value;
prev_value = curr_value;
if (selector == 0 || selector == 1) {
zigzag_value = 0;
} else {
zigzag_value = ((w >> (4 + bit * i)) & INT64MASK(bit));
}
int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value);
int64_t curr_value = diff + prev_value;
prev_value = curr_value;
switch (type) {
case TSDB_DATA_TYPE_BIGINT:
*((int64_t *)output + _pos) = (int64_t)curr_value;
_pos++;
v += bit;
if ((++count) == nelements) break;
}
} break;
case TSDB_DATA_TYPE_INT: {
for (int32_t i = 0; i < elems; i++) {
if (selector == 0 || selector == 1) {
zigzag_value = 0;
} else {
zigzag_value = ((w >> (4 + v)) & INT64MASK(bit));
}
int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value);
int64_t curr_value = diff + prev_value;
prev_value = curr_value;
break;
case TSDB_DATA_TYPE_INT:
*((int32_t *)output + _pos) = (int32_t)curr_value;
_pos++;
v += bit;
if ((++count) == nelements) break;
}
} break;
case TSDB_DATA_TYPE_SMALLINT: {
for (int32_t i = 0; i < elems; i++) {
if (selector == 0 || selector == 1) {
zigzag_value = 0;
} else {
zigzag_value = ((w >> (4 + v)) & INT64MASK(bit));
}
int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value);
int64_t curr_value = diff + prev_value;
prev_value = curr_value;
break;
case TSDB_DATA_TYPE_SMALLINT:
*((int16_t *)output + _pos) = (int16_t)curr_value;
_pos++;
v += bit;
if ((++count) == nelements) break;
}
} break;
case TSDB_DATA_TYPE_TINYINT: {
for (int32_t i = 0; i < elems; i++) {
if (selector == 0 || selector == 1) {
zigzag_value = 0;
} else {
zigzag_value = ((w >> (4 + v)) & INT64MASK(bit));
}
int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value);
int64_t curr_value = diff + prev_value;
prev_value = curr_value;
break;
case TSDB_DATA_TYPE_TINYINT:
*((int8_t *)output + _pos) = (int8_t)curr_value;
_pos++;
v += bit;
if ((++count) == nelements) break;
}
} break;
break;
default:
perror("Wrong integer types.\n");
return -1;
}
count++;
if (count == nelements) break;
}
ip += LONG_BYTES;
}
return nelements * word_length;
#endif
}
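
A minimal standalone sketch (not part of this change) of the zigzag decoding used by both the AVX2 and the scalar loops above; zigzagDecode64 is an illustrative name for what the ZIGZAG_DECODE macro expands to:

```c
#include <assert.h>
#include <stdint.h>

// what ZIGZAG_DECODE(int64_t, v) expands to: ((v) >> 1) ^ -((int64_t)((v) & 1))
static int64_t zigzagDecode64(uint64_t v) { return (int64_t)(v >> 1) ^ -(int64_t)(v & 1); }

int main(void) {
  // zigzag encoding maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ...
  assert(zigzagDecode64(0) == 0);
  assert(zigzagDecode64(1) == -1);
  assert(zigzagDecode64(2) == 1);
  assert(zigzagDecode64(3) == -2);
  assert(zigzagDecode64(4) == 2);
  // in the decoder each decoded value is a delta accumulated into prev_value
  return 0;
}
```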
/* ----------------------------------------------Bool Compression

View File

@ -421,7 +421,11 @@ int32_t taosHashGetDup_m(SHashObj *pHashObj, const void *key, size_t keyLen, voi
}
void *taosHashGetImpl(SHashObj *pHashObj, const void *key, size_t keyLen, void **d, int32_t *size, bool addRef) {
if (pHashObj == NULL || taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) {
if (pHashObj == NULL || keyLen == 0 || key == NULL) {
return NULL;
}
if ((atomic_load_64((int64_t *)&pHashObj->size) == 0)) {
return NULL;
}

View File

@ -17,6 +17,7 @@
#include "tcompare.h"
#include "thash.h"
#include "types.h"
#include "xxhash.h"
#define ROTL32(x, r) ((x) << (r) | (x) >> (32u - (r)))
@ -49,6 +50,11 @@ uint32_t taosDJB2Hash(const char *key, uint32_t len) {
return hash;
}
uint32_t xxHash(const char *key, uint32_t len) {
int32_t seed = 0xcc9e2d51;
return XXH32(key, len, seed);
}
uint32_t MurmurHash3_32(const char *key, uint32_t len) {
const uint8_t *data = (const uint8_t *)key;
const int32_t nblocks = len >> 2u;
@ -192,8 +198,6 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) {
fn = taosIntHash_64;
break;
case TSDB_DATA_TYPE_BINARY:
fn = MurmurHash3_32;
break;
case TSDB_DATA_TYPE_NCHAR:
fn = MurmurHash3_32;
break;

View File

@ -325,11 +325,10 @@ int32_t tjsonToTArray(const SJson* pJson, const char* pName, FToObject func, SAr
const cJSON* jArray = tjsonGetObjectItem(pJson, pName);
int32_t size = tjsonGetArraySize(jArray);
if (size > 0) {
*pArray = taosArrayInit(size, itemSize);
*pArray = taosArrayInit_s(size, itemSize, size);
if (NULL == *pArray) {
return TSDB_CODE_OUT_OF_MEMORY;
}
taosArraySetSize(*pArray, size);
for (int32_t i = 0; i < size; ++i) {
int32_t code = func(tjsonGetArrayItem(jArray, i), taosArrayGet(*pArray, i));
if (TSDB_CODE_SUCCESS != code) {

View File

@ -2,7 +2,7 @@
#include "tpagedbuf.h"
#include "taoserror.h"
#include "tcompression.h"
#include "thash.h"
#include "tsimplehash.h"
#include "tlog.h"
#define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES)
@ -38,7 +38,7 @@ struct SDiskbasedBuf {
int32_t inMemPages; // numOfPages that are allocated in memory
SList* freePgList; // free page list
SArray* pIdList; // page id list
SHashObj* all;
SSHashObj*all;
SList* lruList;
void* emptyDummyIdList; // dummy id list
void* assistBuf; // assistant buffer for compress/decompress data
@ -374,12 +374,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
goto _error;
}
pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES
if (pPBuf->assistBuf == NULL) {
goto _error;
}
pPBuf->all = taosHashInit(10, fn, true, false);
pPBuf->all = tSimpleHashInit(64, fn);
if (pPBuf->all == NULL) {
goto _error;
}
@ -441,7 +436,7 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
}
// add to hash map
taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES);
tSimpleHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES);
pBuf->totalBufSize += pBuf->pageSize;
}
@ -466,7 +461,7 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
pBuf->statis.getPages += 1;
SPageInfo** pi = taosHashGet(pBuf->all, &id, sizeof(int32_t));
SPageInfo** pi = tSimpleHashGet(pBuf->all, &id, sizeof(int32_t));
if (pi == NULL || *pi == NULL) {
uError("failed to locate the buffer page:%d, %s", id, pBuf->id);
terrno = TSDB_CODE_INVALID_PARA;
@ -615,7 +610,7 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
taosArrayDestroy(pBuf->emptyDummyIdList);
taosArrayDestroy(pBuf->pFree);
taosHashCleanup(pBuf->all);
tSimpleHashCleanup(pBuf->all);
taosMemoryFreeClear(pBuf->id);
taosMemoryFreeClear(pBuf->assistBuf);
@ -641,7 +636,12 @@ void setBufPageDirty(void* pPage, bool dirty) {
ppi->dirty = dirty;
}
void setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp) { pBuf->comp = comp; }
void setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp) {
pBuf->comp = comp;
if (comp && (pBuf->assistBuf == NULL)) {
pBuf->assistBuf = taosMemoryMalloc(pBuf->pageSize + 2); // EXTRA BYTES
}
}
void dBufSetBufPageRecycled(SDiskbasedBuf* pBuf, void* pPage) {
SPageInfo* ppi = getPageInfoFromPayload(pPage);
@ -704,7 +704,7 @@ void clearDiskbasedBuf(SDiskbasedBuf* pBuf) {
taosArrayClear(pBuf->emptyDummyIdList);
taosArrayClear(pBuf->pFree);
taosHashClear(pBuf->all);
tSimpleHashClear(pBuf->all);
pBuf->numOfPages = 0; // all pages are in buffer in the first place
pBuf->totalBufSize = 0;

View File

@ -18,12 +18,13 @@
#include "tlog.h"
#include "tdef.h"
#define DEFAULT_BUF_PAGE_SIZE 1024
#define SHASH_DEFAULT_LOAD_FACTOR 0.75
#define HASH_MAX_CAPACITY (1024 * 1024 * 16L)
#define SHASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * SHASH_DEFAULT_LOAD_FACTOR)
#define GET_SHASH_NODE_KEY(_n, _dl) ((char *)(_n) + sizeof(SHNode) + (_dl))
#define GET_SHASH_NODE_DATA(_n) ((char *)(_n) + sizeof(SHNode))
#define GET_SHASH_NODE_DATA(_n) (((SHNode*)_n)->data)
#define GET_SHASH_NODE_KEY(_n, _dl) ((char*)GET_SHASH_NODE_DATA(_n) + (_dl))
#define HASH_INDEX(v, c) ((v) & ((c)-1))
@ -38,6 +39,8 @@ struct SSHashObj {
int64_t size; // number of elements in hash table
_hash_fn_t hashFp; // hash function
_equal_fn_t equalFp; // equal function
SArray* pHashNodeBuf; // hash node allocation buffer, each page is 1KB by default
int32_t offset; // allocation offset in current page
};
static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
@ -57,24 +60,28 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) {
capacity = 4;
}
SSHashObj *pHashObj = (SSHashObj *)taosMemoryCalloc(1, sizeof(SSHashObj));
SSHashObj *pHashObj = (SSHashObj *)taosMemoryMalloc(sizeof(SSHashObj));
if (!pHashObj) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
// the max slots is not defined by user
pHashObj->capacity = taosHashCapacity((int32_t)capacity);
pHashObj->equalFp = memcmp;
pHashObj->hashFp = fn;
pHashObj->capacity = taosHashCapacity((int32_t)capacity);
pHashObj->equalFp = memcmp;
pHashObj->pHashNodeBuf = taosArrayInit(10, sizeof(void*));
pHashObj->offset = 0;
pHashObj->size = 0;
pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *));
if (!pHashObj->hashList) {
taosMemoryFree(pHashObj);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
return pHashObj;
}
@ -82,19 +89,53 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) {
if (!pHashObj) {
return 0;
}
return (int32_t)atomic_load_64((int64_t *)&pHashObj->size);
return (int32_t) pHashObj->size;
}
static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *data, size_t dataLen, uint32_t hashVal) {
SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dataLen);
static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) {
#if 0
void** p = taosArrayGetLast(pHashObj->pHashNodeBuf);
if (p == NULL || (pHashObj->offset + size) > DEFAULT_BUF_PAGE_SIZE) {
// let's allocate one new page
int32_t allocSize = TMAX(size, DEFAULT_BUF_PAGE_SIZE);
void* pNewPage = taosMemoryMalloc(allocSize);
if (pNewPage == NULL) {
return NULL;
}
// if the allocated buffer page is greater than DEFAULT_BUF_PAGE_SIZE,
// pHashObj->offset will always be greater than DEFAULT_BUF_PAGE_SIZE, which means that
// the current buffer page is full and a new buffer page needs to be allocated.
pHashObj->offset = size;
taosArrayPush(pHashObj->pHashNodeBuf, &pNewPage);
return pNewPage;
} else {
void* pPos = (char*)(*p) + pHashObj->offset;
pHashObj->offset += size;
return pPos;
}
#else
return taosMemoryMalloc(size);
#endif
}
static SHNode *doCreateHashNode(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen,
uint32_t hashVal) {
SHNode *pNewNode = doInternalAlloc(pHashObj, sizeof(SHNode) + keyLen + dataLen);
if (!pNewNode) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pNewNode->keyLen = keyLen;
pNewNode->dataLen = dataLen;
pNewNode->next = NULL;
if (data) memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen);
pNewNode->hashVal = hashVal;
if (data) {
memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen);
}
memcpy(GET_SHASH_NODE_KEY(pNewNode, dataLen), key, keyLen);
return pNewNode;
}
@ -111,7 +152,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) {
return;
}
int64_t st = taosGetTimestampUs();
// int64_t st = taosGetTimestampUs();
void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, POINTER_BYTES * newCapacity);
if (!pNewEntryList) {
uWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity);
@ -134,10 +175,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) {
SHNode *pPrev = NULL;
while (pNode != NULL) {
void *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pNode->keyLen);
int32_t newIdx = HASH_INDEX(hashVal, pHashObj->capacity);
int32_t newIdx = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
pNext = pNode->next;
if (newIdx != idx) {
if (!pPrev) {
@ -156,8 +194,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) {
}
}
int64_t et = taosGetTimestampUs();
// int64_t et = taosGetTimestampUs();
// uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms",
// (int32_t)pHashObj->capacity,
// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
@ -179,13 +216,13 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons
SHNode *pNode = pHashObj->hashList[slot];
if (!pNode) {
SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal);
SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen, hashVal);
if (!pNewNode) {
return -1;
}
pHashObj->hashList[slot] = pNewNode;
atomic_add_fetch_64(&pHashObj->size, 1);
pHashObj->size += 1;
return 0;
}
@ -197,13 +234,13 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons
}
if (!pNode) {
SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal);
SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen, hashVal);
if (!pNewNode) {
return -1;
}
pNewNode->next = pHashObj->hashList[slot];
pHashObj->hashList[slot] = pNewNode;
atomic_add_fetch_64(&pHashObj->size, 1);
pHashObj->size += 1;
} else if (data) { // update data
memcpy(GET_SHASH_NODE_DATA(pNode), data, dataLen);
}
@ -270,7 +307,7 @@ int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) {
pPrev->next = pNode->next;
}
FREE_HASH_NODE(pNode);
atomic_sub_fetch_64(&pHashObj->size, 1);
pHashObj->size -= 1;
code = TSDB_CODE_SUCCESS;
break;
}
@ -305,7 +342,7 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke
}
FREE_HASH_NODE(pNode);
atomic_sub_fetch_64(&pHashObj->size, 1);
pHashObj->size -= 1;
break;
}
pPrev = pNode;
@ -315,6 +352,10 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke
return TSDB_CODE_SUCCESS;
}
static void destroyItems(void* pItem) {
taosMemoryFree(*(void**)pItem);
}
void tSimpleHashClear(SSHashObj *pHashObj) {
if (!pHashObj || taosHashTableEmpty(pHashObj)) {
return;
@ -332,9 +373,13 @@ void tSimpleHashClear(SSHashObj *pHashObj) {
FREE_HASH_NODE(pNode);
pNode = pNext;
}
pHashObj->hashList[i] = NULL;
}
atomic_store_64(&pHashObj->size, 0);
taosArrayClearEx(pHashObj->pHashNodeBuf, destroyItems);
pHashObj->offset = 0;
pHashObj->size = 0;
}
void tSimpleHashCleanup(SSHashObj *pHashObj) {
@ -343,6 +388,7 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) {
}
tSimpleHashClear(pHashObj);
taosArrayDestroy(pHashObj->pHashNodeBuf);
taosMemoryFreeClear(pHashObj->hashList);
taosMemoryFree(pHashObj);
}
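
A minimal usage sketch (not part of this change), assuming tsimplehash.h and thash.h are available; with the atomic size updates removed, the simple hash appears intended for single-threaded use, so callers must serialize access themselves:

```c
#include "tsimplehash.h"  // TDengine util headers (assumed available)
#include "thash.h"

static void demoSimpleHash(void) {
  SSHashObj* pHash = tSimpleHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT));
  int32_t key = 10, val = 100;
  tSimpleHashPut(pHash, &key, sizeof(key), &val, sizeof(val));
  int32_t* p = tSimpleHashGet(pHash, &key, sizeof(key));  // p != NULL, *p == 100
  (void)p;
  tSimpleHashRemove(pHash, &key, sizeof(key));
  tSimpleHashCleanup(pHash);
}
```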

View File

@ -159,10 +159,6 @@ char *strtolower(char *dst, const char *src) {
int32_t esc = 0;
char quote = 0, *p = dst, c;
if (ASSERTS(dst != NULL, "dst is NULL")) {
return NULL;
}
for (c = *src++; c; c = *src++) {
if (esc) {
esc = 0;
@ -188,10 +184,6 @@ char *strntolower(char *dst, const char *src, int32_t n) {
int32_t esc = 0;
char quote = 0, *p = dst, c;
if (ASSERTS(dst != NULL, "dst is NULL")) {
return NULL;
}
if (n == 0) {
*p = 0;
return dst;
@ -219,11 +211,6 @@ char *strntolower(char *dst, const char *src, int32_t n) {
char *strntolower_s(char *dst, const char *src, int32_t n) {
char *p = dst, c;
if (ASSERTS(dst != NULL, "dst is NULL")) {
return NULL;
}
if (n == 0) {
return NULL;
}
@ -333,6 +320,50 @@ char *strbetween(char *string, char *begin, char *end) {
return result;
}
int32_t tintToHex(uint64_t val, char hex[]) {
const char hexstr[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
int32_t j = 0, k = 0;
if (val == 0) {
hex[j++] = hexstr[0];
return j;
}
// skip the leading zero nibbles
while((val & (((uint64_t)0xfL) << ((15 - k) * 4))) == 0) {
k += 1;
}
for (j = 0; k < 16; ++k, ++j) {
hex[j] = hexstr[(val & (((uint64_t)0xfL) << ((15 - k) * 4))) >> (15 - k) * 4];
}
return j;
}
int32_t titoa(uint64_t val, size_t radix, char str[]) {
if (radix < 2 || radix > 16) {
return 0;
}
const char* s = "0123456789abcdef";
char buf[65] = {0};
int32_t i = 0;
uint64_t v = val;
while(v > 0) {
buf[i++] = s[v % radix];
v /= radix;
}
// reverse order
for(int32_t j = 0; j < i; ++j) {
str[j] = buf[i - j - 1];
}
return i;
}
int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]) {
int32_t i;
char hexval[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};

source/util/src/xxhash.c (new file, 1030 lines)

File diff suppressed because it is too large

View File

@ -294,4 +294,32 @@ TEST(utilTest, tstrncspn) {
const char* reject5 = "911";
v = tstrncspn(p2, strlen(p2), reject5, 0);
ASSERT_EQ(v, 14);
}
TEST(utilTest, intToHextStr) {
char buf[64] = {0};
int64_t v = 0;
tintToHex(0, buf);
ASSERT_STREQ(buf, "0");
v = 100000000;
tintToHex(v, buf);
char destBuf[128];
sprintf(destBuf, "%" PRIx64, v);
ASSERT_STREQ(buf, destBuf);
taosSeedRand(taosGetTimestampSec());
for(int32_t i = 0; i < 100000; ++i) {
memset(buf, 0, tListLen(buf));
memset(destBuf, 0, tListLen(destBuf));
v = taosRand();
tintToHex(v, buf);
sprintf(destBuf, "%" PRIx64, v);
ASSERT_STREQ(buf, destBuf);
}
}

View File

@ -75,7 +75,7 @@ class TDTestCase:
tdSql.checkData(2, 1, 'performance_schema')
tdSql.checkData(2, 2, None)
tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;')
tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;')
tdSql.checkRows(3)
tdSql.checkData(0, 0, 23)
tdSql.checkData(0, 1, 'information_schema')
@ -87,12 +87,12 @@ class TDTestCase:
tdSql.checkData(2, 1, 'tbl_count')
tdSql.checkData(2, 2, 'stb1')
tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name')
tdSql.query('select count(1) v,db_name from information_schema.ins_tables group by db_name order by v asc')
tdSql.checkRows(3)
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 'performance_schema')
tdSql.checkData(1, 0, 3)
tdSql.checkData(1, 1, 'tbl_count')
tdSql.checkData(1, 0, 5)
tdSql.checkData(1, 1, 'performance_schema')
tdSql.checkData(0, 0, 3)
tdSql.checkData(0, 1, 'tbl_count')
tdSql.checkData(2, 0, 23)
tdSql.checkData(2, 1, 'information_schema')
@ -177,42 +177,44 @@ class TDTestCase:
tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:29\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);')
tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;')
tdSql.query('select count(*) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v;')
tdSql.checkRows(4)
tdSql.checkData(0, 0, 1)
tdSql.checkData(0, 1, 'tbl_count')
tdSql.checkData(0, 2, 'stba')
tdSql.checkData(1, 0, 23)
tdSql.checkData(1, 1, 'information_schema')
tdSql.checkData(1, 2, None)
tdSql.checkData(2, 0, 3)
tdSql.checkData(2, 1, 'tbl_count')
tdSql.checkData(2, 2, 'stb1')
tdSql.checkData(3, 0, 5)
tdSql.checkData(3, 1, 'performance_schema')
tdSql.checkData(1, 0, 3)
tdSql.checkData(1, 1, 'tbl_count')
tdSql.checkData(1, 2, 'stb1')
tdSql.checkData(2, 0, 5)
tdSql.checkData(2, 1, 'performance_schema')
tdSql.checkData(2, 2, None)
tdSql.checkData(3, 0, 23)
tdSql.checkData(3, 1, 'information_schema')
tdSql.checkData(3, 2, None)
tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;')
tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v;')
tdSql.checkRows(4)
tdSql.checkData(0, 0, 23)
tdSql.checkData(0, 1, 'information_schema')
tdSql.checkData(0, 2, None)
tdSql.checkData(0, 0, 1)
tdSql.checkData(0, 1, 'tbl_count')
tdSql.checkData(0, 2, 'stba')
tdSql.checkData(1, 0, 3)
tdSql.checkData(1, 1, 'tbl_count')
tdSql.checkData(1, 2, 'stb1')
tdSql.checkData(2, 0, 5)
tdSql.checkData(2, 1, 'performance_schema')
tdSql.checkData(2, 2, None)
tdSql.checkData(3, 0, 23)
tdSql.checkData(3, 1, 'information_schema')
tdSql.checkData(3, 2, None)
tdSql.query('select count(1) v,db_name from information_schema.ins_tables group by db_name order by v')
tdSql.checkRows(3)
tdSql.checkData(0, 0, 4)
tdSql.checkData(0, 1, 'tbl_count')
tdSql.checkData(1, 0, 5)
tdSql.checkData(1, 1, 'performance_schema')
tdSql.checkData(1, 2, None)
tdSql.checkData(2, 0, 1)
tdSql.checkData(2, 1, 'tbl_count')
tdSql.checkData(2, 2, 'stba')
tdSql.checkData(3, 0, 3)
tdSql.checkData(3, 1, 'tbl_count')
tdSql.checkData(3, 2, 'stb1')
tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name')
tdSql.checkRows(3)
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 'performance_schema')
tdSql.checkData(1, 0, 4)
tdSql.checkData(1, 1, 'tbl_count')
tdSql.checkData(2, 0, 23)
tdSql.checkData(2, 1, 'information_schema')

View File

@ -58,6 +58,8 @@ sql_error alter dnode 1 'monDebugFlag 131'
sql_error alter dnode 1 'cqDebugFlag 131'
sql_error alter dnode 1 'httpDebugFlag 131'
sql_error alter dnode 1 'mqttDebugFlag 131'
sql_error alter dnode 1 'qDebugFlaga 131'
sql_error alter all dnodes 'qDebugFlaga 131'
sql_error alter dnode 2 'wDebugFlag' '135'
sql_error alter dnode 2 'tmrDebugFlag' '135'
@ -65,6 +67,8 @@ sql_error alter dnode 1 'monDebugFlag' '131'
sql_error alter dnode 1 'cqDebugFlag' '131'
sql_error alter dnode 1 'httpDebugFlag' '131'
sql_error alter dnode 1 'mqttDebugFlag' '131'
sql_error alter dnode 1 'qDebugFlaga' '131'
sql_error alter all dnodes 'qDebugFlaga' '131'
print ======== step3
sql_error alter $hostname1 debugFlag 135

View File

@ -81,7 +81,6 @@ $nt = $ntPrefix . $i
#sql select _block_dist() from $nt
print show table distributed $nt
sql_error show table distributed $nt
#if $rows == 0 then
# return -1

View File

@ -351,7 +351,7 @@ sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0
print ===========>td-4805
sql_error select tbname, i from (select * from nest_tb0) group by i;
sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1;
sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1 order by c1;
if $rows != 2 then
return -1
endi

View File

@ -143,9 +143,11 @@ sql delete from t1 where ts<=1537146409500
sql flush database $db
print ======================================>TS-2639
sql show table distributed t1;
print =====================================>TD-22007
sql select count(*) from t1 interval(10a)
sql drop table t1
sql create table st1 (ts timestamp, k int) tags(a int);
@ -165,7 +167,7 @@ if $data00 != 10 then
return -1
endi
sql select last_row(*) from st1 group by a
sql select last_row(*) from st1 group by a order by a desc
if $rows != 2 then
return -1
endi

View File

@ -519,7 +519,7 @@ if $rows != 0 then
return -1
endi
sql select sum(f1),count(f1) from tba1 partition by case when f1 then f1 when 1 then 1 end;
sql select sum(f1) v,count(f1) from tba1 partition by case when f1 then f1 when 1 then 1 end order by v;
if $rows != 2 then
return -1
endi

View File

@ -1850,15 +1850,16 @@ class TDTestCase:
tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdLog.printNoPrefix("==========step13:stable cases")
#tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
tdSql.error(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
#tdSql.checkRows(13)
#tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
#tdSql.checkRows(13)
#tdSql.query(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)")
tdSql.error(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)")
#tdSql.checkRows(13)
#tdSql.query(f"select _irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)")

View File

@ -433,7 +433,7 @@ class TDTestCase:
tdSql.checkRows(11)
tdSql.checkData(1,0,0)
tdSql.checkData(10,0,9)
tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )")
tdSql.query(f"select unique(t1) v from (select _rowts , t1 , tbname from {dbname}.stb1 ) order by v desc")
tdSql.checkRows(2)
tdSql.checkData(0,0,4)
tdSql.checkData(1,0,1)

View File

@ -28,7 +28,7 @@ void pressOtherKey(char c);
bool shellAutoInit();
// set conn
void shellSetConn(TAOS* conn);
void shellSetConn(TAOS* conn, bool runOnce);
// exit shell auto-completion function, called once when the shell exits
void shellAutoExit();

View File

@ -332,6 +332,7 @@ bool varMode = false; // enter var names list mode
TAOS* varCon = NULL;
SShellCmd* varCmd = NULL;
bool varRunOnce = false;
SMatch* lastMatch = NULL; // save last match result
int cntDel = 0; // delete byte count after next press tab
@ -375,7 +376,7 @@ void showHelp() {
----- C ----- \n\
create table <tb_name> using <stb_name> tags ...\n\
create database <db_name> <db_options> ...\n\
create dnode \"fqdn:port\"n\
create dnode \"fqdn:port\" ...\n\
create index ...\n\
create mnode on dnode <dnode_id> ;\n\
create qnode on dnode <dnode_id> ;\n\
@ -637,10 +638,11 @@ bool shellAutoInit() {
}
// set conn
void shellSetConn(TAOS* conn) {
varCon = conn;
void shellSetConn(TAOS* conn, bool runOnce) {
varCon = conn;
varRunOnce = runOnce;
// init database and stable
//updateTireValue(WT_VAR_DBNAME, false);
if (!runOnce) updateTireValue(WT_VAR_DBNAME, false);
}
// exit shell auto-completion function, called once when the shell exits
@ -784,6 +786,12 @@ int writeVarNames(int type, TAOS_RES* tres) {
return numOfRows;
}
void setThreadNull(int type) {
taosThreadMutexLock(&tiresMutex);
threads[type] = NULL;
taosThreadMutexUnlock(&tiresMutex);
}
bool firstMatchCommand(TAOS* con, SShellCmd* cmd);
//
// worker thread that obtains variable names from the db server
@ -799,6 +807,7 @@ void* varObtainThread(void* param) {
TAOS_RES* pSql = taos_query(varCon, varSqls[type]);
if (taos_errno(pSql)) {
taos_free_result(pSql);
setThreadNull(type);
return NULL;
}
@ -814,6 +823,7 @@ void* varObtainThread(void* param) {
firstMatchCommand(varCon, varCmd);
}
setThreadNull(type);
return NULL;
}
@ -1977,7 +1987,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) {
if (dealUseDB(sql)) {
// change to new db
//updateTireValue(WT_VAR_STABLE, false);
if (!varRunOnce) updateTireValue(WT_VAR_STABLE, false);
return;
}

View File

@ -1097,10 +1097,11 @@ int32_t shellExecute() {
}
#endif
shellSetConn(shell.conn);
bool runOnce = pArgs->commands != NULL || pArgs->file[0] != 0;
shellSetConn(shell.conn, runOnce);
shellReadHistory();
if (pArgs->commands != NULL || pArgs->file[0] != 0) {
if (runOnce) {
if (pArgs->commands != NULL) {
printf("%s%s\r\n", shell.info.promptHeader, pArgs->commands);
char *cmd = strdup(pArgs->commands);
@ -1160,5 +1161,8 @@ int32_t shellExecute() {
taosThreadJoin(spid, NULL);
shellCleanupHistory();
taos_kill_query(shell.conn);
taos_close(shell.conn);
return 0;
}