From af6a9aeb13c5d1aa529d3149dee7ee3c0830789b Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 24 Oct 2022 17:46:21 +0800 Subject: [PATCH 01/44] fix: remove interp stable limitation and add test case --- source/libs/function/inc/functionMgtInt.h | 3 +- source/libs/function/src/builtins.c | 2 +- source/libs/function/src/functionMgt.c | 2 - source/libs/parser/src/parTranslater.c | 22 ----------- tests/system-test/2-query/interp.py | 47 ++++++++++++++++++++++- 5 files changed, 48 insertions(+), 28 deletions(-) diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h index 9bff812c3a..a07038384e 100644 --- a/source/libs/function/inc/functionMgtInt.h +++ b/source/libs/function/inc/functionMgtInt.h @@ -49,8 +49,7 @@ extern "C" { #define FUNC_MGT_MULTI_ROWS_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(20) #define FUNC_MGT_KEEP_ORDER_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(21) #define FUNC_MGT_CUMULATIVE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(22) -#define FUNC_MGT_FORBID_STABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23) -#define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(24) +#define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23) #define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 2223954a5b..bd565062bf 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2350,7 +2350,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "interp", .type = FUNCTION_TYPE_INTERP, .classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_STABLE_FUNC, + FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateInterp, .getEnvFunc = getSelectivityFuncEnv, .initFunc = functionSetup, diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index fde9084ae3..bae005f5c4 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -216,8 +216,6 @@ bool fmIsKeepOrderFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, F bool fmIsCumulativeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_CUMULATIVE_FUNC); } -bool fmIsForbidSuperTableFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_STABLE_FUNC); } - bool fmIsInterpFunc(int32_t funcId) { if (funcId < 0 || funcId >= funcMgtBuiltinsNum) { return false; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index d942af9673..77248a58c1 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1537,25 +1537,6 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p return TSDB_CODE_SUCCESS; } -static int32_t translateForbidSuperTableFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { - if (!fmIsForbidSuperTableFunc(pFunc->funcId)) { - return TSDB_CODE_SUCCESS; - } - if (!isSelectStmt(pCxt->pCurrStmt)) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, - "%s is only supported in single table query", pFunc->functionName); - } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; - if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) || - (TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType && - TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, - "%s is only supported in single table query", pFunc->functionName); - } - return TSDB_CODE_SUCCESS; -} - static bool isStar(SNode* pNode) { return (QUERY_NODE_COLUMN == nodeType(pNode)) && ('\0' == ((SColumnNode*)pNode)->tableAlias[0]) && (0 == strcmp(((SColumnNode*)pNode)->colName, "*")); @@ -1717,9 +1698,6 @@ static int32_t rewriteSystemInfoFunc(STranslateContext* pCxt, SNode** pNode) { static int32_t translateNoramlFunction(STranslateContext* pCxt, SFunctionNode* pFunc) { int32_t code = translateAggFunc(pCxt, pFunc); - if (TSDB_CODE_SUCCESS == code) { - code = translateForbidSuperTableFunc(pCxt, pFunc); - } if (TSDB_CODE_SUCCESS == code) { code = translateScanPseudoColumnFunc(pCxt, pFunc); } diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index bee20710b5..1191c6d7b6 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -11,11 +11,15 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file def run(self): dbname = "db" tbname = "tb" + stbname = "stb" + ctbname1 = "ctb1" + ctbname2 = "ctb2" tdSql.prepare() @@ -621,6 +625,32 @@ class TDTestCase: tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')") tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-11 00:00:05', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')") + tdSql.execute( + f'''create stable if not exists {dbname}.{stbname} + (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10)) tags(t1 int) + ''' + ) + + + tdSql.execute( + f'''create table if not exists {dbname}.{ctbname1} using {dbname}.{stbname} tags(1) + ''' + ) + + tdSql.execute( + f'''create table if not exists {dbname}.{ctbname2} using {dbname}.{stbname} tags(1) + ''' + ) + + tdSql.execute(f"insert into {dbname}.{ctbname1} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')") + tdSql.execute(f"insert into {dbname}.{ctbname1} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')") + tdSql.execute(f"insert into {dbname}.{ctbname1} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')") + + tdSql.execute(f"insert into {dbname}.{ctbname2} values ('2020-02-02 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')") + tdSql.execute(f"insert into {dbname}.{ctbname2} values ('2020-02-02 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')") + tdSql.execute(f"insert into {dbname}.{ctbname2} values ('2020-02-02 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')") + + tdSql.execute(f"flush database {dbname}"); # test fill null @@ -877,6 +907,21 @@ class TDTestCase: tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdLog.printNoPrefix("==========step12:stable cases") + + tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") + tdSql.checkRows(13) + + tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") + tdSql.checkRows(13) + + tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-02 00:00:16') partition by tbname every(1s) fill(null)") + tdSql.checkRows(13) + + #tdSql.query(f"select _irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)") + #tdSql.query(f"select tbname,_irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)") + + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") From 89f7ad2920121ce4f2af8a17f486e69c6d10ae49 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 25 Oct 2022 10:36:30 +0800 Subject: [PATCH 02/44] fix: optimize tsdb cache loading speed --- source/dnode/vnode/inc/vnode.h | 3 +- source/dnode/vnode/src/inc/tsdb.h | 61 ++++-- source/dnode/vnode/src/tsdb/tsdbCache.c | 211 ++++++++++--------- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 46 ++-- source/libs/executor/src/cachescanoperator.c | 14 +- 5 files changed, 188 insertions(+), 147 deletions(-) diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index be0d6fdc4d..8352910d51 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -163,7 +163,8 @@ void *tsdbGetIdx(SMeta *pMeta); void *tsdbGetIvtIdx(SMeta *pMeta); uint64_t getReaderMaxVersion(STsdbReader *pReader); -int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader); +int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, uint64_t suid, + void **pReader); int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids); void *tsdbCacherowsReaderClose(void *pReader); int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index dfbbd8fbd0..89be94a35d 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -298,29 +298,6 @@ int32_t tsdbMerge(STsdb *pTsdb); #define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0) #define TSDB_CACHE_LAST(c) (((c).cacheLast & 2) > 0) -// tsdbCache ============================================================================================== -typedef struct { - TSKEY ts; - SColVal colVal; -} SLastCol; - -int32_t tsdbOpenCache(STsdb *pTsdb); -void tsdbCloseCache(STsdb *pTsdb); -int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb); -int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup); -int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h); -int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h); -int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h); - -int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey); -int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey); -int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey); - -void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity); -size_t tsdbCacheGetCapacity(SVnode *pVnode); - -int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema); - // tsdbDiskData ============================================================================================== int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder); void *tDiskDataBuilderDestroy(SDiskDataBuilder *pBuilder); @@ -729,6 +706,44 @@ void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo); void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el); void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo); +// tsdbCache ============================================================================================== +typedef struct SCacheRowsReader { + SVnode *pVnode; + STSchema *pSchema; + uint64_t uid; + uint64_t suid; + char **transferBuf; // todo remove it soon + int32_t numOfCols; + int32_t type; + int32_t tableIndex; // currently returned result tables + SArray *pTableList; // table id list + SSttBlockLoadInfo *pLoadInfo; + STsdbReadSnap *pReadSnap; + SDataFReader *pDataFReader; +} SCacheRowsReader; + +typedef struct { + TSKEY ts; + SColVal colVal; +} SLastCol; + +int32_t tsdbOpenCache(STsdb *pTsdb); +void tsdbCloseCache(STsdb *pTsdb); +int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb); +int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup); +int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h); +int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h); +int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h); + +int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey); +int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey); +int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey); + +void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity); +size_t tsdbCacheGetCapacity(SVnode *pVnode); + +int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema); + // ========== inline functions ========== static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) { TSDBKEY *pKey1 = (TSDBKEY *)p1; diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index bb394e8acc..f1d079f118 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -26,7 +26,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { goto _err; } - taosLRUCacheSetStrictCapacity(pCache, true); + taosLRUCacheSetStrictCapacity(pCache, false); taosThreadMutexInit(&pTsdb->lruMutex, NULL); @@ -488,11 +488,12 @@ typedef struct { int32_t nFileSet; int32_t iFileSet; SArray *aDFileSet; - SDataFReader *pDataFReader; + SDataFReader **pDataFReader; TSDBROW row; - SMergeTree mergeTree; - SMergeTree *pMergeTree; + SMergeTree mergeTree; + SMergeTree *pMergeTree; + SSttBlockLoadInfo *pLoadInfo; } SFSLastNextRowIter; static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) { @@ -519,18 +520,20 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) { return code; } - if (state->pDataFReader != NULL) { - tsdbDataFReaderClose(&state->pDataFReader); - state->pDataFReader = NULL; + if (*state->pDataFReader == NULL || (*state->pDataFReader)->pSet->fid != pFileSet->fid) { + if (*state->pDataFReader != NULL) { + tsdbDataFReaderClose(state->pDataFReader); + + resetLastBlockLoadInfo(state->pLoadInfo); + } + + code = tsdbDataFReaderOpen(state->pDataFReader, state->pTsdb, pFileSet); + if (code) goto _err; } - code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet); - if (code) goto _err; - - SSttBlockLoadInfo *pLoadInfo = tCreateLastBlockLoadInfo(state->pTSchema, NULL, 0); - tMergeTreeOpen(&state->mergeTree, 1, state->pDataFReader, state->suid, state->uid, + tMergeTreeOpen(&state->mergeTree, 1, *state->pDataFReader, state->suid, state->uid, &(STimeWindow){.skey = TSKEY_MIN, .ekey = TSKEY_MAX}, - &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, pLoadInfo, true, NULL); + &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, state->pLoadInfo, false, NULL); state->pMergeTree = &state->mergeTree; bool hasVal = tMergeTreeNext(&state->mergeTree); if (!hasVal) { @@ -554,10 +557,10 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) { } _err: - if (state->pDataFReader) { + /*if (state->pDataFReader) { tsdbDataFReaderClose(&state->pDataFReader); state->pDataFReader = NULL; - } + }*/ if (state->pMergeTree != NULL) { tMergeTreeClose(state->pMergeTree); state->pMergeTree = NULL; @@ -575,12 +578,12 @@ int32_t clearNextRowFromFSLast(void *iter) { if (!state) { return code; } - + /* if (state->pDataFReader) { tsdbDataFReaderClose(&state->pDataFReader); state->pDataFReader = NULL; } - + */ if (state->pMergeTree != NULL) { tMergeTreeClose(state->pMergeTree); state->pMergeTree = NULL; @@ -597,27 +600,28 @@ typedef enum SFSNEXTROWSTATES { } SFSNEXTROWSTATES; typedef struct SFSNextRowIter { - SFSNEXTROWSTATES state; // [input] - STsdb *pTsdb; // [input] - SBlockIdx *pBlockIdxExp; // [input] - STSchema *pTSchema; // [input] - tb_uid_t suid; - tb_uid_t uid; - int32_t nFileSet; - int32_t iFileSet; - SArray *aDFileSet; - SDataFReader *pDataFReader; - SArray *aBlockIdx; - SBlockIdx *pBlockIdx; - SMapData blockMap; - int32_t nBlock; - int32_t iBlock; - SDataBlk block; - SBlockData blockData; - SBlockData *pBlockData; - int32_t nRow; - int32_t iRow; - TSDBROW row; + SFSNEXTROWSTATES state; // [input] + STsdb *pTsdb; // [input] + SBlockIdx *pBlockIdxExp; // [input] + STSchema *pTSchema; // [input] + tb_uid_t suid; + tb_uid_t uid; + int32_t nFileSet; + int32_t iFileSet; + SArray *aDFileSet; + SDataFReader **pDataFReader; + SArray *aBlockIdx; + SBlockIdx *pBlockIdx; + SMapData blockMap; + int32_t nBlock; + int32_t iBlock; + SDataBlk block; + SBlockData blockData; + SBlockData *pBlockData; + int32_t nRow; + int32_t iRow; + TSDBROW row; + SSttBlockLoadInfo *pLoadInfo; } SFSNextRowIter; static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { @@ -648,8 +652,16 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { return code; } - code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet); - if (code) goto _err; + if (*state->pDataFReader == NULL || (*state->pDataFReader)->pSet->fid != pFileSet->fid) { + if (*state->pDataFReader != NULL) { + tsdbDataFReaderClose(state->pDataFReader); + + resetLastBlockLoadInfo(state->pLoadInfo); + } + + code = tsdbDataFReaderOpen(state->pDataFReader, state->pTsdb, pFileSet); + if (code) goto _err; + } // tMapDataReset(&state->blockIdxMap); if (!state->aBlockIdx) { @@ -657,7 +669,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { } else { taosArrayClear(state->aBlockIdx); } - code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx); + code = tsdbReadBlockIdx(*state->pDataFReader, state->aBlockIdx); if (code) goto _err; /* if (state->pBlockIdx) { */ @@ -666,17 +678,20 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { * &state->blockIdx); */ state->pBlockIdx = taosArraySearch(state->aBlockIdx, state->pBlockIdxExp, tCmprBlockIdx, TD_EQ); - if (!state->pBlockIdx) { - tsdbDataFReaderClose(&state->pDataFReader); - state->pDataFReader = NULL; + if (!state->pBlockIdx) { /* + tsdbDataFReaderClose(state->pDataFReader); + *state->pDataFReader = NULL; + resetLastBlockLoadInfo(state->pLoadInfo);*/ goto _next_fileset; } + tMapDataReset(&state->blockMap); + /* if (state->blockMap.pData != NULL) { tMapDataClear(&state->blockMap); } - - code = tsdbReadDataBlk(state->pDataFReader, state->pBlockIdx, &state->blockMap); + */ + code = tsdbReadDataBlk(*state->pDataFReader, state->pBlockIdx, &state->blockMap); if (code) goto _err; state->nBlock = state->blockMap.nItem; @@ -703,7 +718,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, NULL, 0); if (code) goto _err; - code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData); + code = tsdbReadDataBlock(*state->pDataFReader, &block, state->pBlockData); if (code) goto _err; state->nRow = state->blockData.nRow; @@ -719,8 +734,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { if (--state->iRow < 0) { state->state = SFSNEXTROW_BLOCKDATA; if (--state->iBlock < 0) { - tsdbDataFReaderClose(&state->pDataFReader); - state->pDataFReader = NULL; + tsdbDataFReaderClose(state->pDataFReader); + *state->pDataFReader = NULL; + resetLastBlockLoadInfo(state->pLoadInfo); if (state->aBlockIdx) { taosArrayDestroy(state->aBlockIdx); @@ -739,16 +755,17 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { } _err: - if (state->pDataFReader) { - tsdbDataFReaderClose(&state->pDataFReader); - state->pDataFReader = NULL; - } + /* + if (*state->pDataFReader) { + tsdbDataFReaderClose(state->pDataFReader); + *state->pDataFReader = NULL; + resetLastBlockLoadInfo(state->pLoadInfo); + }*/ if (state->aBlockIdx) { taosArrayDestroy(state->aBlockIdx); state->aBlockIdx = NULL; } if (state->pBlockData) { - // tBlockDataDestroy(&state->blockData, 1); tBlockDataDestroy(state->pBlockData, 1); state->pBlockData = NULL; } @@ -765,11 +782,11 @@ int32_t clearNextRowFromFS(void *iter) { if (!state) { return code; } - + /* if (state->pDataFReader) { tsdbDataFReaderClose(&state->pDataFReader); state->pDataFReader = NULL; - } + }*/ if (state->aBlockIdx) { taosArrayDestroy(state->aBlockIdx); state->aBlockIdx = NULL; @@ -930,25 +947,21 @@ typedef struct { TSDBROW memRow, imemRow, fsLastRow, fsRow; TsdbNextRowState input[4]; - STsdbReadSnap *pReadSnap; STsdb *pTsdb; } CacheNextRowIter; -static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema) { +static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema, tb_uid_t suid, + SSttBlockLoadInfo *pLoadInfo, STsdbReadSnap *pReadSnap, SDataFReader **pDataFReader) { int code = 0; - tb_uid_t suid = getTableSuidByUid(uid, pTsdb); - - tsdbTakeReadSnap(pTsdb, &pIter->pReadSnap, NULL); - STbData *pMem = NULL; - if (pIter->pReadSnap->pMem) { - pMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid); + if (pReadSnap->pMem) { + pMem = tsdbGetTbDataFromMemTable(pReadSnap->pMem, suid, uid); } STbData *pIMem = NULL; - if (pIter->pReadSnap->pIMem) { - pIMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid); + if (pReadSnap->pIMem) { + pIMem = tsdbGetTbDataFromMemTable(pReadSnap->pIMem, suid, uid); } pIter->pTsdb = pTsdb; @@ -957,7 +970,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs SDelIdx delIdx; - SDelFile *pDelFile = pIter->pReadSnap->fs.pDelFile; + SDelFile *pDelFile = pReadSnap->fs.pDelFile; if (pDelFile) { SDelFReader *pDelFReader; @@ -988,18 +1001,22 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs pIter->fsLastState.state = (SFSLASTNEXTROWSTATES)SFSNEXTROW_FS; pIter->fsLastState.pTsdb = pTsdb; - pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet; + pIter->fsLastState.aDFileSet = pReadSnap->fs.aDFileSet; pIter->fsLastState.pTSchema = pTSchema; pIter->fsLastState.suid = suid; pIter->fsLastState.uid = uid; + pIter->fsLastState.pLoadInfo = pLoadInfo; + pIter->fsLastState.pDataFReader = pDataFReader; pIter->fsState.state = SFSNEXTROW_FS; pIter->fsState.pTsdb = pTsdb; - pIter->fsState.aDFileSet = pIter->pReadSnap->fs.aDFileSet; + pIter->fsState.aDFileSet = pReadSnap->fs.aDFileSet; pIter->fsState.pBlockIdxExp = &pIter->idx; pIter->fsState.pTSchema = pTSchema; pIter->fsState.suid = suid; pIter->fsState.uid = uid; + pIter->fsState.pLoadInfo = pLoadInfo; + pIter->fsState.pDataFReader = pDataFReader; pIter->input[0] = (TsdbNextRowState){&pIter->memRow, true, false, &pIter->memState, getNextRowFromMem, NULL}; pIter->input[1] = (TsdbNextRowState){&pIter->imemRow, true, false, &pIter->imemState, getNextRowFromMem, NULL}; @@ -1040,8 +1057,6 @@ static int32_t nextRowIterClose(CacheNextRowIter *pIter) { taosArrayDestroy(pIter->pSkyline); } - tsdbUntakeReadSnap(pIter->pTsdb, pIter->pReadSnap, NULL); - _err: return code; } @@ -1119,10 +1134,10 @@ _err: return code; } -static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppColArray) { +static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppColArray, SCacheRowsReader *pr) { int32_t code = 0; - STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1); + STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1); int16_t nCol = pTSchema->numOfCols; int16_t iCol = 0; int16_t noneCol = 0; @@ -1133,7 +1148,7 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo TSKEY lastRowTs = TSKEY_MAX; CacheNextRowIter iter = {0}; - nextRowIterOpen(&iter, uid, pTsdb, pTSchema); + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader); do { TSDBROW *pRow = NULL; @@ -1233,20 +1248,20 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo } nextRowIterClose(&iter); - taosMemoryFreeClear(pTSchema); + // taosMemoryFreeClear(pTSchema); return code; _err: nextRowIterClose(&iter); taosArrayDestroy(pColArray); - taosMemoryFreeClear(pTSchema); + // taosMemoryFreeClear(pTSchema); return code; } -static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) { +static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr) { int32_t code = 0; - STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1); + STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1); int16_t nCol = pTSchema->numOfCols; int16_t iCol = 0; int16_t noneCol = 0; @@ -1257,7 +1272,7 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) { TSKEY lastRowTs = TSKEY_MAX; CacheNextRowIter iter = {0}; - nextRowIterOpen(&iter, uid, pTsdb, pTSchema); + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader); do { TSDBROW *pRow = NULL; @@ -1350,18 +1365,18 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) { } nextRowIterClose(&iter); - taosMemoryFreeClear(pTSchema); + // taosMemoryFreeClear(pTSchema); return code; _err: nextRowIterClose(&iter); - taosMemoryFreeClear(pTSchema); + // taosMemoryFreeClear(pTSchema); *ppLastArray = NULL; taosArrayDestroy(pColArray); return code; } -int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **handle) { +int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **handle) { int32_t code = 0; char key[32] = {0}; int keyLen = 0; @@ -1370,13 +1385,14 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH getTableCacheKey(uid, 0, key, &keyLen); LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen); if (!h) { + STsdb *pTsdb = pr->pVnode->pTsdb; taosThreadMutexLock(&pTsdb->lruMutex); h = taosLRUCacheLookup(pCache, key, keyLen); if (!h) { SArray *pArray = NULL; bool dup = false; // which is always false for now - code = mergeLastRow(uid, pTsdb, &dup, &pArray); + code = mergeLastRow(uid, pTsdb, &dup, &pArray, pr); // if table's empty or error, return code of -1 if (code < 0 || pArray == NULL) { if (!dup && pArray) { @@ -1392,17 +1408,17 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray); _taos_lru_deleter_t deleter = deleteTableCacheLast; - LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, NULL, TAOS_LRU_PRIORITY_LOW); + LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW); if (status != TAOS_LRU_STATUS_OK) { code = -1; } - taosThreadMutexUnlock(&pTsdb->lruMutex); + // taosThreadMutexUnlock(&pTsdb->lruMutex); - h = taosLRUCacheLookup(pCache, key, keyLen); - } else { - taosThreadMutexUnlock(&pTsdb->lruMutex); - } + // h = taosLRUCacheLookup(pCache, key, keyLen); + } // else { + taosThreadMutexUnlock(&pTsdb->lruMutex); + //} } *handle = h; @@ -1434,7 +1450,7 @@ _err: return code; } -int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **handle) { +int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **handle) { int32_t code = 0; char key[32] = {0}; int keyLen = 0; @@ -1443,12 +1459,13 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand getTableCacheKey(uid, 1, key, &keyLen); LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen); if (!h) { + STsdb *pTsdb = pr->pVnode->pTsdb; taosThreadMutexLock(&pTsdb->lruMutex); h = taosLRUCacheLookup(pCache, key, keyLen); if (!h) { SArray *pLastArray = NULL; - code = mergeLast(uid, pTsdb, &pLastArray); + code = mergeLast(uid, pTsdb, &pLastArray, pr); // if table's empty or error, return code of -1 if (code < 0 || pLastArray == NULL) { taosThreadMutexUnlock(&pTsdb->lruMutex); @@ -1460,17 +1477,17 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand size_t charge = pLastArray->capacity * pLastArray->elemSize + sizeof(*pLastArray); _taos_lru_deleter_t deleter = deleteTableCacheLast; LRUStatus status = - taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, NULL, TAOS_LRU_PRIORITY_LOW); + taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW); if (status != TAOS_LRU_STATUS_OK) { code = -1; } - taosThreadMutexUnlock(&pTsdb->lruMutex); + // taosThreadMutexUnlock(&pTsdb->lruMutex); - h = taosLRUCacheLookup(pCache, key, keyLen); - } else { - taosThreadMutexUnlock(&pTsdb->lruMutex); - } + // h = taosLRUCacheLookup(pCache, key, keyLen); + } // else { + taosThreadMutexUnlock(&pTsdb->lruMutex); + //} } *handle = h; diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 83dcbc60c7..7784e6ba0a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -18,18 +18,7 @@ #include "tcommon.h" #include "tsdb.h" -typedef struct SCacheRowsReader { - SVnode* pVnode; - STSchema* pSchema; - uint64_t uid; - char** transferBuf; // todo remove it soon - int32_t numOfCols; - int32_t type; - int32_t tableIndex; // currently returned result tables - SArray* pTableList; // table id list -} SCacheRowsReader; - -#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t)) +#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t)) static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds, void** pRes) { @@ -56,7 +45,7 @@ static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pRea if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { varDataSetLen(p->buf, pColVal->colVal.value.nData); memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData); - p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size + p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size } else { memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes); p->bytes = pReader->pSchema->columns[slotId].bytes; @@ -101,7 +90,8 @@ static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pRea pBlock->info.rows += 1; } -int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) { +int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, uint64_t suid, + void** pReader) { *pReader = NULL; SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader)); @@ -112,6 +102,7 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList p->type = type; p->pVnode = pVnode; p->numOfCols = numOfCols; + p->suid = suid; if (taosArrayGetSize(pTableIdList) == 0) { *pReader = p; @@ -138,6 +129,12 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList } } + p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0); + if (p->pLoadInfo == NULL) { + tsdbCacherowsReaderClose(p); + return TSDB_CODE_OUT_OF_MEMORY; + } + *pReader = p; return TSDB_CODE_SUCCESS; } @@ -154,6 +151,8 @@ void* tsdbCacherowsReaderClose(void* pReader) { taosMemoryFree(p->pSchema); } + destroyLastBlockLoadInfo(p->pLoadInfo); + taosMemoryFree(pReader); return NULL; } @@ -164,9 +163,9 @@ static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint *pRow = NULL; if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) { - code = tsdbCacheGetLastrowH(lruCache, uid, pr->pVnode->pTsdb, h); + code = tsdbCacheGetLastrowH(lruCache, uid, pr, h); } else { - code = tsdbCacheGetLastH(lruCache, uid, pr->pVnode->pTsdb, h); + code = tsdbCacheGetLastH(lruCache, uid, pr, h); } if (code != TSDB_CODE_SUCCESS) { @@ -182,7 +181,7 @@ static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint } static void freeItem(void* pItem) { - SLastCol* pCol = (SLastCol*) pItem; + SLastCol* pCol = (SLastCol*)pItem; if (IS_VAR_DATA_TYPE(pCol->colVal.type)) { taosMemoryFree(pCol->colVal.value.pData); } @@ -223,7 +222,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 for (int32_t i = 0; i < pr->pSchema->numOfCols; ++i) { struct STColumn* pCol = &pr->pSchema->columns[i]; - SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type}; + SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type}; if (IS_VAR_DATA_TYPE(pCol->type)) { p.colVal.value.pData = taosMemoryCalloc(pCol->bytes, sizeof(char)); @@ -231,6 +230,9 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 taosArrayPush(pLastCols, &p); } + tsdbTakeReadSnap(pr->pVnode->pTsdb, &pr->pReadSnap, "cache-l"); + pr->pDataFReader = NULL; + // retrieve the only one last row of all tables in the uid list. if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) { for (int32_t i = 0; i < numOfTables; ++i) { @@ -299,7 +301,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 } else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) { for (int32_t i = pr->tableIndex; i < numOfTables; ++i) { - STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(pr->pTableList, i); + STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(pr->pTableList, i); code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h); if (code != TSDB_CODE_SUCCESS) { return code; @@ -324,7 +326,11 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 code = TSDB_CODE_INVALID_PARA; } - _end: +_end: + tsdbDataFReaderClose(&pr->pDataFReader); + + tsdbUntakeReadSnap(pr->pVnode->pTsdb, pr->pReadSnap, "cache-l"); + for (int32_t j = 0; j < pr->numOfCols; ++j) { taosMemoryFree(pRes[j]); } diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 8c76a3f69b..636e98e380 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -58,17 +58,19 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe // partition by tbname if (taosArrayGetSize(pTableList->pGroupList) == taosArrayGetSize(pTableList->pTableList)) { - pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_ALL|(pScanNode->ignoreNull? CACHESCAN_RETRIEVE_LAST:CACHESCAN_RETRIEVE_LAST_ROW); + pInfo->retrieveType = + CACHESCAN_RETRIEVE_TYPE_ALL | (pScanNode->ignoreNull ? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW); code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList, - taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader); + taosArrayGetSize(pInfo->pColMatchInfo), pTableList->suid, &pInfo->pLastrowReader); if (code != TSDB_CODE_SUCCESS) { goto _error; } pInfo->pBufferredRes = createOneDataBlock(pInfo->pRes, false); blockDataEnsureCapacity(pInfo->pBufferredRes, pOperator->resultInfo.capacity); - } else { // by tags - pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE|(pScanNode->ignoreNull? CACHESCAN_RETRIEVE_LAST:CACHESCAN_RETRIEVE_LAST_ROW); + } else { // by tags + pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE | + (pScanNode->ignoreNull ? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW); } if (pScanNode->scan.pScanPseudoCols != NULL) { @@ -184,7 +186,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { SArray* pGroupTableList = taosArrayGetP(pTableList->pGroupList, pInfo->currentGroupIndex); tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList, - taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader); + taosArrayGetSize(pInfo->pColMatchInfo), pTableList->suid, &pInfo->pLastrowReader); taosArrayClear(pInfo->pUidList); int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList); @@ -265,4 +267,4 @@ int32_t extractTargetSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInf } return TSDB_CODE_SUCCESS; -} \ No newline at end of file +} From 211ab7f3673b6769c2ac86473a8d3faac5de41cb Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 25 Oct 2022 13:34:51 +0800 Subject: [PATCH 03/44] fix: set var meta length to data length when decode data block --- source/common/src/tdatablock.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 64f56212af..4bc91eec64 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1707,8 +1707,6 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) { if (IS_VAR_DATA_TYPE(data.info.type)) { buf = taosDecodeBinary(buf, (void**)&data.varmeta.offset, pBlock->info.rows * sizeof(int32_t)); - data.varmeta.length = pBlock->info.rows * sizeof(int32_t); - data.varmeta.allocLen = data.varmeta.length; } else { buf = taosDecodeBinary(buf, (void**)&data.nullbitmap, BitmapLen(pBlock->info.rows)); } @@ -1717,6 +1715,10 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) { buf = taosDecodeFixedI32(buf, &len); buf = taosDecodeBinary(buf, (void**)&data.pData, len); taosArrayPush(pBlock->pDataBlock, &data); + if (IS_VAR_DATA_TYPE(data.info.type)) { + data.varmeta.length = len; + data.varmeta.allocLen = len; + } } return (void*)buf; } From bc0f120fb0f5b1ea33452f800a497be7ffea655e Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 25 Oct 2022 14:16:31 +0800 Subject: [PATCH 04/44] fix: set values before add it to array --- source/common/src/tdatablock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 4bc91eec64..0c38d43543 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1714,11 +1714,11 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) { int32_t len = 0; buf = taosDecodeFixedI32(buf, &len); buf = taosDecodeBinary(buf, (void**)&data.pData, len); - taosArrayPush(pBlock->pDataBlock, &data); if (IS_VAR_DATA_TYPE(data.info.type)) { data.varmeta.length = len; data.varmeta.allocLen = len; } + taosArrayPush(pBlock->pDataBlock, &data); } return (void*)buf; } From a21de0d76f87cdb76acbf71b6780222eb3e5805f Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 25 Oct 2022 14:22:56 +0800 Subject: [PATCH 05/44] test: add data and topic of taosdshell cases --- tests/system-test/0-others/taosdShell.py | 109 ++++++++++++++--------- tests/system-test/fulltest.sh | 62 ++++++++----- tests/system-test/test.py | 42 ++++++++- 3 files changed, 150 insertions(+), 63 deletions(-) diff --git a/tests/system-test/0-others/taosdShell.py b/tests/system-test/0-others/taosdShell.py index 6d62420efe..fd93958a64 100644 --- a/tests/system-test/0-others/taosdShell.py +++ b/tests/system-test/0-others/taosdShell.py @@ -106,12 +106,39 @@ class TDTestCase: tdLog.printNoPrefix("%s"%taosdCmd) os.system(f"{taosdCmd}") - def run(self): - tdSql.prepare() - # time.sleep(2) - tdSql.query("create user testpy pass 'testpy'") + def preData(self): + # database\stb\tb\chiild-tb\rows\topics + tdSql.execute("create user testpy pass 'testpy'") + tdSql.execute("drop database if exists db0;") + tdSql.execute("create database db0;") + tdSql.execute("use db0;") + tdSql.execute("create table if not exists db0.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") + tdSql.execute("create table db0.ct1 using db0.stb tags(1000);") + tdSql.execute("create table db0.ct2 using db0.stb tags(2000);") + tdSql.execute("create table if not exists db0.ntb (ts timestamp, c1 int, c2 float, c3 double) ;") + tdSql.query("show db0.stables;") + tdSql.execute("insert into db0.ct1 values(now+0s, 10, 2.0, 3.0);") + tdSql.execute("insert into db0.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);") + tdSql.execute("insert into db0.ntb values(now+2s, 10, 2.0, 3.0);") + tdSql.execute("create sma index sma_index_name1 on db0.stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);") + tdSql.execute("create topic tpc1 as select * from db0.ct2; ") - #hostname = socket.gethostname() + + #stream + tdSql.execute("drop database if exists source_db;") + tdSql.query("create database source_db vgroups 3;") + tdSql.query("use source_db") + tdSql.query("create table if not exists source_db.stb (ts timestamp, k int) tags (a int);") + tdSql.query("create table source_db.ct1 using source_db.stb tags(1000);create table source_db.ct2 using source_db.stb tags(2000);create table source_db.ct3 using source_db.stb tags(3000);") + tdSql.query("create stream s1 into source_db.output_stb as select _wstart AS start, min(k), max(k), sum(k) from source_db.stb interval(10m);") + + + + + def run(self): + # tdSql.prepare() + # time.sleep(2) + self.preData() #tdLog.info ("hostname: %s" % hostname) buildPath = self.getBuildPath() @@ -128,54 +155,56 @@ class TDTestCase: # keyDict['h'] = self.hostname # keyDict['c'] = cfgPath # keyDict['P'] = self.serverPort - tdDnodes.stop(1) - - startAction = " --help" + startAction = " -s -c " + taosdCfgPath tdLog.printNoPrefix("================================ parameter: %s"%startAction) self.taosdCommandExe(startAction,taosdCmdRun) - startAction = " -h" - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - self.taosdCommandExe(startAction,taosdCmdRun) + # tdDnodes.stop(1) - startAction=" -a jsonFile:./taosdCaseTmp.json" - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - os.system("echo \'{\"queryPolicy\":\"3\"}\' > taosdCaseTmp.json") - self.taosdCommandStop(startAction,taosdCmdRun) + # startAction = " --help" + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # self.taosdCommandExe(startAction,taosdCmdRun) - startAction = " -a jsonFile:./taosdCaseTmp.json -C " - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - self.taosdCommandExe(startAction,taosdCmdRun) + # startAction = " -h" + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # self.taosdCommandExe(startAction,taosdCmdRun) - os.system("rm -rf taosdCaseTmp.json") + # startAction=" -a jsonFile:./taosdCaseTmp.json" + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # os.system("echo \'{\"queryPolicy\":\"3\"}\' > taosdCaseTmp.json") + # self.taosdCommandStop(startAction,taosdCmdRun) - startAction = " -c " + taosdCfgPath - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - self.taosdCommandStop(startAction,taosdCmdRun) + # startAction = " -a jsonFile:./taosdCaseTmp.json -C " + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # self.taosdCommandExe(startAction,taosdCmdRun) - startAction = " -s" - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - self.taosdCommandExe(startAction,taosdCmdRun) + # os.system("rm -rf taosdCaseTmp.json") - startAction = " -e TAOS_QUERY_POLICY=2 " - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - self.taosdCommandStop(startAction,taosdCmdRun) + # startAction = " -c " + taosdCfgPath + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # self.taosdCommandStop(startAction,taosdCmdRun) - startAction=" -E taosdCaseTmp/.env" - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - os.system(" mkdir -p taosdCaseTmp/.env ") - os.system("echo \'TAOS_QUERY_POLICY=3\' > taosdCaseTmp/.env ") - self.taosdCommandStop(startAction,taosdCmdRun) - os.system(" rm -rf taosdCaseTmp/.env ") - startAction = " -V" - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - self.taosdCommandExe(startAction,taosdCmdRun) + # startAction = " -e TAOS_QUERY_POLICY=2 " + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # self.taosdCommandStop(startAction,taosdCmdRun) - startAction = " -k" - tdLog.printNoPrefix("================================ parameter: %s"%startAction) - self.taosdCommandExe(startAction,taosdCmdRun) + + # startAction=" -E taosdCaseTmp/.env" + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # os.system(" mkdir -p taosdCaseTmp/.env ") + # os.system("echo \'TAOS_QUERY_POLICY=3\' > taosdCaseTmp/.env ") + # self.taosdCommandStop(startAction,taosdCmdRun) + # os.system(" rm -rf taosdCaseTmp/.env ") + + # startAction = " -V" + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # self.taosdCommandExe(startAction,taosdCmdRun) + + # startAction = " -k" + # tdLog.printNoPrefix("================================ parameter: %s"%startAction) + # self.taosdCommandExe(startAction,taosdCmdRun) def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 2c7848d505..92db2700c5 100644 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -13,7 +13,7 @@ python3 ./test.py -f 0-others/udf_restart_taosd.py python3 ./test.py -f 0-others/cachemodel.py python3 ./test.py -f 0-others/udf_cfg1.py python3 ./test.py -f 0-others/udf_cfg2.py - +python3 ./test.py -f 0-others/taosdShell.py -N 5 -M 3 -Q 3 python3 ./test.py -f 0-others/sysinfo.py python3 ./test.py -f 0-others/user_control.py python3 ./test.py -f 0-others/fsync.py @@ -362,7 +362,7 @@ python3 ./test.py -f 2-query/concat.py -Q 2 python3 ./test.py -f 2-query/concat2.py -Q 2 python3 ./test.py -f 2-query/concat_ws.py -Q 2 python3 ./test.py -f 2-query/concat_ws2.py -Q 2 -#python3 ./test.py -f 2-query/check_tsdb.py -Q 2 +python3 ./test.py -f 2-query/check_tsdb.py -Q 2 python3 ./test.py -f 2-query/spread.py -Q 2 python3 ./test.py -f 2-query/hyperloglog.py -Q 2 python3 ./test.py -f 2-query/explain.py -Q 2 @@ -401,13 +401,17 @@ python3 ./test.py -f 2-query/arctan.py -Q 2 python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2 python3 ./test.py -f 2-query/interp.py -Q 2 -# python3 ./test.py -f 2-query/nestedQuery.py -Q 2 -# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery_math.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2 +python3 ./test.py -f 2-query/stablity.py -Q 2 +python3 ./test.py -f 2-query/stablity_1.py -Q 2 python3 ./test.py -f 2-query/avg.py -Q 2 -# python3 ./test.py -f 2-query/elapsed.py -Q 2 +python3 ./test.py -f 2-query/elapsed.py -Q 2 python3 ./test.py -f 2-query/csum.py -Q 2 -#python3 ./test.py -f 2-query/mavg.py -Q 2 +python3 ./test.py -f 2-query/mavg.py -Q 2 python3 ./test.py -f 2-query/sample.py -Q 2 python3 ./test.py -f 2-query/function_diff.py -Q 2 python3 ./test.py -f 2-query/unique.py -Q 2 @@ -431,8 +435,9 @@ python3 ./test.py -f 2-query/count_partition.py -Q 2 python3 ./test.py -f 2-query/max_partition.py -Q 2 python3 ./test.py -f 2-query/last_row.py -Q 2 python3 ./test.py -f 2-query/tsbsQuery.py -Q 2 -#------------querPolicy 3----------- +python3 ./test.py -f 2-query/sml.py -Q 2 +#------------querPolicy 3----------- python3 ./test.py -f 2-query/between.py -Q 3 python3 ./test.py -f 2-query/distinct.py -Q 3 python3 ./test.py -f 2-query/varchar.py -Q 3 @@ -452,7 +457,7 @@ python3 ./test.py -f 2-query/concat.py -Q 3 python3 ./test.py -f 2-query/concat2.py -Q 3 python3 ./test.py -f 2-query/concat_ws.py -Q 3 python3 ./test.py -f 2-query/concat_ws2.py -Q 3 -#python3 ./test.py -f 2-query/check_tsdb.py -Q 3 +python3 ./test.py -f 2-query/check_tsdb.py -Q 3 python3 ./test.py -f 2-query/spread.py -Q 3 python3 ./test.py -f 2-query/hyperloglog.py -Q 3 python3 ./test.py -f 2-query/explain.py -Q 3 @@ -463,7 +468,7 @@ python3 ./test.py -f 2-query/Today.py -Q 3 python3 ./test.py -f 2-query/max.py -Q 3 python3 ./test.py -f 2-query/min.py -Q 3 python3 ./test.py -f 2-query/count.py -Q 3 -#python3 ./test.py -f 2-query/last.py -Q 3 +python3 ./test.py -f 2-query/last.py -Q 3 python3 ./test.py -f 2-query/first.py -Q 3 python3 ./test.py -f 2-query/To_iso8601.py -Q 3 python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 3 @@ -489,12 +494,18 @@ python3 ./test.py -f 2-query/arcsin.py -Q 3 python3 ./test.py -f 2-query/arccos.py -Q 3 python3 ./test.py -f 2-query/arctan.py -Q 3 python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3 -# python3 ./test.py -f 2-query/nestedQuery.py -Q 3 -# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 -# python3 ./test.py -f 2-query/avg.py -Q 3 -# python3 ./test.py -f 2-query/elapsed.py -Q 3 + +python3 ./test.py -f 2-query/nestedQuery.py -Q 3 +python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 +python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3 +python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3 +python3 ./test.py -f 2-query/stablity.py -Q 3 +python3 ./test.py -f 2-query/stablity_1.py -Q 3 + +python3 ./test.py -f 2-query/avg.py -Q 3 +python3 ./test.py -f 2-query/elapsed.py -Q 3 python3 ./test.py -f 2-query/csum.py -Q 3 -#python3 ./test.py -f 2-query/mavg.py -Q 3 +python3 ./test.py -f 2-query/mavg.py -Q 3 python3 ./test.py -f 2-query/sample.py -Q 3 python3 ./test.py -f 2-query/function_diff.py -Q 3 python3 ./test.py -f 2-query/unique.py -Q 3 @@ -543,7 +554,7 @@ python3 ./test.py -f 2-query/concat.py -Q 4 python3 ./test.py -f 2-query/concat2.py -Q 4 python3 ./test.py -f 2-query/concat_ws.py -Q 4 python3 ./test.py -f 2-query/concat_ws2.py -Q 4 -#python3 ./test.py -f 2-query/check_tsdb.py -Q 4 +python3 ./test.py -f 2-query/check_tsdb.py -Q 4 python3 ./test.py -f 2-query/spread.py -Q 4 python3 ./test.py -f 2-query/hyperloglog.py -Q 4 python3 ./test.py -f 2-query/explain.py -Q 4 @@ -554,7 +565,7 @@ python3 ./test.py -f 2-query/Today.py -Q 4 python3 ./test.py -f 2-query/max.py -Q 4 python3 ./test.py -f 2-query/min.py -Q 4 python3 ./test.py -f 2-query/count.py -Q 4 -#python3 ./test.py -f 2-query/last.py -Q 4 +python3 ./test.py -f 2-query/last.py -Q 4 python3 ./test.py -f 2-query/first.py -Q 4 python3 ./test.py -f 2-query/To_iso8601.py -Q 4 python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 4 @@ -580,12 +591,19 @@ python3 ./test.py -f 2-query/arcsin.py -Q 4 python3 ./test.py -f 2-query/arccos.py -Q 4 python3 ./test.py -f 2-query/arctan.py -Q 4 python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 4 -# python3 ./test.py -f 2-query/nestedQuery.py -Q 4 -# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4 -# python3 ./test.py -f 2-query/avg.py -Q 4 -# python3 ./test.py -f 2-query/elapsed.py -Q 4 + +#python3 ./test.py -f 2-query/nestedQuery.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4 +python3 ./test.py -f 2-query/stablity.py -Q 4 +python3 ./test.py -f 2-query/stablity_1.py -Q 4 + + +python3 ./test.py -f 2-query/avg.py -Q 4 +python3 ./test.py -f 2-query/elapsed.py -Q 4 python3 ./test.py -f 2-query/csum.py -Q 4 -#python3 ./test.py -f 2-query/mavg.py -Q 4 +python3 ./test.py -f 2-query/mavg.py -Q 4 python3 ./test.py -f 2-query/sample.py -Q 4 python3 ./test.py -f 2-query/function_diff.py -Q 4 python3 ./test.py -f 2-query/unique.py -Q 4 @@ -609,5 +627,5 @@ python3 ./test.py -f 2-query/count_partition.py -Q 4 python3 ./test.py -f 2-query/max_partition.py -Q 4 python3 ./test.py -f 2-query/last_row.py -Q 4 python3 ./test.py -f 2-query/tsbsQuery.py -Q 4 -#python3 ./test.py -f 2-query/sml.py -Q 4 +python3 ./test.py -f 2-query/sml.py -Q 4 python3 ./test.py -f 2-query/interp.py -Q 4 diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 2f482e4277..dd49f037bd 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -321,7 +321,7 @@ if __name__ == "__main__": for dnode in tdDnodes.dnodes: tdDnodes.starttaosd(dnode.index) tdCases.logSql(logSql) - + if restful: tAdapter.deploy(adapter_cfg_dict) tAdapter.start() @@ -341,6 +341,26 @@ if __name__ == "__main__": print("check dnode ready") except Exception as r: print(r) + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041") + else: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True: conn = None else: @@ -455,6 +475,26 @@ if __name__ == "__main__": except Exception as r: print(r) + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041") + else: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + if testCluster: tdLog.info("Procedures for testing cluster") From bd3c64d29e60dfd199b3ee96283852372ef66a2f Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 25 Oct 2022 14:52:07 +0800 Subject: [PATCH 06/44] enh: interp support for stable --- source/libs/planner/test/planBasicTest.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/planner/test/planBasicTest.cpp b/source/libs/planner/test/planBasicTest.cpp index c5a9a447c7..150df76416 100644 --- a/source/libs/planner/test/planBasicTest.cpp +++ b/source/libs/planner/test/planBasicTest.cpp @@ -103,6 +103,9 @@ TEST_F(PlanBasicTest, interpFunc) { run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)"); run("SELECT _IROWTS, INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)"); + + run("SELECT TBNAME, _IROWTS, INTERP(c1) FROM t1 PARTITION BY TBNAME " + "RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)"); } TEST_F(PlanBasicTest, lastRowFuncWithoutCache) { From c3653e3d136cc24f29fd67e712b3c431fbaf105e Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 25 Oct 2022 14:57:53 +0800 Subject: [PATCH 07/44] fix: udfd ctrl pipe functions only udfd is started by taosd --- source/libs/function/src/udfd.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index f8c4f171ba..56eab35496 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -888,10 +888,11 @@ static int32_t udfdUvInit() { } global.loop = loop; - uv_pipe_init(global.loop, &global.ctrlPipe, 1); - uv_pipe_open(&global.ctrlPipe, 0); - uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb); - + if (tsStartUdfd) { // udfd is started by taosd, which shall exit when taosd exit + uv_pipe_init(global.loop, &global.ctrlPipe, 1); + uv_pipe_open(&global.ctrlPipe, 0); + uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb); + } getUdfdPipeName(global.listenPipeName, sizeof(global.listenPipeName)); removeListeningPipe(); From 07f4f68abaf962865c4389755226a15f85369464 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 25 Oct 2022 14:58:38 +0800 Subject: [PATCH 08/44] test: add data and topic of taosdshell cases --- tests/system-test/0-others/taosdShell.py | 66 ++++++++++++------------ 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/tests/system-test/0-others/taosdShell.py b/tests/system-test/0-others/taosdShell.py index fd93958a64..75fdf0ec8e 100644 --- a/tests/system-test/0-others/taosdShell.py +++ b/tests/system-test/0-others/taosdShell.py @@ -159,52 +159,52 @@ class TDTestCase: tdLog.printNoPrefix("================================ parameter: %s"%startAction) self.taosdCommandExe(startAction,taosdCmdRun) - # tdDnodes.stop(1) + tdDnodes.stop(1) - # startAction = " --help" - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # self.taosdCommandExe(startAction,taosdCmdRun) + startAction = " --help" + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + self.taosdCommandExe(startAction,taosdCmdRun) - # startAction = " -h" - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # self.taosdCommandExe(startAction,taosdCmdRun) + startAction = " -h" + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + self.taosdCommandExe(startAction,taosdCmdRun) - # startAction=" -a jsonFile:./taosdCaseTmp.json" - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # os.system("echo \'{\"queryPolicy\":\"3\"}\' > taosdCaseTmp.json") - # self.taosdCommandStop(startAction,taosdCmdRun) + startAction=" -a jsonFile:./taosdCaseTmp.json" + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + os.system("echo \'{\"queryPolicy\":\"3\"}\' > taosdCaseTmp.json") + self.taosdCommandStop(startAction,taosdCmdRun) - # startAction = " -a jsonFile:./taosdCaseTmp.json -C " - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # self.taosdCommandExe(startAction,taosdCmdRun) + startAction = " -a jsonFile:./taosdCaseTmp.json -C " + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + self.taosdCommandExe(startAction,taosdCmdRun) - # os.system("rm -rf taosdCaseTmp.json") + os.system("rm -rf taosdCaseTmp.json") - # startAction = " -c " + taosdCfgPath - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # self.taosdCommandStop(startAction,taosdCmdRun) + startAction = " -c " + taosdCfgPath + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + self.taosdCommandStop(startAction,taosdCmdRun) - # startAction = " -e TAOS_QUERY_POLICY=2 " - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # self.taosdCommandStop(startAction,taosdCmdRun) + startAction = " -e TAOS_QUERY_POLICY=2 " + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + self.taosdCommandStop(startAction,taosdCmdRun) - # startAction=" -E taosdCaseTmp/.env" - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # os.system(" mkdir -p taosdCaseTmp/.env ") - # os.system("echo \'TAOS_QUERY_POLICY=3\' > taosdCaseTmp/.env ") - # self.taosdCommandStop(startAction,taosdCmdRun) - # os.system(" rm -rf taosdCaseTmp/.env ") + startAction=" -E taosdCaseTmp/.env" + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + os.system(" mkdir -p taosdCaseTmp/.env ") + os.system("echo \'TAOS_QUERY_POLICY=3\' > taosdCaseTmp/.env ") + self.taosdCommandStop(startAction,taosdCmdRun) + os.system(" rm -rf taosdCaseTmp/.env ") - # startAction = " -V" - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # self.taosdCommandExe(startAction,taosdCmdRun) + startAction = " -V" + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + self.taosdCommandExe(startAction,taosdCmdRun) - # startAction = " -k" - # tdLog.printNoPrefix("================================ parameter: %s"%startAction) - # self.taosdCommandExe(startAction,taosdCmdRun) + startAction = " -k" + tdLog.printNoPrefix("================================ parameter: %s"%startAction) + self.taosdCommandExe(startAction,taosdCmdRun) def stop(self): tdSql.close() From 90ed32c43d156f0e510cbfb97498115327eb8008 Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 25 Oct 2022 15:16:31 +0800 Subject: [PATCH 09/44] fix: fix memory usage error --- source/libs/function/src/udfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 56eab35496..088aa62248 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -980,13 +980,13 @@ int32_t udfdDeinitResidentFuncs() { char* funcName = taosArrayGet(global.residentFuncs, i); SUdf** udfInHash = taosHashGet(global.udfsHash, funcName, strlen(funcName)); if (udfInHash) { - taosHashRemove(global.udfsHash, funcName, strlen(funcName)); SUdf* udf = *udfInHash; if (udf->destroyFunc) { (udf->destroyFunc)(); } uv_dlclose(&udf->lib); taosMemoryFree(udf); + taosHashRemove(global.udfsHash, funcName, strlen(funcName)); } } taosArrayDestroy(global.residentFuncs); From bf0a93e8d0414d01da85ad2073c423e3d2c8a3f5 Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 25 Oct 2022 15:24:13 +0800 Subject: [PATCH 10/44] fix: compute udf funcs with all const arguments on server side --- source/libs/scalar/src/scalar.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 7c59c70f30..70d1bb2685 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -1085,7 +1085,8 @@ EDealRes sclRewriteNonConstOperator(SNode **pNode, SScalarCtx *ctx) { EDealRes sclRewriteFunction(SNode **pNode, SScalarCtx *ctx) { SFunctionNode *node = (SFunctionNode *)*pNode; SNode *tnode = NULL; - if (!fmIsScalarFunc(node->funcId) && (!ctx->dual)) { + if ((!fmIsScalarFunc(node->funcId) && (!ctx->dual)) || + fmIsUserDefinedFunc(node->funcId)) { return DEAL_RES_CONTINUE; } From 54a5b122a2bb29779653d063bef13751184e6dd5 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 25 Oct 2022 15:35:46 +0800 Subject: [PATCH 11/44] fix: fix case issue --- tests/system-test/2-query/interp.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 1191c6d7b6..2a8cb8ddbc 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -915,8 +915,8 @@ class TDTestCase: tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") tdSql.checkRows(13) - tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-02 00:00:16') partition by tbname every(1s) fill(null)") - tdSql.checkRows(13) + #tdSql.query(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)") + #tdSql.checkRows(13) #tdSql.query(f"select _irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)") #tdSql.query(f"select tbname,_irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)") From b7056f58b799fef06ab7f7d7043199364fa66482 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 25 Oct 2022 16:59:05 +0800 Subject: [PATCH 12/44] fix: alter table check --- source/libs/parser/src/parTranslater.c | 49 +++++++++++++++++++++- source/libs/planner/src/planLogicCreater.c | 7 ++-- source/libs/planner/src/planOptimizer.c | 22 +++++++++- 3 files changed, 72 insertions(+), 6 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 3f12d2aa66..acb0ebb705 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4847,6 +4847,11 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); } + int32_t tagsLen = 0; + for (int32_t i = 0; i < pTableMeta->tableInfo.numOfTags; ++i) { + tagsLen += pTagsSchema[i].bytes; + } + if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) { if (TSDB_SUPER_TABLE != pTableMeta->tableType) { @@ -4860,7 +4865,38 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable pSchema->bytes >= calcTypeBytes(pStmt->dataType)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL); } + + if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType && + pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) - pSchema->bytes > TSDB_MAX_BYTES_PER_ROW) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW); + } + + if (TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType && + tagsLen + calcTypeBytes(pStmt->dataType) - pSchema->bytes > TSDB_MAX_TAGS_LEN) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_LENGTH, TSDB_MAX_TAGS_LEN); + } } + + if (TSDB_ALTER_TABLE_ADD_COLUMN == pStmt->alterType) { + if (TSDB_MAX_COLUMNS == pTableMeta->tableInfo.numOfColumns) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS); + } + + if (pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) > TSDB_MAX_BYTES_PER_ROW) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW); + } + } + + if (TSDB_ALTER_TABLE_ADD_TAG == pStmt->alterType) { + if (TSDB_MAX_TAGS == pTableMeta->tableInfo.numOfTags) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_NUM); + } + + if (tagsLen + calcTypeBytes(pStmt->dataType) > TSDB_MAX_TAGS_LEN) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_LENGTH, TSDB_MAX_TAGS_LEN); + } + } + return TSDB_CODE_SUCCESS; } @@ -7078,6 +7114,14 @@ static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, S return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN); } + if (TSDB_MAX_COLUMNS == pTableMeta->tableInfo.numOfColumns) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS); + } + + if (pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) > TSDB_MAX_BYTES_PER_ROW) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW); + } + pReq->colName = strdup(pStmt->colName); if (NULL == pReq->colName) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7085,7 +7129,6 @@ static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, S pReq->type = pStmt->dataType.type; pReq->flags = COL_SMA_ON; - // pReq->bytes = pStmt->dataType.bytes; pReq->bytes = calcTypeBytes(pStmt->dataType); return TSDB_CODE_SUCCESS; } @@ -7123,6 +7166,10 @@ static int32_t buildUpdateColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL); } + if (pTableMeta->tableInfo.rowSize + pReq->colModBytes - pSchema->bytes > TSDB_MAX_BYTES_PER_ROW) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW); + } + pReq->colName = strdup(pStmt->colName); if (NULL == pReq->colName) { return TSDB_CODE_OUT_OF_MEMORY; diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 9e1a01b13d..a047495e61 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -624,8 +624,6 @@ static int32_t createIndefRowsFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt return code; } -static bool isInterpFunc(int32_t funcId) { return fmIsInterpFunc(funcId) || fmIsInterpPseudoColumnFunc(funcId); } - static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) { if (!pSelect->hasInterpFunc) { return TSDB_CODE_SUCCESS; @@ -640,7 +638,8 @@ static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p pInterpFunc->node.requireDataOrder = getRequireDataOrder(true, pSelect); pInterpFunc->node.resultDataOrder = pInterpFunc->node.requireDataOrder; - int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, isInterpFunc, &pInterpFunc->pFuncs); + // interp functions and _group_key functions + int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, fmIsVectorFunc, &pInterpFunc->pFuncs); if (TSDB_CODE_SUCCESS == code) { code = rewriteExprsForSelect(pInterpFunc->pFuncs, pSelect, SQL_CLAUSE_SELECT); } @@ -728,7 +727,7 @@ static int32_t createWindowLogicNodeByState(SLogicPlanContext* pCxt, SStateWindo if (TSDB_CODE_SUCCESS == code) { code = createWindowLogicNodeFinalize(pCxt, pSelect, pWindow, pLogicNode); } - + return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 0b38cdb3ba..1cfdb543db 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2242,6 +2242,26 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) { return DEAL_RES_CONTINUE; } +static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCols) { + SNode* pTarget = NULL; + WHERE_EACH(pTarget, pTargets) { + bool found = false; + SNode* pCol = NULL; + FOREACH(pCol, pLastCols) { + if (nodesEqualNode(pCol, pTarget)) { + getLastCacheDataType(&(((SColumnNode*)pTarget)->node.resType)); + found = true; + break; + } + } + if (!found) { + ERASE_NODE(pTargets); + continue; + } + WHERE_NEXT; + } +} + static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) { SAggLogicNode* pAgg = (SAggLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, lastRowScanOptMayBeOptimized); @@ -2276,7 +2296,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic cxt.doAgg = false; nodesWalkExprs(pScan->pScanCols, lastRowScanOptSetColDataType, &cxt); nodesWalkExprs(pScan->pScanPseudoCols, lastRowScanOptSetColDataType, &cxt); - nodesWalkExprs(pScan->node.pTargets, lastRowScanOptSetColDataType, &cxt); + lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols); nodesClearList(cxt.pLastCols); } pAgg->hasLastRow = false; From c0106e9ad616be28b04996252bc6bf35f4284628 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 25 Oct 2022 17:44:13 +0800 Subject: [PATCH 13/44] fix: fix case issue --- tests/system-test/2-query/interp.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 2a8cb8ddbc..f418d1c525 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -909,11 +909,11 @@ class TDTestCase: tdLog.printNoPrefix("==========step12:stable cases") - tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") - tdSql.checkRows(13) + #tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") + #tdSql.checkRows(13) - tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") - tdSql.checkRows(13) + #tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") + #tdSql.checkRows(13) #tdSql.query(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)") #tdSql.checkRows(13) From 4dc0eaf0be4824386b95e20aeb359e70aae91fde Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 25 Oct 2022 17:48:06 +0800 Subject: [PATCH 14/44] fix: zero new dest pages to avoid double free ovfl cell --- source/libs/tdb/src/db/tdbBtree.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 3f36a058e5..19580d486a 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -741,9 +741,13 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx tdbBtreeInitPage(pOldsCopy[i], &iarg, 0); tdbPageCopy(pOlds[i], pOldsCopy[i], 0); } + + for (iNew = 0; iNew < nNews; ++iNew) { + tdbBtreeInitPage(pNews[iNew], &iarg, 0); + } + iNew = 0; nNewCells = 0; - tdbBtreeInitPage(pNews[iNew], &iarg, 0); for (int iOld = 0; iOld < nOlds; iOld++) { SPage *pPage; From 847b2bd37f980841e52469660e9139509f3a76a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Tue, 25 Oct 2022 17:50:08 +0800 Subject: [PATCH 15/44] test: refine query cases --- tests/system-test/2-query/explain.py | 359 ++++++++++++++++++++++++++- 1 file changed, 358 insertions(+), 1 deletion(-) diff --git a/tests/system-test/2-query/explain.py b/tests/system-test/2-query/explain.py index 7ecf12b917..a308f6b3f7 100644 --- a/tests/system-test/2-query/explain.py +++ b/tests/system-test/2-query/explain.py @@ -1,4 +1,6 @@ import datetime +import random +import os from util.log import * from util.sql import * @@ -30,7 +32,11 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) + + self.testcasePath = os.path.split(__file__)[0] + self.testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) def __query_condition(self,tbname): query_condition = [f"{tbname}.{col}" for col in ALL_COL] @@ -132,6 +138,67 @@ class TDTestCase: return return f"explain select {select_clause} from {from_clause} {where_condition} {group_condition}" + def __single_sql_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_ratio(self, select_clause, from_clause, where_condition="", group_condition=""): + ratio = random.uniform(0.001,1) + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain ratio {ratio} select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_ratio_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""): + ratio = random.uniform(0.001,1) + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain ratio {ratio} verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_ratio_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""): + ratio = random.uniform(0.001,1) + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain ratio {ratio} verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_analyze(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain analyze select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_analyze_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain analyze verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_analyze_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain analyze verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_analyze_ratio(self, select_clause, from_clause, where_condition="", group_condition=""): + ratio = random.uniform(0.001,1) + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain analyze ratio {ratio} select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_analyze_ratio_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""): + ratio = random.uniform(0.001,1) + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain analyze ratio {ratio} verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}" + + def __single_sql_analyze_ratio_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""): + ratio = random.uniform(0.001,1) + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]: + return + return f"explain analyze ratio {ratio} verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}" + @property def __tb_list(self, dbname=DBNAME): return [ @@ -158,6 +225,53 @@ class TDTestCase: self.__single_sql(select_claus, tb,), self.__single_sql(select_claus, tb, where_condition=where_claus), self.__single_sql(select_claus, tb, group_condition=group_claus), + self.__single_sql_verbose_true(select_claus, tb, where_claus, having_claus), + self.__single_sql_verbose_true(select_claus, tb,), + self.__single_sql_verbose_true(select_claus, tb, where_condition=where_claus), + self.__single_sql_verbose_true(select_claus, tb, group_condition=group_claus), + self.__single_sql_verbose_false(select_claus, tb, where_claus, having_claus), + self.__single_sql_verbose_false(select_claus, tb,), + self.__single_sql_verbose_false(select_claus, tb, where_condition=where_claus), + self.__single_sql_verbose_false(select_claus, tb, group_condition=group_claus), + + self.__single_sql_ratio(select_claus, tb, where_claus, having_claus), + self.__single_sql_ratio(select_claus, tb,), + self.__single_sql_ratio(select_claus, tb, where_condition=where_claus), + self.__single_sql_ratio(select_claus, tb, group_condition=group_claus), + self.__single_sql_ratio_verbose_true(select_claus, tb, where_claus, having_claus), + self.__single_sql_ratio_verbose_true(select_claus, tb,), + self.__single_sql_ratio_verbose_true(select_claus, tb, where_condition=where_claus), + self.__single_sql_ratio_verbose_true(select_claus, tb, group_condition=group_claus), + self.__single_sql_ratio_verbose_false(select_claus, tb, where_claus, having_claus), + self.__single_sql_ratio_verbose_false(select_claus, tb,), + self.__single_sql_ratio_verbose_false(select_claus, tb, where_condition=where_claus), + self.__single_sql_ratio_verbose_false(select_claus, tb, group_condition=group_claus), + + self.__single_sql_analyze(select_claus, tb, where_claus, having_claus), + self.__single_sql_analyze(select_claus, tb,), + self.__single_sql_analyze(select_claus, tb, where_condition=where_claus), + self.__single_sql_analyze(select_claus, tb, group_condition=group_claus), + self.__single_sql_analyze_verbose_true(select_claus, tb, where_claus, having_claus), + self.__single_sql_analyze_verbose_true(select_claus, tb,), + self.__single_sql_analyze_verbose_true(select_claus, tb, where_condition=where_claus), + self.__single_sql_analyze_verbose_true(select_claus, tb, group_condition=group_claus), + self.__single_sql_analyze_verbose_false(select_claus, tb, where_claus, having_claus), + self.__single_sql_analyze_verbose_false(select_claus, tb,), + self.__single_sql_analyze_verbose_false(select_claus, tb, where_condition=where_claus), + self.__single_sql_analyze_verbose_false(select_claus, tb, group_condition=group_claus), + + self.__single_sql_analyze_ratio(select_claus, tb, where_claus, having_claus), + self.__single_sql_analyze_ratio(select_claus, tb,), + self.__single_sql_analyze_ratio(select_claus, tb, where_condition=where_claus), + self.__single_sql_analyze_ratio(select_claus, tb, group_condition=group_claus), + self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, where_claus, having_claus), + self.__single_sql_analyze_ratio_verbose_true(select_claus, tb,), + self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, where_condition=where_claus), + self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, group_condition=group_claus), + self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, where_claus, having_claus), + self.__single_sql_analyze_ratio_verbose_false(select_claus, tb,), + self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, where_condition=where_claus), + self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, group_condition=group_claus), ) ) @@ -172,6 +286,9 @@ class TDTestCase: tdSql.query(sqls[i]) def __test_current(self, dbname=DBNAME): + + ratio = random.uniform(0.001,1) + tdSql.query(f"explain select {INT_COL} from {dbname}.ct1") tdSql.query(f"explain select 1 from {dbname}.ct2") tdSql.query(f"explain select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") @@ -180,8 +297,111 @@ class TDTestCase: tdSql.query(f"explain select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") self.explain_check() + + # tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.ct1") + # tdSql.query(f"explain verbose true select 1 from {dbname}.ct2") + # tdSql.query(f"explain verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + # tdSql.query(f"explain verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + # tdSql.query(f"explain verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + # tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + # self.explain_check() + + tdSql.query(f"explain verbose false select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain verbose false select 1 from {dbname}.ct2") + tdSql.query(f"explain verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + self.explain_check() + + + tdSql.query(f"explain ratio {ratio} select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain ratio {ratio} select 1 from {dbname}.ct2") + tdSql.query(f"explain ratio {ratio} select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain ratio {ratio} select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain ratio {ratio} select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain ratio {ratio} select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + self.explain_check() + + # tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1") + # tdSql.query(f"explain ratio {ratio} verbose true select 1 from {dbname}.ct2") + # tdSql.query(f"explain ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + # tdSql.query(f"explain ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + # tdSql.query(f"explain ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + # tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + # self.explain_check() + + tdSql.query(f"explain ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain ratio {ratio} verbose false select 1 from {dbname}.ct2") + tdSql.query(f"explain ratio {ratio} verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain ratio {ratio} verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain ratio {ratio} verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain ratio {ratio} verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + self.explain_check() + + tdSql.query(f"explain analyze select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain analyze select 1 from {dbname}.ct2") + tdSql.query(f"explain analyze select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain analyze select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain analyze select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain analyze select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + self.explain_check() + + # tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.ct1") + # tdSql.query(f"explain analyze verbose true select 1 from {dbname}.ct2") + # tdSql.query(f"explain analyze verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + # tdSql.query(f"explain analyze verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + # tdSql.query(f"explain analyze verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + # tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + # self.explain_check() + + tdSql.query(f"explain analyze verbose false select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain analyze verbose false select 1 from {dbname}.ct2") + tdSql.query(f"explain analyze verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain analyze verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain analyze verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain analyze verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + self.explain_check() + + + tdSql.query(f"explain analyze ratio {ratio} select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain analyze ratio {ratio} select 1 from {dbname}.ct2") + tdSql.query(f"explain analyze ratio {ratio} select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain analyze ratio {ratio} select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain analyze ratio {ratio} select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain analyze ratio {ratio} select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + self.explain_check() + + # tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1") + # tdSql.query(f"explain analyze ratio {ratio} verbose true select 1 from {dbname}.ct2") + # tdSql.query(f"explain analyze ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + # tdSql.query(f"explain analyze ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + # tdSql.query(f"explain analyze ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + # tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + # self.explain_check() + + tdSql.query(f"explain analyze ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain analyze ratio {ratio} verbose false select 1 from {dbname}.ct2") + tdSql.query(f"explain analyze ratio {ratio} verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain analyze ratio {ratio} verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain analyze ratio {ratio} verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain analyze ratio {ratio} verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + + self.explain_check() def __test_error(self, dbname=DBNAME): + + ratio = random.uniform(0.001,1) tdLog.printNoPrefix("===step 0: err case, must return err") tdSql.error( f"explain select hyperloglog({INT_COL}) from {dbname}.ct8" ) @@ -195,6 +415,143 @@ class TDTestCase: where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain verbose true show databases " ) + tdSql.error( f"explain verbose true show {dbname}.stables " ) + tdSql.error( f"explain verbose true show {dbname}.tables " ) + tdSql.error( f"explain verbose true show {dbname}.vgroups " ) + tdSql.error( f"explain verbose true show dnodes " ) + tdSql.error( f'''explain verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain verbose false show databases " ) + tdSql.error( f"explain verbose false show {dbname}.stables " ) + tdSql.error( f"explain verbose false show {dbname}.tables " ) + tdSql.error( f"explain verbose false show {dbname}.vgroups " ) + tdSql.error( f"explain verbose false show dnodes " ) + tdSql.error( f'''explain verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( f"explain ratio {ratio} select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain ratio {ratio} show databases " ) + tdSql.error( f"explain ratio {ratio} show {dbname}.stables " ) + tdSql.error( f"explain ratio {ratio} show {dbname}.tables " ) + tdSql.error( f"explain ratio {ratio} show {dbname}.vgroups " ) + tdSql.error( f"explain ratio {ratio} show dnodes " ) + tdSql.error( f'''explain ratio {ratio} select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain ratio {ratio} verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain ratio {ratio} verbose true show databases " ) + tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.stables " ) + tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.tables " ) + tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.vgroups " ) + tdSql.error( f"explain ratio {ratio} verbose true show dnodes " ) + tdSql.error( f'''explain ratio {ratio} verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain ratio {ratio} verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain ratio {ratio} verbose false show databases " ) + tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.stables " ) + tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.tables " ) + tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.vgroups " ) + tdSql.error( f"explain ratio {ratio} verbose false show dnodes " ) + tdSql.error( f'''explain ratio {ratio} verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( f"explain analyze select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain analyze show databases " ) + tdSql.error( f"explain analyze show {dbname}.stables " ) + tdSql.error( f"explain analyze show {dbname}.tables " ) + tdSql.error( f"explain analyze show {dbname}.vgroups " ) + tdSql.error( f"explain analyze show dnodes " ) + tdSql.error( f'''explain analyze select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain analyze verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain analyze verbose true show databases " ) + tdSql.error( f"explain analyze verbose true show {dbname}.stables " ) + tdSql.error( f"explain analyze verbose true show {dbname}.tables " ) + tdSql.error( f"explain analyze verbose true show {dbname}.vgroups " ) + tdSql.error( f"explain analyze verbose true show dnodes " ) + tdSql.error( f'''explain analyze verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain analyze verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain analyze verbose false show databases " ) + tdSql.error( f"explain analyze verbose false show {dbname}.stables " ) + tdSql.error( f"explain analyze verbose false show {dbname}.tables " ) + tdSql.error( f"explain analyze verbose false show {dbname}.vgroups " ) + tdSql.error( f"explain analyze verbose false show dnodes " ) + tdSql.error( f'''explain analyze verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( f"explain analyze ratio {ratio} select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain analyze ratio {ratio} show databases " ) + tdSql.error( f"explain analyze ratio {ratio} show {dbname}.stables " ) + tdSql.error( f"explain analyze ratio {ratio} show {dbname}.tables " ) + tdSql.error( f"explain analyze ratio {ratio} show {dbname}.vgroups " ) + tdSql.error( f"explain analyze ratio {ratio} show dnodes " ) + tdSql.error( f'''explain analyze ratio {ratio} select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain analyze ratio {ratio} verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain analyze ratio {ratio} verbose true show databases " ) + tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.stables " ) + tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.tables " ) + tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.vgroups " ) + tdSql.error( f"explain analyze ratio {ratio} verbose true show dnodes " ) + tdSql.error( f'''explain analyze ratio {ratio} verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + + tdSql.error( f"explain analyze ratio {ratio} verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" ) + tdSql.error( f"explain analyze ratio {ratio} verbose false show databases " ) + tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.stables " ) + tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.tables " ) + tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.vgroups " ) + tdSql.error( f"explain analyze ratio {ratio} verbose false show dnodes " ) + tdSql.error( f'''explain analyze ratio {ratio} verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']) + from {dbname}.ct1 + where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null + group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] + having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' ) + def all_test(self): self.__test_error() From 796cf764f449a84883801a183f11abfa737f1408 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 25 Oct 2022 17:54:01 +0800 Subject: [PATCH 16/44] fix(executor):add lock to hash map --- source/dnode/vnode/src/tq/tqRead.c | 4 ++-- source/libs/executor/src/executor.c | 2 +- source/libs/executor/src/executorimpl.c | 13 ++++++------- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 0e8b366113..2f6ec0c39f 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -525,7 +525,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) { if (pReader->tbIdHash) { taosHashClear(pReader->tbIdHash); } else { - pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); } if (pReader->tbIdHash == NULL) { @@ -543,7 +543,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) { int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) { if (pReader->tbIdHash == NULL) { - pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); if (pReader->tbIdHash == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 0312105ca8..fb428406f4 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -233,7 +233,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo } if (pListInfo->map == NULL) { - pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); } // traverse to the stream scanner node to add this table id diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 2432944928..cf62f54352 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -2301,8 +2301,8 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pDummyBlock->pDataBlock); pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL, - destroyExchangeOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL, destroyExchangeOperatorInfo, NULL); return pOperator; _error: @@ -3026,8 +3026,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, NULL, destroyAggOperatorInfo, - NULL); + pOperator->fpSet = + createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, NULL, destroyAggOperatorInfo, NULL); if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) { STableScanInfo* pTableScanInfo = downstream->info; @@ -3253,8 +3253,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroyFillOperatorInfo, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroyFillOperatorInfo, NULL); code = appendDownstream(pOperator, &downstream, 1); return pOperator; @@ -3443,7 +3442,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, return TDB_CODE_SUCCESS; } - pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); if (pTableListInfo->map == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } From c09c82abd90601a0000f51ca3b5916bb0790f373 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 25 Oct 2022 17:57:08 +0800 Subject: [PATCH 17/44] test: modify the way to stop taosd'cluster --- tests/system-test/0-others/taosdShell.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/system-test/0-others/taosdShell.py b/tests/system-test/0-others/taosdShell.py index 75fdf0ec8e..e09c8edce9 100644 --- a/tests/system-test/0-others/taosdShell.py +++ b/tests/system-test/0-others/taosdShell.py @@ -14,6 +14,7 @@ from util.log import * from util.sql import * from util.cases import * from util.dnodes import * +from util.cluster import * class TDTestCase: #updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\ @@ -155,11 +156,15 @@ class TDTestCase: # keyDict['h'] = self.hostname # keyDict['c'] = cfgPath # keyDict['P'] = self.serverPort + tdDnodes=cluster.dnodes + for i in range(5): + tdDnodes[i].stoptaosd() + + startAction = " -s -c " + taosdCfgPath tdLog.printNoPrefix("================================ parameter: %s"%startAction) self.taosdCommandExe(startAction,taosdCmdRun) - tdDnodes.stop(1) startAction = " --help" tdLog.printNoPrefix("================================ parameter: %s"%startAction) From b07670a6802e2e1fae2b77e1f055c867e27aaad1 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Tue, 25 Oct 2022 18:03:09 +0800 Subject: [PATCH 18/44] enh: improve test case coverage of tsma --- include/common/tdatablock.h | 2 +- source/common/src/tdatablock.c | 6 +++--- source/dnode/vnode/src/sma/smaRollup.c | 2 +- source/dnode/vnode/src/vnd/vnodeSvr.c | 2 +- tests/script/sh/deploy.bat | 1 + tests/script/sh/deploy.sh | 1 + tests/script/tsim/sma/tsmaCreateInsertQuery.sim | 5 +++-- 7 files changed, 11 insertions(+), 8 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 24dfa5958d..953762ff65 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -245,7 +245,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_ const char* blockDecode(SSDataBlock* pBlock, const char* pData); void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag); -void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag); +void blockDebugShowDataBlocks(const SArray* dataBlocks, bool isPtr, const char* flag); // for debug char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 64f56212af..d597acae62 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1797,15 +1797,15 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag) { SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock*)); taosArrayPush(dataBlocks, &pBlock); - blockDebugShowDataBlocks(dataBlocks, flag); + blockDebugShowDataBlocks(dataBlocks, true, flag); taosArrayDestroy(dataBlocks); } -void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) { +void blockDebugShowDataBlocks(const SArray* dataBlocks, bool isPtr, const char* flag) { char pBuf[128] = {0}; int32_t sz = taosArrayGetSize(dataBlocks); for (int32_t i = 0; i < sz; i++) { - SSDataBlock* pDataBlock = taosArrayGetP(dataBlocks, i); + SSDataBlock* pDataBlock = isPtr ? taosArrayGetP(dataBlocks, i) : taosArrayGet(dataBlocks, i); size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index c56b32514a..450fd8a8df 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -697,7 +697,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma #if 0 char flag[10] = {0}; snprintf(flag, 10, "level %" PRIi8, pItem->level); - blockDebugShowDataBlocks(pResList, flag); + blockDebugShowDataBlocks(pResList, true, flag); #endif for (int32_t i = 0; i < taosArrayGetSize(pResList); ++i) { SSDataBlock *output = taosArrayGetP(pResList, i); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c8089ead99..56dca6a0f8 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -407,7 +407,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { // TODO: remove the function void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { // TODO - // blockDebugShowDataBlocks(data, __func__); + // blockDebugShowDataBlocks(data, false, __func__); tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data); } diff --git a/tests/script/sh/deploy.bat b/tests/script/sh/deploy.bat index 4b221dd479..38e7022ede 100644 --- a/tests/script/sh/deploy.bat +++ b/tests/script/sh/deploy.bat @@ -85,3 +85,4 @@ echo statusInterval 1 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% echo telemetryReporting 0 >> %TAOS_CFG% +echo querySmaOptimize 1 >> %TAOS_CFG% diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 606afe164b..cc707b2ed0 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -144,4 +144,5 @@ echo "numOfLogLines 20000000" >> $TAOS_CFG echo "asyncLog 0" >> $TAOS_CFG echo "locale en_US.UTF-8" >> $TAOS_CFG echo "telemetryReporting 0" >> $TAOS_CFG +echo "querySmaOptimize 1" >> $TAOS_CFG echo " " >> $TAOS_CFG diff --git a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim index 7aef9e0f86..442b4970e4 100644 --- a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim @@ -30,12 +30,13 @@ sql insert into ct1 values('2022-10-19 09:55:46.682', 11, 2.1, 3.1)('2022-10-19 print =============== create sma index from super table -sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(5m,10s) sliding(5m) +sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(5m,10s) sliding(5m) watermark 1s max_delay 1s print $data00 $data01 $data02 $data03 print =============== trigger stream to execute sma aggr task and insert sma data into sma store sql insert into ct1 values('2022-10-19 09:55:50.682', 20, 20.0, 30.0) -#=================================================================== +#==================== sleep 2s to wait for tsma result +sleep 2000 print =============== show streams ================================ sql show streams; From 64f539cacb1462a19441474f0e4e9e233255d1f0 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 25 Oct 2022 18:03:22 +0800 Subject: [PATCH 19/44] refactor(sync): adjust timer --- source/libs/sync/inc/syncInt.h | 1 - source/libs/sync/src/syncMain.c | 84 ++++++++++-------------------- source/libs/sync/src/syncTimeout.c | 4 +- 3 files changed, 29 insertions(+), 60 deletions(-) diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index a158430a0f..5f4a6f21b4 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -155,7 +155,6 @@ typedef struct SSyncNode { tmr_h pElectTimer; int32_t electTimerMS; uint64_t electTimerLogicClock; - uint64_t electTimerLogicClockUser; TAOS_TMR_CALLBACK FpElectTimerCB; // Timer Fp uint64_t electTimerCounter; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 09584077f1..29c82bcd8c 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -1310,7 +1310,6 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { pSyncNode->pElectTimer = NULL; pSyncNode->electTimerMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine); atomic_store_64(&pSyncNode->electTimerLogicClock, 0); - atomic_store_64(&pSyncNode->electTimerLogicClockUser, 0); pSyncNode->FpElectTimerCB = syncNodeEqElectTimer; pSyncNode->electTimerCounter = 0; @@ -1567,15 +1566,6 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) { pSyncNode->electTimerMS = ms; taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pElectTimer); - atomic_store_64(&pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser); - - /* - do { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "elect timer reset, ms:%d", ms); - syncNodeEventLog(pSyncNode, logBuf); - } while (0); - */ } else { sError("vgId:%d, start elect timer error, sync env is stop", pSyncNode->vgId); @@ -1585,11 +1575,10 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) { int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode) { int32_t ret = 0; - atomic_add_fetch_64(&pSyncNode->electTimerLogicClockUser, 1); + atomic_add_fetch_64(&pSyncNode->electTimerLogicClock, 1); taosTmrStop(pSyncNode->pElectTimer); pSyncNode->pElectTimer = NULL; - // sTrace("vgId:%d, sync %s stop elect timer", pSyncNode->vgId, syncUtilState2String(pSyncNode->state)); return ret; } @@ -1815,8 +1804,6 @@ cJSON* syncNode2Json(const SSyncNode* pSyncNode) { cJSON_AddNumberToObject(pRoot, "electTimerMS", pSyncNode->electTimerMS); snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pSyncNode->electTimerLogicClock); cJSON_AddStringToObject(pRoot, "electTimerLogicClock", u64buf); - snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pSyncNode->electTimerLogicClockUser); - cJSON_AddStringToObject(pRoot, "electTimerLogicClockUser", u64buf); snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpElectTimerCB); cJSON_AddStringToObject(pRoot, "FpElectTimerCB", u64buf); snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pSyncNode->electTimerCounter); @@ -1922,7 +1909,7 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) { snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), - pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr); + pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr); } else { snprintf(logBuf, sizeof(logBuf), "%s", str); } @@ -1946,7 +1933,7 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) { snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), - pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr); + pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr); } else { snprintf(s, len, "%s", str); } @@ -2000,7 +1987,7 @@ inline void syncNodeErrorLog(const SSyncNode* pSyncNode, char* str) { snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), - pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, printStr); + pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, printStr); } else { snprintf(logBuf, sizeof(logBuf), "%s", str); } @@ -2022,7 +2009,7 @@ inline void syncNodeErrorLog(const SSyncNode* pSyncNode, char* str) { snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), - pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, printStr); + pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, printStr); } else { snprintf(s, len, "%s", str); } @@ -2790,36 +2777,31 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) { static void syncNodeEqElectTimer(void* param, void* tmrId) { SSyncNode* pSyncNode = (SSyncNode*)param; - if (atomic_load_64(&pSyncNode->electTimerLogicClockUser) <= atomic_load_64(&pSyncNode->electTimerLogicClock)) { - SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_ELECTION, atomic_load_64(&pSyncNode->electTimerLogicClock), - pSyncNode->electTimerMS, pSyncNode->vgId, pSyncNode); - SRpcMsg rpcMsg; - syncTimeout2RpcMsg(pSyncMsg, &rpcMsg); - syncRpcMsgLog2((char*)"==syncNodeEqElectTimer==", &rpcMsg); - if (pSyncNode->FpEqMsg != NULL) { - int32_t code = pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg); - if (code != 0) { - sError("vgId:%d, sync enqueue elect msg error, code:%d", pSyncNode->vgId, code); - rpcFreeCont(rpcMsg.pCont); - syncTimeoutDestroy(pSyncMsg); - return; - } - } else { - sTrace("syncNodeEqElectTimer FpEqMsg is NULL"); - } - syncTimeoutDestroy(pSyncMsg); - // reset timer ms - if (syncEnvIsStart() && pSyncNode->electBaseLine > 0) { - pSyncNode->electTimerMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine); - taosTmrReset(syncNodeEqElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager, - &pSyncNode->pElectTimer); - } else { - sError("sync env is stop, syncNodeEqElectTimer"); + SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_ELECTION, atomic_load_64(&pSyncNode->electTimerLogicClock), + pSyncNode->electTimerMS, pSyncNode->vgId, pSyncNode); + SRpcMsg rpcMsg; + syncTimeout2RpcMsg(pSyncMsg, &rpcMsg); + if (pSyncNode->FpEqMsg != NULL) { + int32_t code = pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg); + if (code != 0) { + sError("vgId:%d, sync enqueue elect msg error, code:%d", pSyncNode->vgId, code); + rpcFreeCont(rpcMsg.pCont); + syncTimeoutDestroy(pSyncMsg); + return; } } else { - sTrace("==syncNodeEqElectTimer== electTimerLogicClock:%" PRIu64 ", electTimerLogicClockUser:%" PRIu64, - pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser); + sTrace("syncNodeEqElectTimer FpEqMsg is NULL"); + } + syncTimeoutDestroy(pSyncMsg); + + // reset timer ms + if (syncEnvIsStart() && pSyncNode->electBaseLine > 0) { + pSyncNode->electTimerMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine); + taosTmrReset(syncNodeEqElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager, + &pSyncNode->pElectTimer); + } else { + sError("sync env is stop, syncNodeEqElectTimer"); } } @@ -3000,16 +2982,6 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) { // on message ---- int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) { // log state - char logBuf[1024] = {0}; - snprintf(logBuf, sizeof(logBuf), - "==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%" PRIu64 " electTimerLogicClock:%" PRIu64 - ", " - "electTimerLogicClockUser:%" PRIu64 ", electTimerMS:%d", - ths->vgId, ths->state, syncUtilState2String(ths->state), ths->pRaftStore->currentTerm, - ths->electTimerLogicClock, ths->electTimerLogicClockUser, ths->electTimerMS); - - int32_t ret = 0; - syncPingLog2(logBuf, pMsg); SyncPingReply* pMsgReply = syncPingReplyBuild3(&ths->myRaftId, &pMsg->srcId, ths->vgId); SRpcMsg rpcMsg; syncPingReply2RpcMsg(pMsgReply, &rpcMsg); @@ -3024,7 +2996,7 @@ int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) { syncNodeSendMsgById(&pMsgReply->destId, ths, &rpcMsg); syncPingReplyDestroy(pMsgReply); - return ret; + return 0; } int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg) { diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c index 17c8c14136..5ff73a6406 100644 --- a/source/libs/sync/src/syncTimeout.c +++ b/source/libs/sync/src/syncTimeout.c @@ -113,10 +113,8 @@ int32_t syncNodeOnTimer(SSyncNode* ths, SyncTimeout* pMsg) { } } else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) { - if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) { + if (atomic_load_64(&ths->electTimerLogicClock) <= pMsg->logicClock) { ++(ths->electTimerCounter); - sTrace("vgId:%d, sync timer, type:election count:%" PRIu64 ", lc-user:%" PRIu64, ths->vgId, - ths->electTimerCounter, ths->electTimerLogicClockUser); syncNodeElect(ths); } From 12743a17afb6cf9fe2c8acf103072b0c0bc13cb5 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Tue, 25 Oct 2022 18:12:38 +0800 Subject: [PATCH 20/44] chore: revert the code change --- include/common/tdatablock.h | 2 +- source/common/src/tdatablock.c | 6 +++--- source/dnode/vnode/src/sma/smaRollup.c | 2 +- source/dnode/vnode/src/vnd/vnodeSvr.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 953762ff65..24dfa5958d 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -245,7 +245,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_ const char* blockDecode(SSDataBlock* pBlock, const char* pData); void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag); -void blockDebugShowDataBlocks(const SArray* dataBlocks, bool isPtr, const char* flag); +void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag); // for debug char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d597acae62..64f56212af 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1797,15 +1797,15 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag) { SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock*)); taosArrayPush(dataBlocks, &pBlock); - blockDebugShowDataBlocks(dataBlocks, true, flag); + blockDebugShowDataBlocks(dataBlocks, flag); taosArrayDestroy(dataBlocks); } -void blockDebugShowDataBlocks(const SArray* dataBlocks, bool isPtr, const char* flag) { +void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) { char pBuf[128] = {0}; int32_t sz = taosArrayGetSize(dataBlocks); for (int32_t i = 0; i < sz; i++) { - SSDataBlock* pDataBlock = isPtr ? taosArrayGetP(dataBlocks, i) : taosArrayGet(dataBlocks, i); + SSDataBlock* pDataBlock = taosArrayGetP(dataBlocks, i); size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 450fd8a8df..c56b32514a 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -697,7 +697,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma #if 0 char flag[10] = {0}; snprintf(flag, 10, "level %" PRIi8, pItem->level); - blockDebugShowDataBlocks(pResList, true, flag); + blockDebugShowDataBlocks(pResList, flag); #endif for (int32_t i = 0; i < taosArrayGetSize(pResList); ++i) { SSDataBlock *output = taosArrayGetP(pResList, i); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 56dca6a0f8..c8089ead99 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -407,7 +407,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { // TODO: remove the function void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { // TODO - // blockDebugShowDataBlocks(data, false, __func__); + // blockDebugShowDataBlocks(data, __func__); tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data); } From 16f9c172ac07ff5dbe73b59b3f1346a914d10788 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 25 Oct 2022 18:20:20 +0800 Subject: [PATCH 21/44] fix: fix merge join crash issue --- source/libs/command/src/explain.c | 7 ++++++- tests/script/tsim/query/explain.sim | 4 +++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index e0a4218243..80a524496c 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -685,7 +685,12 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_NEW(level + 1, EXPLAIN_ON_CONDITIONS_FORMAT); QRY_ERR_RET( - nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + nodesNodeToSQL(pJoinNode->pMergeCondition, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + if (pJoinNode->pOnConditions) { + EXPLAIN_ROW_APPEND(" AND "); + QRY_ERR_RET( + nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + } EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); } diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim index 2871252d91..78d905233b 100644 --- a/tests/script/tsim/query/explain.sim +++ b/tests/script/tsim/query/explain.sim @@ -47,6 +47,7 @@ sql explain verbose true select ts from tb1 where f1 > 0; sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00'; sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1; sql explain verbose true select * from information_schema.ins_stables where db_name='db2'; +sql explain verbose true select st1.f1 from st1 join st2 on st1.ts=st2.ts and st1.f1 > 0; print ======== step4 sql explain analyze select ts from st1 where -2; @@ -75,6 +76,7 @@ sql explain analyze verbose true select f1 from st1 where f1 > 0 and ts > '2020- sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2'; sql explain analyze verbose true select * from (select min(f1),count(*) a from st1 where f1 > 0) where a < 0; sql explain analyze verbose true select count(f1) from st1 group by tbname; +sql explain analyze verbose true select st1.f1 from st1 join st2 on st1.ts=st2.ts and st1.f1 > 0; #not pass case #sql explain verbose true select count(*),sum(f1) as aa from tb1 where (f1 > 0 or f1 < -1) and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00' order by aa; @@ -95,4 +97,4 @@ sql explain analyze verbose true select count(f1) from st1 group by tbname; #sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m); -system sh/exec.sh -n dnode1 -s stop -x SIGINT +#system sh/exec.sh -n dnode1 -s stop -x SIGINT From d63eee8b49d848ee8f048753aab478484c4e3b6b Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 25 Oct 2022 19:00:46 +0800 Subject: [PATCH 22/44] fix: alter table check --- source/libs/planner/src/planOptimizer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 1cfdb543db..9c9a7cfebb 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2285,6 +2285,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic } if (FUNCTION_TYPE_LAST == funcType) { nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptSetColDataType, &cxt); + nodesListErase(pFunc->pParameterList, nodesListGetCell(pFunc->pParameterList, 1)); } } } @@ -2294,7 +2295,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic pScan->igLastNull = pAgg->hasLast ? true : false; if (NULL != cxt.pLastCols) { cxt.doAgg = false; - nodesWalkExprs(pScan->pScanCols, lastRowScanOptSetColDataType, &cxt); + lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols); nodesWalkExprs(pScan->pScanPseudoCols, lastRowScanOptSetColDataType, &cxt); lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols); nodesClearList(cxt.pLastCols); From 01b712fbfd43c7037b105298bfadebbc0077d6e6 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 25 Oct 2022 19:11:28 +0800 Subject: [PATCH 23/44] refactor(sync): add trace log --- source/libs/sync/src/syncMain.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 29c82bcd8c..6496beea93 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2790,6 +2790,12 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { syncTimeoutDestroy(pSyncMsg); return; } + + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "eq elect timer lc:%ld", pSyncMsg->logicClock); + } while (0); + } else { sTrace("syncNodeEqElectTimer FpEqMsg is NULL"); } From 05a34b53109a09ccf3457260ee85b0ea1b8c689e Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 25 Oct 2022 19:14:43 +0800 Subject: [PATCH 24/44] modify start api for no remove taosdlog file --- tests/system-test/7-tmq/tmqError.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/system-test/7-tmq/tmqError.py b/tests/system-test/7-tmq/tmqError.py index 9afcfaf968..2d7b464025 100644 --- a/tests/system-test/7-tmq/tmqError.py +++ b/tests/system-test/7-tmq/tmqError.py @@ -240,12 +240,14 @@ class TDTestCase: time.sleep(3) tdLog.info("================= stop dnode, and remove data file, then start dnode ===========================") tdDnodes.stop(1) + # time.sleep(5) dataPath = buildPath + "/../sim/dnode1/data/*" shellCmd = 'rm -rf ' + dataPath tdLog.info(shellCmd) os.system(shellCmd) - tdDnodes.start(1) + #tdDnodes.start(1) + tdDnodes.starttaosd(1) time.sleep(2) ######### redo to consume From 96a8a80cd79605e9e634d98ef1d5d77453325359 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 25 Oct 2022 19:22:25 +0800 Subject: [PATCH 25/44] refactor(sync): add trace log --- source/libs/sync/src/syncMain.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 6496beea93..b4cf27a15c 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2794,6 +2794,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { do { char logBuf[128]; snprintf(logBuf, sizeof(logBuf), "eq elect timer lc:%ld", pSyncMsg->logicClock); + syncNodeEventLog(pSyncNode, logBuf); } while (0); } else { From c449e49087dd37c6ca378bd2732fe53ebdf5fdc3 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 25 Oct 2022 19:38:22 +0800 Subject: [PATCH 26/44] fix: alter table check --- tests/script/tsim/parser/alter_column.sim | 2 +- tests/script/tsim/parser/alter_stable.sim | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/script/tsim/parser/alter_column.sim b/tests/script/tsim/parser/alter_column.sim index 3b6f0e4da7..c70a604c73 100644 --- a/tests/script/tsim/parser/alter_column.sim +++ b/tests/script/tsim/parser/alter_column.sim @@ -55,7 +55,7 @@ sql_error alter table tb modify column c3 nchar(10); sql_error alter table tb modify column c3 nchar(0); sql_error alter table tb modify column c3 nchar(-1); sql_error alter table tb modify column c3 binary(80); -sql alter table tb modify column c3 nchar(17000); +sql_error alter table tb modify column c3 nchar(17000); sql_error alter table tb modify column c3 nchar(100), c2 binary(30); sql_error alter table tb modify column c1 nchar(100), c2 binary(30); sql_error alter stable tb modify column c2 binary(30); diff --git a/tests/script/tsim/parser/alter_stable.sim b/tests/script/tsim/parser/alter_stable.sim index b6e1a751b9..6bf71b4365 100644 --- a/tests/script/tsim/parser/alter_stable.sim +++ b/tests/script/tsim/parser/alter_stable.sim @@ -34,7 +34,7 @@ sql alter table tb1 set tag len = 379 # case TD-5594 sql create stable st5520(ts timestamp, f int) tags(t0 bool, t1 nchar(4093), t2 nchar(1)) -sql alter stable st5520 modify tag t2 nchar(2); +sql_error alter stable st5520 modify tag t2 nchar(2); # test end sql drop database $db From 9caea0f9445ee2b98fe85a5f0daf5a47ef53f0ca Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 25 Oct 2022 19:43:07 +0800 Subject: [PATCH 27/44] refactor(sync): adjust elect timer --- source/libs/sync/src/syncMain.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index b4cf27a15c..f2d1eaf3e7 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2802,6 +2802,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { } syncTimeoutDestroy(pSyncMsg); +#if 0 // reset timer ms if (syncEnvIsStart() && pSyncNode->electBaseLine > 0) { pSyncNode->electTimerMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine); @@ -2810,6 +2811,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { } else { sError("sync env is stop, syncNodeEqElectTimer"); } +#endif } static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) { From 49af601e19f4ef26f33d082b043ba1826dc76ec1 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 25 Oct 2022 19:56:49 +0800 Subject: [PATCH 28/44] refactor(sync): add SElectTimer --- source/libs/sync/inc/syncInt.h | 6 ++++++ source/libs/sync/src/syncMain.c | 18 ++++++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 5f4a6f21b4..ae053328ab 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -79,6 +79,12 @@ typedef struct SSyncTimer { void* pData; } SSyncTimer; +typedef struct SElectTimer { + uint64_t logicClock; + SSyncNode* pSyncNode; + void* pData; +} SElectTimer; + int32_t syncHbTimerInit(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer, SRaftId destId); int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer); int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index f2d1eaf3e7..f2e25a023b 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -1564,7 +1564,13 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) { int32_t ret = 0; if (syncEnvIsStart()) { pSyncNode->electTimerMS = ms; - taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager, + + SElectTimer* pElectTimer = taosMemoryMalloc(sizeof(SElectTimer)); + pElectTimer->logicClock = pSyncNode->electTimerLogicClock; + pElectTimer->pSyncNode = pSyncNode; + pElectTimer->pData = NULL; + + taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pElectTimer, gSyncEnv->pTimerManager, &pSyncNode->pElectTimer); } else { @@ -2776,10 +2782,11 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) { } static void syncNodeEqElectTimer(void* param, void* tmrId) { - SSyncNode* pSyncNode = (SSyncNode*)param; + SElectTimer* pElectTimer = (SElectTimer*)param; + SSyncNode* pSyncNode = pElectTimer->pSyncNode; - SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_ELECTION, atomic_load_64(&pSyncNode->electTimerLogicClock), - pSyncNode->electTimerMS, pSyncNode->vgId, pSyncNode); + SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_ELECTION, pElectTimer->logicClock, pSyncNode->electTimerMS, + pSyncNode->vgId, pSyncNode); SRpcMsg rpcMsg; syncTimeout2RpcMsg(pSyncMsg, &rpcMsg); if (pSyncNode->FpEqMsg != NULL) { @@ -2788,6 +2795,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { sError("vgId:%d, sync enqueue elect msg error, code:%d", pSyncNode->vgId, code); rpcFreeCont(rpcMsg.pCont); syncTimeoutDestroy(pSyncMsg); + taosMemoryFree(pElectTimer); return; } @@ -2800,7 +2808,9 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { } else { sTrace("syncNodeEqElectTimer FpEqMsg is NULL"); } + syncTimeoutDestroy(pSyncMsg); + taosMemoryFree(pElectTimer); #if 0 // reset timer ms From 16e6273d9d429fad788aacd1e43aa4fa05969493 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 25 Oct 2022 20:31:20 +0800 Subject: [PATCH 29/44] refactor(sync): delete assert --- source/libs/sync/src/syncVoteMgr.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/source/libs/sync/src/syncVoteMgr.c b/source/libs/sync/src/syncVoteMgr.c index 39d62b957a..ce72935221 100644 --- a/source/libs/sync/src/syncVoteMgr.c +++ b/source/libs/sync/src/syncVoteMgr.c @@ -66,7 +66,12 @@ bool voteGrantedMajority(SVotesGranted *pVotesGranted) { void voteGrantedVote(SVotesGranted *pVotesGranted, SyncRequestVoteReply *pMsg) { ASSERT(pMsg->voteGranted == true); - ASSERT(pMsg->term == pVotesGranted->term); + + if (pMsg->term != pVotesGranted->term) { + syncNodeEventLog(pVotesGranted->pSyncNode, "vote grant vnode error"); + return; + } + ASSERT(syncUtilSameId(&pVotesGranted->pSyncNode->myRaftId, &pMsg->destId)); int j = -1; @@ -201,7 +206,11 @@ bool votesResponded(SVotesRespond *pVotesRespond, const SRaftId *pRaftId) { } void votesRespondAdd(SVotesRespond *pVotesRespond, const SyncRequestVoteReply *pMsg) { - ASSERT(pVotesRespond->term == pMsg->term); + if (pVotesRespond->term != pMsg->term) { + syncNodeEventLog(pVotesRespond->pSyncNode, "vote respond add error"); + return; + } + for (int i = 0; i < pVotesRespond->replicaNum; ++i) { if (syncUtilSameId(&((*(pVotesRespond->replicas))[i]), &pMsg->srcId)) { // ASSERT(pVotesRespond->isRespond[i] == false); From 7723a9ac28d5c17c582aaeb921022ecf2f96bae3 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Tue, 25 Oct 2022 23:17:52 +0800 Subject: [PATCH 30/44] refactor(sync): delete %ld --- source/libs/sync/src/syncMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index f2e25a023b..44c19f5431 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -2801,7 +2801,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { do { char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "eq elect timer lc:%ld", pSyncMsg->logicClock); + snprintf(logBuf, sizeof(logBuf), "eq elect timer lc:%" PRIu64, pSyncMsg->logicClock); syncNodeEventLog(pSyncNode, logBuf); } while (0); From 75e5e490fd49932670a89366ca74a25b82a0699b Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 26 Oct 2022 00:31:00 +0800 Subject: [PATCH 31/44] refactor(stream): recover and fill history --- include/common/tcommon.h | 1 + include/common/tmsgdef.h | 3 + include/libs/executor/executor.h | 12 +- include/libs/stream/tstream.h | 40 ++++- include/libs/wal/wal.h | 21 ++- source/client/src/clientTmq.c | 1 + source/dnode/vnode/src/inc/tq.h | 6 +- source/dnode/vnode/src/tq/tq.c | 154 +++++++++++++++++- source/dnode/vnode/src/tq/tqExec.c | 4 +- source/dnode/vnode/src/tq/tqOffset.c | 4 +- source/dnode/vnode/src/tq/tqRead.c | 4 +- source/dnode/vnode/src/tq/tqSink.c | 2 + source/libs/executor/inc/executorimpl.h | 146 +++++++++-------- source/libs/executor/src/executor.c | 112 ++++++++++++- source/libs/executor/src/executorimpl.c | 11 +- source/libs/executor/src/scanoperator.c | 16 +- source/libs/executor/src/timewindowoperator.c | 33 +++- source/libs/stream/inc/streamInc.h | 2 +- source/libs/stream/src/streamDispatch.c | 5 + source/libs/stream/src/streamExec.c | 49 ++++++ source/libs/stream/src/streamMeta.c | 26 ++- source/libs/stream/src/streamRecover.c | 86 ++++++++++ source/libs/wal/src/walRef.c | 5 - 23 files changed, 603 insertions(+), 140 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 32626ca233..2b45a5d206 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -161,6 +161,7 @@ typedef enum EStreamType { STREAM_RETRIEVE, STREAM_PULL_DATA, STREAM_PULL_OVER, + STREAM_FILL_OVER, } EStreamType; typedef struct { diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 1cd02e2a28..be4bf0e4d2 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -198,6 +198,9 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqPollReq, SMqDataBlkRsp) TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_STREAM_DISPATCH_WRITE, "vnode-stream-task-dispatch-write", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_STEP1, "vnode-stream-recover1", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_STEP2, "vnode-stream-recover2", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_FINISH, "vnode-stream-finish", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index b4b60f804d..d083d20058 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -53,7 +53,8 @@ typedef struct { void* sContext; // SSnapContext* - void* pStateBackend; + void* pStateBackend; + int64_t fillHistoryVer1; } SReadHandle; // in queue mode, data streams are seperated by msg @@ -176,6 +177,7 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len); int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len); +STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key); /** * return the scan info, in the form of tuple of two items, including table uid and current timestamp * @param tinfo @@ -207,9 +209,11 @@ int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner); int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem); -int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer); - -STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key); +int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo); +int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver); +int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver); +int32_t qStreamRecoverFinish(qTaskInfo_t tinfo); +int32_t qStreamRestoreParam(qTaskInfo_t tinfo); #ifdef __cplusplus } diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 2ab0dc828e..2a957b0d16 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -47,7 +47,9 @@ enum { TASK_STATUS__FAIL, TASK_STATUS__STOP, TASK_STATUS__RECOVER_DOWNSTREAM, - TASK_STATUS__RECOVER_SELF, + TASK_STATUS__RECOVER_PREPARE, + TASK_STATUS__RECOVER1, + TASK_STATUS__RECOVER2, }; enum { @@ -329,6 +331,9 @@ typedef struct SStreamTask { // state backend SStreamState* pState; + // do not serialize + int32_t recoverWaitingChild; + } SStreamTask; int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo); @@ -435,6 +440,11 @@ typedef struct { int32_t rspToTaskId; } SStreamRetrieveRsp; +typedef struct { + int64_t streamId; + int32_t taskId; +} SStreamRecoverStep1Req, SStreamRecoverStep2Req; + #if 0 typedef struct { int64_t streamId; @@ -521,8 +531,29 @@ int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp); int32_t streamTryExec(SStreamTask* pTask); int32_t streamSchedExec(SStreamTask* pTask); -typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask); +int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz); +// recover and fill history +// common +int32_t streamSetParamForRecover(SStreamTask* pTask); +int32_t streamRestoreParam(SStreamTask* pTask); +int32_t streamSetStatusNormal(SStreamTask* pTask); +// source level +int32_t streamSourceRecoverPrepareStep1(SStreamTask* pTask, int64_t ver); +int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamRecoverStep1Req* pReq); +int32_t streamSourceRecoverScanStep1(SStreamTask* pTask); +int32_t streamBuildSourceRecover2Req(SStreamTask* pTask, SStreamRecoverStep2Req* pReq); +int32_t streamSourceRecoverScanStep2(SStreamTask* pTask, int64_t ver); +int32_t streamDispatchRecoverFinishReq(SStreamTask* pTask); +// agg level +int32_t streamAggRecoverPrepare(SStreamTask* pTask); +// int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask); +int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId); + +// expand and deploy +typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver); + +// meta typedef struct SStreamMeta { char* path; TDB* db; @@ -533,12 +564,13 @@ typedef struct SStreamMeta { void* ahandle; TXN txn; FTaskExpand* expandFunc; + int32_t vgId; } SStreamMeta; -SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc); +SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId); void streamMetaClose(SStreamMeta* streamMeta); -// int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask); +int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask); int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen); int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId); diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index adf244e32a..08dba5d50d 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -33,16 +33,16 @@ extern "C" { #define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", DEBUG_TRACE, wDebugFlag, __VA_ARGS__); }} // clang-format on -#define WAL_PROTO_VER 0 -#define WAL_NOSUFFIX_LEN 20 -#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1) -#define WAL_LOG_SUFFIX "log" -#define WAL_INDEX_SUFFIX "idx" -#define WAL_REFRESH_MS 1000 -#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12) -#define WAL_FILE_LEN (WAL_PATH_LEN + 32) -#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL -#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3) +#define WAL_PROTO_VER 0 +#define WAL_NOSUFFIX_LEN 20 +#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1) +#define WAL_LOG_SUFFIX "log" +#define WAL_INDEX_SUFFIX "idx" +#define WAL_REFRESH_MS 1000 +#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12) +#define WAL_FILE_LEN (WAL_PATH_LEN + 32) +#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL +#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3) #define WAL_RECOV_SIZE_LIMIT (100 * WAL_SCAN_BUF_SIZE) typedef enum { @@ -204,7 +204,6 @@ SWalRef *walRefCommittedVer(SWal *); SWalRef *walOpenRef(SWal *); void walCloseRef(SWal *pWal, int64_t refId); int32_t walRefVer(SWalRef *, int64_t ver); -int32_t walPreRefVer(SWalRef *pRef, int64_t ver); void walUnrefVer(SWalRef *); // helper function for raft diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 6e3e274052..a4ca1f796b 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1732,6 +1732,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { // in no topic status, delayed task also need to be processed if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) { + tscDebug("consumer:%" PRId64 ", poll return since consumer status is init", tmq->consumerId); return NULL; } diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 7f418439a8..edd23c80be 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -181,15 +181,15 @@ int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey) int32_t tqOffsetCommitFile(STqOffsetStore* pStore); // tqSink -void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data); +// void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data); void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* data); // tqOffset -char* tqOffsetBuildFName(const char* path, int32_t ver); +char* tqOffsetBuildFName(const char* path, int32_t fVer); int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname); // tqStream -int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask); +int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index da22d0d951..81940f539d 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -98,7 +98,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { ASSERT(0); } - pTq->pStreamMeta = streamMetaOpen(path, pTq, (FTaskExpand*)tqExpandTask); + pTq->pStreamMeta = streamMetaOpen(path, pTq, (FTaskExpand*)tqExpandTask, pTq->pVnode->config.vgId); if (pTq->pStreamMeta == NULL) { ASSERT(0); } @@ -872,7 +872,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe return 0; } -int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { +int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { if (pTask->taskLevel == TASK_LEVEL__AGG) { ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0); } @@ -891,6 +891,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { pTask->pMsgCb = &pTq->pVnode->msgCb; + pTask->startVer = ver; + // expand executor if (pTask->taskLevel == TASK_LEVEL__SOURCE) { pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1); @@ -903,9 +905,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { .vnode = pTq->pVnode, .initTqReader = 1, .pStateBackend = pTask->pState, + .fillHistoryVer1 = pTask->fillHistory ? ver : -1, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); ASSERT(pTask->exec.executor); + + if (pTask->fillHistory) { + pTask->taskStatus = TASK_STATUS__RECOVER_PREPARE; + } } else if (pTask->taskLevel == TASK_LEVEL__AGG) { pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1); if (pTask->pState == NULL) { @@ -945,8 +952,148 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { } int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { + int32_t code; +#if 0 + code = streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen); + if (code < 0) return code; +#endif + + // 1.deserialize msg and build task + SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); + if (pTask == NULL) { + return -1; + } + SDecoder decoder; + tDecoderInit(&decoder, (uint8_t*)msg, msgLen); + code = tDecodeSStreamTask(&decoder, pTask); + if (code < 0) { + tDecoderClear(&decoder); + taosMemoryFree(pTask); + return -1; + } + tDecoderClear(&decoder); + + // 2.save task + code = streamMetaAddTask(pTq->pStreamMeta, version, pTask); + if (code < 0) { + return -1; + } + + // 3.go through recover steps to fill history + if (pTask->fillHistory) { + streamSetParamForRecover(pTask); + if (pTask->taskLevel == TASK_LEVEL__SOURCE) { + streamSourceRecoverPrepareStep1(pTask, version); + + SStreamRecoverStep1Req req; + streamBuildSourceRecover1Req(pTask, &req); + + void* serialziedReq = (void*)&req; + int32_t len = sizeof(SStreamRecoverStep1Req); + + SRpcMsg rpcMsg = { + .contLen = len, + .pCont = serialziedReq, + .msgType = TDMT_VND_STREAM_RECOVER_STEP1, + }; + + tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &rpcMsg); + + } else if (pTask->taskLevel == TASK_LEVEL__AGG) { + streamAggRecoverPrepare(pTask); + } else if (pTask->taskLevel == TASK_LEVEL__SINK) { + // do nothing + } + } + + return 0; +} + +int32_t tqProcessTaskRecover1Req(STQ* pTq, char* msg, int32_t msgLen) { + int32_t code; + SStreamRecoverStep1Req* pReq = (SStreamRecoverStep1Req*)msg; + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, pReq->taskId); + if (pTask == NULL) { + return -1; + } + + // check param + int64_t fillVer1 = pTask->startVer; + if (fillVer1 <= 0) { + ASSERT(0); + return -1; + } + + // do recovery step 1 + streamSourceRecoverScanStep1(pTask); + + // build msg to launch next step + SStreamRecoverStep2Req req; + code = streamBuildSourceRecover2Req(pTask, &req); + if (code < 0) { + return -1; + } + + // serialize msg + int32_t len = sizeof(SStreamRecoverStep2Req); + void* serializedReq = (void*)&req; + + // dispatch msg + SRpcMsg rpcMsg = { + .code = 0, + .contLen = len, + .msgType = TDMT_VND_STREAM_RECOVER_STEP2, + .pCont = (void*)serializedReq, + }; + + tmsgPutToQueue(&pTq->pVnode->msgCb, WRITE_QUEUE, &rpcMsg); + + return 0; +} + +int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { + int32_t code; + SStreamRecoverStep2Req* pReq = (SStreamRecoverStep2Req*)msg; + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, pReq->taskId); + if (pTask == NULL) { + return -1; + } + + // do recovery step 2 + code = streamSourceRecoverScanStep2(pTask, version); + if (code < 0) { + return -1; + } + + // restore param + code = streamRestoreParam(pTask); + if (code < 0) { + return -1; + } + + // set status normal + code = streamSetStatusNormal(pTask); + if (code < 0) { + return -1; + } + + // dispatch recover finish req to all related downstream task + code = streamDispatchRecoverFinishReq(pTask); + if (code < 0) { + return -1; + } + + return 0; +} + +int32_t tqProcessTaskRecoverFinishReq(STQ* pTq, char* msg, int32_t msgLen) { + int32_t code; + + // deserialize + // find task + // do process request // - return streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen); + return 0; } int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) { @@ -1081,6 +1228,7 @@ int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* pReq, int64_t ver) { if (pIter == NULL) break; SStreamTask* pTask = *(SStreamTask**)pIter; if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue; + if (pTask->taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->taskStatus == TASK_STATUS__RECOVER1) continue; qDebug("data submit enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver); diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index 305ee82982..30f6f81aa9 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -85,11 +85,11 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs while (1) { SSDataBlock* pDataBlock = NULL; uint64_t ts = 0; - tqDebug("tmq task start to execute"); + tqDebug("vgId:%d, tmq task start to execute", pTq->pVnode->config.vgId); if (qExecTask(task, &pDataBlock, &ts) < 0) { ASSERT(0); } - tqDebug("tmq task executed, get %p", pDataBlock); + tqDebug("vgId:%d, tmq task executed, get %p", pTq->pVnode->config.vgId, pDataBlock); if (pDataBlock == NULL) { break; diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index 7097591c35..952f81e1f4 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -22,10 +22,10 @@ struct STqOffsetStore { SHashObj* pHash; // SHashObj }; -char* tqOffsetBuildFName(const char* path, int32_t ver) { +char* tqOffsetBuildFName(const char* path, int32_t fVer) { int32_t len = strlen(path); char* fname = taosMemoryCalloc(1, len + 40); - snprintf(fname, len + 40, "%s/offset-ver%d", path, ver); + snprintf(fname, len + 40, "%s/offset-ver%d", path, fVer); return fname; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 0e8b366113..2f6ec0c39f 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -525,7 +525,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) { if (pReader->tbIdHash) { taosHashClear(pReader->tbIdHash); } else { - pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); } if (pReader->tbIdHash == NULL) { @@ -543,7 +543,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) { int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) { if (pReader->tbIdHash == NULL) { - pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); if (pReader->tbIdHash == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 3e59e0fd45..8a81151273 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -530,6 +530,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d taosArrayDestroy(tagArray); } +#if 0 void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { const SArray* pRes = (const SArray*)data; SVnode* pVnode = (SVnode*)vnode; @@ -585,3 +586,4 @@ void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data tqDebug("failed to put into write-queue since %s", terrstr()); } } +#endif diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index d94eb80994..5bd5f89ef7 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -12,7 +12,6 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ -// clang-format off #ifndef TDENGINE_EXECUTORIMPL_H #define TDENGINE_EXECUTORIMPL_H @@ -140,18 +139,19 @@ typedef struct STaskIdInfo { enum { STREAM_RECOVER_STEP__NONE = 0, - STREAM_RECOVER_STEP__PREPARE, + STREAM_RECOVER_STEP__PREPARE1, + STREAM_RECOVER_STEP__PREPARE2, STREAM_RECOVER_STEP__SCAN, }; typedef struct { // TODO remove prepareStatus - STqOffsetVal prepareStatus; // for tmq - STqOffsetVal lastStatus; // for tmq - SMqMetaRsp metaRsp; // for tmq fetching meta - int8_t returned; - int64_t snapshotVer; - const SSubmitReq* pReq; + STqOffsetVal prepareStatus; // for tmq + STqOffsetVal lastStatus; // for tmq + SMqMetaRsp metaRsp; // for tmq fetching meta + int8_t returned; + int64_t snapshotVer; + const SSubmitReq* pReq; SSchemaWrapper* schema; char tbName[TSDB_TABLE_NAME_LEN]; @@ -164,7 +164,10 @@ typedef struct { int64_t recoverEndVer; int64_t fillHistoryVer1; int64_t fillHistoryVer2; - SStreamState* pState; + + int8_t triggerSaved; + int64_t deleteMarkSaved; + SStreamState* pState; } SStreamTaskInfo; typedef struct { @@ -192,7 +195,7 @@ typedef struct SExecTaskInfo { EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] SSubplan* pSubplan; struct SOperatorInfo* pRoot; - SLocalFetch localFetch; + SLocalFetch localFetch; } SExecTaskInfo; enum { @@ -444,8 +447,8 @@ typedef struct SStreamAggSupporter { int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row SSDataBlock* pScanBlock; SStreamState* pState; - int64_t gap; // stream session window gap - SqlFunctionCtx* pDummyCtx; // for combine + int64_t gap; // stream session window gap + SqlFunctionCtx* pDummyCtx; // for combine SSHashObj* pResultRows; int32_t stateKeySize; int16_t stateKeyType; @@ -542,9 +545,9 @@ typedef struct { } SStreamRawScanInfo; typedef struct SSysTableIndex { - int8_t init; - SArray *uids; - int32_t lastIdx; + int8_t init; + SArray* uids; + int32_t lastIdx; } SSysTableIndex; typedef struct SSysTableScanInfo { @@ -559,7 +562,7 @@ typedef struct SSysTableScanInfo { bool showRewrite; SNode* pCondition; // db_name filter condition, to discard data that are not in current database SMTbCursor* pCur; // cursor for iterate the local table meta store. - SSysTableIndex* pIdx; // idx for local table meta + SSysTableIndex* pIdx; // idx for local table meta SArray* scanCols; // SArray scan column id list SName name; SSDataBlock* pRes; @@ -603,7 +606,7 @@ typedef struct SIntervalAggOperatorInfo { typedef struct SMergeAlignedIntervalAggOperatorInfo { SIntervalAggOperatorInfo* intervalAggOperatorInfo; -// bool hasGroupId; + // bool hasGroupId; uint64_t groupId; // current groupId int64_t curTs; // current ts SSDataBlock* prefetchedBlock; @@ -613,21 +616,21 @@ typedef struct SMergeAlignedIntervalAggOperatorInfo { typedef struct SStreamIntervalOperatorInfo { // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode - SOptrBasicInfo binfo; // basic info - SAggSupporter aggSup; // aggregate supporter - SExprSupp scalarSupp; // supporter for perform scalar function - SGroupResInfo groupResInfo; // multiple results build supporter - SInterval interval; // interval info - int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator. + SOptrBasicInfo binfo; // basic info + SAggSupporter aggSup; // aggregate supporter + SExprSupp scalarSupp; // supporter for perform scalar function + SGroupResInfo groupResInfo; // multiple results build supporter + SInterval interval; // interval info + int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator. STimeWindowAggSupp twAggSup; bool invertible; bool ignoreExpiredData; - SArray* pDelWins; // SWinRes + SArray* pDelWins; // SWinRes int32_t delIndex; SSDataBlock* pDelRes; - SPhysiNode* pPhyNode; // create new child + SPhysiNode* pPhyNode; // create new child SHashObj* pPullDataMap; - SArray* pPullWins; // SPullWindowInfo + SArray* pPullWins; // SPullWindowInfo int32_t pullIndex; SSDataBlock* pPullDataRes; bool isFinal; @@ -695,9 +698,9 @@ typedef struct SGroupbyOperatorInfo { SArray* pGroupCols; // group by columns, SArray SArray* pGroupColVals; // current group column values, SArray SNode* pCondition; - bool isInit; // denote if current val is initialized or not - char* keyBuf; // group by keys for hash - int32_t groupKeyLen; // total group by column width + bool isInit; // denote if current val is initialized or not + char* keyBuf; // group by keys for hash + int32_t groupKeyLen; // total group by column width SGroupResInfo groupResInfo; SExprSupp scalarSup; } SGroupbyOperatorInfo; @@ -748,9 +751,9 @@ typedef struct SSessionAggOperatorInfo { } SSessionAggOperatorInfo; typedef struct SResultWindowInfo { - void* pOutputBuf; - SSessionKey sessionWin; - bool isOutput; + void* pOutputBuf; + SSessionKey sessionWin; + bool isOutput; } SResultWindowInfo; typedef struct SStateWindowInfo { @@ -761,20 +764,20 @@ typedef struct SStateWindowInfo { typedef struct SStreamSessionAggOperatorInfo { SOptrBasicInfo binfo; SStreamAggSupporter streamAggSup; - SExprSupp scalarSupp; // supporter for perform scalar function + SExprSupp scalarSupp; // supporter for perform scalar function SGroupResInfo groupResInfo; int32_t primaryTsIndex; // primary timestamp slot id int32_t endTsIndex; // window end timestamp slot id int32_t order; // current SSDataBlock scan order STimeWindowAggSupp twAggSup; - SSDataBlock* pWinBlock; // window result - SSDataBlock* pDelRes; // delete result - SSDataBlock* pUpdateRes; // update window + SSDataBlock* pWinBlock; // window result + SSDataBlock* pDelRes; // delete result + SSDataBlock* pUpdateRes; // update window bool returnUpdate; SSHashObj* pStDeleted; void* pDelIterator; - SArray* pChildren; // cache for children's result; final stream operator - SPhysiNode* pPhyNode; // create new child + SArray* pChildren; // cache for children's result; final stream operator + SPhysiNode* pPhyNode; // create new child bool isFinal; bool ignoreExpiredData; SHashObj* pGroupIdTbNameMap; @@ -783,7 +786,7 @@ typedef struct SStreamSessionAggOperatorInfo { typedef struct SStreamStateAggOperatorInfo { SOptrBasicInfo binfo; SStreamAggSupporter streamAggSup; - SExprSupp scalarSupp; // supporter for perform scalar function + SExprSupp scalarSupp; // supporter for perform scalar function SGroupResInfo groupResInfo; int32_t primaryTsIndex; // primary timestamp slot id STimeWindowAggSupp twAggSup; @@ -791,7 +794,7 @@ typedef struct SStreamStateAggOperatorInfo { SSDataBlock* pDelRes; SSHashObj* pSeDeleted; void* pDelIterator; - SArray* pChildren; // cache for children's result; + SArray* pChildren; // cache for children's result; bool ignoreExpiredData; SHashObj* pGroupIdTbNameMap; } SStreamStateAggOperatorInfo; @@ -811,18 +814,18 @@ typedef struct SStreamPartitionOperatorInfo { typedef struct SStreamFillOperatorInfo { SStreamFillSupporter* pFillSup; - SSDataBlock* pRes; - SSDataBlock* pSrcBlock; - int32_t srcRowIndex; - SSDataBlock* pPrevSrcBlock; - SSDataBlock* pSrcDelBlock; - int32_t srcDelRowIndex; - SSDataBlock* pDelRes; - SNode* pCondition; - SArray* pColMatchColInfo; - int32_t primaryTsCol; - int32_t primarySrcSlotId; - SStreamFillInfo* pFillInfo; + SSDataBlock* pRes; + SSDataBlock* pSrcBlock; + int32_t srcRowIndex; + SSDataBlock* pPrevSrcBlock; + SSDataBlock* pSrcDelBlock; + int32_t srcDelRowIndex; + SSDataBlock* pDelRes; + SNode* pCondition; + SArray* pColMatchColInfo; + int32_t primaryTsCol; + int32_t primarySrcSlotId; + SStreamFillInfo* pFillInfo; } SStreamFillOperatorInfo; typedef struct STimeSliceOperatorInfo { @@ -855,7 +858,7 @@ typedef struct SStateWindowOperatorInfo { SStateKeys stateKey; int32_t tsSlotId; // primary timestamp column slot id STimeWindowAggSupp twAggSup; - const SNode* pCondition; + const SNode* pCondition; } SStateWindowOperatorInfo; typedef struct SSortOperatorInfo { @@ -913,8 +916,8 @@ void initResultSizeInfo(SResultInfo* pResultInfo, int32_t numOfRows); void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf); -void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, - SDiskbasedBuf* pBuf); +void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, + SDiskbasedBuf* pBuf); int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf); bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo); @@ -983,7 +986,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExecTaskInfo* pTaskInfo, int32_t numOfChild); SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode *pAggNode, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid, SBlockDistScanPhysiNode* pBlockScanNode, SExecTaskInfo* pTaskInfo); @@ -1010,8 +1013,8 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh SExecTaskInfo* pTaskInfo); SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild); -SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, - SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, + SExecTaskInfo* pTaskInfo); SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo); @@ -1063,20 +1066,21 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order); int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); -SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize); +SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize); void getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, SSessionKey* pKey); -bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap); -bool functionNeedToExecute(SqlFunctionCtx* pCtx); -bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup); -bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup); -bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup); -bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup); -void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp, void* pTbName); -void printDataBlock(SSDataBlock* pBlock, const char* flag); +bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap); +bool functionNeedToExecute(SqlFunctionCtx* pCtx); +bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup); +bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup); +bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup); +bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup); +void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, + uint64_t* pGp, void* pTbName); +void printDataBlock(SSDataBlock* pBlock, const char* flag); uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId); -int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, - SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo); +int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup, + SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo); int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, @@ -1097,8 +1101,8 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat int32_t saveSessionDiscBuf(SStreamState* pState, SSessionKey* key, void* buf, int32_t size); int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock, SExprSupp* pSup, SGroupResInfo* pGroupResInfo); -int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId, SqlFunctionCtx* pCtx, - int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup); +int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId, + SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup); int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult); int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize); void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index fb4248e886..c66963361f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -233,7 +233,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo } if (pListInfo->map == NULL) { - pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); } // traverse to the stream scanner node to add this table id @@ -660,12 +660,114 @@ int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem) { } #endif -int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer) { +int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM); - pTaskInfo->streamInfo.recoverStartVer = startVer; - pTaskInfo->streamInfo.recoverEndVer = endVer; - pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE; + pTaskInfo->streamInfo.recoverStartVer = 0; + pTaskInfo->streamInfo.recoverEndVer = ver; + pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE1; + return 0; +} + +int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM); + pTaskInfo->streamInfo.recoverStartVer = pTask->streamInfo.recoverEndVer; + pTaskInfo->streamInfo.recoverEndVer = ver; + pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE2; + return 0; +} + +int32_t qStreamRecoverFinish(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM); + pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE; + return 0; +} + +int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + SOperatorInfo* pOperator = pTaskInfo->pRoot; + + while (1) { + if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) { + SStreamIntervalOperatorInfo* pInfo = pOperator->info; + pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger; + pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark; + pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; + pInfo->twAggSup.deleteMark = INT64_MAX; + } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) { + SStreamSessionAggOperatorInfo* pInfo = pOperator->info; + pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger; + pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark; + pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; + pInfo->twAggSup.deleteMark = INT64_MAX; + } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) { + SStreamStateAggOperatorInfo* pInfo = pOperator->info; + pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger; + pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark; + pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; + pInfo->twAggSup.deleteMark = INT64_MAX; + } + + // iterate operator tree + if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) { + if (pOperator->numOfDownstream > 1) { + qError("unexpected stream, multiple downstream"); + ASSERT(0); + return -1; + } + return 0; + } else { + pOperator = pOperator->pDownstream[0]; + } + } + + return 0; +} + +int32_t qStreamRestoreParam(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + SOperatorInfo* pOperator = pTaskInfo->pRoot; + + while (1) { + if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) { + SStreamIntervalOperatorInfo* pInfo = pOperator->info; + + pInfo->twAggSup.calTrigger = pTaskInfo->streamInfo.triggerSaved; + pInfo->twAggSup.deleteMark = pTaskInfo->streamInfo.deleteMarkSaved; + } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION || + pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) { + SStreamSessionAggOperatorInfo* pInfo = pOperator->info; + + pInfo->twAggSup.calTrigger = pTaskInfo->streamInfo.triggerSaved; + pInfo->twAggSup.deleteMark = pTaskInfo->streamInfo.deleteMarkSaved; + } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) { + SStreamStateAggOperatorInfo* pInfo = pOperator->info; + + pInfo->twAggSup.calTrigger = pTaskInfo->streamInfo.triggerSaved; + pInfo->twAggSup.deleteMark = pTaskInfo->streamInfo.deleteMarkSaved; + } + + // iterate operator tree + if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) { + if (pOperator->numOfDownstream > 1) { + qError("unexpected stream, multiple downstream"); + ASSERT(0); + return -1; + } + return 0; + } else { + pOperator = pOperator->pDownstream[0]; + } + } return 0; } diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index e55404935c..a804d34b5d 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3479,7 +3479,7 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) { bool groupbyTbname(SNodeList* pGroupList) { bool bytbname = false; - if (LIST_LENGTH(pGroupList) > 0) { + if (LIST_LENGTH(pGroupList) == 1) { SNode* p = nodesListGetNode(pGroupList, 0); if (p->type == QUERY_NODE_FUNCTION) { // partition by tbname/group by tbname @@ -3495,7 +3495,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, return TDB_CODE_SUCCESS; } - pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); if (pTableListInfo->map == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -4002,8 +4002,11 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } - if (pHandle && pHandle->pStateBackend) { - (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend; + if (pHandle) { + (*pTaskInfo)->streamInfo.fillHistoryVer1 = pHandle->fillHistoryVer1; + if (pHandle->pStateBackend) { + (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend; + } } (*pTaskInfo)->sql = sql; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 115a6ab559..f13952f87a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1770,11 +1770,17 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { #endif #if 1 - if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) { + if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1 || + pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE2) { STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond)); - pTSInfo->cond.startVersion = -1; - pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1; + if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1) { + pTSInfo->cond.startVersion = -1; + pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1; + } else { + pTSInfo->cond.startVersion = pTaskInfo->streamInfo.fillHistoryVer1 + 1; + pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer2; + } pTSInfo->scanTimes = 0; pTSInfo->currentGroupId = -1; pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__SCAN; @@ -2286,7 +2292,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER; pTSInfo->dataReader = NULL; if (tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, tableList, &pTSInfo->dataReader, NULL) < 0) { - ASSERT(0); + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _error; } } @@ -2322,6 +2329,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys } else { taosArrayDestroy(pColIds); } + pTaskInfo->streamInfo.fillHistoryVer1 = pHandle->fillHistoryVer1; // create the pseduo columns info if (pTableScanNode->scan.pScanPseudoCols != NULL) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 6dda6488a0..b6995c1f8c 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1776,11 +1776,11 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh } SInterval interval = {.interval = pPhyNode->interval, - .sliding = pPhyNode->sliding, - .intervalUnit = pPhyNode->intervalUnit, - .slidingUnit = pPhyNode->slidingUnit, - .offset = pPhyNode->offset, - .precision = ((SColumnNode*)pPhyNode->window.pTspk)->node.resType.precision}; + .sliding = pPhyNode->sliding, + .intervalUnit = pPhyNode->intervalUnit, + .slidingUnit = pPhyNode->slidingUnit, + .offset = pPhyNode->offset, + .precision = ((SColumnNode*)pPhyNode->window.pTspk)->node.resType.precision}; STimeWindowAggSupp as = { .waterMark = pPhyNode->window.watermark, @@ -4235,6 +4235,13 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh .minTs = INT64_MAX, }; + if (pTaskInfo->streamInfo.fillHistoryVer1 != -1) { + pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger; + pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark; + pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; + pInfo->twAggSup.deleteMark = INT64_MAX; + } + initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); pInfo->primaryTsIndex = ((SColumnNode*)pSessionNode->window.pTspk)->slotId; @@ -4741,6 +4748,14 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys .maxTs = INT64_MIN, .minTs = INT64_MAX, }; + + if (pTaskInfo->streamInfo.fillHistoryVer1 != -1) { + pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger; + pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark; + pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; + pInfo->twAggSup.deleteMark = INT64_MAX; + } + initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); SExprSupp* pSup = &pOperator->exprSupp; @@ -5530,6 +5545,14 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys }; ASSERT(twAggSupp.calTrigger != STREAM_TRIGGER_MAX_DELAY); + + if (pTaskInfo->streamInfo.fillHistoryVer1 != -1) { + pTaskInfo->streamInfo.triggerSaved = twAggSupp.calTrigger; + pTaskInfo->streamInfo.deleteMarkSaved = twAggSupp.deleteMark; + twAggSupp.calTrigger = STREAM_TRIGGER_AT_ONCE; + twAggSupp.deleteMark = INT64_MAX; + } + pOperator->pTaskInfo = pTaskInfo; pInfo->interval = interval; pInfo->twAggSup = twAggSupp; diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index 6e30eeaa86..e9d4fbeaaa 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -32,7 +32,7 @@ typedef struct { static SStreamGlobalEnv streamEnv; -int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch); +// int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch); int32_t streamDispatch(SStreamTask* pTask); int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 9d8a44c1ef..042a78a5f7 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -439,3 +439,8 @@ FREE: taosFreeQitem(pBlock); return code; } + +int32_t streamDispatchRecoverFinishReq(SStreamTask* pTask) { + // + return 0; +} diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 149b1a8447..6e37810004 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -85,6 +85,54 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* return 0; } +int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { + ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE); + + void* exec = pTask->exec.executor; + + while (1) { + SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); + if (pRes == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + int32_t batchCnt = 0; + while (1) { + SSDataBlock* output = NULL; + uint64_t ts = 0; + if (qExecTask(exec, &output, &ts) < 0) { + ASSERT(0); + } + if (output == NULL) break; + + SSDataBlock block = {0}; + assignOneDataBlock(&block, output); + block.info.childId = pTask->selfChildId; + taosArrayPush(pRes, &block); + + if (++batchCnt >= batchSz) break; + } + if (taosArrayGetSize(pRes) == 0) { + taosArrayDestroy(pRes); + break; + } + SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); + if (qRes == NULL) { + taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + qRes->type = STREAM_INPUT__DATA_BLOCK; + qRes->blocks = pRes; + streamTaskOutput(pTask, qRes); + // TODO stream sched dispatch + } + return 0; +} + +#if 0 int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch) { ASSERT(pTask->taskLevel != TASK_LEVEL__SINK); @@ -144,6 +192,7 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch) return 0; } +#endif int32_t streamExecForAll(SStreamTask* pTask) { while (1) { diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index df28e4a62d..0656217fdd 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -17,7 +17,7 @@ #include "streamInc.h" #include "ttimer.h" -SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) { +SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId) { SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta)); if (pMeta == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -86,7 +86,8 @@ void streamMetaClose(SStreamMeta* pMeta) { taosMemoryFree(pMeta); } -int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen) { +#if 0 +int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t ver, char* msg, int32_t msgLen) { SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); if (pTask == NULL) { return -1; @@ -99,7 +100,7 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* } tDecoderClear(&decoder); - if (pMeta->expandFunc(pMeta->ahandle, pTask) < 0) { + if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { ASSERT(0); goto FAIL; } @@ -114,26 +115,20 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* goto FAIL; } - if (pTask->fillHistory) { - // pipeline exec - // if finished, dispatch a stream-prepare-finished msg to downstream task - // set status normal - } - return 0; FAIL: if (pTask) tFreeSStreamTask(pTask); return -1; } +#endif -#if 0 -int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask) { +#if 1 +int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { void* buf = NULL; - if (pMeta->expandFunc(pMeta->ahandle, pTask) < 0) { + if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { return -1; } - taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); int32_t len; int32_t code; @@ -149,12 +144,15 @@ int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask) { SEncoder encoder; tEncoderInit(&encoder, buf, len); tEncodeSStreamTask(&encoder, pTask); + tEncoderClear(&encoder); if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), buf, len, &pMeta->txn) < 0) { ASSERT(0); return -1; } + taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); + return 0; } #endif @@ -269,7 +267,7 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) { tDecodeSStreamTask(&decoder, pTask); tDecoderClear(&decoder); - if (pMeta->expandFunc(pMeta->ahandle, pTask) < 0) { + if (pMeta->expandFunc(pMeta->ahandle, pTask, -1) < 0) { tdbFree(pKey); tdbFree(pVal); tdbTbcClose(pCur); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 0505c3edd6..7027e046a3 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -15,6 +15,90 @@ #include "streamInc.h" +// common +int32_t streamSetParamForRecover(SStreamTask* pTask) { + void* exec = pTask->exec.executor; + return qStreamSetParamForRecover(exec); +} +int32_t streamRestoreParam(SStreamTask* pTask) { + void* exec = pTask->exec.executor; + return qStreamRestoreParam(exec); +} +int32_t streamSetStatusNormal(SStreamTask* pTask) { + pTask->taskStatus = TASK_STATUS__NORMAL; + return 0; +} + +// source +int32_t streamSourceRecoverPrepareStep1(SStreamTask* pTask, int64_t ver) { + void* exec = pTask->exec.executor; + return qStreamSourceRecoverStep1(exec, ver); +} + +int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamRecoverStep1Req* pReq) { + pReq->streamId = pTask->streamId; + pReq->taskId = pTask->taskId; + return 0; +} + +int32_t streamSourceRecoverScanStep1(SStreamTask* pTask) { + // + return streamScanExec(pTask, 100); + // TODO next: dispatch msg to launch scan step2 +} + +int32_t streamBuildSourceRecover2Req(SStreamTask* pTask, SStreamRecoverStep2Req* pReq) { + pReq->streamId = pTask->streamId; + pReq->taskId = pTask->taskId; + return 0; +} + +int32_t streamSourceRecoverScanStep2(SStreamTask* pTask, int64_t ver) { + void* exec = pTask->exec.executor; + if (qStreamSourceRecoverStep2(exec, ver) < 0) { + ASSERT(0); + } + return streamScanExec(pTask, 100); +} + +int32_t streamDispatchRecoverFinishReq(SStreamTask* pTask) { + if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) { + /*SStreamFillFinish*/ + } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { + } + return 0; +} + +// agg +int32_t streamAggRecoverPrepare(SStreamTask* pTask) { + void* exec = pTask->exec.executor; + if (qStreamSetParamForRecover(exec) < 0) { + return -1; + } + pTask->recoverWaitingChild = taosArrayGetSize(pTask->childEpInfo); + return 0; +} + +int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask) { + void* exec = pTask->exec.executor; + if (qStreamRestoreParam(exec) < 0) { + return -1; + } + if (qStreamRecoverFinish(exec) < 0) { + return -1; + } + return 0; +} + +int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId) { + int32_t left = atomic_sub_fetch_32(&pTask->recoverWaitingChild, 1); + ASSERT(left >= 0); + if (left == 0) { + streamAggChildrenRecoverFinish(pTask); + } + return 0; +} + #if 0 int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecoverReq* pReq) { if (tStartEncode(pEncoder) < 0) return -1; @@ -340,6 +424,7 @@ int32_t streamFetchDownstreamStatus(SStreamMeta* pMeta, SStreamTask* pTask) { return 0; } +#if 0 int32_t streamProcessFetchStatusRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStreamRecoverDownstreamRsp* pRsp) { // if failed, set timer and retry // if successful @@ -430,3 +515,4 @@ int32_t streamRecoverTask(SStreamTask* pTask) { // return 0; } +#endif diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c index 119d0575d8..5a14bcf962 100644 --- a/source/libs/wal/src/walRef.c +++ b/source/libs/wal/src/walRef.c @@ -65,11 +65,6 @@ int32_t walRefVer(SWalRef *pRef, int64_t ver) { return 0; } -int32_t walPreRefVer(SWalRef *pRef, int64_t ver) { - pRef->refVer = ver; - return 0; -} - void walUnrefVer(SWalRef *pRef) { pRef->refId = -1; pRef->refFile = -1; From b52ad0f740a8c92f8d066dec8701f4df5b576fb3 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 26 Oct 2022 08:58:38 +0800 Subject: [PATCH 32/44] tsdb/cache: new separate dataf reader for last file reading --- source/dnode/vnode/src/inc/tsdb.h | 1 + source/dnode/vnode/src/tsdb/tsdbCache.c | 11 +++++++---- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 4 +++- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 89be94a35d..e5b8a1f327 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -720,6 +720,7 @@ typedef struct SCacheRowsReader { SSttBlockLoadInfo *pLoadInfo; STsdbReadSnap *pReadSnap; SDataFReader *pDataFReader; + SDataFReader *pDataFReaderLast; } SCacheRowsReader; typedef struct { diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index d8a03334bc..f66185e977 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -951,7 +951,8 @@ typedef struct { } CacheNextRowIter; static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema, tb_uid_t suid, - SSttBlockLoadInfo *pLoadInfo, STsdbReadSnap *pReadSnap, SDataFReader **pDataFReader) { + SSttBlockLoadInfo *pLoadInfo, STsdbReadSnap *pReadSnap, SDataFReader **pDataFReader, + SDataFReader **pDataFReaderLast) { int code = 0; STbData *pMem = NULL; @@ -1006,7 +1007,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs pIter->fsLastState.suid = suid; pIter->fsLastState.uid = uid; pIter->fsLastState.pLoadInfo = pLoadInfo; - pIter->fsLastState.pDataFReader = pDataFReader; + pIter->fsLastState.pDataFReader = pDataFReaderLast; pIter->fsState.state = SFSNEXTROW_FS; pIter->fsState.pTsdb = pTsdb; @@ -1148,7 +1149,8 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo TSKEY lastRowTs = TSKEY_MAX; CacheNextRowIter iter = {0}; - nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader); + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader, + &pr->pDataFReaderLast); do { TSDBROW *pRow = NULL; @@ -1272,7 +1274,8 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCach TSKEY lastRowTs = TSKEY_MAX; CacheNextRowIter iter = {0}; - nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader); + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader, + &pr->pDataFReaderLast); do { TSDBROW *pRow = NULL; diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index e5d7576099..b8f49f38e4 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -64,7 +64,7 @@ static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pRea colDataAppend(pColInfoData, numOfRows, (const char*)pRes[i], false); } - pBlock->info.rows += allNullRow? 0:1; + pBlock->info.rows += allNullRow ? 0 : 1; } else { ASSERT(HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)); @@ -239,6 +239,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 tsdbTakeReadSnap(pr->pVnode->pTsdb, &pr->pReadSnap, "cache-l"); pr->pDataFReader = NULL; + pr->pDataFReaderLast = NULL; // retrieve the only one last row of all tables in the uid list. if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) { @@ -334,6 +335,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 } _end: + tsdbDataFReaderClose(&pr->pDataFReaderLast); tsdbDataFReaderClose(&pr->pDataFReader); tsdbUntakeReadSnap(pr->pVnode->pTsdb, pr->pReadSnap, "cache-l"); From 61797504bd7e16f6ce311db5c02b6e1be6610af3 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 26 Oct 2022 09:01:00 +0800 Subject: [PATCH 33/44] feat: taosbenchmark kill slow query (#17651) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 7a03fc9b76..340de43343 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG bc99376 + GIT_TAG f9c1d32 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From d06bcb221987ff87c9b0bafa52fd8510910b1aef Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 26 Oct 2022 09:47:11 +0800 Subject: [PATCH 34/44] refactor: fix typo and do some internal refactor. --- include/libs/executor/executor.h | 7 -- include/util/tarray.h | 19 +----- source/dnode/vnode/src/tsdb/tsdbCommit.c | 2 +- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 4 +- source/dnode/vnode/src/tsdb/tsdbRead.c | 10 +-- source/libs/executor/src/executor.c | 17 ----- source/libs/executor/src/executorimpl.c | 4 +- source/util/src/tarray.c | 72 --------------------- 8 files changed, 11 insertions(+), 124 deletions(-) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index b4b60f804d..a012db9738 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -133,13 +133,6 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bool* hasMore, SLocalFetch* pLocal); int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds); -/** - * kill the ongoing query and free the query handle and corresponding resources automatically - * @param tinfo qhandle - * @return - */ -int32_t qKillTask(qTaskInfo_t tinfo); - /** * kill the ongoing query asynchronously * @param tinfo qhandle diff --git a/include/util/tarray.h b/include/util/tarray.h index 99f09dc769..e95568197b 100644 --- a/include/util/tarray.h +++ b/include/util/tarray.h @@ -38,7 +38,6 @@ extern "C" { #define TARRAY_MIN_SIZE 8 #define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize)) #define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize) -#define TARRAY_GET_START(array) ((array)->pData) typedef struct SArray { size_t size; @@ -71,14 +70,6 @@ int32_t taosArrayEnsureCap(SArray* pArray, size_t tsize); */ void* taosArrayAddBatch(SArray* pArray, const void* pData, int32_t nEles); -/** - * - * @param pArray - * @param pData position array list - * @param numOfElems the number of removed position - */ -void taosArrayRemoveBatch(SArray* pArray, const int32_t* pData, int32_t numOfElems); - /** * * @param pArray @@ -266,13 +257,6 @@ void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t compa */ int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t comparFn, int32_t flags); -/** - * search the array - * @param pArray - * @param key - */ -char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int32_t flags); - /** * sort the pointer data in the array * @param pArray @@ -286,8 +270,6 @@ void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* par int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode); void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz); -char* taosShowStrArray(const SArray* pArray); - /** * swap array * @param a @@ -295,6 +277,7 @@ char* taosShowStrArray(const SArray* pArray); * @return */ void taosArraySwap(SArray* a, SArray* b); + #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 578473c79a..874fe3c958 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -1506,7 +1506,7 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, TABLEID id) { TSDB_CHECK_CODE(code, lino, _exit); } #else - if (pCommitter->dWriter.bData.nRow >= pCommitter->maxRow) { + if (pCommitter->dWriter.bDatal.nRow >= pCommitter->maxRow) { code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.bDatal, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg); TSDB_CHECK_CODE(code, lino, _exit); diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 92de870d4d..6c67c11220 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -152,8 +152,8 @@ static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) { pInfo->loadBlocks += 1; tsdbDebug("read last block, total load:%d, trigger by uid:%" PRIu64 - ", last file index:%d, last block index:%d, entry:%d, %p, elapsed time:%.2f ms, %s", - pInfo->loadBlocks, pIter->uid, pIter->iStt, pIter->iSttBlk, pInfo->currentLoadBlockIndex, pBlock, el, + ", last file index:%d, last block index:%d, entry:%d, rows:%d, %p, elapsed time:%.2f ms, %s", + pInfo->loadBlocks, pIter->uid, pIter->iStt, pIter->iSttBlk, pInfo->currentLoadBlockIndex, pBlock->nRow, pBlock, el, idStr); pInfo->blockIndex[pInfo->currentLoadBlockIndex] = pIter->iSttBlk; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 71828882c3..f83755fc4f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2139,12 +2139,13 @@ static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) { } static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) { return pLastBlockReader->mergeTree.pIter != NULL; } -bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) { - if (pBlockData->nRow > 0) { - ASSERT(pBlockData->nRow == pDumpInfo->totalRows); + +bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) { + if (pBlockData->nRow > 0) { + ASSERT(pBlockData->nRow == pDumpInfo->totalRows); } - return pBlockData->nRow > 0 && (!pDumpInfo->allDumped); + return pBlockData->nRow > 0 && (!pDumpInfo->allDumped); } int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, @@ -2619,6 +2620,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { ASSERT(tsLast >= pBlock->maxKey.ts); tBlockDataReset(&pReader->status.fileBlockData); + tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr); code = buildComposedDataBlock(pReader); } else { // whole block is required, return it directly SDataBlockInfo* pInfo = &pReader->pResBlock->info; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 0312105ca8..8140922e02 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -564,23 +564,6 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) { return pTaskInfo->code; } -int32_t qKillTask(qTaskInfo_t qinfo) { - SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo; - if (pTaskInfo == NULL) { - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - qAsyncKillTask(qinfo); - - // Wait for the query executing thread being stopped/ - // Once the query is stopped, the owner of qHandle will be cleared immediately. - while (pTaskInfo->owner != 0) { - taosMsleep(100); - } - - return TSDB_CODE_SUCCESS; -} - int32_t qAsyncKillTask(qTaskInfo_t qinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 2432944928..f0e4cbf533 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3640,9 +3640,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return pOperator; } - int32_t num = 0; - size_t size = LIST_LENGTH(pPhyNode->pChildren); - + size_t size = LIST_LENGTH(pPhyNode->pChildren); SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES); for (int32_t i = 0; i < size; ++i) { SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i); diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 4e9ac0e0de..309e6b30ae 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -91,48 +91,6 @@ void* taosArrayAddBatch(SArray* pArray, const void* pData, int32_t nEles) { return dst; } -void taosArrayRemoveBatch(SArray* pArray, const int32_t* pData, int32_t numOfElems) { - assert(pArray != NULL && pData != NULL); - if (numOfElems <= 0) { - return; - } - - size_t size = taosArrayGetSize(pArray); - if (numOfElems >= size) { - taosArrayClear(pArray); - return; - } - - int32_t i = pData[0] + 1, j = 0; - while (i < size) { - if (j == numOfElems - 1) { - break; - } - - char* p = TARRAY_GET_ELEM(pArray, i); - if (i > pData[j] && i < pData[j + 1]) { - char* dst = TARRAY_GET_ELEM(pArray, i - (j + 1)); - memmove(dst, p, pArray->elemSize); - } else if (i == pData[j + 1]) { - j += 1; - } - - i += 1; - } - - assert(i == pData[numOfElems - 1] + 1 && i <= size); - - int32_t srcIndex = pData[numOfElems - 1] + 1; - int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1; - if (pArray->size - srcIndex > 0) { - char* dst = TARRAY_GET_ELEM(pArray, dstIndex); - char* src = TARRAY_GET_ELEM(pArray, srcIndex); - memmove(dst, src, pArray->elemSize * (pArray->size - srcIndex)); - } - - pArray->size -= numOfElems; -} - void taosArrayRemoveDuplicate(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*)) { assert(pArray); @@ -435,17 +393,6 @@ void taosArraySortString(SArray* pArray, __compar_fn_t comparFn) { taosSort(pArray->pData, pArray->size, pArray->elemSize, comparFn); } -char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int32_t flags) { - assert(pArray != NULL); - assert(key != NULL); - - void* p = taosbsearch(&key, pArray->pData, pArray->size, pArray->elemSize, comparFn, flags); - if (p == NULL) { - return NULL; - } - return *(char**)p; -} - static int32_t taosArrayPartition(SArray* pArray, int32_t i, int32_t j, __ext_compar_fn_t fn, const void* userData) { void* key = taosArrayGetP(pArray, i); while (i < j) { @@ -543,26 +490,7 @@ void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* param) { taosArrayGetSize(pArray) > 8 ? taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param); } -// TODO(yihaoDeng) add order array -// -char* taosShowStrArray(const SArray* pArray) { - int32_t sz = pArray->size; - int32_t tlen = 0; - for (int32_t i = 0; i < sz; i++) { - tlen += strlen(taosArrayGetP(pArray, i)) + 1; - } - char* res = taosMemoryCalloc(1, tlen); - char* buf = res; - for (int32_t i = 0; i < sz; i++) { - char* str = taosArrayGetP(pArray, i); - int32_t len = strlen(str); - memcpy(buf, str, len); - buf += len; - if (i != sz - 1) *buf = ','; - } - return res; -} void taosArraySwap(SArray* a, SArray* b) { if (a == NULL || b == NULL) return; size_t t = a->size; From 719b937765d291f3393e6f972e1d995ab5ab218a Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Wed, 26 Oct 2022 10:08:20 +0800 Subject: [PATCH 35/44] test: add stream test cases --- tests/script/jenkins/basic.txt | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index e8b8bce5c2..26162579c1 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -247,15 +247,15 @@ ./test.sh -f tsim/stream/ignoreExpiredData.sim ./test.sh -f tsim/stream/sliding.sim ./test.sh -f tsim/stream/partitionbyColumnInterval.sim -#./test.sh -f tsim/stream/partitionbyColumnSession.sim -#./test.sh -f tsim/stream/partitionbyColumnState.sim -#./test.sh -f tsim/stream/deleteInterval.sim -#./test.sh -f tsim/stream/deleteSession.sim -#./test.sh -f tsim/stream/deleteState.sim -#./test.sh -f tsim/stream/fillIntervalDelete0.sim -#./test.sh -f tsim/stream/fillIntervalDelete1.sim +./test.sh -f tsim/stream/partitionbyColumnSession.sim +./test.sh -f tsim/stream/partitionbyColumnState.sim +./test.sh -f tsim/stream/deleteInterval.sim +./test.sh -f tsim/stream/deleteSession.sim +./test.sh -f tsim/stream/deleteState.sim +./test.sh -f tsim/stream/fillIntervalDelete0.sim +./test.sh -f tsim/stream/fillIntervalDelete1.sim ./test.sh -f tsim/stream/fillIntervalLinear.sim -#./test.sh -f tsim/stream/fillIntervalPartitionBy.sim +./test.sh -f tsim/stream/fillIntervalPartitionBy.sim ./test.sh -f tsim/stream/fillIntervalPrevNext.sim ./test.sh -f tsim/stream/fillIntervalValue.sim From db75eb8e2057aef3b39073797f4b5dbde31df946 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 26 Oct 2022 10:20:00 +0800 Subject: [PATCH 36/44] fix: stream supports 'partition by tbname, column' --- source/libs/parser/src/parTranslater.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index acb0ebb705..648661b1f8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3058,12 +3058,14 @@ static EDealRes checkStateExpr(SNode* pNode, void* pContext) { return DEAL_RES_CONTINUE; } -static bool isPartitionByTbname(SNodeList* pPartitionByList) { - if (1 != LIST_LENGTH(pPartitionByList)) { - return false; +static bool hasPartitionByTbname(SNodeList* pPartitionByList) { + SNode* pPartKey = NULL; + FOREACH(pPartKey, pPartitionByList) { + if (QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPartKey)->funcType) { + return true; + } } - SNode* pPartKey = nodesListGetNode(pPartitionByList, 0); - return QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPartKey)->funcType; + return false; } static int32_t checkStateWindowForStream(STranslateContext* pCxt, SSelectStmt* pSelect) { @@ -3071,7 +3073,7 @@ static int32_t checkStateWindowForStream(STranslateContext* pCxt, SSelectStmt* p return TSDB_CODE_SUCCESS; } if (TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && - !isPartitionByTbname(pSelect->pPartitionByList)) { + !hasPartitionByTbname(pSelect->pPartitionByList)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); } return TSDB_CODE_SUCCESS; @@ -5365,12 +5367,12 @@ static int32_t translateKillTransaction(STranslateContext* pCxt, SKillStmt* pStm static bool crossTableWithoutAggOper(SSelectStmt* pSelect) { return NULL == pSelect->pWindow && !pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc && !pSelect->hasInterpFunc && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && - !isPartitionByTbname(pSelect->pPartitionByList); + !hasPartitionByTbname(pSelect->pPartitionByList); } static bool crossTableWithUdaf(SSelectStmt* pSelect) { return pSelect->hasUdaf && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && - !isPartitionByTbname(pSelect->pPartitionByList); + !hasPartitionByTbname(pSelect->pPartitionByList); } static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { From 8d8fd2b2bcb339105bdb7da8c53fd355a16f005d Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 26 Oct 2022 10:20:33 +0800 Subject: [PATCH 37/44] refactor(stream): recover and fill history --- include/libs/executor/executor.h | 3 +- include/libs/stream/tstream.h | 9 +++ source/common/src/tmsg.c | 2 + source/dnode/vnode/src/tq/tq.c | 18 +++++- source/libs/executor/src/scanoperator.c | 23 ++++--- source/libs/executor/src/timewindowoperator.c | 62 ++++++------------- source/libs/stream/src/streamDispatch.c | 45 +++++++++++++- source/libs/stream/src/streamMeta.c | 3 +- source/libs/stream/src/streamRecover.c | 27 +++++++- 9 files changed, 130 insertions(+), 62 deletions(-) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index d083d20058..83e0bd33a6 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -53,8 +53,7 @@ typedef struct { void* sContext; // SSnapContext* - void* pStateBackend; - int64_t fillHistoryVer1; + void* pStateBackend; } SReadHandle; // in queue mode, data streams are seperated by msg diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 2a957b0d16..534d86b1f1 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -445,6 +445,15 @@ typedef struct { int32_t taskId; } SStreamRecoverStep1Req, SStreamRecoverStep2Req; +typedef struct { + int64_t streamId; + int32_t taskId; + int32_t childId; +} SStreamRecoverFinishReq; + +int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq); +int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq); + #if 0 typedef struct { int64_t streamId; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 47a260c147..aea689c0de 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -4853,6 +4853,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS if (tEncodeCStr(&encoder, pReq->sourceDB) < 0) return -1; if (tEncodeCStr(&encoder, pReq->targetStbFullName) < 0) return -1; if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1; + if (tEncodeI8(&encoder, pReq->fillHistory) < 0) return -1; if (tEncodeI32(&encoder, sqlLen) < 0) return -1; if (tEncodeI32(&encoder, astLen) < 0) return -1; if (tEncodeI8(&encoder, pReq->triggerType) < 0) return -1; @@ -4889,6 +4890,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea if (tDecodeCStrTo(&decoder, pReq->sourceDB) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->targetStbFullName) < 0) return -1; if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1; + if (tDecodeI8(&decoder, &pReq->fillHistory) < 0) return -1; if (tDecodeI32(&decoder, &sqlLen) < 0) return -1; if (tDecodeI32(&decoder, &astLen) < 0) return -1; if (tDecodeI8(&decoder, &pReq->triggerType) < 0) return -1; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 81940f539d..8bf1522d6c 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -905,7 +905,6 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { .vnode = pTq->pVnode, .initTqReader = 1, .pStateBackend = pTask->pState, - .fillHistoryVer1 = pTask->fillHistory ? ver : -1, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); ASSERT(pTask->exec.executor); @@ -1090,9 +1089,24 @@ int32_t tqProcessTaskRecoverFinishReq(STQ* pTq, char* msg, int32_t msgLen) { int32_t code; // deserialize + int32_t len; + SStreamRecoverFinishReq req; + + SDecoder decoder; + tDecoderInit(&decoder, msg, sizeof(SStreamRecoverFinishReq)); + tDecodeSStreamRecoverFinishReq(&decoder, &req); + tDecoderClear(&decoder); + // find task + SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, req.taskId); + if (pTask == NULL) { + return -1; + } // do process request - // + if (streamProcessRecoverFinishReq(pTask, req.childId) < 0) { + return -1; + } + return 0; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 12e3582c85..a7af6ca96c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -988,8 +988,8 @@ SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* re pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doBlockInfoScan, NULL, NULL, - destroyBlockDistScanOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doBlockInfoScan, NULL, NULL, destroyBlockDistScanOperatorInfo, NULL); return pOperator; _error: @@ -2235,7 +2235,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pInfo->pGroupTags = pTableScanNode->pGroupTags; int32_t numOfCols = 0; - int32_t code = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo); + int32_t code = + extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo); int32_t numOfOutput = taosArrayGetSize(pInfo->matchInfo.pList); SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t)); @@ -2330,7 +2331,6 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys } else { taosArrayDestroy(pColIds); } - pTaskInfo->streamInfo.fillHistoryVer1 = pHandle->fillHistoryVer1; // create the pseduo columns info if (pTableScanNode->scan.pScanPseudoCols != NULL) { @@ -2361,8 +2361,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pOperator->pTaskInfo = pTaskInfo; __optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL); return pOperator; @@ -3930,8 +3929,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock); pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL); return pOperator; @@ -4043,7 +4041,8 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi int32_t num = 0; int32_t numOfExprs = 0; SExprInfo* pExprInfo = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &numOfExprs); - int32_t code = extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo); + int32_t code = + extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo); code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfExprs); if (code != TSDB_CODE_SUCCESS) { @@ -4066,8 +4065,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doTagScan, NULL, NULL, destroyTagScanOperatorInfo, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTagScan, NULL, NULL, destroyTagScanOperatorInfo, NULL); return pOperator; @@ -4563,7 +4561,8 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc; int32_t numOfCols = 0; - int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo); + int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, + &pInfo->matchInfo); code = initQueryTableDataCond(&pInfo->cond, pTableScanNode); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index a18419c0f7..ebfbacaa9e 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1832,7 +1832,8 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh pOperator->status = OP_NOT_OPENED; pOperator->info = pInfo; - pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, NULL, NULL, destroyIntervalOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, NULL, NULL, destroyIntervalOperatorInfo, NULL); code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { @@ -2638,7 +2639,8 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTimeslice, NULL, NULL, destroyTimeSliceOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doTimeslice, NULL, NULL, destroyTimeSliceOperatorInfo, NULL); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); @@ -2708,8 +2710,8 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi pOperator->pTaskInfo = pTaskInfo; pOperator->info = pInfo; - pOperator->fpSet = createOperatorFpSet(openStateWindowAggOptr, doStateWindowAgg, NULL, NULL, - destroyStateWindowOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(openStateWindowAggOptr, doStateWindowAgg, NULL, NULL, destroyStateWindowOperatorInfo, NULL); code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { @@ -2782,8 +2784,8 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW pOperator->status = OP_NOT_OPENED; pOperator->info = pInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL, - destroySWindowOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL, destroySWindowOperatorInfo, NULL); pOperator->pTaskInfo = pTaskInfo; code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { @@ -4233,13 +4235,6 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh .minTs = INT64_MAX, }; - if (pTaskInfo->streamInfo.fillHistoryVer1 != -1) { - pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger; - pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark; - pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; - pInfo->twAggSup.deleteMark = INT64_MAX; - } - initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); pInfo->primaryTsIndex = ((SColumnNode*)pSessionNode->window.pTspk)->slotId; @@ -4264,9 +4259,8 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->info = pInfo; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL, destroyStreamSessionAggOperatorInfo, - NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL, + destroyStreamSessionAggOperatorInfo, NULL); if (downstream) { initDownStream(downstream, &pInfo->streamAggSup, pInfo->twAggSup.waterMark, pOperator->operatorType, pInfo->primaryTsIndex); @@ -4411,9 +4405,8 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR); blockDataEnsureCapacity(pInfo->pUpdateRes, 128); pOperator->name = "StreamSessionSemiAggOperator"; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doStreamSessionSemiAgg, NULL, NULL, - destroyStreamSessionAggOperatorInfo, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamSessionSemiAgg, NULL, NULL, + destroyStreamSessionAggOperatorInfo, NULL); } pInfo->pGroupIdTbNameMap = @@ -4747,13 +4740,6 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys .minTs = INT64_MAX, }; - if (pTaskInfo->streamInfo.fillHistoryVer1 != -1) { - pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger; - pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark; - pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; - pInfo->twAggSup.deleteMark = INT64_MAX; - } - initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); SExprSupp* pSup = &pOperator->exprSupp; @@ -4789,8 +4775,8 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys pOperator->status = OP_NOT_OPENED; pOperator->pTaskInfo = pTaskInfo; pOperator->info = pInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL, - destroyStreamStateOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL, destroyStreamStateOperatorInfo, NULL); initDownStream(downstream, &pInfo->streamAggSup, pInfo->twAggSup.waterMark, pOperator->operatorType, pInfo->primaryTsIndex); code = appendDownstream(pOperator, &downstream, 1); @@ -5066,8 +5052,8 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, pOperator->pTaskInfo = pTaskInfo; pOperator->info = miaInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL, - destroyMAIOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL, destroyMAIOperatorInfo, NULL); code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { @@ -5378,8 +5364,8 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMerge pOperator->pTaskInfo = pTaskInfo; pOperator->info = pMergeIntervalInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL, - destroyMergeIntervalOperatorInfo, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL, destroyMergeIntervalOperatorInfo, NULL); code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { @@ -5544,13 +5530,6 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys ASSERT(twAggSupp.calTrigger != STREAM_TRIGGER_MAX_DELAY); - if (pTaskInfo->streamInfo.fillHistoryVer1 != -1) { - pTaskInfo->streamInfo.triggerSaved = twAggSupp.calTrigger; - pTaskInfo->streamInfo.deleteMarkSaved = twAggSupp.deleteMark; - twAggSupp.calTrigger = STREAM_TRIGGER_AT_ONCE; - twAggSupp.deleteMark = INT64_MAX; - } - pOperator->pTaskInfo = pTaskInfo; pInfo->interval = interval; pInfo->twAggSup = twAggSupp; @@ -5618,9 +5597,8 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->info = pInfo; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doStreamIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo, - NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamIntervalAgg, NULL, NULL, + destroyStreamFinalIntervalOperatorInfo, NULL); initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup, &pInfo->interval, &pInfo->twAggSup); code = appendDownstream(pOperator, &downstream, 1); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 45d9cfa50c..417ddfa80d 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -210,6 +210,46 @@ static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDis return 0; } +int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecoverFinishReq* pReq, int32_t vgId, + SEpSet* pEpSet) { + void* buf = NULL; + int32_t code = -1; + SRpcMsg msg = {0}; + + int32_t tlen; + tEncodeSize(tEncodeSStreamRecoverFinishReq, pReq, tlen, code); + if (code < 0) { + return -1; + } + + buf = rpcMallocCont(sizeof(SMsgHead) + tlen); + if (buf == NULL) { + return -1; + } + + ((SMsgHead*)buf)->vgId = htonl(vgId); + void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); + + SEncoder encoder; + tEncoderInit(&encoder, abuf, tlen); + if ((code = tEncodeSStreamRecoverFinishReq(&encoder, pReq)) < 0) { + goto FAIL; + } + tEncoderClear(&encoder); + + msg.contLen = tlen + sizeof(SMsgHead); + msg.pCont = buf; + msg.msgType = TDMT_VND_STREAM_RECOVER_FINISH; + + tmsgSendReq(pEpSet, &msg); + + code = 0; + return 0; +FAIL: + if (buf) rpcFreeCont(buf); + return code; +} + int32_t streamDispatchOneReq(SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t vgId, SEpSet* pEpSet) { void* buf = NULL; int32_t code = -1; @@ -244,9 +284,10 @@ int32_t streamDispatchOneReq(SStreamTask* pTask, const SStreamDispatchReq* pReq, tmsgSendReq(pEpSet, &msg); code = 0; -FAIL: - if (code < 0 && buf) rpcFreeCont(buf); return 0; +FAIL: + if (buf) rpcFreeCont(buf); + return code; } int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz, diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index ab6074f8d4..98e4e77cb0 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -141,9 +141,10 @@ int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { return -1; } - SEncoder encoder; + SEncoder encoder = {0}; tEncoderInit(&encoder, buf, len); tEncodeSStreamTask(&encoder, pTask); + tEncoderClear(&encoder); if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), buf, len, &pMeta->txn) < 0) { ASSERT(0); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 7027e046a3..12de2fafc1 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -62,8 +62,12 @@ int32_t streamSourceRecoverScanStep2(SStreamTask* pTask, int64_t ver) { } int32_t streamDispatchRecoverFinishReq(SStreamTask* pTask) { + SStreamRecoverFinishReq req = { + .streamId = pTask->streamId, + .taskId = pTask->taskId, + .childId = pTask->selfChildId, + }; if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) { - /*SStreamFillFinish*/ } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { } return 0; @@ -87,6 +91,7 @@ int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask) { if (qStreamRecoverFinish(exec) < 0) { return -1; } + streamSetStatusNormal(pTask); return 0; } @@ -99,6 +104,22 @@ int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId) { return 0; } +int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->childId) < 0) return -1; + tEndEncode(pEncoder); + return pEncoder->pos; +} +int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->childId) < 0) return -1; + tEndDecode(pDecoder); + return 0; +} #if 0 int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecoverReq* pReq) { if (tStartEncode(pEncoder) < 0) return -1; @@ -216,6 +237,7 @@ int32_t tDecodeSStreamMultiVgCheckpointInfo(SDecoder* pDecoder, SStreamMultiVgCh return 0; } +#if 0 int32_t tEncodeSStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq) { if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->downstreamTaskId) < 0) return -1; @@ -258,6 +280,7 @@ int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstrea } return 0; } +#endif int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { #if 0 @@ -353,6 +376,7 @@ int32_t streamSaveAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) { return 0; } +#if 0 int32_t streamFetchRecoverStatus(SStreamTask* pTask, const SVgroupInfo* pVgInfo) { int32_t taskId = pVgInfo->taskId; int32_t nodeId = pVgInfo->vgId; @@ -423,6 +447,7 @@ int32_t streamFetchDownstreamStatus(SStreamMeta* pMeta, SStreamTask* pTask) { } return 0; } +#endif #if 0 int32_t streamProcessFetchStatusRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStreamRecoverDownstreamRsp* pRsp) { From 89d74e501660abeb180ad4385ed1caf1761970c4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 26 Oct 2022 10:46:37 +0800 Subject: [PATCH 38/44] fix: malloc and copy binary data when inserting last & last_row --- source/dnode/vnode/src/tsdb/tsdbCache.c | 32 +++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index f66185e977..aa5058a1a2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -230,7 +230,21 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST break; } } else { - taosArraySet(pLast, iCol, &(SLastCol){.ts = keyTs, .colVal = colVal}); + SLastCol lastCol = {.ts = keyTs, .colVal = colVal}; + if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) { + SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol); + taosMemoryFree(pLastCol->colVal.value.pData); + + lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData); + if (lastCol.colVal.value.pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _invalidate; + } + memcpy(lastCol.colVal.value.pData, colVal.value.pData, colVal.value.nData); + } + + taosArraySet(pLast, iCol, &lastCol); } } } @@ -342,7 +356,21 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb break; } } else { - taosArraySet(pLast, iCol, &(SLastCol){.ts = keyTs, .colVal = colVal}); + SLastCol lastCol = {.ts = keyTs, .colVal = colVal}; + if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) { + SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol); + taosMemoryFree(pLastCol->colVal.value.pData); + + lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData); + if (lastCol.colVal.value.pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _invalidate; + } + memcpy(lastCol.colVal.value.pData, colVal.value.pData, colVal.value.nData); + } + + taosArraySet(pLast, iCol, &lastCol); } } } From 5843ba2d03fcc8675dd4d3bb22f0304402aebff4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 26 Oct 2022 11:10:36 +0800 Subject: [PATCH 39/44] test: refine query cases --- tests/system-test/2-query/explain.py | 56 ++++++++++++++-------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/tests/system-test/2-query/explain.py b/tests/system-test/2-query/explain.py index a308f6b3f7..21e16fab43 100644 --- a/tests/system-test/2-query/explain.py +++ b/tests/system-test/2-query/explain.py @@ -298,14 +298,14 @@ class TDTestCase: self.explain_check() - # tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.ct1") - # tdSql.query(f"explain verbose true select 1 from {dbname}.ct2") - # tdSql.query(f"explain verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") - # tdSql.query(f"explain verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") - # tdSql.query(f"explain verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") - # tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain verbose true select 1 from {dbname}.ct2") + tdSql.query(f"explain verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") - # self.explain_check() + self.explain_check() tdSql.query(f"explain verbose false select {INT_COL} from {dbname}.ct1") tdSql.query(f"explain verbose false select 1 from {dbname}.ct2") @@ -326,14 +326,14 @@ class TDTestCase: self.explain_check() - # tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1") - # tdSql.query(f"explain ratio {ratio} verbose true select 1 from {dbname}.ct2") - # tdSql.query(f"explain ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") - # tdSql.query(f"explain ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") - # tdSql.query(f"explain ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") - # tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain ratio {ratio} verbose true select 1 from {dbname}.ct2") + tdSql.query(f"explain ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") - # self.explain_check() + self.explain_check() tdSql.query(f"explain ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1") tdSql.query(f"explain ratio {ratio} verbose false select 1 from {dbname}.ct2") @@ -353,14 +353,14 @@ class TDTestCase: self.explain_check() - # tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.ct1") - # tdSql.query(f"explain analyze verbose true select 1 from {dbname}.ct2") - # tdSql.query(f"explain analyze verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") - # tdSql.query(f"explain analyze verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") - # tdSql.query(f"explain analyze verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") - # tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain analyze verbose true select 1 from {dbname}.ct2") + tdSql.query(f"explain analyze verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain analyze verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain analyze verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") - # self.explain_check() + self.explain_check() tdSql.query(f"explain analyze verbose false select {INT_COL} from {dbname}.ct1") tdSql.query(f"explain analyze verbose false select 1 from {dbname}.ct2") @@ -381,14 +381,14 @@ class TDTestCase: self.explain_check() - # tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1") - # tdSql.query(f"explain analyze ratio {ratio} verbose true select 1 from {dbname}.ct2") - # tdSql.query(f"explain analyze ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") - # tdSql.query(f"explain analyze ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") - # tdSql.query(f"explain analyze ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") - # tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") + tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1") + tdSql.query(f"explain analyze ratio {ratio} verbose true select 1 from {dbname}.ct2") + tdSql.query(f"explain analyze ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}") + tdSql.query(f"explain analyze ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0") + tdSql.query(f"explain analyze ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") + tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ") - # self.explain_check() + self.explain_check() tdSql.query(f"explain analyze ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1") tdSql.query(f"explain analyze ratio {ratio} verbose false select 1 from {dbname}.ct2") From 343b579dee66c9bed4fd4eb19d392098715ce626 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 26 Oct 2022 11:11:02 +0800 Subject: [PATCH 40/44] test: refine query cases --- tests/system-test/2-query/hyperloglog.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/system-test/2-query/hyperloglog.py b/tests/system-test/2-query/hyperloglog.py index e481d2c043..68f7ebdf2e 100644 --- a/tests/system-test/2-query/hyperloglog.py +++ b/tests/system-test/2-query/hyperloglog.py @@ -28,6 +28,8 @@ ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_C DBNAME = "db" class TDTestCase: + + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") From fe25112b59fcaf40244f011620518c1f5a613ab6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 26 Oct 2022 11:11:26 +0800 Subject: [PATCH 41/44] test: refine query cases --- tests/system-test/2-query/interp.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index bee20710b5..db2f8edc7d 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -32,6 +32,8 @@ class TDTestCase: tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')") tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')") tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')") + + tdSql.execute(f"insert into {dbname}.{tbname} (ts) values (now)") tdLog.printNoPrefix("==========step3:fill null") @@ -240,7 +242,7 @@ class TDTestCase: ## {. . .} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(12) + tdSql.checkRows(13) tdSql.checkData(0, 0, 5) tdSql.checkData(1, 0, 5) tdSql.checkData(2, 0, 10) @@ -290,21 +292,21 @@ class TDTestCase: ## ..{.} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)") - tdSql.checkRows(3) + tdSql.checkRows(5) tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) tdSql.checkData(2, 0, 15) ## ... {} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)") - tdSql.checkRows(0) + tdSql.checkRows(4) tdLog.printNoPrefix("==========step7:fill linear") ## {. . .} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)") - tdSql.checkRows(11) + tdSql.checkRows(12) tdSql.checkData(0, 0, 5) tdSql.checkData(1, 0, 6) tdSql.checkData(2, 0, 7) @@ -347,7 +349,7 @@ class TDTestCase: ## ..{.} tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(linear)") - tdSql.checkRows(3) + tdSql.checkRows(5) tdSql.checkData(0, 0, 13) tdSql.checkData(1, 0, 14) tdSql.checkData(2, 0, 15) @@ -505,7 +507,7 @@ class TDTestCase: tdSql.checkData(8, 0, '2020-02-01 00:00:12.000') tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)") - tdSql.checkRows(12) + tdSql.checkRows(13) tdSql.checkCols(2) tdSql.checkData(0, 0, '2020-02-01 00:00:04.000') @@ -548,7 +550,7 @@ class TDTestCase: tdSql.checkData(8, 0, '2020-02-01 00:00:12.000') tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)") - tdSql.checkRows(11) + tdSql.checkRows(12) tdSql.checkCols(2) tdSql.checkData(0, 0, '2020-02-01 00:00:05.000') @@ -576,7 +578,7 @@ class TDTestCase: # multiple _irowts tdSql.query(f"select interp(c0),_irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)") - tdSql.checkRows(11) + tdSql.checkRows(12) tdSql.checkCols(2) tdSql.checkData(0, 1, '2020-02-01 00:00:05.000') @@ -592,7 +594,7 @@ class TDTestCase: tdSql.checkData(10, 1, '2020-02-01 00:00:15.000') tdSql.query(f"select _irowts, interp(c0), interp(c0), _irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)") - tdSql.checkRows(11) + tdSql.checkRows(12) tdSql.checkCols(4) cols = (0, 3) @@ -851,6 +853,10 @@ class TDTestCase: tdSql.checkRows(3) tdSql.checkCols(4) + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3),interp(c4),interp(c5) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(linear)") + tdSql.checkRows(3) + tdSql.checkCols(6) + for i in range (tdSql.queryCols): tdSql.checkData(0, i, 13) From 32d2a5c8bf82f00049f8e9f1cad4821c06a91364 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 26 Oct 2022 11:11:53 +0800 Subject: [PATCH 42/44] test: refine query cases --- tests/system-test/2-query/twa.py | 59 ++++++++++++++++++++++++++++---- 1 file changed, 53 insertions(+), 6 deletions(-) diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index 4c163da485..a499c17efb 100644 --- a/tests/system-test/2-query/twa.py +++ b/tests/system-test/2-query/twa.py @@ -24,7 +24,7 @@ class TDTestCase: tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") tdSql.execute( f'''create table {dbname}.stb1 - (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp,c11 int UNSIGNED, c12 bigint UNSIGNED, c13 smallint UNSIGNED, c14 tinyint UNSIGNED) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) @@ -35,12 +35,12 @@ class TDTestCase: for j in range(self.row_nums): ts+=j*self.time_step tdSql.execute( - f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a, 1, 11111, 111, 1 )" ) - tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ") tdLog.info(" prepare data for distributed_aggregate done! ") @@ -48,7 +48,7 @@ class TDTestCase: tdSql.query(f"desc {dbname}.stb1 ") schema_list = tdSql.queryResult for col_type in schema_list: - if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE"]: + if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE","TINYINT UNSIGNED" ,"SMALLINT UNSIGNED","BIGINT UNSIGNED" ,"INT UNSIGNED"]: tdSql.query(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ") else: tdSql.error(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ") @@ -98,11 +98,57 @@ class TDTestCase: tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by t1") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,1.000000000) + + tdSql.query(f"select twa(c11) from {dbname}.ct1 ") + tdSql.checkData(0,0,1.000000000) + + tdSql.query(f"select twa(c11) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(self.tb_nums) + tdSql.checkData(0,0,1.000000000) + + tdSql.query(f"select twa(c12) from {dbname}.stb1 group by tbname ") + tdSql.checkRows(self.tb_nums) + tdSql.checkData(0,0,11111.000000000) + + tdSql.query(f"select twa(c11+c12) from {dbname}.stb1 partition by tbname ") + tdSql.checkData(0,0,11112.000000000) + + tdSql.query(f"select twa(c11) from {dbname}.stb1 partition by t1") + tdSql.checkRows(self.tb_nums) + tdSql.checkData(0,0,1.000000000) + + tdSql.query(f"select twa(c13) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(self.tb_nums) + + tdSql.query(f"select twa(c13) from {dbname}.stb1 group by tbname ") + tdSql.checkRows(self.tb_nums) + + tdSql.query(f"select twa(c14) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(self.tb_nums) + + tdSql.query(f"select twa(c14) from {dbname}.stb1 group by tbname ") + tdSql.checkRows(self.tb_nums) # union all tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname union all select twa(c1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(40) tdSql.checkData(0,0,1.000000000) + tdSql.query(f"select twa(c11) from {dbname}.stb1 partition by tbname union all select twa(c11) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(40) + tdSql.checkData(0,0,1.000000000) + + tdSql.query(f"select twa(c2) from {dbname}.stb1 partition by tbname union all select twa(c2) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(40) + tdSql.query(f"select twa(c3) from {dbname}.stb1 partition by tbname union all select twa(c3) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(40) + tdSql.query(f"select twa(c4) from {dbname}.stb1 partition by tbname union all select twa(c4) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(40) + tdSql.query(f"select twa(c12) from {dbname}.stb1 partition by tbname union all select twa(c12) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(40) + tdSql.query(f"select twa(c13) from {dbname}.stb1 partition by tbname union all select twa(c13) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(40) + tdSql.query(f"select twa(c14) from {dbname}.stb1 partition by tbname union all select twa(c14) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(40) # join @@ -122,6 +168,7 @@ class TDTestCase: tdSql.checkRows(1) tdSql.checkData(0,0,4.500000000) tdSql.checkData(0,1,4.500000000) + # mixup with other functions tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.ct1 ") From 892492847f7ffa39d6139b21119e894f1b6ab281 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Wed, 26 Oct 2022 11:12:15 +0800 Subject: [PATCH 43/44] test: refine query cases --- tests/system-test/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index d57f682b40..e05f375cdd 100644 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -593,7 +593,7 @@ python3 ./test.py -f 2-query/arccos.py -Q 4 python3 ./test.py -f 2-query/arctan.py -Q 4 python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 4 -#python3 ./test.py -f 2-query/nestedQuery.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery.py -Q 4 python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4 python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4 python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4 From 74ec2865bfd473c4f5cf2237b3fb9bca0637b6b1 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Wed, 26 Oct 2022 13:29:13 +0800 Subject: [PATCH 44/44] enh(taosAdapter): stmt set_tag supports json type (#17664) --- cmake/taosadapter_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index a9f8868f50..5b8192831e 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG cc43ef0 + GIT_TAG a11131c SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE