From e3098accc4c7dc222670311e887d01437d63907c Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 13 Dec 2023 23:25:37 +0800 Subject: [PATCH 01/69] support count empty table --- .gitignore | 4 +- include/libs/nodes/plannodes.h | 1 + include/libs/nodes/querynodes.h | 1 + source/dnode/vnode/src/sma/smaRollup.c | 1 + source/libs/executor/inc/executorInt.h | 4 +- source/libs/executor/src/scanoperator.c | 102 +++++++++- source/libs/nodes/src/nodesCloneFuncs.c | 1 + source/libs/nodes/src/nodesCodeFuncs.c | 9 +- source/libs/nodes/src/nodesMsgFuncs.c | 7 +- source/libs/parser/src/parTranslater.c | 1 + source/libs/planner/src/planPhysiCreater.c | 22 +++ tests/parallel_test/cases.task | 1 + tests/system-test/2-query/count.py | 33 ++-- tests/system-test/2-query/group_partition.py | 183 ++++++++++++++++++ .../2-query/nestedQueryInterval.py | 26 +-- 15 files changed, 359 insertions(+), 37 deletions(-) create mode 100644 tests/system-test/2-query/group_partition.py diff --git a/.gitignore b/.gitignore index 08e3d57717..f8b42f9176 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -build/ +*build/ compile_commands.json CMakeSettings.json .cache @@ -132,3 +132,5 @@ tools/taos-tools tools/taosws-rs tags .clangd +*CMakeCache* +*CMakeFiles* diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index b99a97a194..4b3c846389 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -434,6 +434,7 @@ typedef struct STableScanPhysiNode { bool assignBlockUid; int8_t igCheckUpdate; bool filesetDelimited; + bool needCountEmptyTable; } STableScanPhysiNode; typedef STableScanPhysiNode STableSeqScanPhysiNode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 5c5172b9cd..fdf598153f 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -363,6 +363,7 @@ typedef struct SSelectStmt { bool hasLastRowFunc; bool hasLastFunc; bool hasTimeLineFunc; + bool hasCountFunc; bool hasUdaf; bool hasStateKey; bool onlyHasKeepOrderFunc; diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index abe4c3f2fc..5b33451be5 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -750,6 +750,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma } tDestroySubmitReq(pReq, TSDB_MSG_FLG_ENCODE); taosMemoryFree(pReq); + pReq = NULL; TSDB_CHECK_CODE(code, lino, _exit); } diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index e3e504cdbc..2523b87cfb 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -262,6 +262,7 @@ typedef struct STableScanInfo { int32_t scanTimes; SSDataBlock* pResBlock; SHashObj* pIgnoreTables; + SHashObj* pValuedTables; // non empty table uids SSampleExecInfo sample; // sample execution info int32_t currentGroupId; int32_t currentTable; @@ -269,8 +270,9 @@ typedef struct STableScanInfo { int8_t assignBlockUid; bool hasGroupByTag; bool countOnly; - // TsdReader readerAPI; bool filesetDelimited; + bool needCountEmptyTable; + bool processingEmptyTable; } STableScanInfo; typedef struct STableMergeScanInfo { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ef2a99d1d1..1889492aa0 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -655,6 +655,50 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* 
pColInfoData, colDataDestroy(&infoData); } + +// record processed (non empty) table +static int32_t insertTableToProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { + if (!pTableScanInfo->needCountEmptyTable) { + return TSDB_CODE_SUCCESS; + } + if (NULL == pTableScanInfo->pValuedTables) { + int32_t tableNum = taosArrayGetSize(pTableScanInfo->base.pTableListInfo->pTableList); + pTableScanInfo->pValuedTables = + taosHashInit(tableNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + if (NULL == pTableScanInfo->pValuedTables) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + + taosHashPut(pTableScanInfo->pValuedTables, &uid, sizeof(uid), &pTableScanInfo->scanTimes, + sizeof(pTableScanInfo->scanTimes)); + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* getBlockForEmptyTable(SOperatorInfo* pOperator, const STableKeyInfo* tbInfo) { + STableScanInfo* pTableScanInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SSDataBlock* pBlock = pTableScanInfo->pResBlock; + + blockDataEmpty(pBlock); + pBlock->info.rows = 1; + pBlock->info.id.uid = tbInfo->uid; + pBlock->info.id.groupId = pOperator->dynamicTask ? tbInfo->uid : tbInfo->groupId; + + // only one row: set all col data to null & hasNull + int32_t col_num = blockDataGetNumOfCols(pBlock); + for (int32_t i = 0; i < col_num; ++i) { + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + colDataSetNULL(pColInfoData, 0); + } + + // set tag/tbname + doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows); + + pOperator->resultInfo.totalRows++; + return pBlock; +} + static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -722,7 +766,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { return NULL; } -static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { +static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKeyInfo* pList, int32_t num) { STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; @@ -736,6 +780,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { + insertTableToProcessed(pTableScanInfo, p->info.id.uid); return p; } @@ -764,6 +809,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { while (pTableScanInfo->scanTimes < total) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { + insertTableToProcessed(pTableScanInfo, p->info.id.uid); return p; } @@ -780,6 +826,39 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { } } + if (pTableScanInfo->needCountEmptyTable) { + if (num == 0 && 0 == taosHashGetSize(pTableScanInfo->pValuedTables)) { + // table by table, num is 0 + if (!pTableScanInfo->processingEmptyTable) { + pTableScanInfo->processingEmptyTable = true; + // current table is empty, fill result block info & return + const STableKeyInfo* info = tableListGetInfo(pTableScanInfo->base.pTableListInfo, pTableScanInfo->currentTable); + return getBlockForEmptyTable(pOperator, info); + } + + } else if (num > taosHashGetSize(pTableScanInfo->pValuedTables)) { + // group by group, num >= 1 + if (!pTableScanInfo->processingEmptyTable) { + pTableScanInfo->processingEmptyTable = true; + pTableScanInfo->currentTable = 0; + 
} + if (pTableScanInfo->currentTable < num) { + // loop: get empty table uid & process + while (pTableScanInfo->currentTable < num) { + const STableKeyInfo* info = pList + pTableScanInfo->currentTable++; + if (pTableScanInfo->pValuedTables && + NULL != taosHashGet(pTableScanInfo->pValuedTables, &info->uid, sizeof(info->uid))) { + } else { + return getBlockForEmptyTable(pOperator, info); + } + } + } + } + + pTableScanInfo->processingEmptyTable = false; + } + taosHashClear(pTableScanInfo->pValuedTables); + return NULL; } @@ -861,7 +940,7 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) { pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); pInfo->scanTimes = 0; - SSDataBlock* result = doGroupedTableScan(pOperator); + SSDataBlock* result = doGroupedTableScan(pOperator, pList, num); if (result != NULL) { if (pOperator->dynamicTask) { result->info.id.groupId = result->info.id.uid; @@ -876,15 +955,16 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; + int32_t num = 0; + STableKeyInfo* pList = NULL; if (pInfo->currentGroupId == -1) { - if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) { + int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); + if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo) || numOfTables == 0) { setOperatorCompleted(pOperator); return NULL; } - int32_t num = 0; - STableKeyInfo* pList = NULL; tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); ASSERT(pInfo->base.dataReader == NULL); @@ -899,9 +979,11 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) { if (pInfo->pResBlock->info.capacity > pOperator->resultInfo.capacity) { pOperator->resultInfo.capacity = pInfo->pResBlock->info.capacity; } + } else { + tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); } - SSDataBlock* result = doGroupedTableScan(pOperator); + SSDataBlock* result = doGroupedTableScan(pOperator, pList, num); if (result != NULL) { if (pOperator->dynamicTask) { result->info.id.groupId = result->info.id.uid; @@ -923,7 +1005,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; - + if (pOperator->pOperatorGetParam) { pOperator->dynamicTask = true; int32_t code = createTableListInfoFromParam(pOperator); @@ -952,7 +1034,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { STableKeyInfo tInfo = {0}; while (1) { - SSDataBlock* result = doGroupedTableScan(pOperator); + SSDataBlock* result = doGroupedTableScan(pOperator, NULL, 0); if (result || (pOperator->status == OP_EXEC_DONE) || isTaskKilled(pTaskInfo)) { return result; } @@ -1012,6 +1094,7 @@ static void destroyTableScanOperatorInfo(void* param) { STableScanInfo* pTableScanInfo = (STableScanInfo*)param; blockDataDestroy(pTableScanInfo->pResBlock); taosHashCleanup(pTableScanInfo->pIgnoreTables); + taosHashCleanup(pTableScanInfo->pValuedTables); destroyTableScanBase(&pTableScanInfo->base, &pTableScanInfo->base.readerAPI); taosMemoryFreeClear(param); } @@ -1075,6 +1158,9 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, pTaskInfo); pOperator->exprSupp.numOfExprs = numOfCols; + pInfo->needCountEmptyTable = 
tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable; + pInfo->processingEmptyTable = false; + pInfo->base.pTableListInfo = pTableListInfo; pInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5); if (pInfo->base.metaCache.pTableMetaEntryCache == NULL) { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 97438b84a6..8a154dcf00 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -652,6 +652,7 @@ static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhy COPY_SCALAR_FIELD(watermark); COPY_SCALAR_FIELD(igExpired); COPY_SCALAR_FIELD(filesetDelimited); + COPY_SCALAR_FIELD(needCountEmptyTable); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index c445af61cc..402a6c6e3d 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1841,6 +1841,7 @@ static const char* jkTableScanPhysiPlanSubtable = "Subtable"; static const char* jkTableScanPhysiPlanAssignBlockUid = "AssignBlockUid"; static const char* jkTableScanPhysiPlanIgnoreUpdate = "IgnoreUpdate"; static const char* jkTableScanPhysiPlanFilesetDelimited = "FilesetDelimited"; +static const char* jkTableScanPhysiPlanNeedCountEmptyTable = "NeedCountEmptyTable"; static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj; @@ -1912,7 +1913,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanFilesetDelimited, pNode->filesetDelimited); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanNeedCountEmptyTable, pNode->needCountEmptyTable); + } return code; } @@ -1986,7 +1989,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanFilesetDelimited, &pNode->filesetDelimited); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanNeedCountEmptyTable, &pNode->needCountEmptyTable); + } return code; } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index d6eb3360aa..b36e2695f6 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2170,6 +2170,9 @@ static int32_t physiTableScanNodeInlineToMsg(const void* pObj, STlvEncoder* pEnc if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeValueBool(pEncoder, pNode->filesetDelimited); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeValueBool(pEncoder, pNode->needCountEmptyTable); + } return code; } @@ -2251,7 +2254,9 @@ static int32_t msgToPhysiTableScanNodeInline(STlvDecoder* pDecoder, void* pObj) if (TSDB_CODE_SUCCESS == code) { code = tlvDecodeValueBool(pDecoder, &pNode->filesetDelimited); } - + if (TSDB_CODE_SUCCESS == code) { + code = tlvDecodeValueBool(pDecoder, &pNode->needCountEmptyTable); + } return code; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 3bb24566c2..98799d3d1d 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1902,6 +1902,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) { if (NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == 
nodeType(pCurrStmt)) { SSelectStmt* pSelect = (SSelectStmt*)pCurrStmt; pSelect->hasAggFuncs = pSelect->hasAggFuncs ? true : fmIsAggFunc(pFunc->funcId); + pSelect->hasCountFunc = pSelect->hasCountFunc ? true : (FUNCTION_TYPE_COUNT == pFunc->funcType); pSelect->hasRepeatScanFuncs = pSelect->hasRepeatScanFuncs ? true : fmIsRepeatScanFunc(pFunc->funcId); if (fmIsIndefiniteRowsFunc(pFunc->funcId)) { diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index d1fbd0681d..901927b1d1 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -587,6 +587,27 @@ static int32_t createTableCountScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); } +static bool calcNeedCountEmpty(SPhysiPlanContext* pCxt, SScanLogicNode* pScanLogicNode) { + // refuse interval + if (pScanLogicNode->interval > 0) { + return false; + } + SNode* pRoot = pCxt->pPlanCxt->pAstRoot; + if (QUERY_NODE_SELECT_STMT == nodeType(pRoot)) { + SSelectStmt* pSelect = (SSelectStmt*)pRoot; + // select & count + if (pSelect->hasCountFunc) { + // key only accept tag/tbname + if (NULL != pSelect->pGroupByList) { + return !keysHasCol(pSelect->pGroupByList); + } else if (NULL != pSelect->pPartitionByList) { + return !keysHasCol(pSelect->pPartitionByList); + } + } + } + return false; +} + static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) { STableScanPhysiNode* pTableScan = (STableScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, @@ -623,6 +644,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->igCheckUpdate = pScanLogicNode->igCheckUpdate; pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? 
true : false; pTableScan->filesetDelimited = pScanLogicNode->filesetDelimited; + pTableScan->needCountEmptyTable = calcNeedCountEmpty(pCxt, pScanLogicNode); int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); if (TSDB_CODE_SUCCESS == code) { diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index bcdd143cfc..e63f86743b 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -347,6 +347,7 @@ e ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws2.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index 40d9b3ff8b..7e99f9e4dd 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -57,8 +57,11 @@ class TDTestCase: tdSql.query(f'select count(*) from {self.stbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) + rows = [2, 0] function_names = ['count', 'hyperloglog'] - for function_name in function_names: + for i in range(2): + function_name = function_names[i] + row = rows[i] tdSql.query(f'select {function_name}(tbname) from {self.stbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) @@ -93,17 +96,17 @@ class TDTestCase: tdSql.query(f'select sum(1),max(c2),min(1),leastsquares(c1,1,1) from {self.stbname}') tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} group by tbname') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} group by c1') tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} group by t0') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by tbname') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by c1') tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by t0') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(1) from (select {function_name}(c1),sum(c1) from {self.stbname} group by c1)') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) @@ -113,17 +116,24 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.stbname} partition by c1 interval(1s)') tdSql.checkRows(0) - tdSql.query(f'select {function_name}(1),sum(1) from (select {function_name}(1) from {self.stbname} group by tbname)') + tdSql.query(f'select {function_name}(1),sum(1) from (select {function_name}(1) from {self.stbname} group by tbname order by tbname)') tdSql.checkRows(1) - tdSql.checkData(0, 0, 0) - tdSql.checkData(0, 1, None) + if 'count' == function_name: + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + elif 'hyperloglog' == function_name: + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 0, 0) def query_empty_ntb(self): tdSql.query(f'select count(*) from {self.ntbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) + rows = [1, 0] function_names = ['count', 'hyperloglog'] - for 
function_name in function_names: + for i in range(2): + function_name = function_names[i] + row = rows[i] tdSql.query(f'select {function_name}(tbname) from {self.ntbname}') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) @@ -158,7 +168,7 @@ class TDTestCase: tdSql.query(f'select sum(1),max(c2),min(1),leastsquares(c1,1,1) from {self.ntbname}') tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.ntbname} group by tbname') - tdSql.checkRows(0) + tdSql.checkRows(row) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.ntbname} group by c1') tdSql.checkRows(0) tdSql.query(f'select {function_name}(1) from (select {function_name}(c1),sum(c1) from {self.ntbname} group by c1)') @@ -170,10 +180,11 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query(f'select {function_name}(c1),sum(c1) from {self.ntbname} partition by c1 interval(1s)') tdSql.checkRows(0) - tdSql.query(f'select count(1),sum(1) from (select count(1) from {self.ntbname} group by tbname)') + tdSql.query(f'select count(1),sum(1) from (select count(1) from {self.ntbname} group by tbname order by tbname)') tdSql.checkRows(1) tdSql.checkData(0, 0, 0) tdSql.checkData(0, 1, None) + def count_query_stb(self,column_dict,tag_dict,stbname,tbnum,rownum): tdSql.query(f'select count(tbname) from {stbname}') tdSql.checkEqual(tdSql.queryResult[0][0],tbnum*rownum) diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py new file mode 100644 index 0000000000..eb5f069b3d --- /dev/null +++ b/tests/system-test/2-query/group_partition.py @@ -0,0 +1,183 @@ +# author : bobliu +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.row_nums = 10 + self.tb_nums = 10 + self.ts = 1537146000000 + self.dbname = "db" + self.stable = "stb" + + def prepare_db(self): + tdSql.execute(f" use {self.dbname} ") + tdSql.execute(f" create stable {self.dbname}.{self.stable} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ + uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\ + , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ") + + for i in range(self.tb_nums): + tbname = f"{self.dbname}.sub_{self.stable}_{i}" + ts = self.ts + i*10000 + tdSql.execute(f"create table {tbname} using {self.dbname}.{self.stable} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") + + def insert_db(self, tb_nums, row_nums): + for i in range(tb_nums): + tbname = f"{self.dbname}.sub_{self.stable}_{i}" + ts_base = self.ts + i*10000 + for row in range(row_nums): + ts = ts_base + row*1000 + tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )") + + + def test_groupby(self, check_num, real_num): + # tbname + tdSql.query(f"select count(*) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f"select count(*), sum(1) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname, count(*) from 
{self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(check_num) + + # having filter out empty + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} group by tbname having count(*) <= 0") + tdSql.checkRows(check_num - real_num) + + # tag + tdSql.query(f"select count(*) from {self.dbname}.{self.stable} group by t2 ") + tdSql.checkRows(check_num) + + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 ") + tdSql.checkRows(check_num) + + # having + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 having count(*) <= 0") + tdSql.checkRows(check_num - real_num) + + # col where filter nothing + # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now group by t2 ") + # tdSql.checkRows(check_num) + + ############### same with old ############### + # col where filter all + # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 group by t2 ") + # tdSql.checkRows(0) + + # col where filter part + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 group by t2 ") + tdSql.checkRows(real_num) + + # col + tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(real_num) + + # count + sum(col) + tdSql.query(f"select count(*), sum(c1) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(real_num) + + tdSql.query(f"select c1, count(*) from {self.dbname}.{self.stable} group by c1 ") + num = 0 + if real_num > 0: + num = self.row_nums + tdSql.checkRows(num) + + tdSql.query(f"select ts, count(*) from {self.dbname}.{self.stable} group by ts ") + tdSql.checkRows(real_num * self.row_nums) + + # col + tag + tdSql.query(f"select t2, c1, count(*) from {self.dbname}.{self.stable} group by t2, c1 ") + tdSql.checkRows(real_num * self.row_nums) + + + def test_partitionby(self, check_num, real_num): + tdSql.query(f"select tbname , count(*) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.checkRows(check_num) + + # having filter out empty + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname having count(*) <= 0") + tdSql.checkRows(check_num - real_num) + + #tag + tdSql.query(f"select count(*) from {self.dbname}.{self.stable} partition by t2 ") + tdSql.checkRows(check_num) + + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} partition by t2 ") + tdSql.checkRows(check_num) + + # having + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} partition by t2 having count(*) <= 0") + tdSql.checkRows(check_num - real_num) + + # col where filter nothing + # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now partition by t2 ") + # tdSql.checkRows(check_num) + + ############### same with old ############### + # col where filter all + # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 partition by t2 ") + # tdSql.checkRows(0) + + # col where filter part + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 partition by t2 ") + tdSql.checkRows(real_num) + + #col + tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.checkRows(real_num) + + tdSql.query(f"select c1, count(*) from {self.dbname}.{self.stable} partition by c1 ") + num = 0 + if 
real_num > 0: + num = self.row_nums + tdSql.checkRows(num) + + tdSql.query(f"select ts, count(*) from {self.dbname}.{self.stable} partition by ts ") + tdSql.checkRows(real_num * self.row_nums) + + tdSql.query(f"select t2, c1, count(*) from {self.dbname}.{self.stable} partition by t2, c1 ") + tdSql.checkRows(real_num * self.row_nums) + + def test_error(self): + tdSql.error(f"select * from {self.dbname}.{self.stable} group by t2") + tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") + + + def run(self): + tdSql.prepare() + self.prepare_db() + check_num = self.tb_nums + self.test_groupby(check_num, 0) + self.test_partitionby(check_num, 0) + # insert into half of tables + real_num = 5 + self.insert_db(real_num, self.row_nums) + self.test_groupby(check_num, real_num) + self.test_partitionby(check_num, real_num) + + # test old version before changed + # self.test_groupby(0, 0) + # self.test_partitionby(0, 0) + # self.insert_db(5, self.row_nums) + # self.test_groupby(5, 5) + # self.test_partitionby(5, 5) + + self.test_error() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/nestedQueryInterval.py b/tests/system-test/2-query/nestedQueryInterval.py index c16fc03c27..b6ef50dcda 100644 --- a/tests/system-test/2-query/nestedQueryInterval.py +++ b/tests/system-test/2-query/nestedQueryInterval.py @@ -1112,13 +1112,13 @@ class TDTestCase: def TS_3932(self): tdLog.debug("test insert data into stable") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 100) tdSql.checkData(1, 1, 200) tdSql.query(f"insert into nested.stable_1 (ts,tbname) values(now,'stable_1_1');") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 101) tdSql.checkData(1, 1, 200) @@ -1127,7 +1127,7 @@ class TDTestCase: coulmn_name = qlist[i] tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;",queryTimes=5) - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 111) tdSql.checkData(1, 1, 200) @@ -1136,7 +1136,7 @@ class TDTestCase: coulmn_name = q_null_list[i] tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;",queryTimes=5) - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 121) tdSql.checkData(1, 1, 200) @@ -1184,7 +1184,7 @@ class TDTestCase: def TS_3932_flushdb(self): tdLog.debug("test flush db and insert data into stable") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 121) tdSql.checkData(1, 1, 200) @@ -1192,7 +1192,7 @@ class TDTestCase: q_null_list = ['q_int_null', 'q_bigint_null', 'q_smallint_null', 'q_tinyint_null', 'q_float_null', 'q_double_null', 'q_bool_null', 'q_binary_null', 'q_nchar_null', 'q_ts_null'] tdSql.query(f"insert into nested.stable_1 (ts,tbname) values(now,'stable_1_1');") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order 
by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 122) tdSql.checkData(1, 1, 200) @@ -1200,7 +1200,7 @@ class TDTestCase: coulmn_name = qlist[i] tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 132) tdSql.checkData(1, 1, 200) @@ -1208,7 +1208,7 @@ class TDTestCase: coulmn_name = q_null_list[i] tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 142) tdSql.checkData(1, 1, 200) @@ -1223,7 +1223,7 @@ class TDTestCase: nested.stable_1 (ts,tbname,q_nchar) values(now+8a,'stable_1_1',1)\ nested.stable_1 (ts,tbname,q_ts) values(now+9a,'stable_1_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 152); tdSql.checkData(1, 1, 200); @@ -1330,7 +1330,7 @@ class TDTestCase: nested.stable_null_childtable (ts,tbname,q_ts) values(now+9a,'stable_null_childtable_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(2) + tdSql.checkRows(6) tdSql.checkData(0, 1, 162); tdSql.checkData(1, 1, 200); @@ -1349,7 +1349,7 @@ class TDTestCase: nested.stable_null_childtable (ts,tbname,q_int) values(now,'$^%$%^&',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(3) + tdSql.checkRows(7) tdSql.checkData(0, 1, 1); tdSql.checkData(1, 1, 162); tdSql.checkData(2, 1, 200); @@ -1387,7 +1387,7 @@ class TDTestCase: nested.stable_null_childtable(tbname,ts,q_int,q_binary) file '{self.testcasePath}/stable_null_childtable.csv';") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(3) + tdSql.checkRows(7) tdSql.checkData(0, 1, 1); tdSql.checkData(1, 1, 162); tdSql.checkData(2, 1, 200); @@ -1423,7 +1423,7 @@ class TDTestCase: tdSql.query(f"insert into nested.stable_null_childtable(tbname,ts,q_int,q_binary) file '{self.testcasePath}/stable_null_childtable.csv';") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(3) + tdSql.checkRows(7) tdSql.checkData(0, 1, 1); tdSql.checkData(1, 1, 162); tdSql.checkData(2, 1, 200); From 3bfc7b34862cbea233e33b9b5617f154bf28f295 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 22 Dec 2023 00:47:15 +0800 Subject: [PATCH 02/69] adjust case --- tests/system-test/2-query/group_partition.py | 44 +++++++++++++------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index eb5f069b3d..58be8563fa 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -46,6 +46,12 @@ class TDTestCase: tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} group by tbname ") tdSql.checkRows(check_num) + tdSql.query(f"select tbname from {self.dbname}.{self.stable} group by tbname order by count(*)") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname from {self.dbname}.{self.stable} group by tbname having count(*)>=0") + 
tdSql.checkRows(check_num) + # having filter out empty tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} group by tbname having count(*) <= 0") tdSql.checkRows(check_num - real_num) @@ -62,26 +68,26 @@ class TDTestCase: tdSql.checkRows(check_num - real_num) # col where filter nothing - # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now group by t2 ") - # tdSql.checkRows(check_num) + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now group by t2 ") + tdSql.checkRows(check_num) - ############### same with old ############### # col where filter all - # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 group by t2 ") - # tdSql.checkRows(0) + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 group by t2 ") + tdSql.checkRows(check_num) # col where filter part tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 group by t2 ") - tdSql.checkRows(real_num) + tdSql.checkRows(check_num) # col tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} group by tbname ") - tdSql.checkRows(real_num) + tdSql.checkRows(check_num) # count + sum(col) tdSql.query(f"select count(*), sum(c1) from {self.dbname}.{self.stable} group by tbname ") - tdSql.checkRows(real_num) + tdSql.checkRows(check_num) + ############### same with old ############### tdSql.query(f"select c1, count(*) from {self.dbname}.{self.stable} group by c1 ") num = 0 if real_num > 0: @@ -103,6 +109,12 @@ class TDTestCase: tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname ") tdSql.checkRows(check_num) + tdSql.query(f"select tbname from {self.dbname}.{self.stable} partition by tbname order by count(*)") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname from {self.dbname}.{self.stable} partition by tbname having count(*)>=0") + tdSql.checkRows(check_num) + # having filter out empty tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname having count(*) <= 0") tdSql.checkRows(check_num - real_num) @@ -119,22 +131,25 @@ class TDTestCase: tdSql.checkRows(check_num - real_num) # col where filter nothing - # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now partition by t2 ") - # tdSql.checkRows(check_num) + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now partition by t2 ") + tdSql.checkRows(check_num) - ############### same with old ############### # col where filter all - # tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 partition by t2 ") - # tdSql.checkRows(0) + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 partition by t2 ") + tdSql.checkRows(check_num) # col where filter part tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 partition by t2 ") - tdSql.checkRows(real_num) + tdSql.checkRows(check_num) #col tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname interval(1d)") tdSql.checkRows(real_num) + ############### same with old ############### tdSql.query(f"select c1, count(*) from {self.dbname}.{self.stable} partition by c1 ") num = 0 if real_num > 0: @@ -150,6 +165,7 @@ class TDTestCase: def test_error(self): tdSql.error(f"select 
* from {self.dbname}.{self.stable} group by t2") tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") + tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 interval(1d)") def run(self): From 3b0d1480f7783d05b4cab34a4548fd95c436c476 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 22 Dec 2023 11:28:26 +0800 Subject: [PATCH 03/69] adjust case --- tests/script/tsim/field/3.sim | 14 ++++---- tests/script/tsim/field/4.sim | 18 +++++----- tests/script/tsim/field/6.sim | 38 ++++++++++---------- tests/script/tsim/field/bigint.sim | 4 +-- tests/script/tsim/field/double.sim | 4 +-- tests/script/tsim/field/smallint.sim | 4 +-- tests/script/tsim/field/unsigined_bigint.sim | 4 +-- tests/script/tsim/parser/mixed_blocks.sim | 3 +- tests/system-test/2-query/count_partition.py | 4 +-- tests/system-test/2-query/last_row.py | 2 +- 10 files changed, 48 insertions(+), 47 deletions(-) diff --git a/tests/script/tsim/field/3.sim b/tests/script/tsim/field/3.sim index 8b428febcd..4d4801c54a 100644 --- a/tests/script/tsim/field/3.sim +++ b/tests/script/tsim/field/3.sim @@ -447,44 +447,44 @@ if $data00 != 100 then endi print =============== step17 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step18 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), 
avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/4.sim b/tests/script/tsim/field/4.sim index 361ca4c326..d83de81c88 100644 --- a/tests/script/tsim/field/4.sim +++ b/tests/script/tsim/field/4.sim @@ -619,56 +619,56 @@ if $data00 != 100 then endi print =============== step22 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step23 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 
$data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/6.sim b/tests/script/tsim/field/6.sim index 52fd0b3780..2d2b5623c7 100644 --- a/tests/script/tsim/field/6.sim +++ b/tests/script/tsim/field/6.sim @@ -824,117 +824,117 @@ if $data00 != 25 then endi print =============== step28 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol3 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol3 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 
endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol4 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol4 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol5 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol5 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol6 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt group by tgcol6 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step29 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 
$data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol1 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol1 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step30 -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 
1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 endi -sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol2 +sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where ts < 1626739440001 and tbcol1 = 1 and tbcol2 = 1 and tbcol3 = 1 and tbcol4 = 1 and tbcol5 = 1 and tbcol6 = 1 group by tgcol2 order by count(tbcol1) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/bigint.sim b/tests/script/tsim/field/bigint.sim index ce35cacd84..a791d6a1cb 100644 --- a/tests/script/tsim/field/bigint.sim +++ b/tests/script/tsim/field/bigint.sim @@ -127,7 +127,7 @@ if $data00 != 100 then endi print =============== step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 print $data10 $data11 $data12 $data13 $data14 $data15 $data16 if $data00 != 100 then @@ -136,7 +136,7 @@ if $data00 != 100 then endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/double.sim b/tests/script/tsim/field/double.sim index 1f0cea4be8..2daa78eb7a 100644 --- a/tests/script/tsim/field/double.sim +++ b/tests/script/tsim/field/double.sim @@ -127,14 +127,14 @@ if $data00 != 100 then endi print =============== step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/smallint.sim b/tests/script/tsim/field/smallint.sim index 66bfee5838..980d69297a 100644 --- a/tests/script/tsim/field/smallint.sim +++ b/tests/script/tsim/field/smallint.sim @@ -128,14 +128,14 @@ if $data00 != 100 then endi print =============== 
step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then return -1 endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/field/unsigined_bigint.sim b/tests/script/tsim/field/unsigined_bigint.sim index baa57ce1f6..de8098c297 100644 --- a/tests/script/tsim/field/unsigined_bigint.sim +++ b/tests/script/tsim/field/unsigined_bigint.sim @@ -131,7 +131,7 @@ if $data00 != 100 then endi print =============== step6 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 print $data10 $data11 $data12 $data13 $data14 $data15 $data16 if $data00 != 100 then @@ -140,7 +140,7 @@ if $data00 != 100 then endi print =============== step7 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < 1626739440001 and tbcol = 1 group by tgcol order by count(tbcol) desc print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 25 then return -1 diff --git a/tests/script/tsim/parser/mixed_blocks.sim b/tests/script/tsim/parser/mixed_blocks.sim index 04e9df3ff4..8716192858 100644 --- a/tests/script/tsim/parser/mixed_blocks.sim +++ b/tests/script/tsim/parser/mixed_blocks.sim @@ -100,7 +100,8 @@ if $data05 != 1 then endi sql select max(c1), min(c1), sum(c1), avg(c1), count(c1), t1 from $stb where c1 > 0 group by t1 -if $rows != 1 then +if $rows != 2 then + print === rows $rows return -1 endi if $data00 != 319 then diff --git a/tests/system-test/2-query/count_partition.py b/tests/system-test/2-query/count_partition.py index f59376a979..e970b00cec 100644 --- a/tests/system-test/2-query/count_partition.py +++ b/tests/system-test/2-query/count_partition.py @@ -66,9 +66,9 @@ class TDTestCase: tdSql.checkRows(self.row_nums+1) tdSql.query(f"select count(c1) , count(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname") - tdSql.checkRows(2) + tdSql.checkRows(10) tdSql.query(f"select count(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname") - tdSql.checkRows(2) + tdSql.checkRows(10) tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index a6bcc2c5f1..5b989eb456 100644 --- a/tests/system-test/2-query/last_row.py +++ 
b/tests/system-test/2-query/last_row.py @@ -436,7 +436,7 @@ class TDTestCase: tdSql.checkData(1,1,None) tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 ") - tdSql.checkRows(2) + tdSql.checkRows(4) # filter by tbname tdSql.query(f"select last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ") From ef5b5786b38e3d14465ae16780b47ed4f5935265 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 22 Dec 2023 16:03:09 +0800 Subject: [PATCH 04/69] adjust --- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 1889492aa0..6df46ef4d0 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -683,7 +683,7 @@ static SSDataBlock* getBlockForEmptyTable(SOperatorInfo* pOperator, const STable blockDataEmpty(pBlock); pBlock->info.rows = 1; pBlock->info.id.uid = tbInfo->uid; - pBlock->info.id.groupId = pOperator->dynamicTask ? tbInfo->uid : tbInfo->groupId; + pBlock->info.id.groupId = tbInfo->groupId; // only one row: set all col data to null & hasNull int32_t col_num = blockDataGetNumOfCols(pBlock); From 6e4703e5b43af47d83aca6acf4e5d8fe1dc16d7d Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 22 Dec 2023 22:58:40 +0800 Subject: [PATCH 05/69] adjust ts --- .../2-query/nestedQueryInterval.py | 100 ++++++++++-------- 1 file changed, 58 insertions(+), 42 deletions(-) diff --git a/tests/system-test/2-query/nestedQueryInterval.py b/tests/system-test/2-query/nestedQueryInterval.py index b6ef50dcda..185848f6e0 100644 --- a/tests/system-test/2-query/nestedQueryInterval.py +++ b/tests/system-test/2-query/nestedQueryInterval.py @@ -5,6 +5,8 @@ import socket import os import threading +from datetime import timezone, datetime + from util.log import * from util.sql import * from util.cases import * @@ -1196,9 +1198,14 @@ class TDTestCase: tdSql.checkData(0, 1, 122) tdSql.checkData(1, 1, 200) + pd = datetime.datetime.now() + ts = int(datetime.datetime.timestamp(pd)*1000 - 10000) + print(f"start time {ts}") + for i in range(10): coulmn_name = qlist[i] - tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") + tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values({ts+i},'stable_1_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(6) tdSql.checkData(0, 1, 132) @@ -1206,92 +1213,101 @@ class TDTestCase: for i in range(10): coulmn_name = q_null_list[i] - tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_1_1',1);") + tdSql.execute(f"insert into nested.stable_1 (ts, tbname, {coulmn_name}) values({ts+i},'stable_1_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(6) tdSql.checkData(0, 1, 142) tdSql.checkData(1, 1, 200) - tdSql.execute(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'stable_1_1',1) \ - nested.stable_1 (ts,tbname,q_bigint) values(now+1a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_smallint) values(now+2a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_tinyint) values(now+3a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_float) values(now+4a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_double) values(now+5a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_bool) values(now+6a,'stable_1_1',1)\ - nested.stable_1 
(ts,tbname,q_binary) values(now+7a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_nchar) values(now+8a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_ts) values(now+9a,'stable_1_1',1);") + tdSql.execute(f"insert into nested.stable_1 (ts,tbname,q_int) values({ts},'stable_1_1',1) \ + nested.stable_1 (ts,tbname,q_bigint) values({ts+1},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_smallint) values({ts+2},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_tinyint) values({ts+3},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_float) values({ts+4},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_double) values({ts+5},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_bool) values({ts+6},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_binary) values({ts+7},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_nchar) values({ts+8},'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_ts) values({ts+9},'stable_1_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(6) tdSql.checkData(0, 1, 152); tdSql.checkData(1, 1, 200); - tdSql.query(f"insert into nested.stable_null_data (ts,tbname) values(now,'stable_null_data_1');") + tdSql.query(f"insert into nested.stable_null_data (ts,tbname) values({ts},'stable_null_data_1');") + ts = ts + 1 tdSql.query(f"select tbname,count(*) from nested.stable_null_data_1 group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 22); for i in range(10): coulmn_name = qlist[i] - tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_null_data_1',1);") + tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {coulmn_name}) values({ts+i},'stable_null_data_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 32) for i in range(10): coulmn_name = q_null_list[i] - tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_null_data_1',1);") - + tdSql.execute(f"insert into nested.stable_null_data (ts, tbname, {coulmn_name}) values({ts+i},'stable_null_data_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 42) - tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int) values(now,'stable_null_data_1',1) \ - nested.stable_null_data (ts,tbname,q_bigint) values(now+1a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_smallint) values(now+2a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_tinyint) values(now+3a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_float) values(now+4a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_double) values(now+5a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_bool) values(now+6a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_binary) values(now+7a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_nchar) values(now+8a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_ts) values(now+9a,'stable_null_data_1',1);") + tdSql.query(f"insert into nested.stable_null_data (ts,tbname,q_int) values({ts},'stable_null_data_1',1) \ + nested.stable_null_data (ts,tbname,q_bigint) values({ts+1},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_smallint) values({ts+2},'stable_null_data_1',1)\ + nested.stable_null_data 
(ts,tbname,q_tinyint) values({ts+3},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_float) values({ts+4},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_double) values({ts+5},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_bool) values({ts+6},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_binary) values({ts+7},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_nchar) values({ts+8},'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_ts) values({ts+9},'stable_null_data_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 52); - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname) values(now,'stable_null_childtable_1');") + tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname) values({ts},'stable_null_childtable_1');") + ts = ts + 1 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 22) for i in range(10): coulmn_name = qlist[i] - tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_null_childtable_1',1);") + tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {coulmn_name}) values({ts+i},'stable_null_childtable_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 32) for i in range(10): coulmn_name = q_null_list[i] - tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {coulmn_name}) values(now+{i}s,'stable_null_childtable_1',1);") + tdSql.execute(f"insert into nested.stable_null_childtable (ts, tbname, {coulmn_name}) values({ts+i},'stable_null_childtable_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 42); - tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int) values(now,'stable_null_childtable_1',1) \ - nested.stable_null_childtable (ts,tbname,q_bigint) values(now+1a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_smallint) values(now+2a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_tinyint) values(now+3a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_float) values(now+4a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_double) values(now+5a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_bool) values(now+6a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_binary) values(now+7a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_nchar) values(now+8a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_ts) values(now+9a,'stable_null_childtable_1',1);") + tdSql.query(f"insert into nested.stable_null_childtable (ts,tbname,q_int) values({ts},'stable_null_childtable_1',1) \ + nested.stable_null_childtable (ts,tbname,q_bigint) values({ts+1},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_smallint) values({ts+2},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_tinyint) values({ts+3},'stable_null_childtable_1',1)\ + nested.stable_null_childtable 
(ts,tbname,q_float) values({ts+4},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_double) values({ts+5},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_bool) values({ts+6},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_binary) values({ts+7},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_nchar) values({ts+8},'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_ts) values({ts+9},'stable_null_childtable_1',1);") + ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) tdSql.checkData(0, 1, 52); @@ -1344,9 +1360,9 @@ class TDTestCase: #test special character - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'!@!@$$^$',1) \ - nested.stable_null_data (ts,tbname,q_int) values(now,'%^$^&^&',1) \ - nested.stable_null_childtable (ts,tbname,q_int) values(now,'$^%$%^&',1);") + tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now+10a,'!@!@$$^$',1) \ + nested.stable_null_data (ts,tbname,q_int) values(now+10a,'%^$^&^&',1) \ + nested.stable_null_childtable (ts,tbname,q_int) values(now+10a,'$^%$%^&',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(7) From c9b69316c1f8f71281d1506ed681989b5f4e4990 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Sun, 24 Dec 2023 01:19:16 +0800 Subject: [PATCH 06/69] calcNeedCountEmpty --- include/libs/nodes/plannodes.h | 1 + source/libs/nodes/src/nodesCloneFuncs.c | 1 + source/libs/planner/src/planLogicCreater.c | 9 +++++++++ source/libs/planner/src/planOptimizer.c | 1 + source/libs/planner/src/planPhysiCreater.c | 22 +++++++++++----------- 5 files changed, 23 insertions(+), 11 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 4b3c846389..55ac3b86bf 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -147,6 +147,7 @@ typedef struct SAggLogicNode { bool isGroupTb; bool isPartTb; // true if partition keys has tbname bool hasGroup; + bool isCountByTag; // true if hasCountFunc & part by tag/tbname } SAggLogicNode; typedef struct SProjectLogicNode { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 8a154dcf00..2af31a8c7a 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -453,6 +453,7 @@ static int32_t logicAggCopy(const SAggLogicNode* pSrc, SAggLogicNode* pDst) { COPY_SCALAR_FIELD(isGroupTb); COPY_SCALAR_FIELD(isPartTb); COPY_SCALAR_FIELD(hasGroup); + COPY_SCALAR_FIELD(isCountByTag); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 2adc5b3072..17b1b272ba 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -748,6 +748,15 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, pAgg->isGroupTb = pAgg->pGroupKeys ? keysHasTbname(pAgg->pGroupKeys) : 0; pAgg->isPartTb = pSelect->pPartitionByList ? 
keysHasTbname(pSelect->pPartitionByList) : 0; pAgg->hasGroup = pAgg->pGroupKeys || pSelect->pPartitionByList; + bool isCountByTag = false; + if (pSelect->hasCountFunc) { + if (pSelect->pGroupByList) { + isCountByTag = !keysHasCol(pSelect->pGroupByList); + } else if (pSelect->pPartitionByList) { + isCountByTag = !keysHasCol(pSelect->pPartitionByList); + } + } + pAgg->isCountByTag = isCountByTag; if (TSDB_CODE_SUCCESS == code) { *pLogicNode = (SLogicNode*)pAgg; diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index aa3181e166..9ecbd3ae9f 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2783,6 +2783,7 @@ static int32_t splitCacheLastFuncOptCreateAggLogicNode(SAggLogicNode** pNewAgg, pNew->isGroupTb = pAgg->isGroupTb; pNew->isPartTb = pAgg->isPartTb; pNew->hasGroup = pAgg->hasGroup; + pNew->isCountByTag = pAgg->isCountByTag; pNew->node.pChildren = nodesCloneList(pAgg->node.pChildren); int32_t code = 0; diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 901927b1d1..9c4e9f97ab 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -37,6 +37,7 @@ typedef struct SPhysiPlanContext { SArray* pLocationHelper; bool hasScan; bool hasSysScan; + SLogicNode* pNode; // tmp record LogicSubplan->pNode } SPhysiPlanContext; static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char* pKey, int32_t keyBufSize) { @@ -592,17 +593,13 @@ static bool calcNeedCountEmpty(SPhysiPlanContext* pCxt, SScanLogicNode* pScanLog if (pScanLogicNode->interval > 0) { return false; } + // limit: root node is select SNode* pRoot = pCxt->pPlanCxt->pAstRoot; - if (QUERY_NODE_SELECT_STMT == nodeType(pRoot)) { - SSelectStmt* pSelect = (SSelectStmt*)pRoot; - // select & count - if (pSelect->hasCountFunc) { - // key only accept tag/tbname - if (NULL != pSelect->pGroupByList) { - return !keysHasCol(pSelect->pGroupByList); - } else if (NULL != pSelect->pPartitionByList) { - return !keysHasCol(pSelect->pPartitionByList); - } + if (QUERY_NODE_SELECT_STMT == nodeType(pRoot) + && pCxt->pNode && QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pCxt->pNode)) { + SAggLogicNode* pNode = (SAggLogicNode*)pCxt->pNode; + if (pNode->isCountByTag) { + return true; } } return false; @@ -2302,7 +2299,9 @@ static int32_t createPhysiSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogic } else { pSubplan->msgType = TDMT_SCH_MERGE_QUERY; } + pCxt->pNode = pLogicSubplan->pNode; code = createPhysiNode(pCxt, pLogicSubplan->pNode, pSubplan, &pSubplan->pNode); + pCxt->pNode = NULL; if (TSDB_CODE_SUCCESS == code && !pCxt->pPlanCxt->streamQuery && !pCxt->pPlanCxt->topicQuery) { code = createDataDispatcher(pCxt, pSubplan->pNode, &pSubplan->pDataSink); } @@ -2454,7 +2453,8 @@ int32_t createPhysiPlan(SPlanContext* pCxt, SQueryLogicPlan* pLogicPlan, SQueryP .nextDataBlockId = 0, .pLocationHelper = taosArrayInit(32, POINTER_BYTES), .hasScan = false, - .hasSysScan = false}; + .hasSysScan = false, + .pNode = NULL}; if (NULL == cxt.pLocationHelper) { return TSDB_CODE_OUT_OF_MEMORY; } From 651b681d4c95879c5a20668c8b85c45583383e98 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Sun, 24 Dec 2023 18:45:51 +0800 Subject: [PATCH 07/69] adjust case --- tests/system-test/2-query/count.py | 4 ++-- tests/system-test/2-query/last_row.py | 4 ++-- tests/system-test/2-query/nestedQueryInterval.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git 
a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index 7e99f9e4dd..0e1d332d89 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -119,8 +119,8 @@ class TDTestCase: tdSql.query(f'select {function_name}(1),sum(1) from (select {function_name}(1) from {self.stbname} group by tbname order by tbname)') tdSql.checkRows(1) if 'count' == function_name: - tdSql.checkData(0, 0, 0) - tdSql.checkData(0, 1, None) + tdSql.checkData(0, 0, 2) + tdSql.checkData(0, 1, 2) elif 'hyperloglog' == function_name: tdSql.checkData(0, 0, 0) tdSql.checkData(0, 0, 0) diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index 5b989eb456..6469b3ea54 100644 --- a/tests/system-test/2-query/last_row.py +++ b/tests/system-test/2-query/last_row.py @@ -435,8 +435,8 @@ class TDTestCase: tdSql.checkData(1,0,'ct4') tdSql.checkData(1,1,None) - tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 ") - tdSql.checkRows(4) + tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 having count(c1)>0") + tdSql.checkRows(2) # filter by tbname tdSql.query(f"select last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ") diff --git a/tests/system-test/2-query/nestedQueryInterval.py b/tests/system-test/2-query/nestedQueryInterval.py index 185848f6e0..07b5519432 100644 --- a/tests/system-test/2-query/nestedQueryInterval.py +++ b/tests/system-test/2-query/nestedQueryInterval.py @@ -1113,8 +1113,8 @@ class TDTestCase: def TS_3932(self): tdLog.debug("test insert data into stable") - tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") - tdSql.checkRows(6) + tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname having count(*)>0 order by tbname;") + tdSql.checkRows(2) tdSql.checkData(0, 1, 100) tdSql.checkData(1, 1, 200) From 52c244d9ec0a9ce286a25379b3516159f0380222 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Sun, 24 Dec 2023 23:48:37 +0800 Subject: [PATCH 08/69] adjust --- source/libs/planner/src/planPhysiCreater.c | 22 +++++++++++++++----- tests/system-test/2-query/count.py | 2 +- tests/system-test/2-query/group_partition.py | 10 ++++++++- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 9c4e9f97ab..5c78ff4432 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -595,13 +595,25 @@ static bool calcNeedCountEmpty(SPhysiPlanContext* pCxt, SScanLogicNode* pScanLog } // limit: root node is select SNode* pRoot = pCxt->pPlanCxt->pAstRoot; - if (QUERY_NODE_SELECT_STMT == nodeType(pRoot) - && pCxt->pNode && QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pCxt->pNode)) { - SAggLogicNode* pNode = (SAggLogicNode*)pCxt->pNode; - if (pNode->isCountByTag) { - return true; + if (QUERY_NODE_SELECT_STMT == nodeType(pRoot)) { + // case1 root select + SSelectStmt* pSelect = (SSelectStmt*)pRoot; + if (pSelect->hasCountFunc) { + if (NULL != pSelect->pGroupByList) { + return !keysHasCol(pSelect->pGroupByList); + } else if (NULL != pSelect->pPartitionByList) { + return !keysHasCol(pSelect->pPartitionByList); + } + } + // case2 inner agg + if (pCxt->pNode && QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pCxt->pNode)) { + SAggLogicNode* pNode = (SAggLogicNode*)pCxt->pNode; + if (pNode->isCountByTag) { + return true; + } } } + return false; } diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index 
0e1d332d89..a80c6f07e9 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -123,7 +123,7 @@ class TDTestCase: tdSql.checkData(0, 1, 2) elif 'hyperloglog' == function_name: tdSql.checkData(0, 0, 0) - tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) def query_empty_ntb(self): tdSql.query(f'select count(*) from {self.ntbname}') diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 58be8563fa..c08ea98ef6 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -83,6 +83,10 @@ class TDTestCase: tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} group by tbname ") tdSql.checkRows(check_num) + #inner select + tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} group by tbname) ") + tdSql.checkRows(check_num) + # count + sum(col) tdSql.query(f"select count(*), sum(c1) from {self.dbname}.{self.stable} group by tbname ") tdSql.checkRows(check_num) @@ -146,6 +150,10 @@ class TDTestCase: tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname ") tdSql.checkRows(check_num) + #inner select + tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} partition by tbname) ") + tdSql.checkRows(check_num) + tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname interval(1d)") tdSql.checkRows(real_num) @@ -196,4 +204,4 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) From da0ecc18b95af94abff05cece0acd002dfd38bd0 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Mon, 25 Dec 2023 02:51:52 +0800 Subject: [PATCH 09/69] test --- source/libs/planner/src/planPhysiCreater.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 5c78ff4432..397258d6ca 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -2311,9 +2311,11 @@ static int32_t createPhysiSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogic } else { pSubplan->msgType = TDMT_SCH_MERGE_QUERY; } - pCxt->pNode = pLogicSubplan->pNode; + + if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pLogicSubplan->pNode)) { + pCxt->pNode = pLogicSubplan->pNode; + } code = createPhysiNode(pCxt, pLogicSubplan->pNode, pSubplan, &pSubplan->pNode); - pCxt->pNode = NULL; if (TSDB_CODE_SUCCESS == code && !pCxt->pPlanCxt->streamQuery && !pCxt->pPlanCxt->topicQuery) { code = createDataDispatcher(pCxt, pSubplan->pNode, &pSubplan->pDataSink); } @@ -2414,6 +2416,7 @@ static int32_t doCreatePhysiPlan(SPhysiPlanContext* pCxt, SQueryLogicPlan* pLogi break; } } + pCxt->pNode = NULL; if (TSDB_CODE_SUCCESS == code) { *pPhysiPlan = pPlan; From 15694df001da02ab54ced68b014d6e48fe4a1cff Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Mon, 25 Dec 2023 11:54:10 +0800 Subject: [PATCH 10/69] adjust --- source/libs/nodes/src/nodesCloneFuncs.c | 2 +- source/libs/planner/src/planLogicCreater.c | 20 ++++++++-------- source/libs/planner/src/planOptimizer.c | 1 - source/libs/planner/src/planPhysiCreater.c | 27 +++------------------- tests/parallel_test/cases.task | 4 ++++ 5 files changed, 19 insertions(+), 35 deletions(-) diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 2af31a8c7a..c68fd81d22 100644 --- 
a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -424,6 +424,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) { COPY_SCALAR_FIELD(groupOrderScan); COPY_SCALAR_FIELD(onlyMetaCtbIdx); COPY_SCALAR_FIELD(filesetDelimited); + COPY_SCALAR_FIELD(isCountByTag); return TSDB_CODE_SUCCESS; } @@ -453,7 +454,6 @@ static int32_t logicAggCopy(const SAggLogicNode* pSrc, SAggLogicNode* pDst) { COPY_SCALAR_FIELD(isGroupTb); COPY_SCALAR_FIELD(isPartTb); COPY_SCALAR_FIELD(hasGroup); - COPY_SCALAR_FIELD(isCountByTag); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 17b1b272ba..f55689c472 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -486,6 +486,16 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect code = tagScanSetExecutionMode(pScan); } + bool isCountByTag = false; + if (pSelect->hasCountFunc) { + if (pSelect->pGroupByList) { + isCountByTag = !keysHasCol(pSelect->pGroupByList); + } else if (pSelect->pPartitionByList) { + isCountByTag = !keysHasCol(pSelect->pPartitionByList); + } + } + pScan->isCountByTag = isCountByTag; + if (TSDB_CODE_SUCCESS == code) { *pLogicNode = (SLogicNode*)pScan; } else { @@ -748,15 +758,7 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, pAgg->isGroupTb = pAgg->pGroupKeys ? keysHasTbname(pAgg->pGroupKeys) : 0; pAgg->isPartTb = pSelect->pPartitionByList ? keysHasTbname(pSelect->pPartitionByList) : 0; pAgg->hasGroup = pAgg->pGroupKeys || pSelect->pPartitionByList; - bool isCountByTag = false; - if (pSelect->hasCountFunc) { - if (pSelect->pGroupByList) { - isCountByTag = !keysHasCol(pSelect->pGroupByList); - } else if (pSelect->pPartitionByList) { - isCountByTag = !keysHasCol(pSelect->pPartitionByList); - } - } - pAgg->isCountByTag = isCountByTag; + if (TSDB_CODE_SUCCESS == code) { *pLogicNode = (SLogicNode*)pAgg; diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 9ecbd3ae9f..aa3181e166 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2783,7 +2783,6 @@ static int32_t splitCacheLastFuncOptCreateAggLogicNode(SAggLogicNode** pNewAgg, pNew->isGroupTb = pAgg->isGroupTb; pNew->isPartTb = pAgg->isPartTb; pNew->hasGroup = pAgg->hasGroup; - pNew->isCountByTag = pAgg->isCountByTag; pNew->node.pChildren = nodesCloneList(pAgg->node.pChildren); int32_t code = 0; diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 397258d6ca..0db0fd77fb 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -37,7 +37,6 @@ typedef struct SPhysiPlanContext { SArray* pLocationHelper; bool hasScan; bool hasSysScan; - SLogicNode* pNode; // tmp record LogicSubplan->pNode } SPhysiPlanContext; static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char* pKey, int32_t keyBufSize) { @@ -595,23 +594,8 @@ static bool calcNeedCountEmpty(SPhysiPlanContext* pCxt, SScanLogicNode* pScanLog } // limit: root node is select SNode* pRoot = pCxt->pPlanCxt->pAstRoot; - if (QUERY_NODE_SELECT_STMT == nodeType(pRoot)) { - // case1 root select - SSelectStmt* pSelect = (SSelectStmt*)pRoot; - if (pSelect->hasCountFunc) { - if (NULL != pSelect->pGroupByList) { - return !keysHasCol(pSelect->pGroupByList); - } else if (NULL != 
pSelect->pPartitionByList) { - return !keysHasCol(pSelect->pPartitionByList); - } - } - // case2 inner agg - if (pCxt->pNode && QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pCxt->pNode)) { - SAggLogicNode* pNode = (SAggLogicNode*)pCxt->pNode; - if (pNode->isCountByTag) { - return true; - } - } + if (QUERY_NODE_SELECT_STMT == nodeType(pRoot) && pScanLogicNode->isCountByTag) { + return true; } return false; @@ -2312,9 +2296,6 @@ static int32_t createPhysiSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogic pSubplan->msgType = TDMT_SCH_MERGE_QUERY; } - if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pLogicSubplan->pNode)) { - pCxt->pNode = pLogicSubplan->pNode; - } code = createPhysiNode(pCxt, pLogicSubplan->pNode, pSubplan, &pSubplan->pNode); if (TSDB_CODE_SUCCESS == code && !pCxt->pPlanCxt->streamQuery && !pCxt->pPlanCxt->topicQuery) { code = createDataDispatcher(pCxt, pSubplan->pNode, &pSubplan->pDataSink); @@ -2416,7 +2397,6 @@ static int32_t doCreatePhysiPlan(SPhysiPlanContext* pCxt, SQueryLogicPlan* pLogi break; } } - pCxt->pNode = NULL; if (TSDB_CODE_SUCCESS == code) { *pPhysiPlan = pPlan; @@ -2468,8 +2448,7 @@ int32_t createPhysiPlan(SPlanContext* pCxt, SQueryLogicPlan* pLogicPlan, SQueryP .nextDataBlockId = 0, .pLocationHelper = taosArrayInit(32, POINTER_BYTES), .hasScan = false, - .hasSysScan = false, - .pNode = NULL}; + .hasSysScan = false}; if (NULL == cxt.pLocationHelper) { return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e63f86743b..5e7ef407f0 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -348,6 +348,10 @@ e ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/group_partition.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py From baee536cb06976332fe4d22305305edc0ee1d013 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Mon, 25 Dec 2023 11:54:23 +0800 Subject: [PATCH 11/69] adjust --- include/libs/nodes/plannodes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 55ac3b86bf..f0383e8168 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -119,6 +119,7 @@ typedef struct SScanLogicNode { bool groupOrderScan; bool onlyMetaCtbIdx; // for tag scan with no tbname bool filesetDelimited; // returned blocks delimited by fileset + bool isCountByTag; // true if selectstmt hasCountFunc & part by tag/tbname } SScanLogicNode; typedef struct SJoinLogicNode { @@ -147,7 +148,6 @@ typedef struct SAggLogicNode { bool isGroupTb; bool isPartTb; // true if partition keys has tbname bool hasGroup; - bool isCountByTag; // true if hasCountFunc & part by tag/tbname } SAggLogicNode; typedef struct SProjectLogicNode { From 31c54e5edabc71924d72befc3194942b8b40fb73 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Mon, 25 Dec 2023 12:42:25 +0800 Subject: [PATCH 12/69] adjust --- 
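Note on the expectation changes below: once the scan is marked needCountEmptyTable, grouping or partitioning a super table by tbname (or a tag) yields one result row per child table even when a child table holds no rows, so an outer query over that grouped result sees one input row per table. A minimal sketch of the behavior, written in the style of the system tests; it assumes the test framework has already initialized tdSql with a live connection (as the surrounding test files do), and the names db_ce, stb_ce, ct1, ct2 are placeholders rather than objects touched by this patch:

from util.sql import *   # system-test helper; tdSql is set up by the framework's test runner

tdSql.execute("create database if not exists db_ce")
tdSql.execute("create table db_ce.stb_ce (ts timestamp, c1 int) tags (t1 int)")
tdSql.execute("create table db_ce.ct1 using db_ce.stb_ce tags (1)")   # stays empty on purpose
tdSql.execute("create table db_ce.ct2 using db_ce.stb_ce tags (2)")
tdSql.execute("insert into db_ce.ct2 values (now, 1)")

# one row per child table, the empty one counted as 0
tdSql.query("select count(c1) from db_ce.stb_ce group by tbname")
tdSql.checkRows(2)

# an outer aggregate over the grouped result therefore sees two input rows
tdSql.query("select count(1), sum(1) from (select count(c1) from db_ce.stb_ce group by tbname)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 2)
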
tests/system-test/2-query/count.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index a80c6f07e9..9bf9a4b9cc 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -182,8 +182,8 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query(f'select count(1),sum(1) from (select count(1) from {self.ntbname} group by tbname order by tbname)') tdSql.checkRows(1) - tdSql.checkData(0, 0, 0) - tdSql.checkData(0, 1, None) + tdSql.checkData(0, 0, 2) + tdSql.checkData(0, 1, 2) def count_query_stb(self,column_dict,tag_dict,stbname,tbnum,rownum): tdSql.query(f'select count(tbname) from {stbname}') From f672df239e176a5256e76f9937d9e19a8da5b2cc Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Mon, 25 Dec 2023 13:33:45 +0800 Subject: [PATCH 13/69] adjust --- tests/system-test/2-query/count.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py index 9bf9a4b9cc..3ba651622a 100644 --- a/tests/system-test/2-query/count.py +++ b/tests/system-test/2-query/count.py @@ -182,8 +182,8 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query(f'select count(1),sum(1) from (select count(1) from {self.ntbname} group by tbname order by tbname)') tdSql.checkRows(1) - tdSql.checkData(0, 0, 2) - tdSql.checkData(0, 1, 2) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 1) def count_query_stb(self,column_dict,tag_dict,stbname,tbnum,rownum): tdSql.query(f'select count(tbname) from {stbname}') From 62cfcefbb0e26c29c06ff1d22f414a71018274c8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 26 Dec 2023 00:34:04 +0800 Subject: [PATCH 14/69] refactor: remove stream-scan-history event for stream task. 
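
Context for the parTranslater.c hunk in this commit: for an interval stream, the recorded lastTs is no longer the start of the window that contains the newest existing row but the start key of the window after it. For a plain fixed-length interval in millisecond precision the arithmetic reduces to the sketch below; this is illustrative only (Python standing in for the C code) and ignores the calendar units and offsets that taosTimeTruncate/taosTimeAdd also handle:

def next_window_skey(last_ts_ms, interval_ms):
    # taosTimeAdd(taosTimeTruncate(lastTs, &interval), interval.interval, ...)
    # specialised to a fixed-length interval with no offset
    window_start = last_ts_ms // interval_ms * interval_ms
    return window_start + interval_ms

# 10s windows, newest written row at t = 1626739440001 ms
assert next_window_skey(1626739440001, 10_000) == 1626739450000

The history scan range then ends one tick before this key and the realtime task starts from it, which is what the nextWindowSkey - 1 assignments in the scheduler rely on.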
--- include/libs/stream/tstream.h | 19 +++-- source/dnode/mnode/impl/src/mndStream.c | 88 +++++++++++----------- source/dnode/vnode/src/tq/tq.c | 27 +++---- source/dnode/vnode/src/tqCommon/tqCommon.c | 2 +- source/libs/parser/src/parTranslater.c | 2 +- source/libs/stream/src/streamExec.c | 2 +- source/libs/stream/src/streamMeta.c | 2 +- source/libs/stream/src/streamStart.c | 20 +++-- source/libs/stream/src/streamTaskSm.c | 32 ++++---- 9 files changed, 99 insertions(+), 95 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 2d2db5c1dc..0c09e0e1ea 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -81,7 +81,7 @@ typedef enum ETaskStatus { TASK_STATUS__HALT, // pause, but not be manipulated by user command TASK_STATUS__PAUSE, // pause TASK_STATUS__CK, // stream task is in checkpoint status, no data are allowed to put into inputQ anymore - TASK_STATUS__STREAM_SCAN_HISTORY, +// TASK_STATUS__STREAM_SCAN_HISTORY, } ETaskStatus; enum { @@ -138,15 +138,14 @@ enum { typedef enum EStreamTaskEvent { TASK_EVENT_INIT = 0x1, TASK_EVENT_INIT_SCANHIST = 0x2, - TASK_EVENT_INIT_STREAM_SCANHIST = 0x3, - TASK_EVENT_SCANHIST_DONE = 0x4, - TASK_EVENT_STOP = 0x5, - TASK_EVENT_GEN_CHECKPOINT = 0x6, - TASK_EVENT_CHECKPOINT_DONE = 0x7, - TASK_EVENT_PAUSE = 0x8, - TASK_EVENT_RESUME = 0x9, - TASK_EVENT_HALT = 0xA, - TASK_EVENT_DROPPING = 0xB, + TASK_EVENT_SCANHIST_DONE = 0x3, + TASK_EVENT_STOP = 0x4, + TASK_EVENT_GEN_CHECKPOINT = 0x5, + TASK_EVENT_CHECKPOINT_DONE = 0x6, + TASK_EVENT_PAUSE = 0x7, + TASK_EVENT_RESUME = 0x8, + TASK_EVENT_HALT = 0x9, + TASK_EVENT_DROPPING = 0xA, } EStreamTaskEvent; typedef struct { diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 25f51b85df..2464a9207e 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -2886,38 +2886,38 @@ static SStreamTask *mndGetStreamTask(STaskId *pId, SStreamObj *pStream) { return NULL; } -static bool needDropRelatedFillhistoryTask(STaskStatusEntry *pTaskEntry, SStreamExecInfo *pExecNode) { - if (pTaskEntry->status == TASK_STATUS__STREAM_SCAN_HISTORY && pTaskEntry->statusLastDuration >= 10) { - if (!pTaskEntry->inputQChanging && pTaskEntry->inputQUnchangeCounter > 10) { - int32_t numOfReady = 0; - int32_t numOfTotal = 0; - for (int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) { - STaskId *pId = taosArrayGet(pExecNode->pTaskList, k); - if (pTaskEntry->id.streamId == pId->streamId) { - numOfTotal++; - - if (pTaskEntry->id.taskId != pId->taskId) { - STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, pId, sizeof(*pId)); - if (pEntry->status == TASK_STATUS__READY) { - numOfReady++; - } - } - } - } - - if (numOfReady > 0) { - mDebug("stream:0x%" PRIx64 - " %d tasks are ready, %d tasks in stream-scan-history for more than 50s, drop related fill-history task", - pTaskEntry->id.streamId, numOfReady, numOfTotal - numOfReady); - return true; - } else { - return false; - } - } - } - - return false; -} +//static bool needDropRelatedFillhistoryTask(STaskStatusEntry *pTaskEntry, SStreamExecInfo *pExecNode) { +// if (pTaskEntry->status == TASK_STATUS__STREAM_SCAN_HISTORY && pTaskEntry->statusLastDuration >= 10) { +// if (!pTaskEntry->inputQChanging && pTaskEntry->inputQUnchangeCounter > 10) { +// int32_t numOfReady = 0; +// int32_t numOfTotal = 0; +// for (int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) { +// STaskId *pId = taosArrayGet(pExecNode->pTaskList, k); +// if 
(pTaskEntry->id.streamId == pId->streamId) { +// numOfTotal++; +// +// if (pTaskEntry->id.taskId != pId->taskId) { +// STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, pId, sizeof(*pId)); +// if (pEntry->status == TASK_STATUS__READY) { +// numOfReady++; +// } +// } +// } +// } +// +// if (numOfReady > 0) { +// mDebug("stream:0x%" PRIx64 +// " %d tasks are ready, %d tasks in stream-scan-history for more than 50s, drop related fill-history task", +// pTaskEntry->id.streamId, numOfReady, numOfTotal - numOfReady); +// return true; +// } else { +// return false; +// } +// } +// } +// +// return false; +//} // currently only handle the sink task // 1. sink task, drop related fill-history task msg is missing @@ -3091,18 +3091,18 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { if (p->status != TASK_STATUS__READY) { mDebug("received s-task:0x%" PRIx64 " not in ready status:%s", p->id.taskId, streamTaskGetStatusStr(p->status)); - if (p->status == TASK_STATUS__STREAM_SCAN_HISTORY) { - bool drop = needDropRelatedFillhistoryTask(pTaskEntry, &execInfo); - if (drop) { - SStreamObj *pStreamObj = mndGetStreamObj(pMnode, pTaskEntry->id.streamId); - if (pStreamObj == NULL) { - mError("failed to acquire the streamObj:0x%" PRIx64 " it may have been dropped", pStreamObj->uid); - } else { - mndDropRelatedFillhistoryTask(pMnode, pTaskEntry, pStreamObj); - mndReleaseStream(pMnode, pStreamObj); - } - } - } +// if (p->status == TASK_STATUS__STREAM_SCAN_HISTORY) { +// bool drop = needDropRelatedFillhistoryTask(pTaskEntry, &execInfo); +// if (drop) { +// SStreamObj *pStreamObj = mndGetStreamObj(pMnode, pTaskEntry->id.streamId); +// if (pStreamObj == NULL) { +// mError("failed to acquire the streamObj:0x%" PRIx64 " it may have been dropped", pStreamObj->uid); +// } else { +// mndDropRelatedFillhistoryTask(pMnode, pTaskEntry, pStreamObj); +// mndReleaseStream(pMnode, pStreamObj); +// } +// } +// } } } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index c8cc4f9d54..a8b5842b93 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1037,15 +1037,16 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { streamMetaReleaseTask(pMeta, pStreamTask); } else { - STimeWindow* pWindow = &pTask->dataRange.window; - ASSERT(HAS_RELATED_FILLHISTORY_TASK(pTask) || streamTaskShouldStop(pTask)); - - // Not update the fill-history time window until the state transfer is completed. - tqDebug("s-task:%s scan-history in stream time window completed, start to handle data from WAL, startVer:%" PRId64 - ", window:%" PRId64 " - %" PRId64, - id, pTask->chkInfo.nextProcessVer, pWindow->skey, pWindow->ekey); - - code = streamTaskScanHistoryDataComplete(pTask); + ASSERT(0); +// STimeWindow* pWindow = &pTask->dataRange.window; +// ASSERT(HAS_RELATED_FILLHISTORY_TASK(pTask) || streamTaskShouldStop(pTask)); +// +// // Not update the fill-history time window until the state transfer is completed. +// tqDebug("s-task:%s scan-history in stream time window completed, start to handle data from WAL, startVer:%" PRId64 +// ", window:%" PRId64 " - %" PRId64, +// id, pTask->chkInfo.nextProcessVer, pWindow->skey, pWindow->ekey); +// +// code = streamTaskScanHistoryDataComplete(pTask); } atomic_store_32(&pTask->status.inScanHistorySentinel, 0); @@ -1170,7 +1171,7 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion, } else if (status == TASK_STATUS__UNINIT) { // todo: fill-history task init ? 
if (pTask->info.fillHistory == 0) { - EStreamTaskEvent event = HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; + EStreamTaskEvent event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT; streamTaskHandleEvent(pTask->status.pSM, event); } } @@ -1363,9 +1364,9 @@ int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) { taosThreadMutexLock(&pTask->lock); ETaskStatus status = streamTaskGetStatus(pTask, NULL); - if (status == TASK_STATUS__STREAM_SCAN_HISTORY) { - streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE); - } +// if (status == TASK_STATUS__STREAM_SCAN_HISTORY) { +// streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE); +// } SStreamTaskId id = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId}; streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &id); diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index f576345f64..2991e3cef5 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -545,7 +545,7 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId); if (p != NULL && restored && p->info.fillHistory == 0) { - EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(p)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; + EStreamTaskEvent event = /*(HAS_RELATED_FILLHISTORY_TASK(p)) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT; streamTaskHandleEvent(p->status.pSM, event); } else if (!restored) { tqWarn("s-task:%s not launched since vnode(vgId:%d) not ready", p->id.idStr, vgId); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 3bb24566c2..6f3915c4d7 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -7989,7 +7989,7 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void } if (TSDB_CODE_SUCCESS == code) { if (interval.interval > 0) { - pStmt->pReq->lastTs = taosTimeTruncate(lastTs, &interval); + pStmt->pReq->lastTs = taosTimeAdd(taosTimeTruncate(lastTs, &interval), interval.interval, interval.intervalUnit, interval.precision); } else { pStmt->pReq->lastTs = lastTs; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 25f32195be..7d2a572cda 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -646,7 +646,7 @@ bool streamTaskIsIdle(const SStreamTask* pTask) { bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus) { ETaskStatus st = streamTaskGetStatus(pTask, NULL); - return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__STREAM_SCAN_HISTORY || + return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY/* || st == TASK_STATUS__STREAM_SCAN_HISTORY*/ || st == TASK_STATUS__CK); } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index d09d370a36..f2dcc3bcc6 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1484,7 +1484,7 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { continue; } - EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(pTask)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; + EStreamTaskEvent event = /*(HAS_RELATED_FILLHISTORY_TASK(pTask)) ? 
TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT; int32_t ret = streamTaskHandleEvent(pTask->status.pSM, event); if (ret != TSDB_CODE_SUCCESS) { stError("vgId:%d failed to handle event:%d", pMeta->vgId, event); diff --git a/source/libs/stream/src/streamStart.c b/source/libs/stream/src/streamStart.c index ea5e2edc09..70f83e27a5 100644 --- a/source/libs/stream/src/streamStart.c +++ b/source/libs/stream/src/streamStart.c @@ -52,7 +52,7 @@ int32_t streamTaskSetReady(SStreamTask* pTask) { int32_t numOfDowns = streamTaskGetNumOfDownstream(pTask); ETaskStatus status = streamTaskGetStatus(pTask, &p); - if ((status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__STREAM_SCAN_HISTORY) && + if ((status == TASK_STATUS__SCAN_HISTORY/* || status == TASK_STATUS__STREAM_SCAN_HISTORY*/) && pTask->info.taskLevel != TASK_LEVEL__SOURCE) { pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->upstreamInfo.pList); stDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s", @@ -158,7 +158,7 @@ int32_t streamTaskStartScanHistory(SStreamTask* pTask) { ETaskStatus status = streamTaskGetStatus(pTask, NULL); ASSERT(pTask->status.downstreamReady == 1 && - ((status == TASK_STATUS__SCAN_HISTORY) || (status == TASK_STATUS__STREAM_SCAN_HISTORY))); + ((status == TASK_STATUS__SCAN_HISTORY)/* || (status == TASK_STATUS__STREAM_SCAN_HISTORY)*/)); if (level == TASK_LEVEL__SOURCE) { return doStartScanHistoryTask(pTask); @@ -374,10 +374,14 @@ int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask) { char* p = NULL; ETaskStatus status = streamTaskGetStatus(pTask, &p); - ASSERT(status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__STREAM_SCAN_HISTORY); + ASSERT(status == TASK_STATUS__SCAN_HISTORY/* || status == TASK_STATUS__STREAM_SCAN_HISTORY*/); - stDebug("s-task:%s enter into scan-history data stage, status:%s", id, p); - streamTaskStartScanHistory(pTask); + if (pTask->info.fillHistory == 1) { + stDebug("s-task:%s fill-history task enters into scan-history data stage, status:%s", id, p); + streamTaskStartScanHistory(pTask); + } else { + stDebug("s-task:%s scan wal data, status:%s", id, p); + } // NOTE: there will be an deadlock if launch fill history here. // // start the related fill-history task, when current task is ready @@ -391,7 +395,7 @@ int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask) { void doProcessDownstreamReadyRsp(SStreamTask* pTask) { EStreamTaskEvent event; if (pTask->info.fillHistory == 0) { - event = HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; + event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? 
TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT; } else { event = TASK_EVENT_INIT_SCANHIST; } @@ -631,7 +635,7 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory char* p = NULL; ETaskStatus status = streamTaskGetStatus(pTask, &p); - if (status != TASK_STATUS__SCAN_HISTORY && status != TASK_STATUS__STREAM_SCAN_HISTORY) { + if (status != TASK_STATUS__SCAN_HISTORY /*&& status != TASK_STATUS__STREAM_SCAN_HISTORY*/) { stError("s-task:%s not in scan-history status, status:%s return upstream:0x%x scan-history finish directly", id, p, pReq->upstreamTaskId); @@ -693,7 +697,7 @@ int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) { return TSDB_CODE_INVALID_MSG; } - ASSERT(status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__STREAM_SCAN_HISTORY); + ASSERT(status == TASK_STATUS__SCAN_HISTORY/* || status == TASK_STATUS__STREAM_SCAN_HISTORY*/); SStreamMeta* pMeta = pTask->pMeta; // execute in the scan history complete call back msg, ready to process data from inputQ diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 51e6ad23fe..4bd6483f7f 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -31,14 +31,14 @@ SStreamTaskState StreamTaskStatusList[9] = { {.state = TASK_STATUS__HALT, .name = "halt"}, {.state = TASK_STATUS__PAUSE, .name = "paused"}, {.state = TASK_STATUS__CK, .name = "checkpoint"}, - {.state = TASK_STATUS__STREAM_SCAN_HISTORY, .name = "stream-scan-history"}, +// {.state = TASK_STATUS__STREAM_SCAN_HISTORY, .name = "stream-scan-history"}, }; SStreamEventInfo StreamTaskEventList[12] = { {.event = 0, .name = ""}, // dummy event, place holder {.event = TASK_EVENT_INIT, .name = "initialize"}, {.event = TASK_EVENT_INIT_SCANHIST, .name = "scan-history-init"}, - {.event = TASK_EVENT_INIT_STREAM_SCANHIST, .name = "stream-scan-history-init"}, +// {.event = TASK_EVENT_INIT_STREAM_SCANHIST, .name = "stream-scan-history-init"}, {.event = TASK_EVENT_SCANHIST_DONE, .name = "scan-history-completed"}, {.event = TASK_EVENT_STOP, .name = "stopping"}, {.event = TASK_EVENT_GEN_CHECKPOINT, .name = "checkpoint"}, @@ -110,12 +110,12 @@ int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask) { // todo check rsp code for handle Event:TASK_EVENT_SCANHIST_DONE static bool isInvalidStateTransfer(ETaskStatus state, const EStreamTaskEvent event) { - if (event == TASK_EVENT_INIT_STREAM_SCANHIST || event == TASK_EVENT_INIT || event == TASK_EVENT_INIT_SCANHIST) { + if (/*event == TASK_EVENT_INIT_STREAM_SCANHIST || */event == TASK_EVENT_INIT || event == TASK_EVENT_INIT_SCANHIST) { return (state != TASK_STATUS__UNINIT); } if (event == TASK_EVENT_SCANHIST_DONE) { - return (state != TASK_STATUS__SCAN_HISTORY && state != TASK_STATUS__STREAM_SCAN_HISTORY); + return (state != TASK_STATUS__SCAN_HISTORY/* && state != TASK_STATUS__STREAM_SCAN_HISTORY*/); } if (event == TASK_EVENT_GEN_CHECKPOINT) { @@ -482,14 +482,14 @@ void doInitStateTransferTable(void) { taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STREAM_SCAN_HISTORY, TASK_EVENT_INIT_STREAM_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false); - taosArrayPush(streamTaskSMTrans, &trans); +// trans = 
createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STREAM_SCAN_HISTORY, TASK_EVENT_INIT_STREAM_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false); +// taosArrayPush(streamTaskSMTrans, &trans); // scan-history related event trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true); - taosArrayPush(streamTaskSMTrans, &trans); +// trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true); +// taosArrayPush(streamTaskSMTrans, &trans); // halt stream task, from other task status trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true); @@ -499,8 +499,8 @@ void doInitStateTransferTable(void) { SAttachedEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT}; - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true); - taosArrayPush(streamTaskSMTrans, &trans); +// trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true); +// taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true); taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true); @@ -519,8 +519,8 @@ void doInitStateTransferTable(void) { taosArrayPush(streamTaskSMTrans, &trans); info = (SAttachedEventInfo){.status = TASK_STATUS__READY, .event = TASK_EVENT_PAUSE}; - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true); - taosArrayPush(streamTaskSMTrans, &trans); +// trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true); +// taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true); taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true); @@ -554,8 +554,8 @@ void doInitStateTransferTable(void) { taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true); - taosArrayPush(streamTaskSMTrans, &trans); +// trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true); +// taosArrayPush(streamTaskSMTrans, &trans); // dropping related event trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true); @@ -574,7 +574,7 @@ void doInitStateTransferTable(void) { taosArrayPush(streamTaskSMTrans, &trans); trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, 
streamTaskSendTransSuccessMsg, NULL, NULL, true); taosArrayPush(streamTaskSMTrans, &trans); - trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true); - taosArrayPush(streamTaskSMTrans, &trans); +// trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true); +// taosArrayPush(streamTaskSMTrans, &trans); } //clang-format on \ No newline at end of file From 2c9fa56e9c3ea6a0bbca237800da86995aa98cee Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Tue, 26 Dec 2023 10:24:50 +0800 Subject: [PATCH 15/69] calcNeedCountEmpty --- source/libs/planner/src/planPhysiCreater.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 0db0fd77fb..444bda84cd 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -587,14 +587,12 @@ static int32_t createTableCountScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); } -static bool calcNeedCountEmpty(SPhysiPlanContext* pCxt, SScanLogicNode* pScanLogicNode) { - // refuse interval +static bool calcNeedCountEmpty(SScanLogicNode* pScanLogicNode) { if (pScanLogicNode->interval > 0) { return false; } - // limit: root node is select - SNode* pRoot = pCxt->pPlanCxt->pAstRoot; - if (QUERY_NODE_SELECT_STMT == nodeType(pRoot) && pScanLogicNode->isCountByTag) { + + if (pScanLogicNode->isCountByTag) { return true; } @@ -637,7 +635,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->igCheckUpdate = pScanLogicNode->igCheckUpdate; pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? true : false; pTableScan->filesetDelimited = pScanLogicNode->filesetDelimited; - pTableScan->needCountEmptyTable = calcNeedCountEmpty(pCxt, pScanLogicNode); + pTableScan->needCountEmptyTable = calcNeedCountEmpty(pScanLogicNode); int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); if (TSDB_CODE_SUCCESS == code) { From b74e98b25c11d40a290d03988dd848d6a75319c2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 26 Dec 2023 14:17:46 +0800 Subject: [PATCH 16/69] fix(stream): set the correct split timestamp for session/state window. 
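For session and state windows there is no interval to truncate against, so the split point between the history range and the real-time range is taken directly from the last written timestamp: the create-stream request now records lastTs + 1 as the start key of the next window, and the source task's data range becomes the closed window [INT64_MIN, nextWindowSkey - 1]. Below is a minimal sketch of that boundary arithmetic, illustration only and not part of the patch; the struct is a stand-in for the real STimeWindow and the timestamp value is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for STimeWindow, illustration only */
    typedef struct { int64_t skey, ekey; } WindowSketch;

    int main(void) {
      int64_t lastTs = 1648791223000;       /* hypothetical last key of the queried table */
      int64_t nextWindowSkey = lastTs + 1;  /* non-interval case: next window starts right after lastTs */

      WindowSketch hist = { .skey = INT64_MIN, .ekey = nextWindowSkey - 1 };

      /* the row written exactly at lastTs stays inside the closed history range */
      printf("history window [%lld, %lld], lastTs covered: %d\n",
             (long long)hist.skey, (long long)hist.ekey,
             (int)(lastTs >= hist.skey && lastTs <= hist.ekey));
      return 0;
    }

For interval windows the existing path is unchanged: lastTs is still truncated to the interval boundary and advanced by one interval, as the parTranslater.c hunk below shows.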
--- source/dnode/mnode/impl/src/mndScheduler.c | 6 +++--- source/libs/parser/src/parTranslater.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 3ef4c9a4d2..39121cecd9 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -251,7 +251,7 @@ int32_t doAddSinkTask(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, in static int32_t addSourceTask(SMnode* pMnode, SVgObj* pVgroup, SArray* pTaskList, SArray* pSinkTaskList, SStreamObj* pStream, SSubplan* plan, uint64_t uid, SEpSet* pEpset, bool fillHistory, - bool hasExtraSink, int64_t firstWindowSkey, bool hasFillHistory) { + bool hasExtraSink, int64_t nextWindowSkey, bool hasFillHistory) { SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SOURCE, fillHistory, pStream->conf.triggerParam, pTaskList, hasFillHistory); if (pTask == NULL) { @@ -262,7 +262,7 @@ static int32_t addSourceTask(SMnode* pMnode, SVgObj* pVgroup, SArray* pTaskList, STimeWindow* pWindow = &pTask->dataRange.window; pWindow->skey = INT64_MIN; - pWindow->ekey = firstWindowSkey - 1; + pWindow->ekey = nextWindowSkey - 1; mDebug("add source task 0x%x window:%" PRId64 " - %" PRId64, pTask->id.taskId, pWindow->skey, pWindow->ekey); // sink or dispatch @@ -382,7 +382,7 @@ static int32_t doAddSourceTask(SArray* pTaskList, bool isFillhistory, int64_t ui epsetAssign(&(pTask)->info.mnodeEpset, pEpset); - // todo set the correct ts, which should be last key of queried table. + // set the correct ts, which is the last key of queried table. STimeWindow* pWindow = &pTask->dataRange.window; pWindow->skey = INT64_MIN; pWindow->ekey = nextWindowSkey - 1; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 6f3915c4d7..cf6db9f549 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -7991,7 +7991,7 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void if (interval.interval > 0) { pStmt->pReq->lastTs = taosTimeAdd(taosTimeTruncate(lastTs, &interval), interval.interval, interval.intervalUnit, interval.precision); } else { - pStmt->pReq->lastTs = lastTs; + pStmt->pReq->lastTs = lastTs + 1; // start key of the next time window } code = buildCmdMsg(&cxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, pStmt->pReq); } From 2cd61804bcfd3b70b10d85464f6334a557c97e26 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 26 Dec 2023 14:40:15 +0800 Subject: [PATCH 17/69] test:update the test case. 
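The expected value in checkDataLoop moves from 99999 to 100000, which appears to follow from the boundary fix in the previous commit: with the history range now ending at the last written timestamp rather than one step before it, the single row at the boundary is no longer missed. A back-of-the-envelope check with illustrative numbers only, not taken from the test:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* hypothetical dense data: one row per timestamp in [t0, lastTs] */
      int64_t t0 = 0, lastTs = 99999;
      int64_t rowsIfBoundaryDropped = (lastTs - 1) - t0 + 1;  /* 99999  */
      int64_t rowsIfBoundaryKept    = lastTs - t0 + 1;        /* 100000 */
      printf("dropped=%lld kept=%lld\n",
             (long long)rowsIfBoundaryDropped, (long long)rowsIfBoundaryKept);
      return 0;
    }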
--- tests/system-test/8-stream/stream_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/8-stream/stream_basic.py b/tests/system-test/8-stream/stream_basic.py index e838950bb8..3ebc255114 100644 --- a/tests/system-test/8-stream/stream_basic.py +++ b/tests/system-test/8-stream/stream_basic.py @@ -89,7 +89,7 @@ class TDTestCase: sql = "select count(*) from sta" # loop wait max 60s to check count is ok tdLog.info("loop wait result ...") - tdSql.checkDataLoop(0, 0, 99999, sql, loopCount=120, waitTime=0.5) + tdSql.checkDataLoop(0, 0, 100000, sql, loopCount=120, waitTime=0.5) time.sleep(5) From e2564a61899ca75df168f62d8ad1c0dc4a3a241c Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Tue, 26 Dec 2023 18:35:49 +0800 Subject: [PATCH 18/69] fix issue --- source/libs/executor/src/scanoperator.c | 4 ++- .../executor/src/streameventwindowoperator.c | 1 + source/libs/stream/src/streamSessionState.c | 8 ++--- tests/script/tsim/stream/event2.sim | 33 +++++++++++++++++++ 4 files changed, 41 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ef2a99d1d1..cf5ea95088 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1961,8 +1961,10 @@ static void setBlockGroupIdByUid(SStreamScanInfo* pInfo, SSDataBlock* pBlock) { } static void doCheckUpdate(SStreamScanInfo* pInfo, TSKEY endKey, SSDataBlock* pBlock) { - if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) { + if (pInfo->pUpdateInfo) { pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pBlock->info.version); + } + if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) { checkUpdateData(pInfo, true, pBlock, true); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, endKey); if (pInfo->pUpdateDataRes->info.rows > 0) { diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 6adab74344..bd247eba07 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -622,6 +622,7 @@ void streamEventReloadState(SOperatorInfo* pOperator) { } setEventWindowFlag(pAggSup, &curInfo); if (!curInfo.pWinFlag->startFlag || curInfo.pWinFlag->endFlag) { + saveSessionOutputBuf(pAggSup, &curInfo.winInfo); continue; } diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index 45d656c456..c71edccb99 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -449,13 +449,13 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void SSHashObj* pSessionBuff = getRowStateBuff(pCur->pStreamFileState); void** ppBuff = tSimpleHashGet(pSessionBuff, &pKey->groupId, sizeof(uint64_t)); - if (!ppBuff) { - return TSDB_CODE_FAILED; + SArray* pWinStates = NULL; + if (ppBuff) { + pWinStates = (SArray*)(*ppBuff); } - SArray* pWinStates = (SArray*)(*ppBuff); - int32_t size = taosArrayGetSize(pWinStates); if (pCur->buffIndex >= 0) { + int32_t size = taosArrayGetSize(pWinStates); if (pCur->buffIndex >= size) { return TSDB_CODE_FAILED; } diff --git a/tests/script/tsim/stream/event2.sim b/tests/script/tsim/stream/event2.sim index eb9fca46e6..9fc7615fb8 100644 --- a/tests/script/tsim/stream/event2.sim +++ b/tests/script/tsim/stream/event2.sim @@ -31,6 +31,8 @@ sql insert into t4 values(1648791223000,1,1,10,3.0); sql insert into t4 
values(1648791233000,0,2,11,1.0); sql insert into t4 values(1648791243000,1,9,12,2.0); +sleep 1000 + sql create stream streams0 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 fill_history 1 into streamt0 as select _wstart as s, count(*) c1, sum(b), max(c), _wend as e from st partition by tbname event_window start with a = 0 end with b = 9; sleep 1000 @@ -79,6 +81,37 @@ if $data21 != 2 then goto loop0 endi +sql insert into t3 values(1648791222000,0,1,7,3.0); + +$loop_count = 0 +loop1: + +sleep 300 +print 2 sql select * from streamt0 order by 1, 2, 3, 4; +sql select * from streamt0 order by 1, 2, 3, 4; + +print +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print ======rows=$rows + goto loop1 +endi + +if $data01 != 5 then + print ======data01=$data01 + goto loop0 +endi + print event1 end system sh/exec.sh -n dnode1 -s stop -x SIGINT From 5592e11235cb04824e46782eb5a172a6324af0bc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 26 Dec 2023 19:02:52 +0800 Subject: [PATCH 19/69] fix(stream): handle error when checkpoint is interrupted by nodeUpdate. --- source/dnode/vnode/src/tq/tq.c | 4 ++-- source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamExec.c | 25 ++++++++++++++++++++++--- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index a8b5842b93..1ee5256775 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1295,8 +1295,8 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) if (status == TASK_STATUS__CK) { ASSERT(pTask->chkInfo.checkpointingId == req.checkpointId); tqWarn("s-task:%s recv checkpoint-source msg again checkpointId:%" PRId64 - " already received, ignore this msg and continue process checkpoint", - pTask->id.idStr, pTask->chkInfo.checkpointingId); + " transId:%d already received, ignore this msg and continue process checkpoint", + pTask->id.idStr, pTask->chkInfo.checkpointingId, req.transId); taosThreadMutexUnlock(&pTask->lock); streamMetaReleaseTask(pMeta, pTask); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index bfa269b0d5..00a8940b6a 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -746,7 +746,7 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) { taosArrayClear(pTask->pReadyMsgList); stDebug("s-task:%s level:%d source checkpoint completed msg sent to mnode", pTask->id.idStr, pTask->info.taskLevel); } else { - stDebug("s-task:%s level:%d already send rsp to mnode", pTask->id.idStr, pTask->info.taskLevel); + stDebug("s-task:%s level:%d already send rsp checkpoint success to mnode", pTask->id.idStr, pTask->info.taskLevel); } taosThreadMutexUnlock(&pTask->lock); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 7d2a572cda..eafa5adea8 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -625,10 +625,29 @@ int32_t doStreamExecTask(SStreamTask* pTask) { // todo other thread may change the status // do nothing after sync executor state to storage backend, untill the vnode-level checkpoint is completed. 
if (type == STREAM_INPUT__CHECKPOINT) { + + // todo add lock char* p = NULL; - streamTaskGetStatus(pTask, &p); - stDebug("s-task:%s checkpoint block received, set status:%s", pTask->id.idStr, p); - streamTaskBuildCheckpoint(pTask); + ETaskStatus s = streamTaskGetStatus(pTask, &p); + if (s == TASK_STATUS__CK) { + stDebug("s-task:%s checkpoint block received, set status:%s", pTask->id.idStr, p); + streamTaskBuildCheckpoint(pTask); + } else { + // todo refactor + int32_t code = 0; + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + code = streamTaskSendCheckpointSourceRsp(pTask); + } else { + code = streamTaskSendCheckpointReadyMsg(pTask); + } + + if (code != TSDB_CODE_SUCCESS) { + // todo: let's retry send rsp to upstream/mnode + stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", pTask->id.idStr, + 0, tstrerror(code)); + } + } + return 0; } } From e553b84904309b7ea51edecd039820ebce965a50 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 26 Dec 2023 19:16:49 +0800 Subject: [PATCH 20/69] fix(stream): fix syntax error. --- source/libs/stream/src/streamExec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index eafa5adea8..1f7bb56ec1 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -643,7 +643,7 @@ int32_t doStreamExecTask(SStreamTask* pTask) { if (code != TSDB_CODE_SUCCESS) { // todo: let's retry send rsp to upstream/mnode - stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", pTask->id.idStr, + stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%d, code:%s", pTask->id.idStr, 0, tstrerror(code)); } } From 3faf1514f43d1e03cb6557349a644ffcc598f93b Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Tue, 26 Dec 2023 20:47:15 +0800 Subject: [PATCH 21/69] doGroupedTableScan --- source/libs/executor/src/scanoperator.c | 45 +++++++++++----------- source/libs/planner/src/planLogicCreater.c | 1 - source/libs/planner/src/planPhysiCreater.c | 1 - 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6df46ef4d0..0f9b2f1eac 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -657,7 +657,7 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, // record processed (non empty) table -static int32_t insertTableToProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { +static int32_t markTableProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { if (!pTableScanInfo->needCountEmptyTable) { return TSDB_CODE_SUCCESS; } @@ -770,6 +770,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; + bool outputAll = pTableScanInfo->base.pTableListInfo->oneTableForEachGroup; // The read handle is not initialized yet, since no qualified tables exists if (pTableScanInfo->base.dataReader == NULL || pOperator->status == OP_EXEC_DONE) { @@ -780,7 +781,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { - insertTableToProcessed(pTableScanInfo, p->info.id.uid); + 
markTableProcessed(pTableScanInfo, p->info.id.uid); return p; } @@ -809,7 +810,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey while (pTableScanInfo->scanTimes < total) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { - insertTableToProcessed(pTableScanInfo, p->info.id.uid); + markTableProcessed(pTableScanInfo, p->info.id.uid); return p; } @@ -827,30 +828,30 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey } if (pTableScanInfo->needCountEmptyTable) { - if (num == 0 && 0 == taosHashGetSize(pTableScanInfo->pValuedTables)) { - // table by table, num is 0 - if (!pTableScanInfo->processingEmptyTable) { - pTableScanInfo->processingEmptyTable = true; - // current table is empty, fill result block info & return - const STableKeyInfo* info = tableListGetInfo(pTableScanInfo->base.pTableListInfo, pTableScanInfo->currentTable); - return getBlockForEmptyTable(pOperator, info); - } - - } else if (num > taosHashGetSize(pTableScanInfo->pValuedTables)) { - // group by group, num >= 1 + // pList is NULL in mode TABLE_SCAN__TABLE_ORDER for streamscan, no need to process + // pList not NULL, group by group, num >= 1 + int32_t tb_cnt = taosHashGetSize(pTableScanInfo->pValuedTables); + if (pList && num > tb_cnt) { if (!pTableScanInfo->processingEmptyTable) { pTableScanInfo->processingEmptyTable = true; pTableScanInfo->currentTable = 0; } if (pTableScanInfo->currentTable < num) { - // loop: get empty table uid & process - while (pTableScanInfo->currentTable < num) { - const STableKeyInfo* info = pList + pTableScanInfo->currentTable++; - if (pTableScanInfo->pValuedTables && - NULL != taosHashGet(pTableScanInfo->pValuedTables, &info->uid, sizeof(info->uid))) { - } else { - return getBlockForEmptyTable(pOperator, info); + if (outputAll) { + // loop: get empty table uid & process + while (pTableScanInfo->currentTable < num) { + const STableKeyInfo* info = pList + pTableScanInfo->currentTable++; + if (pTableScanInfo->pValuedTables && + NULL != taosHashGet(pTableScanInfo->pValuedTables, &info->uid, sizeof(info->uid))) { + } else { + return getBlockForEmptyTable(pOperator, info); + } } + } else if (tb_cnt == 0) { + // only need one & all empty table in this group + // output first one + pTableScanInfo->currentTable = num; + return getBlockForEmptyTable(pOperator, pList); } } } @@ -1005,7 +1006,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; - + if (pOperator->pOperatorGetParam) { pOperator->dynamicTask = true; int32_t code = createTableListInfoFromParam(pOperator); diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index f55689c472..e642b5bd5f 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -758,7 +758,6 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, pAgg->isGroupTb = pAgg->pGroupKeys ? keysHasTbname(pAgg->pGroupKeys) : 0; pAgg->isPartTb = pSelect->pPartitionByList ? 
keysHasTbname(pSelect->pPartitionByList) : 0; pAgg->hasGroup = pAgg->pGroupKeys || pSelect->pPartitionByList; - if (TSDB_CODE_SUCCESS == code) { *pLogicNode = (SLogicNode*)pAgg; diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 444bda84cd..96205904e3 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -2293,7 +2293,6 @@ static int32_t createPhysiSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogic } else { pSubplan->msgType = TDMT_SCH_MERGE_QUERY; } - code = createPhysiNode(pCxt, pLogicSubplan->pNode, pSubplan, &pSubplan->pNode); if (TSDB_CODE_SUCCESS == code && !pCxt->pPlanCxt->streamQuery && !pCxt->pPlanCxt->topicQuery) { code = createDataDispatcher(pCxt, pSubplan->pNode, &pSubplan->pDataSink); From 42f9d54a70b67170278adf3dac701ec10742d4a9 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 27 Dec 2023 00:28:42 +0800 Subject: [PATCH 22/69] adjust --- source/libs/executor/src/scanoperator.c | 3 +- tests/system-test/2-query/group_partition.py | 42 +++++++++++++++----- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 0f9b2f1eac..c69d963c79 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -770,7 +770,8 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; - bool outputAll = pTableScanInfo->base.pTableListInfo->oneTableForEachGroup; + // Only when all tables are scanned can you determine how many groups the tag has + bool outputAll = true; // The read handle is not initialized yet, since no qualified tables exists if (pTableScanInfo->base.dataReader == NULL || pOperator->status == OP_EXEC_DONE) { diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index c08ea98ef6..3d9874bfe2 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -40,9 +40,6 @@ class TDTestCase: tdSql.query(f"select count(*) from {self.dbname}.{self.stable} group by tbname ") tdSql.checkRows(check_num) - tdSql.query(f"select count(*), sum(1) from {self.dbname}.{self.stable} group by tbname ") - tdSql.checkRows(check_num) - tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} group by tbname ") tdSql.checkRows(check_num) @@ -57,12 +54,13 @@ class TDTestCase: tdSql.checkRows(check_num - real_num) # tag - tdSql.query(f"select count(*) from {self.dbname}.{self.stable} group by t2 ") - tdSql.checkRows(check_num) - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 ") tdSql.checkRows(check_num) + # multi tag + tdSql.query(f"select t2, t3, tbname, count(*) from {self.dbname}.{self.stable} group by t2, t3, tbname") + tdSql.checkRows(check_num) + # having tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 having count(*) <= 0") tdSql.checkRows(check_num - real_num) @@ -87,10 +85,16 @@ class TDTestCase: tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} group by tbname) ") tdSql.checkRows(check_num) - # count + sum(col) + # multi agg tdSql.query(f"select count(*), sum(c1) from {self.dbname}.{self.stable} group by tbname ") tdSql.checkRows(check_num) + tdSql.query(f"select count(1), sum(1) from 
{self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f" select count(c1), max(c1), avg(c1), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} group by tbname") + tdSql.checkRows(real_num) + ############### same with old ############### tdSql.query(f"select c1, count(*) from {self.dbname}.{self.stable} group by c1 ") num = 0 @@ -105,14 +109,20 @@ class TDTestCase: tdSql.query(f"select t2, c1, count(*) from {self.dbname}.{self.stable} group by t2, c1 ") tdSql.checkRows(real_num * self.row_nums) + tdSql.query(f"select t2, t3, c1, count(*) from {self.dbname}.{self.stable} group by t2, t3, c1 ") + tdSql.checkRows(real_num * self.row_nums) + def test_partitionby(self, check_num, real_num): tdSql.query(f"select tbname , count(*) from {self.dbname}.{self.stable} partition by tbname ") tdSql.checkRows(check_num) - tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.query(f"select count(*), sum(1) from {self.dbname}.{self.stable} partition by tbname ") tdSql.checkRows(check_num) + tdSql.query(f" select count(c5), max(c5), avg(c5), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.checkRows(real_num) + tdSql.query(f"select tbname from {self.dbname}.{self.stable} partition by tbname order by count(*)") tdSql.checkRows(check_num) @@ -124,12 +134,12 @@ class TDTestCase: tdSql.checkRows(check_num - real_num) #tag - tdSql.query(f"select count(*) from {self.dbname}.{self.stable} partition by t2 ") - tdSql.checkRows(check_num) - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} partition by t2 ") tdSql.checkRows(check_num) + tdSql.query(f"select t2, t3, tbname, count(*) from {self.dbname}.{self.stable} partition by t2, t3, tbname") + tdSql.checkRows(check_num) + # having tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} partition by t2 having count(*) <= 0") tdSql.checkRows(check_num - real_num) @@ -150,6 +160,13 @@ class TDTestCase: tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname ") tdSql.checkRows(check_num) + #multi agg + tdSql.query(f"select count(1), sum(1) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.checkRows(check_num) + + tdSql.query(f" select count(c1), max(c1), avg(c1), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.checkRows(real_num) + #inner select tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} partition by tbname) ") tdSql.checkRows(check_num) @@ -170,6 +187,9 @@ class TDTestCase: tdSql.query(f"select t2, c1, count(*) from {self.dbname}.{self.stable} partition by t2, c1 ") tdSql.checkRows(real_num * self.row_nums) + tdSql.query(f"select t2, t3, c1, count(*) from {self.dbname}.{self.stable} partition by t2, t3, c1 ") + tdSql.checkRows(real_num * self.row_nums) + def test_error(self): tdSql.error(f"select * from {self.dbname}.{self.stable} group by t2") tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") From 86351d5487ae55010b6922d58e058c8a58420173 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 27 Dec 2023 01:05:04 +0800 Subject: [PATCH 23/69] check count & window --- source/libs/planner/src/planLogicCreater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index e642b5bd5f..ea8bb666b1 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ 
b/source/libs/planner/src/planLogicCreater.c @@ -487,7 +487,7 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect } bool isCountByTag = false; - if (pSelect->hasCountFunc) { + if (pSelect->hasCountFunc && !pSelect->pWindow) { if (pSelect->pGroupByList) { isCountByTag = !keysHasCol(pSelect->pGroupByList); } else if (pSelect->pPartitionByList) { From 158c5996dfb98aadb767847fce4d63555d63f957 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 27 Dec 2023 15:51:36 +0800 Subject: [PATCH 24/69] adjust case --- source/libs/planner/src/planLogicCreater.c | 2 +- source/libs/planner/src/planPhysiCreater.c | 14 +- tests/system-test/2-query/group_partition.py | 219 +++++++++---------- 3 files changed, 101 insertions(+), 134 deletions(-) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index ea8bb666b1..12b7360165 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -487,7 +487,7 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect } bool isCountByTag = false; - if (pSelect->hasCountFunc && !pSelect->pWindow) { + if (pSelect->hasCountFunc && NULL == pSelect->pWindow) { if (pSelect->pGroupByList) { isCountByTag = !keysHasCol(pSelect->pGroupByList); } else if (pSelect->pPartitionByList) { diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 96205904e3..e266c55425 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -587,18 +587,6 @@ static int32_t createTableCountScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); } -static bool calcNeedCountEmpty(SScanLogicNode* pScanLogicNode) { - if (pScanLogicNode->interval > 0) { - return false; - } - - if (pScanLogicNode->isCountByTag) { - return true; - } - - return false; -} - static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) { STableScanPhysiNode* pTableScan = (STableScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, @@ -635,7 +623,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->igCheckUpdate = pScanLogicNode->igCheckUpdate; pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? 
true : false; pTableScan->filesetDelimited = pScanLogicNode->filesetDelimited; - pTableScan->needCountEmptyTable = calcNeedCountEmpty(pScanLogicNode); + pTableScan->needCountEmptyTable = pScanLogicNode->isCountByTag; int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); if (TSDB_CODE_SUCCESS == code) { diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 3d9874bfe2..0ac1be8482 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -35,161 +35,132 @@ class TDTestCase: tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )") - def test_groupby(self, check_num, real_num): - # tbname - tdSql.query(f"select count(*) from {self.dbname}.{self.stable} group by tbname ") + def test_groupby(self, keyword, check_num, nonempty_tb_num): + ####### by tbname + tdSql.query(f"select count(*), count(1), count(c1) from {self.dbname}.{self.stable} {keyword} by tbname ") tdSql.checkRows(check_num) - tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} group by tbname ") + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} {keyword} by tbname ") tdSql.checkRows(check_num) - tdSql.query(f"select tbname from {self.dbname}.{self.stable} group by tbname order by count(*)") + tdSql.query(f"select tbname from {self.dbname}.{self.stable} {keyword} by tbname order by count(*)") tdSql.checkRows(check_num) - tdSql.query(f"select tbname from {self.dbname}.{self.stable} group by tbname having count(*)>=0") + # last + tdSql.query(f"select last(ts), count(*) from {self.dbname}.{self.stable} {keyword} by tbname order by last(ts)") + tdSql.checkRows(check_num) + + tdSql.query(f"select tbname from {self.dbname}.{self.stable} {keyword} by tbname having count(*)>=0") tdSql.checkRows(check_num) # having filter out empty - tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} group by tbname having count(*) <= 0") - tdSql.checkRows(check_num - real_num) + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} {keyword} by tbname having count(*) <= 0") + tdSql.checkRows(check_num - nonempty_tb_num) - # tag - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 ") + ####### by tag + tdSql.query(f"select t2, count(*), count(1), count(c1) from {self.dbname}.{self.stable} {keyword} by t2 ") tdSql.checkRows(check_num) - # multi tag - tdSql.query(f"select t2, t3, tbname, count(*) from {self.dbname}.{self.stable} group by t2, t3, tbname") + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} {keyword} by t2 having count(*) <= 0") + tdSql.checkRows(check_num - nonempty_tb_num) + + # where + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now {keyword} by t2 ") tdSql.checkRows(check_num) - # having - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 having count(*) <= 0") - tdSql.checkRows(check_num - real_num) - - # col where filter nothing - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now group by t2 ") + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 {keyword} by t2 ") tdSql.checkRows(check_num) - # col where filter all - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 group by 
t2 ") + tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 {keyword} by t2 ") tdSql.checkRows(check_num) - # col where filter part - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 group by t2 ") - tdSql.checkRows(check_num) - - # col - tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} group by tbname ") - tdSql.checkRows(check_num) - - #inner select - tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} group by tbname) ") - tdSql.checkRows(check_num) - - # multi agg - tdSql.query(f"select count(*), sum(c1) from {self.dbname}.{self.stable} group by tbname ") - tdSql.checkRows(check_num) - - tdSql.query(f"select count(1), sum(1) from {self.dbname}.{self.stable} group by tbname ") - tdSql.checkRows(check_num) - - tdSql.query(f" select count(c1), max(c1), avg(c1), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} group by tbname") - tdSql.checkRows(real_num) - - ############### same with old ############### - tdSql.query(f"select c1, count(*) from {self.dbname}.{self.stable} group by c1 ") + ####### by col + tdSql.query(f"select c1, count(*), count(1), count(c1) from {self.dbname}.{self.stable} {keyword} by c1 ") num = 0 - if real_num > 0: + if nonempty_tb_num > 0: num = self.row_nums tdSql.checkRows(num) - tdSql.query(f"select ts, count(*) from {self.dbname}.{self.stable} group by ts ") - tdSql.checkRows(real_num * self.row_nums) + tdSql.query(f"select ts, count(*) from {self.dbname}.{self.stable} {keyword} by ts ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) # col + tag - tdSql.query(f"select t2, c1, count(*) from {self.dbname}.{self.stable} group by t2, c1 ") - tdSql.checkRows(real_num * self.row_nums) + tdSql.query(f"select t2, c1, count(*) from {self.dbname}.{self.stable} {keyword} by t2, c1 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) - tdSql.query(f"select t2, t3, c1, count(*) from {self.dbname}.{self.stable} group by t2, t3, c1 ") - tdSql.checkRows(real_num * self.row_nums) + tdSql.query(f"select t2, t3, c1, count(*) from {self.dbname}.{self.stable} {keyword} by t2, t3, c1 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) - - def test_partitionby(self, check_num, real_num): - tdSql.query(f"select tbname , count(*) from {self.dbname}.{self.stable} partition by tbname ") - tdSql.checkRows(check_num) - - tdSql.query(f"select count(*), sum(1) from {self.dbname}.{self.stable} partition by tbname ") - tdSql.checkRows(check_num) - tdSql.query(f" select count(c5), max(c5), avg(c5), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} partition by tbname") - tdSql.checkRows(real_num) - - tdSql.query(f"select tbname from {self.dbname}.{self.stable} partition by tbname order by count(*)") - tdSql.checkRows(check_num) - - tdSql.query(f"select tbname from {self.dbname}.{self.stable} partition by tbname having count(*)>=0") - tdSql.checkRows(check_num) - - # having filter out empty - tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname having count(*) <= 0") - tdSql.checkRows(check_num - real_num) - - #tag - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} partition by t2 ") + def test_multi_group_key(self, check_num, nonempty_tb_num): + # multi tag/tbname + tdSql.query(f"select t2, t3, tbname, count(*) from {self.dbname}.{self.stable} group by t2, t3, tbname") tdSql.checkRows(check_num) tdSql.query(f"select t2, t3, tbname, count(*) from {self.dbname}.{self.stable} partition by t2, t3, tbname") 
tdSql.checkRows(check_num) - # having - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} partition by t2 having count(*) <= 0") - tdSql.checkRows(check_num - real_num) + # multi tag + col + tdSql.query(f"select t1, t2, c1, count(*) from {self.dbname}.{self.stable} partition by t1, t2, c1 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) - # col where filter nothing - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts < now partition by t2 ") - tdSql.checkRows(check_num) + # tag + multi col + tdSql.query(f"select t2, c1, c2, count(*) from {self.dbname}.{self.stable} partition by t2, c1, c2 ") + tdSql.checkRows(nonempty_tb_num * self.row_nums) - # col where filter all - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where ts > 1737146000000 partition by t2 ") - tdSql.checkRows(check_num) - # col where filter part - tdSql.query(f"select t2, count(*) from {self.dbname}.{self.stable} where c1 = 1 partition by t2 ") - tdSql.checkRows(check_num) + def test_multi_agg(self, all_tb_num, nonempty_tb_num): + tdSql.query(f"select count(*), sum(c1) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(all_tb_num) - #col - tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname ") - tdSql.checkRows(check_num) + tdSql.query(f"select count(1), sum(1), avg(c1), apercentile(c1, 50), spread(ts) from {self.dbname}.{self.stable} group by tbname ") + tdSql.checkRows(all_tb_num) - #multi agg - tdSql.query(f"select count(1), sum(1) from {self.dbname}.{self.stable} partition by tbname ") - tdSql.checkRows(check_num) + tdSql.query(f"select count(c1), sum(c1), min(c1), mode(c1), stddev(c1), spread(c1) from {self.dbname}.{self.stable} partition by tbname ") + tdSql.checkRows(all_tb_num) + + # elapsed: continuous duration in a statistical period, table merge scan + tdSql.query(f" select count(c1), max(c5), avg(c5), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} group by tbname") + tdSql.checkRows(nonempty_tb_num) tdSql.query(f" select count(c1), max(c1), avg(c1), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} partition by tbname") - tdSql.checkRows(real_num) + tdSql.checkRows(nonempty_tb_num) + + + def test_innerSelect(self, check_num): + tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} group by tbname) ") + tdSql.checkRows(check_num) - #inner select tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} partition by tbname) ") tdSql.checkRows(check_num) + tdSql.query(f"select t1, c from (select t1, sum(c1) as s, count(*) as c from {self.stable} partition by t1)") + tdSql.checkRows(check_num) + + + def test_window(self, nonempty_tb_num): + # empty group optimization condition is not met + # time window tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname interval(1d)") - tdSql.checkRows(real_num) + tdSql.checkRows(nonempty_tb_num) - ############### same with old ############### - tdSql.query(f"select c1, count(*) from {self.dbname}.{self.stable} partition by c1 ") - num = 0 - if real_num > 0: - num = self.row_nums - tdSql.checkRows(num) - - tdSql.query(f"select ts, count(*) from {self.dbname}.{self.stable} partition by ts ") - tdSql.checkRows(real_num * self.row_nums) + tdSql.query(f"select _wstart, _wend, count(c1), max(c1), apercentile(c1, 50) from {self.dbname}.{self.stable} partition by tbname interval(1d)") + tdSql.checkRows(nonempty_tb_num) - tdSql.query(f"select t2, c1, count(*) from 
{self.dbname}.{self.stable} partition by t2, c1 ") - tdSql.checkRows(real_num * self.row_nums) + # state window + tdSql.query(f"select tbname, count(*), c1 from {self.dbname}.{self.stable} partition by tbname state_window(c1)") + tdSql.checkRows(nonempty_tb_num * self.row_nums) - tdSql.query(f"select t2, t3, c1, count(*) from {self.dbname}.{self.stable} partition by t2, t3, c1 ") - tdSql.checkRows(real_num * self.row_nums) + # session window + tdSql.query(f"select count(c1) from {self.dbname}.{self.stable} partition by tbname session(ts, 5s)") + tdSql.checkRows(nonempty_tb_num) + # event window + tdSql.query(f"select tbname, count(*) from {self.stable} partition by tbname event_window start with c1 >= 0 end with c2 = 9;") + tdSql.checkRows(nonempty_tb_num) + + + def test_error(self): tdSql.error(f"select * from {self.dbname}.{self.stable} group by t2") tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") @@ -199,21 +170,29 @@ class TDTestCase: def run(self): tdSql.prepare() self.prepare_db() - check_num = self.tb_nums - self.test_groupby(check_num, 0) - self.test_partitionby(check_num, 0) - # insert into half of tables - real_num = 5 - self.insert_db(real_num, self.row_nums) - self.test_groupby(check_num, real_num) - self.test_partitionby(check_num, real_num) + # empty table only + self.test_groupby('group', self.tb_nums, 0) + self.test_groupby('partition', self.tb_nums, 0) + self.test_innerSelect(self.tb_nums) + self.test_multi_group_key(self.tb_nums, 0) + self.test_multi_agg(self.tb_nums, 0) + self.test_window(0) - # test old version before changed - # self.test_groupby(0, 0) - # self.test_partitionby(0, 0) + # insert data to 5 tables + nonempty_tb_num = 5 + self.insert_db(nonempty_tb_num, self.row_nums) + + self.test_groupby('group', self.tb_nums, nonempty_tb_num) + self.test_groupby('partition', self.tb_nums, nonempty_tb_num) + self.test_innerSelect(self.tb_nums) + self.test_multi_group_key(self.tb_nums, nonempty_tb_num) + self.test_multi_agg(self.tb_nums, nonempty_tb_num) + self.test_window(nonempty_tb_num) + + ## test old version before changed + # self.test_groupby('group', 0, 0) # self.insert_db(5, self.row_nums) - # self.test_groupby(5, 5) - # self.test_partitionby(5, 5) + # self.test_groupby('group', 5, 5) self.test_error() From f6e55d358b375e86c5d82aca6ed089303d4b9347 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 27 Dec 2023 16:12:50 +0800 Subject: [PATCH 25/69] =?UTF-8?q?=C2=A0adjust=20case?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/system-test/2-query/group_partition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 0ac1be8482..8b06f0d6fd 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -134,7 +134,7 @@ class TDTestCase: tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} partition by tbname) ") tdSql.checkRows(check_num) - tdSql.query(f"select t1, c from (select t1, sum(c1) as s, count(*) as c from {self.stable} partition by t1)") + tdSql.query(f"select t1, c from (select t1, sum(c1) as s, count(*) as c from {self.dbname}.{self.stable} partition by t1)") tdSql.checkRows(check_num) @@ -156,7 +156,7 @@ class TDTestCase: tdSql.checkRows(nonempty_tb_num) # event window - tdSql.query(f"select tbname, count(*) from {self.stable} partition by tbname event_window start with 
c1 >= 0 end with c2 = 9;") + tdSql.query(f"select tbname, count(*) from {self.dbname}.{self.stable} partition by tbname event_window start with c1 >= 0 end with c2 = 9;") tdSql.checkRows(nonempty_tb_num) From 10e86fc733a53b57c056c1a4e7bc61a3624cb2df Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 27 Dec 2023 19:25:08 +0800 Subject: [PATCH 26/69] fix:[TS-4391] rebalance cnt always 1 if msg lost --- source/dnode/mnode/impl/inc/mndConsumer.h | 1 + source/dnode/mnode/impl/src/mndConsumer.c | 28 +++++++++++++++------- source/dnode/mnode/impl/src/mndSubscribe.c | 4 ++++ 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h b/source/dnode/mnode/impl/inc/mndConsumer.h index 94c937e8f4..f075510428 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -47,6 +47,7 @@ int32_t mndSetConsumerCommitLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj int32_t mndSetConsumerDropLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj *pConsumer); bool mndRebTryStart(); +bool mndRebCanStart(); void mndRebEnd(); void mndRebCntInc(); void mndRebCntDec(); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index c9ee66d3a0..11c929c898 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -101,6 +101,16 @@ bool mndRebTryStart() { return old == 0; } +bool mndRebCanStart() { + int32_t val = atomic_load_32(&mqRebInExecCnt); + if (val < 0) { + mError("rebalance trans end, rebalance counter:%d should not be less equalled than 0, ignore counter desc", val); + return false; + } + mInfo("tq timer, rebalance counter val:%d", val); + return val == 0; +} + void mndRebEnd() { mndRebCntDec(); } void mndRebCntInc() { @@ -119,7 +129,7 @@ void mndRebCntDec() { int32_t newVal = val - 1; int32_t oldVal = atomic_val_compare_exchange_32(&mqRebInExecCnt, val, newVal); if (oldVal == val) { - mDebug("rebalance trans end, rebalance counter:%d", newVal); + mInfo("rebalance trans end, rebalance counter:%d", newVal); break; } } @@ -284,10 +294,10 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { SMqConsumerObj *pConsumer; void *pIter = NULL; - mDebug("start to process mq timer"); + mInfo("start to process mq timer"); // rebalance cannot be parallel - if (!mndRebTryStart()) { + if (!mndRebCanStart()) { mInfo("mq rebalance already in progress, do nothing"); return 0; } @@ -295,7 +305,6 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { SMqDoRebalanceMsg *pRebMsg = rpcMallocCont(sizeof(SMqDoRebalanceMsg)); if (pRebMsg == NULL) { mError("failed to create the rebalance msg, size:%d, quit mq timer", (int32_t)sizeof(SMqDoRebalanceMsg)); - mndRebEnd(); return TSDB_CODE_OUT_OF_MEMORY; } @@ -303,7 +312,6 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { if (pRebMsg->rebSubHash == NULL) { mError("failed to create rebalance hashmap"); rpcFreeCont(pRebMsg); - mndRebEnd(); return TSDB_CODE_OUT_OF_MEMORY; } @@ -390,6 +398,11 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); } + + if (newTopicNum == 0 && removedTopicNum == 0 && taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); + } + taosRUnLockLatch(&pConsumer->lock); } @@ -397,7 +410,7 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { } if 
(taosHashGetSize(pRebMsg->rebSubHash) != 0) { - mInfo("mq rebalance will be triggered"); + mInfo("mq send msg to rebalance"); SRpcMsg rpcMsg = { .msgType = TDMT_MND_TMQ_DO_REBALANCE, .pCont = pRebMsg, @@ -407,8 +420,7 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { } else { taosHashCleanup(pRebMsg->rebSubHash); rpcFreeCont(pRebMsg); - mDebug("mq timer finished, no need to re-balance"); - mndRebEnd(); + mInfo("mq timer finished, no need to re-balance"); } return 0; } diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 01d4d1029c..8cd7f6bc9c 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -732,6 +732,10 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { SMqDoRebalanceMsg *pReq = pMsg->pCont; void *pIter = NULL; // bool rebalanceOnce = false; // to ensure only once. + if (!mndRebTryStart()) { + mInfo("mq rebalance already in progress, do nothing"); + return 0; + } mInfo("mq re-balance start, total required re-balanced trans:%d", taosHashGetSize(pReq->rebSubHash)); From d795798648ac0d1945e90bbbf39d445ed0f88679 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 27 Dec 2023 21:37:00 +0800 Subject: [PATCH 27/69] count state --- source/libs/executor/inc/executorInt.h | 11 ++- source/libs/executor/src/scanoperator.c | 94 ++++++++++++++----------- 2 files changed, 63 insertions(+), 42 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 2523b87cfb..8b0bef5fa6 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -217,6 +217,12 @@ enum { TABLE_SCAN__BLOCK_ORDER = 2, }; +typedef enum ETableCountState { + TABLE_COUNT_STATE_NONE = 0, // before start scan + TABLE_COUNT_STATE_SCAN = 1, // scanning + TABLE_COUNT_STATE_END = 2, // finish or noneed to process +} ETableCountState; + typedef struct SAggSupporter { SSHashObj* pResultRowHashTable; // quick locate the window object for each result char* keyBuf; // window key buffer @@ -262,17 +268,18 @@ typedef struct STableScanInfo { int32_t scanTimes; SSDataBlock* pResBlock; SHashObj* pIgnoreTables; - SHashObj* pValuedTables; // non empty table uids + SHashObj* pRemainTables; // remain table to process SSampleExecInfo sample; // sample execution info int32_t currentGroupId; int32_t currentTable; int8_t scanMode; int8_t assignBlockUid; + uint8_t countState; // empty table count state + bool isOneGroup; // whether or not only one group in this scan bool hasGroupByTag; bool countOnly; bool filesetDelimited; bool needCountEmptyTable; - bool processingEmptyTable; } STableScanInfo; typedef struct STableMergeScanInfo { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index c69d963c79..31fe31007c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -655,26 +655,44 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, colDataDestroy(&infoData); } - -// record processed (non empty) table -static int32_t markTableProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { +static int32_t initRemainTable(STableScanInfo* pTableScanInfo, const STableKeyInfo* pList, int32_t num) { if (!pTableScanInfo->needCountEmptyTable) { return TSDB_CODE_SUCCESS; } - if (NULL == pTableScanInfo->pValuedTables) { + pTableScanInfo->isOneGroup = true; + if (NULL == pTableScanInfo->pRemainTables) { int32_t tableNum = 
taosArrayGetSize(pTableScanInfo->base.pTableListInfo->pTableList); - pTableScanInfo->pValuedTables = + pTableScanInfo->pRemainTables = taosHashInit(tableNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); - if (NULL == pTableScanInfo->pValuedTables) { + if (NULL == pTableScanInfo->pRemainTables) { + pTableScanInfo->countState = TABLE_COUNT_STATE_END; return TSDB_CODE_OUT_OF_MEMORY; } } - - taosHashPut(pTableScanInfo->pValuedTables, &uid, sizeof(uid), &pTableScanInfo->scanTimes, - sizeof(pTableScanInfo->scanTimes)); + uint64_t groupId = 0; + for (int32_t i = 0; i < num; i++) { + const STableKeyInfo* pInfo = pList + i; + if (pTableScanInfo->isOneGroup) { + if (i == 0) { + groupId = pInfo->groupId; + } else if (groupId != pInfo->groupId) { + pTableScanInfo->isOneGroup = false; + } + } + taosHashPut(pTableScanInfo->pRemainTables, &(pInfo->uid), sizeof(pInfo->uid), &(pInfo->groupId), sizeof(pInfo->groupId)); + } return TSDB_CODE_SUCCESS; } +static void markTableProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { + // case0 group scanning, mark + // case1 stream scan: no need to mark + if (pTableScanInfo->countState > TABLE_COUNT_STATE_SCAN) { + return; + } + taosHashRemove(pTableScanInfo->pRemainTables, &uid, sizeof(uid)); +} + static SSDataBlock* getBlockForEmptyTable(SOperatorInfo* pOperator, const STableKeyInfo* tbInfo) { STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -770,14 +788,17 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; - // Only when all tables are scanned can you determine how many groups the tag has - bool outputAll = true; // The read handle is not initialized yet, since no qualified tables exists if (pTableScanInfo->base.dataReader == NULL || pOperator->status == OP_EXEC_DONE) { return NULL; } + if (TABLE_COUNT_STATE_NONE == pTableScanInfo->countState) { + initRemainTable(pTableScanInfo, pList, num); + pTableScanInfo->countState = TABLE_COUNT_STATE_SCAN; + } + // do the ascending order traverse in the first place. 
while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { SSDataBlock* p = doTableScanImpl(pOperator); @@ -829,37 +850,28 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey } if (pTableScanInfo->needCountEmptyTable) { - // pList is NULL in mode TABLE_SCAN__TABLE_ORDER for streamscan, no need to process - // pList not NULL, group by group, num >= 1 - int32_t tb_cnt = taosHashGetSize(pTableScanInfo->pValuedTables); - if (pList && num > tb_cnt) { - if (!pTableScanInfo->processingEmptyTable) { - pTableScanInfo->processingEmptyTable = true; - pTableScanInfo->currentTable = 0; - } - if (pTableScanInfo->currentTable < num) { - if (outputAll) { - // loop: get empty table uid & process - while (pTableScanInfo->currentTable < num) { - const STableKeyInfo* info = pList + pTableScanInfo->currentTable++; - if (pTableScanInfo->pValuedTables && - NULL != taosHashGet(pTableScanInfo->pValuedTables, &info->uid, sizeof(info->uid))) { - } else { - return getBlockForEmptyTable(pOperator, info); - } - } - } else if (tb_cnt == 0) { - // only need one & all empty table in this group - // output first one - pTableScanInfo->currentTable = num; - return getBlockForEmptyTable(pOperator, pList); + int32_t tb_cnt = taosHashGetSize(pTableScanInfo->pRemainTables); + if (tb_cnt) { + if (!pTableScanInfo->isOneGroup) { + // get first empty table uid, mark processed & rm from hash + void *pIte = taosHashIterate(pTableScanInfo->pRemainTables, NULL); + if (pIte != NULL) { + size_t keySize = 0; + uint64_t* pUid = taosHashGetKey(pIte, &keySize); + STableKeyInfo info = {.uid = *pUid, .groupId = *(uint64_t*)pIte}; + taosHashCancelIterate(pTableScanInfo->pRemainTables, pIte); + markTableProcessed(pTableScanInfo, *pUid); + return getBlockForEmptyTable(pOperator, &info); } + } else { + // output one table for this group + taosHashClear(pTableScanInfo->pRemainTables); + return getBlockForEmptyTable(pOperator, pList); } } - pTableScanInfo->processingEmptyTable = false; + pTableScanInfo->countState = TABLE_COUNT_STATE_END; } - taosHashClear(pTableScanInfo->pValuedTables); return NULL; } @@ -937,7 +949,8 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) { int32_t num = 0; STableKeyInfo* pList = NULL; tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); - + pInfo->countState = TABLE_COUNT_STATE_NONE; + pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num); pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); pInfo->scanTimes = 0; @@ -968,6 +981,7 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) { } tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); + pInfo->countState = TABLE_COUNT_STATE_NONE; ASSERT(pInfo->base.dataReader == NULL); int32_t code = pAPI->tsdReader.tsdReaderOpen(pInfo->base.readHandle.vnode, &pInfo->base.cond, pList, num, pInfo->pResBlock, @@ -1034,6 +1048,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pInfo->scanMode == TABLE_SCAN__TABLE_ORDER) { int32_t numOfTables = 0; // tableListGetSize(pTaskInfo->pTableListInfo); STableKeyInfo tInfo = {0}; + pInfo->countState = TABLE_COUNT_STATE_END; while (1) { SSDataBlock* result = doGroupedTableScan(pOperator, NULL, 0); @@ -1096,7 +1111,7 @@ static void destroyTableScanOperatorInfo(void* param) { STableScanInfo* pTableScanInfo = (STableScanInfo*)param; blockDataDestroy(pTableScanInfo->pResBlock); taosHashCleanup(pTableScanInfo->pIgnoreTables); - 
taosHashCleanup(pTableScanInfo->pValuedTables); + taosHashCleanup(pTableScanInfo->pRemainTables); destroyTableScanBase(&pTableScanInfo->base, &pTableScanInfo->base.readerAPI); taosMemoryFreeClear(param); } @@ -1161,7 +1176,6 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, pOperator->exprSupp.numOfExprs = numOfCols; pInfo->needCountEmptyTable = tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable; - pInfo->processingEmptyTable = false; pInfo->base.pTableListInfo = pTableListInfo; pInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5); From abed781bd324dda7c636e162a25646292214004a Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 27 Dec 2023 23:15:11 +0800 Subject: [PATCH 28/69] opt --- source/libs/executor/src/scanoperator.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 31fe31007c..0714019352 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -657,6 +657,7 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, static int32_t initRemainTable(STableScanInfo* pTableScanInfo, const STableKeyInfo* pList, int32_t num) { if (!pTableScanInfo->needCountEmptyTable) { + pTableScanInfo->countState = TABLE_COUNT_STATE_END; return TSDB_CODE_SUCCESS; } pTableScanInfo->isOneGroup = true; @@ -681,6 +682,7 @@ static int32_t initRemainTable(STableScanInfo* pTableScanInfo, const STableKeyIn } taosHashPut(pTableScanInfo->pRemainTables, &(pInfo->uid), sizeof(pInfo->uid), &(pInfo->groupId), sizeof(pInfo->groupId)); } + pTableScanInfo->countState = TABLE_COUNT_STATE_SCAN; return TSDB_CODE_SUCCESS; } @@ -690,6 +692,11 @@ static void markTableProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { if (pTableScanInfo->countState > TABLE_COUNT_STATE_SCAN) { return; } + // case2 only one group, uid ready + if (pTableScanInfo->isOneGroup) { + pTableScanInfo->countState = TABLE_COUNT_STATE_END; + return; + } taosHashRemove(pTableScanInfo->pRemainTables, &uid, sizeof(uid)); } @@ -796,7 +803,6 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey if (TABLE_COUNT_STATE_NONE == pTableScanInfo->countState) { initRemainTable(pTableScanInfo, pList, num); - pTableScanInfo->countState = TABLE_COUNT_STATE_SCAN; } // do the ascending order traverse in the first place. 
@@ -849,7 +855,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey } } - if (pTableScanInfo->needCountEmptyTable) { + if (pTableScanInfo->countState < TABLE_COUNT_STATE_END) { int32_t tb_cnt = taosHashGetSize(pTableScanInfo->pRemainTables); if (tb_cnt) { if (!pTableScanInfo->isOneGroup) { @@ -865,14 +871,14 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey } } else { // output one table for this group - taosHashClear(pTableScanInfo->pRemainTables); + pTableScanInfo->countState = TABLE_COUNT_STATE_END; return getBlockForEmptyTable(pOperator, pList); } } - pTableScanInfo->countState = TABLE_COUNT_STATE_END; } + taosHashClear(pTableScanInfo->pRemainTables); return NULL; } From a5f88a86e9b18ac7b3a3a61592659fa72a40c9a1 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Thu, 28 Dec 2023 09:45:01 +0800 Subject: [PATCH 29/69] adjust --- source/libs/executor/inc/executorInt.h | 2 +- source/libs/executor/src/scanoperator.c | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 8b0bef5fa6..b87dee475d 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -275,7 +275,7 @@ typedef struct STableScanInfo { int8_t scanMode; int8_t assignBlockUid; uint8_t countState; // empty table count state - bool isOneGroup; // whether or not only one group in this scan + bool isSameGroup; // whether all tables are in the same group this scan bool hasGroupByTag; bool countOnly; bool filesetDelimited; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 0714019352..dd6dff0301 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -655,12 +655,12 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, colDataDestroy(&infoData); } -static int32_t initRemainTable(STableScanInfo* pTableScanInfo, const STableKeyInfo* pList, int32_t num) { +static int32_t initTableCountEnv(STableScanInfo* pTableScanInfo, const STableKeyInfo* pList, int32_t num) { if (!pTableScanInfo->needCountEmptyTable) { pTableScanInfo->countState = TABLE_COUNT_STATE_END; return TSDB_CODE_SUCCESS; } - pTableScanInfo->isOneGroup = true; + pTableScanInfo->isSameGroup = true; if (NULL == pTableScanInfo->pRemainTables) { int32_t tableNum = taosArrayGetSize(pTableScanInfo->base.pTableListInfo->pTableList); pTableScanInfo->pRemainTables = @@ -673,11 +673,11 @@ static int32_t initRemainTable(STableScanInfo* pTableScanInfo, const STableKeyIn uint64_t groupId = 0; for (int32_t i = 0; i < num; i++) { const STableKeyInfo* pInfo = pList + i; - if (pTableScanInfo->isOneGroup) { + if (pTableScanInfo->isSameGroup) { if (i == 0) { groupId = pInfo->groupId; } else if (groupId != pInfo->groupId) { - pTableScanInfo->isOneGroup = false; + pTableScanInfo->isSameGroup = false; } } taosHashPut(pTableScanInfo->pRemainTables, &(pInfo->uid), sizeof(pInfo->uid), &(pInfo->groupId), sizeof(pInfo->groupId)); @@ -692,8 +692,8 @@ static void markTableProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { if (pTableScanInfo->countState > TABLE_COUNT_STATE_SCAN) { return; } - // case2 only one group, uid ready - if (pTableScanInfo->isOneGroup) { + // case2 if all table in same group, process only once + if (pTableScanInfo->isSameGroup) { pTableScanInfo->countState = TABLE_COUNT_STATE_END; return; } @@ -802,7 +802,7 @@ static SSDataBlock* 
doGroupedTableScan(SOperatorInfo* pOperator, const STableKey } if (TABLE_COUNT_STATE_NONE == pTableScanInfo->countState) { - initRemainTable(pTableScanInfo, pList, num); + initTableCountEnv(pTableScanInfo, pList, num); } // do the ascending order traverse in the first place. @@ -858,7 +858,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey if (pTableScanInfo->countState < TABLE_COUNT_STATE_END) { int32_t tb_cnt = taosHashGetSize(pTableScanInfo->pRemainTables); if (tb_cnt) { - if (!pTableScanInfo->isOneGroup) { + if (!pTableScanInfo->isSameGroup) { // get first empty table uid, mark processed & rm from hash void *pIte = taosHashIterate(pTableScanInfo->pRemainTables, NULL); if (pIte != NULL) { From 424ab1bbe32672570d4b8549b9b8f375e55bcbb5 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Thu, 28 Dec 2023 16:18:38 +0800 Subject: [PATCH 30/69] support table merge scan --- source/libs/executor/inc/executorInt.h | 3 ++- source/libs/executor/src/scanoperator.c | 27 ++++++++++++++------ tests/system-test/2-query/group_partition.py | 17 +++++++++--- 3 files changed, 34 insertions(+), 13 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index b87dee475d..ae38d4940c 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -307,7 +307,8 @@ typedef struct STableMergeScanInfo { SHashObj* mSkipTables; int64_t mergeLimit; SSortExecInfo sortExecInfo; - + bool needCountEmptyTable; + bool bGroupProcessed; // the group return data means processed bool filesetDelimited; bool bNewFilesetEvent; bool bNextDurationBlockEvent; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index dd6dff0301..a1f9baa082 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -700,17 +700,14 @@ static void markTableProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { taosHashRemove(pTableScanInfo->pRemainTables, &uid, sizeof(uid)); } -static SSDataBlock* getBlockForEmptyTable(SOperatorInfo* pOperator, const STableKeyInfo* tbInfo) { - STableScanInfo* pTableScanInfo = pOperator->info; - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SSDataBlock* pBlock = pTableScanInfo->pResBlock; - +static SSDataBlock* getOneRowResultBlock(SExecTaskInfo* pTaskInfo, STableScanBase* pBase, SSDataBlock* pBlock, + const STableKeyInfo* tbInfo) { blockDataEmpty(pBlock); pBlock->info.rows = 1; pBlock->info.id.uid = tbInfo->uid; pBlock->info.id.groupId = tbInfo->groupId; - // only one row: set all col data to null & hasNull + // only one row: set all col data to null & hasNull int32_t col_num = blockDataGetNumOfCols(pBlock); for (int32_t i = 0; i < col_num; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); @@ -718,7 +715,14 @@ static SSDataBlock* getBlockForEmptyTable(SOperatorInfo* pOperator, const STable } // set tag/tbname - doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows); + doSetTagColumnData(pBase, pBlock, pTaskInfo, pBlock->info.rows); + return pBlock; +} + +static SSDataBlock* getBlockForEmptyTable(SOperatorInfo* pOperator, const STableKeyInfo* tbInfo) { + STableScanInfo* pTableScanInfo = pOperator->info; + SSDataBlock* pBlock = + getOneRowResultBlock(pOperator->pTaskInfo, &pTableScanInfo->base, pTableScanInfo->pResBlock, tbInfo); pOperator->resultInfo.totalRows++; return pBlock; @@ -3585,7 +3589,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { } 
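/*
 * Sketch of the merge-scan variant this patch adds (simplified model, not the
 * operator code; pullSortedRows is a stand-in for getSortedTableMergeScanBlockData):
 * a per-group flag records whether the group has returned anything; when the sorted
 * merge yields no block and counting empty tables is enabled, one placeholder row is
 * produced for the group's first table, then the flag blocks any further output.
 */
#include <stdbool.h>
#include <stdio.h>

static int pullSortedRows(bool groupHasData) { return groupHasData ? 42 : 0; }

static void scanOneGroup(int groupId, bool groupHasData, bool needCountEmptyTable) {
  bool bGroupProcessed = false;        /* reset at startGroupTableMergeScan()   */
  int  rows = pullSortedRows(groupHasData);

  if (rows == 0 && !bGroupProcessed && needCountEmptyTable) {
    rows = 1;                          /* one NULL-filled row for the group     */
    printf("group %d: empty, emitting placeholder row\n", groupId);
  }
  if (rows > 0) {
    bGroupProcessed = true;            /* group answered, never emit again      */
    printf("group %d: returned %d row(s)\n", groupId, rows);
  }
}

int main(void) {
  scanOneGroup(1, true,  true);
  scanOneGroup(2, false, true);
  scanOneGroup(3, false, false);       /* countAlwaysReturnValue disabled       */
  return 0;
}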
pInfo->tableEndIndex = i - 1; } - + pInfo->bGroupProcessed = false; int32_t tableStartIdx = pInfo->tableStartIndex; int32_t tableEndIdx = pInfo->tableEndIndex; @@ -3707,9 +3711,14 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pOperator); + if (pBlock == NULL && !pInfo->bGroupProcessed && pInfo->needCountEmptyTable) { + STableKeyInfo* tbInfo = tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableStartIndex); + pBlock = getOneRowResultBlock(pTaskInfo, &pInfo->base, pInfo->pResBlock, tbInfo); + } if (pBlock != NULL) { pBlock->info.id.groupId = pInfo->groupId; pOperator->resultInfo.totalRows += pBlock->info.rows; + pInfo->bGroupProcessed = true; return pBlock; } else { if (pInfo->bNewFilesetEvent) { @@ -3864,6 +3873,8 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN } else { pInfo->filesetDelimited = pTableScanNode->filesetDelimited; } + pInfo->needCountEmptyTable = tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable; + setOperatorInfo(pOperator, "TableMergeScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, false, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->exprSupp.numOfExprs = numOfCols; diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 8b06f0d6fd..e228351f0e 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -120,12 +120,21 @@ class TDTestCase: tdSql.checkRows(all_tb_num) # elapsed: continuous duration in a statistical period, table merge scan - tdSql.query(f" select count(c1), max(c5), avg(c5), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} group by tbname") - tdSql.checkRows(nonempty_tb_num) - - tdSql.query(f" select count(c1), max(c1), avg(c1), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.query(f" select count(c1), max(c5), last_row(c5), elapsed(ts), spread(c1) from {self.dbname}.{self.stable} group by tbname") + tdSql.checkRows(all_tb_num) + + tdSql.query(f" select count(c1), min(c1), avg(c1), elapsed(ts), mode(c1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.checkRows(all_tb_num) + + tdSql.query(f" select count(c1), elapsed(ts), twa(c1), irate(c1), leastsquares(c1,1,1) from {self.dbname}.{self.stable} partition by tbname") + tdSql.checkRows(all_tb_num) + + tdSql.query(f" select avg(c1), elapsed(ts), twa(c1), irate(c1) from {self.dbname}.{self.stable} partition by tbname") tdSql.checkRows(nonempty_tb_num) + # if nonempty_tb_num > 0: + # tdSql.query(f" select avg(c1), percentile(c1, 50) from {self.dbname}.sub_{self.stable}_1") + # tdSql.checkRows(1) def test_innerSelect(self, check_num): tdSql.query(f"select * from (select count(c1) from {self.dbname}.{self.stable} group by tbname) ") From 53ce344b362b10d0e7afd5f49d06893ee9b10f15 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 28 Dec 2023 17:09:52 +0800 Subject: [PATCH 31/69] fix:set error msg if build consumer error --- source/client/src/clientTmq.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index d05cdc0156..15c8903978 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1028,11 +1028,16 @@ static void tmqMgmtInit(void) { } } +#define SET_ERROR_MSG(MSG) if(errstr!=NULL)snprintf(errstr,errstrLen,MSG); tmq_t* 
tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { - if(conf == NULL) return NULL; + if(conf == NULL) { + SET_ERROR_MSG("configure is null") + return NULL; + } taosThreadOnce(&tmqInit, tmqMgmtInit); if (tmqInitRes != 0) { terrno = tmqInitRes; + SET_ERROR_MSG("tmq timer init error") return NULL; } @@ -1040,6 +1045,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (pTmq == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; tscError("failed to create consumer, groupId:%s, code:%s", conf->groupId, terrstr()); + SET_ERROR_MSG("malloc tmq failed") return NULL; } @@ -1055,6 +1061,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { conf->groupId[0] == 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; tscError("consumer:0x%" PRIx64 " setup failed since %s, groupId:%s", pTmq->consumerId, terrstr(), pTmq->groupId); + SET_ERROR_MSG("malloc tmq element failed or group is empty") goto _failed; } @@ -1086,6 +1093,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (tsem_init(&pTmq->rspSem, 0, 0) != 0) { tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); + SET_ERROR_MSG("init t_sem failed") goto _failed; } @@ -1094,11 +1102,13 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (pTmq->pTscObj == NULL) { tscError("consumer:0x%" PRIx64 " setup failed since %s, groupId:%s", pTmq->consumerId, terrstr(), pTmq->groupId); tsem_destroy(&pTmq->rspSem); + SET_ERROR_MSG("init tscObj failed") goto _failed; } pTmq->refId = taosAddRef(tmqMgmt.rsetId, pTmq); if (pTmq->refId < 0) { + SET_ERROR_MSG("add tscObj ref failed") goto _failed; } From 2526df745ee62db8786adcd732e8b0cd4646a723 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 28 Dec 2023 17:15:09 +0800 Subject: [PATCH 32/69] enh: adjust threshold of snap replication timeout --- include/libs/sync/sync.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index e54237fe8b..cb053d2548 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -46,8 +46,8 @@ extern "C" { #define SYNC_HEARTBEAT_SLOW_MS 1500 #define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500 -#define SYNC_SNAP_RESEND_MS 1000 * 300 -#define SYNC_SNAP_TIMEOUT_MS 1000 * 1800 +#define SYNC_SNAP_RESEND_MS 1000 * 60 +#define SYNC_SNAP_TIMEOUT_MS 1000 * 300 #define SYNC_VND_COMMIT_MIN_MS 3000 From d83f85bf2642982d8fdcb5bd53b58011cc8f81e1 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 28 Dec 2023 17:26:46 +0800 Subject: [PATCH 33/69] enh: send rsp msg on rejecting snap replication due to smaller term --- source/libs/sync/src/syncSnapshot.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 10a8734617..f0e457ef8d 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -1001,6 +1001,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, SRpcMsg *pRpcMsg) { sRError(pReceiver, "reject snap replication with smaller term. 
msg term:%" PRId64 ", seq:%d", pMsg->term, pMsg->seq); terrno = TSDB_CODE_SYN_MISMATCHED_SIGNATURE; + syncSnapSendRsp(pReceiver, pMsg, NULL, 0, 0, terrno); return -1; } From baa7f9c895faf0ed12d59c5ff02460ae8eb3f1da Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 28 Dec 2023 17:29:49 +0800 Subject: [PATCH 34/69] enh: adjust error msg as warn for not ready to propose etc --- source/dnode/vnode/src/vnd/vnodeSync.c | 8 ++++---- source/libs/sync/src/syncMain.c | 6 +++--- source/libs/sync/src/syncPipeline.c | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 048092131d..0f491e9a58 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -220,8 +220,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) isWeak, isBlock, msg, numOfMsgs, arrayPos, pMsg->info.handle); if (!pVnode->restored) { - vGError("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, - TMSG_INFO(pMsg->msgType)); + vGWarn("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, + TMSG_INFO(pMsg->msgType)); terrno = TSDB_CODE_SYN_RESTORING; vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_SYN_RESTORING); rpcFreeCont(pMsg->pCont); @@ -284,8 +284,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) vnodeIsMsgBlock(pMsg->msgType), msg, numOfMsgs, pMsg->info.handle); if (!pVnode->restored) { - vGError("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, - TMSG_INFO(pMsg->msgType)); + vGWarn("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg, + TMSG_INFO(pMsg->msgType)); vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_SYN_RESTORING); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 6f3b3fdf98..52557f7b9c 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -662,14 +662,14 @@ ESyncRole syncGetRole(int64_t rid) { int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_t* seq) { if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { terrno = TSDB_CODE_SYN_NOT_LEADER; - sNError(pSyncNode, "sync propose not leader, type:%s", TMSG_INFO(pMsg->msgType)); + sNWarn(pSyncNode, "sync propose not leader, type:%s", TMSG_INFO(pMsg->msgType)); return -1; } if (!pSyncNode->restoreFinish) { terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY; - sNError(pSyncNode, "failed to sync propose since not ready, type:%s, last:%" PRId64 ", cmt:%" PRId64, - TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex); + sNWarn(pSyncNode, "failed to sync propose since not ready, type:%s, last:%" PRId64 ", cmt:%" PRId64, + TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex); return -1; } diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 28ee5ba841..70bdd4a837 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -206,7 +206,7 @@ int32_t syncLogBufferInitWithoutLock(SSyncLogBuffer* pBuf, SSyncNode* pNode) { } if (pLogStore->syncLogGetEntry(pLogStore, index, &pEntry) < 0) { - sError("vgId:%d, failed to get log entry since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index); + sWarn("vgId:%d, failed to get log entry since %s. 
index:%" PRId64 "", pNode->vgId, terrstr(), index); break; } @@ -1237,7 +1237,7 @@ SSyncRaftEntry* syncLogBufferGetOneEntry(SSyncLogBuffer* pBuf, SSyncNode* pNode, } else { *pInBuf = false; if (pNode->pLogStore->syncLogGetEntry(pNode->pLogStore, index, &pEntry) < 0) { - sError("vgId:%d, failed to get log entry since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index); + sWarn("vgId:%d, failed to get log entry since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index); } } return pEntry; @@ -1253,7 +1253,7 @@ int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex ind pEntry = syncLogBufferGetOneEntry(pBuf, pNode, index, &inBuf); if (pEntry == NULL) { - sError("vgId:%d, failed to get raft entry for index:%" PRId64 "", pNode->vgId, index); + sWarn("vgId:%d, failed to get raft entry for index:%" PRId64 "", pNode->vgId, index); if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { SSyncLogReplMgr* pMgr = syncNodeGetLogReplMgr(pNode, pDestId); if (pMgr) { From ef22830ca39a70be588db086c6d769d49159e444 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Thu, 28 Dec 2023 18:01:30 +0800 Subject: [PATCH 35/69] adjust --- tests/system-test/2-query/irate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py index d976edb49c..d882c9b8fb 100644 --- a/tests/system-test/2-query/irate.py +++ b/tests/system-test/2-query/irate.py @@ -209,7 +209,7 @@ class TDTestCase: tdSql.error(f"select irate(c1), abs(c1) from {dbname}.ct4 ") # agg functions mix with agg functions - tdSql.query(f"select irate(c1), count(c5) from {dbname}.stb1 partition by tbname order by tbname") + tdSql.query(f"select irate(c1), count(c5) from {dbname}.stb1 partition by tbname having count(c5)>0 order by tbname") tdSql.checkData(0, 0, 0.000000000) tdSql.checkData(1, 0, 0.000000000) tdSql.checkData(0, 1, 13) From 9f8f69a791f9544e11475af4f0f7c0fa04663b56 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Thu, 28 Dec 2023 21:54:16 +0800 Subject: [PATCH 36/69] adjust --- source/libs/executor/src/scanoperator.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index a1f9baa082..7b2df4e206 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -670,15 +670,11 @@ static int32_t initTableCountEnv(STableScanInfo* pTableScanInfo, const STableKey return TSDB_CODE_OUT_OF_MEMORY; } } - uint64_t groupId = 0; + uint64_t groupId = pList->groupId; for (int32_t i = 0; i < num; i++) { const STableKeyInfo* pInfo = pList + i; - if (pTableScanInfo->isSameGroup) { - if (i == 0) { - groupId = pInfo->groupId; - } else if (groupId != pInfo->groupId) { - pTableScanInfo->isSameGroup = false; - } + if (pTableScanInfo->isSameGroup && groupId != pInfo->groupId) { + pTableScanInfo->isSameGroup = false; } taosHashPut(pTableScanInfo->pRemainTables, &(pInfo->uid), sizeof(pInfo->uid), &(pInfo->groupId), sizeof(pInfo->groupId)); } From e2b61a55fc13aee349cb63abb3c7d003c1de783f Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 29 Dec 2023 01:40:17 +0800 Subject: [PATCH 37/69] refactor --- source/libs/executor/inc/executorInt.h | 6 +- source/libs/executor/src/executor.c | 12 +-- source/libs/executor/src/scanoperator.c | 129 ++++++++---------------- 3 files changed, 52 insertions(+), 95 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index ae38d4940c..9acef69f9c 100644 --- 
a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -268,14 +268,12 @@ typedef struct STableScanInfo { int32_t scanTimes; SSDataBlock* pResBlock; SHashObj* pIgnoreTables; - SHashObj* pRemainTables; // remain table to process SSampleExecInfo sample; // sample execution info - int32_t currentGroupId; - int32_t currentTable; + int32_t tableStartIndex; // current group scan start + int32_t tableEndIndex; // current group scan end int8_t scanMode; int8_t assignBlockUid; uint8_t countState; // empty table count state - bool isSameGroup; // whether all tables are in the same group this scan bool hasGroupByTag; bool countOnly; bool filesetDelimited; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 6cee79bff2..fb39de484f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1209,7 +1209,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT STableKeyInfo* pTableInfo = tableListGetInfo(pTableListInfo, 0); uid = pTableInfo->uid; ts = INT64_MIN; - pScanInfo->currentTable = 0; + pScanInfo->tableEndIndex = 0; } else { taosRUnLockLatch(&pTaskInfo->lock); qError("no table in table list, %s", id); @@ -1223,16 +1223,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT pInfo->pTableScanOp->resultInfo.totalRows = 0; // start from current accessed position - // we cannot start from the pScanInfo->currentTable, since the commit offset may cause the rollback of the start + // we cannot start from the pScanInfo->tableEndIndex, since the commit offset may cause the rollback of the start // position, let's find it from the beginning. index = tableListFind(pTableListInfo, uid, 0); taosRUnLockLatch(&pTaskInfo->lock); if (index >= 0) { - pScanInfo->currentTable = index; + pScanInfo->tableEndIndex = index; } else { qError("vgId:%d uid:%" PRIu64 " not found in table list, total:%d, index:%d %s", pTaskInfo->id.vgId, uid, - numOfTables, pScanInfo->currentTable, id); + numOfTables, pScanInfo->tableEndIndex, id); terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } @@ -1255,12 +1255,12 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT } qDebug("tsdb reader created with offset(snapshot) uid:%" PRId64 " ts:%" PRId64 " table index:%d, total:%d, %s", - uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id); + uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id); } else { pTaskInfo->storageAPI.tsdReader.tsdSetQueryTableList(pScanBaseInfo->dataReader, &keyInfo, 1); pTaskInfo->storageAPI.tsdReader.tsdReaderResetStatus(pScanBaseInfo->dataReader, &pScanBaseInfo->cond); qDebug("tsdb reader offset seek snapshot to uid:%" PRId64 " ts %" PRId64 " table index:%d numOfTable:%d, %s", - uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id); + uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id); } // restore the key value diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 7b2df4e206..e990d3d975 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -655,45 +655,29 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, colDataDestroy(&infoData); } -static int32_t initTableCountEnv(STableScanInfo* pTableScanInfo, const STableKeyInfo* pList, int32_t num) { - if 
(!pTableScanInfo->needCountEmptyTable) { - pTableScanInfo->countState = TABLE_COUNT_STATE_END; - return TSDB_CODE_SUCCESS; - } - pTableScanInfo->isSameGroup = true; - if (NULL == pTableScanInfo->pRemainTables) { - int32_t tableNum = taosArrayGetSize(pTableScanInfo->base.pTableListInfo->pTableList); - pTableScanInfo->pRemainTables = - taosHashInit(tableNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); - if (NULL == pTableScanInfo->pRemainTables) { - pTableScanInfo->countState = TABLE_COUNT_STATE_END; - return TSDB_CODE_OUT_OF_MEMORY; - } - } - uint64_t groupId = pList->groupId; - for (int32_t i = 0; i < num; i++) { - const STableKeyInfo* pInfo = pList + i; - if (pTableScanInfo->isSameGroup && groupId != pInfo->groupId) { - pTableScanInfo->isSameGroup = false; - } - taosHashPut(pTableScanInfo->pRemainTables, &(pInfo->uid), sizeof(pInfo->uid), &(pInfo->groupId), sizeof(pInfo->groupId)); - } - pTableScanInfo->countState = TABLE_COUNT_STATE_SCAN; - return TSDB_CODE_SUCCESS; -} -static void markTableProcessed(STableScanInfo* pTableScanInfo, uint64_t uid) { - // case0 group scanning, mark - // case1 stream scan: no need to mark - if (pTableScanInfo->countState > TABLE_COUNT_STATE_SCAN) { - return; +static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, int32_t* size) { + pInfo->tableStartIndex = pInfo->tableEndIndex + 1; + + int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); + STableKeyInfo* pStart = (STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableStartIndex); + int32_t i = pInfo->tableStartIndex + 1; + for (; i < numOfTables; ++i) { + STableKeyInfo* pCur = tableListGetInfo(pInfo->base.pTableListInfo, i); + if (pCur->groupId != pStart->groupId) { + break; + } } - // case2 if all table in same group, process only once - if (pTableScanInfo->isSameGroup) { - pTableScanInfo->countState = TABLE_COUNT_STATE_END; - return; + + pInfo->tableEndIndex = i - 1; + if (!pInfo->needCountEmptyTable) { + pInfo->countState = TABLE_COUNT_STATE_END; + } else { + pInfo->countState = TABLE_COUNT_STATE_SCAN; } - taosHashRemove(pTableScanInfo->pRemainTables, &uid, sizeof(uid)); + + *pKeyInfo = pStart; + *size = i - pInfo->tableStartIndex; } static SSDataBlock* getOneRowResultBlock(SExecTaskInfo* pTaskInfo, STableScanBase* pBase, SSDataBlock* pBlock, @@ -791,7 +775,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { return NULL; } -static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKeyInfo* pList, int32_t num) { +static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { STableScanInfo* pTableScanInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; @@ -801,15 +785,11 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey return NULL; } - if (TABLE_COUNT_STATE_NONE == pTableScanInfo->countState) { - initTableCountEnv(pTableScanInfo, pList, num); - } - // do the ascending order traverse in the first place. 
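/*
 * Simplified model of initNextGroupScan() above (illustration only): the table list
 * is ordered by groupId, so the next group is the contiguous run that starts right
 * after the previous group's end index, and tableEndIndex == -1 marks "before the
 * first group".
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t uid; uint64_t groupId; } KeyInfo;

/* returns the number of tables in the group starting at *tableEndIndex + 1 */
static int nextGroupRange(const KeyInfo *list, int numOfTables, int *tableEndIndex) {
  int start = *tableEndIndex + 1;
  int i = start + 1;
  while (i < numOfTables && list[i].groupId == list[start].groupId) i++;
  *tableEndIndex = i - 1;
  return i - start;
}

int main(void) {
  KeyInfo list[] = {{1, 10}, {2, 10}, {3, 20}, {4, 30}, {5, 30}};
  int numOfTables = 5, tableEndIndex = -1;

  while (tableEndIndex + 1 < numOfTables) {
    int start = tableEndIndex + 1;
    int num = nextGroupRange(list, numOfTables, &tableEndIndex);
    printf("group %llu: tables [%d, %d], count %d\n",
           (unsigned long long)list[start].groupId, start, tableEndIndex, num);
  }
  return 0;
}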
while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { - markTableProcessed(pTableScanInfo, p->info.id.uid); + pTableScanInfo->countState = TABLE_COUNT_STATE_END; return p; } @@ -838,7 +818,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey while (pTableScanInfo->scanTimes < total) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { - markTableProcessed(pTableScanInfo, p->info.id.uid); + pTableScanInfo->countState = TABLE_COUNT_STATE_END; return p; } @@ -856,29 +836,13 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator, const STableKey } if (pTableScanInfo->countState < TABLE_COUNT_STATE_END) { - int32_t tb_cnt = taosHashGetSize(pTableScanInfo->pRemainTables); - if (tb_cnt) { - if (!pTableScanInfo->isSameGroup) { - // get first empty table uid, mark processed & rm from hash - void *pIte = taosHashIterate(pTableScanInfo->pRemainTables, NULL); - if (pIte != NULL) { - size_t keySize = 0; - uint64_t* pUid = taosHashGetKey(pIte, &keySize); - STableKeyInfo info = {.uid = *pUid, .groupId = *(uint64_t*)pIte}; - taosHashCancelIterate(pTableScanInfo->pRemainTables, pIte); - markTableProcessed(pTableScanInfo, *pUid); - return getBlockForEmptyTable(pOperator, &info); - } - } else { - // output one table for this group - pTableScanInfo->countState = TABLE_COUNT_STATE_END; - return getBlockForEmptyTable(pOperator, pList); - } - } + // output once for this group pTableScanInfo->countState = TABLE_COUNT_STATE_END; + STableKeyInfo* pStart = + (STableKeyInfo*)tableListGetInfo(pTableScanInfo->base.pTableListInfo, pTableScanInfo->tableStartIndex); + return getBlockForEmptyTable(pOperator, pStart); } - taosHashClear(pTableScanInfo->pRemainTables); return NULL; } @@ -938,8 +902,8 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStorageAPI* pAPI = &pTaskInfo->storageAPI; - - if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) { + int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); + if (pInfo->tableEndIndex + 1 >= numOfTables) { setOperatorCompleted(pOperator); if (pOperator->dynamicTask) { taosArrayClear(pInfo->base.pTableListInfo->pTableList); @@ -954,14 +918,13 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) { int32_t num = 0; STableKeyInfo* pList = NULL; - tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); - pInfo->countState = TABLE_COUNT_STATE_NONE; + initNextGroupScan(pInfo, &pList, &num); pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num); pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); pInfo->scanTimes = 0; - SSDataBlock* result = doGroupedTableScan(pOperator, pList, num); + SSDataBlock* result = doGroupedTableScan(pOperator); if (result != NULL) { if (pOperator->dynamicTask) { result->info.id.groupId = result->info.id.uid; @@ -979,15 +942,14 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) { int32_t num = 0; STableKeyInfo* pList = NULL; - if (pInfo->currentGroupId == -1) { + if (pInfo->tableEndIndex == -1) { int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); - if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo) || numOfTables == 0) { + if (pInfo->tableEndIndex + 1 == numOfTables) { setOperatorCompleted(pOperator); return 
NULL; } - tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); - pInfo->countState = TABLE_COUNT_STATE_NONE; + initNextGroupScan(pInfo, &pList, &num); ASSERT(pInfo->base.dataReader == NULL); int32_t code = pAPI->tsdReader.tsdReaderOpen(pInfo->base.readHandle.vnode, &pInfo->base.cond, pList, num, pInfo->pResBlock, @@ -1001,11 +963,9 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) { if (pInfo->pResBlock->info.capacity > pOperator->resultInfo.capacity) { pOperator->resultInfo.capacity = pInfo->pResBlock->info.capacity; } - } else { - tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); } - SSDataBlock* result = doGroupedTableScan(pOperator, pList, num); + SSDataBlock* result = doGroupedTableScan(pOperator); if (result != NULL) { if (pOperator->dynamicTask) { result->info.id.groupId = result->info.id.uid; @@ -1038,7 +998,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { T_LONG_JMP(pTaskInfo->env, code); } if (pOperator->status == OP_EXEC_DONE) { - pInfo->currentGroupId = -1; + pInfo->tableEndIndex = -1; pOperator->status = OP_OPENED; SSDataBlock* result = NULL; while (true) { @@ -1057,29 +1017,29 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { pInfo->countState = TABLE_COUNT_STATE_END; while (1) { - SSDataBlock* result = doGroupedTableScan(pOperator, NULL, 0); + SSDataBlock* result = doGroupedTableScan(pOperator); if (result || (pOperator->status == OP_EXEC_DONE) || isTaskKilled(pTaskInfo)) { return result; } // if no data, switch to next table and continue scan - pInfo->currentTable++; + pInfo->tableEndIndex++; taosRLockLatch(&pTaskInfo->lock); numOfTables = tableListGetSize(pInfo->base.pTableListInfo); - if (pInfo->currentTable >= numOfTables) { + if (pInfo->tableEndIndex >= numOfTables) { qDebug("all table checked in table list, total:%d, return NULL, %s", numOfTables, GET_TASKID(pTaskInfo)); taosRUnLockLatch(&pTaskInfo->lock); return NULL; } - tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->currentTable); + tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableEndIndex); taosRUnLockLatch(&pTaskInfo->lock); pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, &tInfo, 1); qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d/%d %s", tInfo.uid, numOfTables, - pInfo->currentTable, numOfTables, GET_TASKID(pTaskInfo)); + pInfo->tableEndIndex, numOfTables, GET_TASKID(pTaskInfo)); pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); pInfo->scanTimes = 0; @@ -1117,7 +1077,6 @@ static void destroyTableScanOperatorInfo(void* param) { STableScanInfo* pTableScanInfo = (STableScanInfo*)param; blockDataDestroy(pTableScanInfo->pResBlock); taosHashCleanup(pTableScanInfo->pIgnoreTables); - taosHashCleanup(pTableScanInfo->pRemainTables); destroyTableScanBase(&pTableScanInfo->base, &pTableScanInfo->base.readerAPI); taosMemoryFreeClear(param); } @@ -1173,7 +1132,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, goto _error; } - pInfo->currentGroupId = -1; + pInfo->tableEndIndex = -1; pInfo->assignBlockUid = pTableScanNode->assignBlockUid; pInfo->hasGroupByTag = pTableScanNode->pGroupTags ? 
true : false; @@ -1268,7 +1227,7 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint6 pTableScanInfo->base.cond.startVersion = 0; pTableScanInfo->base.cond.endVersion = ver; pTableScanInfo->scanTimes = 0; - pTableScanInfo->currentGroupId = -1; + pTableScanInfo->tableEndIndex = -1; pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader); pTableScanInfo->base.dataReader = NULL; } @@ -2170,7 +2129,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { pInfo->pTableScanOp->status = OP_OPENED; pTSInfo->scanTimes = 0; - pTSInfo->currentGroupId = -1; + pTSInfo->tableEndIndex = -1; } if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__SCAN1) { From 0a0f1a9d22986c5140a5adda57475455b69a43df Mon Sep 17 00:00:00 2001 From: fullhonest Date: Tue, 26 Dec 2023 19:51:14 +0800 Subject: [PATCH 38/69] Fix TD-27989 --- source/libs/function/inc/tpercentile.h | 4 +-- source/libs/function/src/tpercentile.c | 46 +++++++++++++------------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h index 65b7b38a05..5351594b2f 100644 --- a/source/libs/function/inc/tpercentile.h +++ b/source/libs/function/inc/tpercentile.h @@ -26,12 +26,12 @@ extern "C" { typedef struct MinMaxEntry { union { double dMinVal; - int64_t i64MinVal; + //double i64MinVal; uint64_t u64MinVal; }; union { double dMaxVal; - int64_t i64MaxVal; + //double i64MaxVal; int64_t u64MaxVal; }; } MinMaxEntry; diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 93008b565a..c671e7717c 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -63,8 +63,8 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) static void resetBoundingBox(MinMaxEntry *range, int32_t type) { if (IS_SIGNED_NUMERIC_TYPE(type)) { - range->i64MaxVal = INT64_MIN; - range->i64MinVal = INT64_MAX; + range->dMaxVal = INT64_MIN; + range->dMinVal = INT64_MAX; } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { range->u64MaxVal = 0; range->u64MinVal = UINT64_MAX; @@ -80,8 +80,8 @@ static int32_t setBoundingBox(MinMaxEntry *range, int16_t type, double minval, d } if (IS_SIGNED_NUMERIC_TYPE(type)) { - range->i64MinVal = (int64_t)minval; - range->i64MaxVal = (int64_t)maxval; + range->dMinVal = (int64_t)minval; + range->dMaxVal = (int64_t)maxval; } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { range->u64MinVal = (uint64_t)minval; range->u64MaxVal = (uint64_t)maxval; @@ -137,21 +137,21 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { int32_t index = -1; - if (v > pBucket->range.i64MaxVal || v < pBucket->range.i64MinVal) { + if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) { return index; } // divide the value range into 1024 buckets - uint64_t span = (uint64_t)(pBucket->range.i64MaxVal - pBucket->range.i64MinVal); + uint64_t span = pBucket->range.dMaxVal - pBucket->range.dMinVal; if (span < pBucket->numOfSlots) { - int64_t delta = v - pBucket->range.i64MinVal; + int64_t delta = v - pBucket->range.dMinVal; index = (delta % pBucket->numOfSlots); } else { double slotSpan = ((double)span) / pBucket->numOfSlots; - uint64_t delta = (uint64_t)(v - pBucket->range.i64MinVal); + uint64_t delta = (uint64_t)(v - pBucket->range.dMinVal); - index = (int32_t)(delta / slotSpan); - if (v == pBucket->range.i64MaxVal || index == pBucket->numOfSlots) { + index = delta / slotSpan; + if (v == pBucket->range.dMaxVal || index == 
pBucket->numOfSlots) { index -= 1; } } @@ -318,23 +318,23 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT int64_t v = 0; GET_TYPED_DATA(v, int64_t, dataType, data); - if (r->i64MinVal > v) { - r->i64MinVal = v; + if (r->dMinVal > v) { + r->dMinVal = v; } - if (r->i64MaxVal < v) { - r->i64MaxVal = v; + if (r->dMaxVal < v) { + r->dMaxVal = v; } } else if (IS_UNSIGNED_NUMERIC_TYPE(dataType)) { uint64_t v = 0; GET_TYPED_DATA(v, uint64_t, dataType, data); - if (r->i64MinVal > v) { - r->i64MinVal = v; + if (r->u64MinVal > v) { + r->u64MinVal = v; } - if (r->i64MaxVal < v) { - r->i64MaxVal = v; + if (r->u64MaxVal < v) { + r->u64MaxVal = v; } } else if (IS_FLOAT_TYPE(dataType)) { double v = 0; @@ -438,7 +438,7 @@ static double getIdenticalDataVal(tMemBucket *pMemBucket, int32_t slotIndex) { double finalResult = 0.0; if (IS_SIGNED_NUMERIC_TYPE(pMemBucket->type)) { - finalResult = (double)pSlot->range.i64MinVal; + finalResult = (double)pSlot->range.dMinVal; } else if (IS_UNSIGNED_NUMERIC_TYPE(pMemBucket->type)) { finalResult = (double)pSlot->range.u64MinVal; } else { @@ -469,8 +469,8 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction double maxOfThisSlot = 0; double minOfNextSlot = 0; if (IS_SIGNED_NUMERIC_TYPE(pMemBucket->type)) { - maxOfThisSlot = (double)pSlot->range.i64MaxVal; - minOfNextSlot = (double)next.i64MinVal; + maxOfThisSlot = (double)pSlot->range.dMaxVal; + minOfNextSlot = (double)next.dMinVal; } else if (IS_UNSIGNED_NUMERIC_TYPE(pMemBucket->type)) { maxOfThisSlot = (double)pSlot->range.u64MaxVal; minOfNextSlot = (double)next.u64MinVal; @@ -577,7 +577,7 @@ int32_t getPercentile(tMemBucket *pMemBucket, double percent, double *result) { MinMaxEntry *pRange = &pMemBucket->range; if (IS_SIGNED_NUMERIC_TYPE(pMemBucket->type)) { - *result = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->i64MaxVal : pRange->i64MinVal); + *result = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->dMaxVal : pRange->dMinVal); } else if (IS_UNSIGNED_NUMERIC_TYPE(pMemBucket->type)) { *result = (double)(fabs(percent - 100) < DBL_EPSILON ? 
pRange->u64MaxVal : pRange->u64MinVal); } else { @@ -603,6 +603,6 @@ bool isIdenticalData(tMemBucket *pMemBucket, int32_t index) { if (IS_FLOAT_TYPE(pMemBucket->type)) { return fabs(pSeg->range.dMaxVal - pSeg->range.dMinVal) < DBL_EPSILON; } else { - return pSeg->range.i64MinVal == pSeg->range.i64MaxVal; + return pSeg->range.dMinVal == pSeg->range.dMaxVal; } } From b58a23df49b034490521d28faa51548caa1b2aba Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 29 Dec 2023 13:57:13 +0800 Subject: [PATCH 39/69] fix:[TS-4391] rebalance cnt always 1 if msg lost --- include/common/tmsg.h | 6 - source/dnode/mnode/impl/inc/mndConsumer.h | 5 +- source/dnode/mnode/impl/src/mndConsumer.c | 184 +-------------------- source/dnode/mnode/impl/src/mndSubscribe.c | 170 +++++++++++++++---- 4 files changed, 151 insertions(+), 214 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index bafe7583b7..c314d82036 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2544,12 +2544,6 @@ _err: return NULL; } -// this message is sent from mnode to mnode(read thread to write thread), -// so there is no need for serialization or deserialization -typedef struct { - SHashObj* rebSubHash; // SHashObj -} SMqDoRebalanceMsg; - typedef struct { int64_t streamId; int64_t checkpointId; diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h b/source/dnode/mnode/impl/inc/mndConsumer.h index f075510428..59a22b76cd 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -46,9 +46,10 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw); int32_t mndSetConsumerCommitLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj *pConsumer); int32_t mndSetConsumerDropLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj *pConsumer); -bool mndRebTryStart(); +const char *mndConsumerStatusName(int status); + bool mndRebCanStart(); -void mndRebEnd(); +bool mndRebTryStart(); void mndRebCntInc(); void mndRebCntDec(); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 11c929c898..5987f0ec34 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -29,13 +29,9 @@ #define MND_CONSUMER_RESERVE_SIZE 64 #define MND_MAX_GROUP_PER_TOPIC 100 -#define MND_CONSUMER_LOST_HB_CNT 6 -#define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 static int32_t mqRebInExecCnt = 0; -static const char *mndConsumerStatusName(int status); - static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer); static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer); static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer); @@ -45,7 +41,6 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter); static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg); static int32_t mndProcessAskEpReq(SRpcMsg *pMsg); static int32_t mndProcessMqHbReq(SRpcMsg *pMsg); -static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg); static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg); static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg); @@ -63,7 +58,7 @@ int32_t mndInitConsumer(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_TMQ_SUBSCRIBE, mndProcessSubscribeReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); +// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); 
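/*
 * Assumption-level sketch of the consumer-liveness rule the following hunks relocate
 * into mndSubscribe.c: each timer tick increments hbStatus and a consumer is treated
 * as lost once the counter passes MND_CONSUMER_LOST_HB_CNT. That a client heartbeat
 * resets the counter to zero is assumed here for the demo; the reset path is not
 * shown in these hunks.
 */
#include <stdio.h>

#define LOST_HB_CNT 6

typedef struct { int hbStatus; int lost; } DemoConsumer;

static void onTimerTick(DemoConsumer *c) {
  if (++c->hbStatus > LOST_HB_CNT && !c->lost) {
    c->lost = 1;
    printf("consumer lost after %d missed ticks -> queue topics for rebalance\n",
           c->hbStatus);
  }
}

static void onHeartbeat(DemoConsumer *c) { c->hbStatus = 0; }   /* assumed reset */

int main(void) {
  DemoConsumer c = {0, 0};
  for (int tick = 1; tick <= 10; tick++) {
    if (tick == 3) onHeartbeat(&c);   /* client checked in once */
    onTimerTick(&c);
  }
  return 0;
}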
mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_RECOVER, mndProcessConsumerRecoverMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, mndProcessConsumerClearMsg); @@ -97,42 +92,18 @@ void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo* bool mndRebTryStart() { int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); - mDebug("tq timer, rebalance counter old val:%d", old); + mInfo("rebalance counter old val:%d", old); return old == 0; } -bool mndRebCanStart() { - int32_t val = atomic_load_32(&mqRebInExecCnt); - if (val < 0) { - mError("rebalance trans end, rebalance counter:%d should not be less equalled than 0, ignore counter desc", val); - return false; - } - mInfo("tq timer, rebalance counter val:%d", val); - return val == 0; -} - -void mndRebEnd() { mndRebCntDec(); } - void mndRebCntInc() { int32_t val = atomic_add_fetch_32(&mqRebInExecCnt, 1); - mInfo("rebalance trans start, rebalance counter:%d", val); + mInfo("rebalance cnt inc, value:%d", val); } void mndRebCntDec() { - while (1) { - int32_t val = atomic_load_32(&mqRebInExecCnt); - if (val <= 0) { - mError("rebalance trans end, rebalance counter:%d should not be less equalled than 0, ignore counter desc", val); - break; - } - - int32_t newVal = val - 1; - int32_t oldVal = atomic_val_compare_exchange_32(&mqRebInExecCnt, val, newVal); - if (oldVal == val) { - mInfo("rebalance trans end, rebalance counter:%d", newVal); - break; - } - } + int32_t val = atomic_sub_fetch_32(&mqRebInExecCnt, 1); + mInfo("rebalance cnt sub, value:%d", val); } static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser, bool enableReplay) { @@ -282,149 +253,6 @@ static SMqRebInfo *mndGetOrCreateRebSub(SHashObj *pHash, const char *key) { return pRebInfo; } -static void freeRebalanceItem(void *param) { - SMqRebInfo *pInfo = param; - taosArrayDestroy(pInfo->newConsumers); - taosArrayDestroy(pInfo->removedConsumers); -} - -static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SSdb *pSdb = pMnode->pSdb; - SMqConsumerObj *pConsumer; - void *pIter = NULL; - - mInfo("start to process mq timer"); - - // rebalance cannot be parallel - if (!mndRebCanStart()) { - mInfo("mq rebalance already in progress, do nothing"); - return 0; - } - - SMqDoRebalanceMsg *pRebMsg = rpcMallocCont(sizeof(SMqDoRebalanceMsg)); - if (pRebMsg == NULL) { - mError("failed to create the rebalance msg, size:%d, quit mq timer", (int32_t)sizeof(SMqDoRebalanceMsg)); - return TSDB_CODE_OUT_OF_MEMORY; - } - - pRebMsg->rebSubHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK); - if (pRebMsg->rebSubHash == NULL) { - mError("failed to create rebalance hashmap"); - rpcFreeCont(pRebMsg); - return TSDB_CODE_OUT_OF_MEMORY; - } - - taosHashSetFreeFp(pRebMsg->rebSubHash, freeRebalanceItem); - - // iterate all consumers, find all modification - while (1) { - pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); - if (pIter == NULL) { - break; - } - - int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); - int32_t status = atomic_load_32(&pConsumer->status); - - mInfo("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", - pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, - hbStatus); - - if (status == MQ_CONSUMER_STATUS_READY) { - if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close - 
mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); - } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { - taosRLockLatch(&pConsumer->lock); - int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < topicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); - } - taosRUnLockLatch(&pConsumer->lock); - }else{ - int32_t newTopicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < newTopicNum; i++) { - char * topic = taosArrayGetP(pConsumer->currentTopics, i); - SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic); - if (pSub == NULL) { - continue; - } - taosRLockLatch(&pSub->lock); - - // 2.2 iterate all vg assigned to the consumer of that topic - SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &pConsumer->consumerId, sizeof(int64_t)); - int32_t vgNum = taosArrayGetSize(pConsumerEp->vgs); - - for (int32_t j = 0; j < vgNum; j++) { - SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j); - SVgObj * pVgroup = mndAcquireVgroup(pMnode, pVgEp->vgId); - if (!pVgroup) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - mndMakeSubscribeKey(key, pConsumer->cgroup, topic); - mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - mInfo("vnode splitted, vgId:%d rebalance will be triggered", pVgEp->vgId); - } - mndReleaseVgroup(pMnode, pVgroup); - } - taosRUnLockLatch(&pSub->lock); - mndReleaseSubscribe(pMnode, pSub); - } - } - } else if (status == MQ_CONSUMER_STATUS_LOST) { - if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day - mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); - } - } else { // MQ_CONSUMER_STATUS_REBALANCE - taosRLockLatch(&pConsumer->lock); - - int32_t newTopicNum = taosArrayGetSize(pConsumer->rebNewTopics); - for (int32_t i = 0; i < newTopicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *newTopic = taosArrayGetP(pConsumer->rebNewTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, newTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->newConsumers, &pConsumer->consumerId); - } - - int32_t removedTopicNum = taosArrayGetSize(pConsumer->rebRemovedTopics); - for (int32_t i = 0; i < removedTopicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->rebRemovedTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); - } - - if (newTopicNum == 0 && removedTopicNum == 0 && taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close - mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); - } - - taosRUnLockLatch(&pConsumer->lock); - } - - mndReleaseConsumer(pMnode, pConsumer); - } - - if (taosHashGetSize(pRebMsg->rebSubHash) != 0) { - mInfo("mq send msg to rebalance"); - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_DO_REBALANCE, - .pCont = pRebMsg, - .contLen = sizeof(SMqDoRebalanceMsg), - }; - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); - } else { - taosHashCleanup(pRebMsg->rebSubHash); - rpcFreeCont(pRebMsg); - mInfo("mq timer finished, no need to re-balance"); - } - return 0; -} - static int32_t 
mndProcessMqHbReq(SRpcMsg *pMsg) { int32_t code = 0; SMnode *pMnode = pMsg->info.node; @@ -1263,7 +1091,7 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter) { sdbCancelFetch(pSdb, pIter); } -static const char *mndConsumerStatusName(int status) { +const char *mndConsumerStatusName(int status) { switch (status) { case MQ_CONSUMER_STATUS_READY: return "ready"; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 8cd7f6bc9c..b2370374b7 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -27,7 +27,8 @@ #define MND_SUBSCRIBE_VER_NUMBER 2 #define MND_SUBSCRIBE_RESERVE_SIZE 64 -#define MND_SUBSCRIBE_REBALANCE_CNT 3 +#define MND_CONSUMER_LOST_HB_CNT 6 +#define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *); static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw); @@ -38,14 +39,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg); static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg); static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextSubscribe(SMnode *pMnode, void *pIter); - -static int32_t mndSetSubRedoLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { - SSdbRaw *pRedoRaw = mndSubActionEncode(pSub); - if (pRedoRaw == NULL) return -1; - if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1; - if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY) != 0) return -1; - return 0; -} +static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj* hash); static int32_t mndSetSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { SSdbRaw *pCommitRaw = mndSubActionEncode(pSub); @@ -68,7 +62,7 @@ int32_t mndInitSubscribe(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_VND_TMQ_SUBSCRIBE_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_VND_TMQ_DELETE_SUB_RSP, mndTransProcessRsp); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_DO_REBALANCE, mndProcessRebalanceReq); + mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessRebalanceReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_DROP_CGROUP, mndProcessDropCgroupReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_DROP_CGROUP_RSP, mndTransProcessRsp); @@ -727,21 +721,140 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu return 0; } -static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMqDoRebalanceMsg *pReq = pMsg->pCont; - void *pIter = NULL; -// bool rebalanceOnce = false; // to ensure only once. 
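/*
 * Minimal model of the mqRebInExecCnt guard used below (demo only, not mnode code):
 * a compare-and-swap from 0 to 1 lets exactly one timer run start a rebalance; work
 * spawned by that run is assumed to bump the counter and drop it when it finishes,
 * and the timer run itself decrements at the end, so the count only returns to 0
 * once everything has completed and a new run may start.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rebInExecCnt = 0;

static int rebTryStart(void) {            /* mndRebTryStart() analogue        */
  int expected = 0;
  return atomic_compare_exchange_strong(&rebInExecCnt, &expected, 1);
}
static void rebCntInc(void) { atomic_fetch_add(&rebInExecCnt, 1); }
static void rebCntDec(void) { atomic_fetch_sub(&rebInExecCnt, 1); }

int main(void) {
  if (!rebTryStart()) { printf("rebalance already running, skip\n"); return 0; }
  rebCntInc();                            /* a rebalance trans is in flight   */
  printf("processing, counter=%d\n", atomic_load(&rebInExecCnt));
  rebCntDec();                            /* trans finished                   */
  rebCntDec();                            /* timer run finished               */
  printf("done, counter=%d\n", atomic_load(&rebInExecCnt));
  printf("next start allowed: %s\n", rebTryStart() ? "yes" : "no");
  return 0;
}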
- if (!mndRebTryStart()) { - mInfo("mq rebalance already in progress, do nothing"); - return 0; +static void freeRebalanceItem(void *param) { + SMqRebInfo *pInfo = param; + taosArrayDestroy(pInfo->newConsumers); + taosArrayDestroy(pInfo->removedConsumers); +} + +static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj* rebSubHash) { + SMnode *pMnode = pMsg->info.node; + SSdb *pSdb = pMnode->pSdb; + SMqConsumerObj *pConsumer; + void *pIter = NULL; + + mInfo("start to process mq timer"); + + // iterate all consumers, find all modification + while (1) { + pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) { + break; + } + + int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); + int32_t status = atomic_load_32(&pConsumer->status); + + mInfo("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", + pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, + hbStatus); + + if (status == MQ_CONSUMER_STATUS_READY) { + if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); + } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { + taosRLockLatch(&pConsumer->lock); + int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < topicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); + } + taosRUnLockLatch(&pConsumer->lock); + }else{ + int32_t newTopicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < newTopicNum; i++) { + char * topic = taosArrayGetP(pConsumer->currentTopics, i); + SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic); + if (pSub == NULL) { + continue; + } + taosRLockLatch(&pSub->lock); + + // 2.2 iterate all vg assigned to the consumer of that topic + SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &pConsumer->consumerId, sizeof(int64_t)); + int32_t vgNum = taosArrayGetSize(pConsumerEp->vgs); + + for (int32_t j = 0; j < vgNum; j++) { + SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j); + SVgObj * pVgroup = mndAcquireVgroup(pMnode, pVgEp->vgId); + if (!pVgroup) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + mndMakeSubscribeKey(key, pConsumer->cgroup, topic); + mndGetOrCreateRebSub(rebSubHash, key); + mInfo("vnode splitted, vgId:%d rebalance will be triggered", pVgEp->vgId); + } + mndReleaseVgroup(pMnode, pVgroup); + } + taosRUnLockLatch(&pSub->lock); + mndReleaseSubscribe(pMnode, pSub); + } + } + } else if (status == MQ_CONSUMER_STATUS_LOST) { + if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); + } + } else { + taosRLockLatch(&pConsumer->lock); + + int32_t newTopicNum = taosArrayGetSize(pConsumer->rebNewTopics); + for (int32_t i = 0; i < newTopicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *newTopic = taosArrayGetP(pConsumer->rebNewTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, newTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(rebSubHash, key); + taosArrayPush(pRebSub->newConsumers, &pConsumer->consumerId); + } + + int32_t removedTopicNum = 
taosArrayGetSize(pConsumer->rebRemovedTopics); + for (int32_t i = 0; i < removedTopicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->rebRemovedTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); + } + + if (newTopicNum == 0 && removedTopicNum == 0 && taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId, &pMsg->info); + } + + taosRUnLockLatch(&pConsumer->lock); + } + + mndReleaseConsumer(pMnode, pConsumer); } - mInfo("mq re-balance start, total required re-balanced trans:%d", taosHashGetSize(pReq->rebSubHash)); + return 0; +} + +static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { + int code = 0; + if (!mndRebTryStart()) { + mInfo("mq rebalance already in progress, do nothing"); + return code; + } + + SHashObj *rebSubHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK); + if (rebSubHash == NULL) { + mError("failed to create rebalance hashmap"); + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = -1; + goto END; + } + + taosHashSetFreeFp(rebSubHash, freeRebalanceItem); + + mndCheckConsumer(pMsg, rebSubHash); + mInfo("mq re-balance start, total required re-balanced trans:%d", taosHashGetSize(rebSubHash)); // here we only handle one topic rebalance requirement to ensure the atomic execution of this transaction. + void *pIter = NULL; + SMnode *pMnode = pMsg->info.node; while (1) { - pIter = taosHashIterate(pReq->rebSubHash, pIter); + pIter = taosHashIterate(rebSubHash, pIter); if (pIter == NULL) { break; } @@ -760,12 +873,11 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { taosArrayDestroy(rebOutput.modifyConsumers); taosArrayDestroy(rebOutput.rebVgs); - taosHashCancelIterate(pReq->rebSubHash, pIter); + taosHashCancelIterate(rebSubHash, pIter); terrno = TSDB_CODE_OUT_OF_MEMORY; - mInfo("mq re-balance failed, due to out of memory"); - taosHashCleanup(pReq->rebSubHash); - mndRebEnd(); - return -1; + mError("mq re-balance failed, due to out of memory"); + code = -1; + goto END; } SMqRebInfo *pRebInfo = (SMqRebInfo *)pIter; @@ -833,10 +945,12 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { // reset flag mInfo("mq re-balance completed successfully"); - taosHashCleanup(pReq->rebSubHash); - mndRebEnd(); - return 0; +END: + taosHashCleanup(rebSubHash); + mndRebCntDec(); + + return code; } static int32_t sendDeleteSubToVnode(SMqSubscribeObj *pSub, STrans *pTrans){ From c83989754f448500aa91e4c486454a57f46c2dd9 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 29 Dec 2023 14:09:49 +0800 Subject: [PATCH 40/69] fix:[TS-4391] rebalance cnt always 1 if msg lost --- source/dnode/mnode/impl/inc/mndConsumer.h | 5 ----- source/dnode/mnode/impl/inc/mndSubscribe.h | 9 ++++----- source/dnode/mnode/impl/src/mndConsumer.c | 18 ------------------ source/dnode/mnode/impl/src/mndSubscribe.c | 18 ++++++++++++++++++ source/dnode/mnode/impl/src/mndTrans.c | 2 +- 5 files changed, 23 insertions(+), 29 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h b/source/dnode/mnode/impl/inc/mndConsumer.h index 59a22b76cd..8c89ddc825 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -48,11 +48,6 @@ int32_t mndSetConsumerDropLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj *p const char *mndConsumerStatusName(int status); -bool mndRebCanStart(); -bool 
mndRebTryStart(); -void mndRebCntInc(); -void mndRebCntDec(); - #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h index 10864da5fb..23b3a7d1fe 100644 --- a/source/dnode/mnode/impl/inc/mndSubscribe.h +++ b/source/dnode/mnode/impl/inc/mndSubscribe.h @@ -32,14 +32,13 @@ void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub); int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName); -//static FORCE_INLINE int32_t mndMakePartitionKey(char *key, const char *cgroup, const char *topicName, int32_t vgId) { -// return snprintf(key, TSDB_PARTITION_KEY_LEN, "%d:%s:%s", vgId, cgroup, topicName); -//} - -//int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topic); int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub); +bool mndRebTryStart(); +void mndRebCntInc(); +void mndRebCntDec(); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 5987f0ec34..cf8b9e019a 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -30,8 +30,6 @@ #define MND_MAX_GROUP_PER_TOPIC 100 -static int32_t mqRebInExecCnt = 0; - static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer); static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer); static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer); @@ -90,22 +88,6 @@ void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo* return; } -bool mndRebTryStart() { - int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); - mInfo("rebalance counter old val:%d", old); - return old == 0; -} - -void mndRebCntInc() { - int32_t val = atomic_add_fetch_32(&mqRebInExecCnt, 1); - mInfo("rebalance cnt inc, value:%d", val); -} - -void mndRebCntDec() { - int32_t val = atomic_sub_fetch_32(&mqRebInExecCnt, 1); - mInfo("rebalance cnt sub, value:%d", val); -} - static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser, bool enableReplay) { SMqTopicObj *pTopic = NULL; int32_t code = 0; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index b2370374b7..b7958c1484 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -30,6 +30,8 @@ #define MND_CONSUMER_LOST_HB_CNT 6 #define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 +static int32_t mqRebInExecCnt = 0; + static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *); static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw); static int32_t mndSubActionInsert(SSdb *pSdb, SMqSubscribeObj *); @@ -830,6 +832,22 @@ static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj* rebSubHash) { return 0; } +bool mndRebTryStart() { + int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); + mInfo("rebalance counter old val:%d", old); + return old == 0; +} + +void mndRebCntInc() { + int32_t val = atomic_add_fetch_32(&mqRebInExecCnt, 1); + mInfo("rebalance cnt inc, value:%d", val); +} + +void mndRebCntDec() { + int32_t val = atomic_sub_fetch_32(&mqRebInExecCnt, 1); + mInfo("rebalance cnt sub, value:%d", val); +} + static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { int code = 0; if (!mndRebTryStart()) { diff --git 
a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 9e478f3aa5..99553fc57a 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -15,7 +15,7 @@ #define _DEFAULT_SOURCE #include "mndTrans.h" -#include "mndConsumer.h" +#include "mndSubscribe.h" #include "mndDb.h" #include "mndPrivilege.h" #include "mndShow.h" From 90a8fe558196d58198221a46b1c08f0e27203bf3 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 29 Dec 2023 15:08:56 +0800 Subject: [PATCH 41/69] fix: fill prev not working when desc fill --- source/libs/executor/src/tfill.c | 13 +++-- tests/system-test/2-query/fill_with_group.py | 51 ++++++++++++++++++++ 2 files changed, 59 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 6c537d7b98..7b63bc8720 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -169,13 +169,14 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* setNotFillColumn(pFillInfo, pDstCol, index, i); } } else { - SGroupKeys* pKey = taosArrayGet(pFillInfo->prev.pRowVal, i); + SRowVal* pRVal = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next; + SGroupKeys* pKey = taosArrayGet(pRVal->pRowVal, i); if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pKey->isNull) { colDataSetNULL(pDstCol, index); continue; } - SGroupKeys* pKey1 = taosArrayGet(pFillInfo->prev.pRowVal, pFillInfo->tsSlotId); + SGroupKeys* pKey1 = taosArrayGet(pRVal->pRowVal, pFillInfo->tsSlotId); int64_t prevTs = *(int64_t*)pKey1->pData; int32_t srcSlotId = GET_DEST_SLOT_ID(pCol); @@ -346,9 +347,10 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t char* src = colDataGetData(pSrc, pFillInfo->index); if (!colDataIsNull_s(pSrc, pFillInfo->index)) { colDataSetVal(pDst, index, src, false); - saveColData(pFillInfo->prev.pRowVal, i, src, false); + SRowVal* pRVal = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next; + saveColData(pRVal->pRowVal, i, src, false); if (pFillInfo->srcTsSlotId == dstSlotId) { - pFillInfo->prev.key = *(int64_t*)src; + pRVal->key = *(int64_t*)src; } } else { // the value is null if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { @@ -361,7 +363,8 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t } else if (pFillInfo->type == TSDB_FILL_LINEAR) { bool isNull = colDataIsNull_s(pSrc, pFillInfo->index); colDataSetVal(pDst, index, src, isNull); - saveColData(pFillInfo->prev.pRowVal, i, src, isNull); // todo: + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; + saveColData(p, i, src, isNull); // todo: } else if (pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) { colDataSetNULL(pDst, index); } else if (pFillInfo->type == TSDB_FILL_NEXT) { diff --git a/tests/system-test/2-query/fill_with_group.py b/tests/system-test/2-query/fill_with_group.py index b442647ff4..fce504d0f7 100644 --- a/tests/system-test/2-query/fill_with_group.py +++ b/tests/system-test/2-query/fill_with_group.py @@ -144,10 +144,61 @@ class TDTestCase: tdSql.query(sql) tdSql.checkRows(6) + def test_fill_with_order_by2(self): + ## window size: 5 minutes, with 6 rows in meters every 10 minutes + sql = "select _wstart, count(*) from meters where ts >= '2018-09-20 00:00:00.000' and ts < '2018-09-20 01:00:00.000' interval(5m) fill(prev) order by _wstart asc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + tdSql.checkData(0, 1, 10) + tdSql.checkData(1, 1, 10) + tdSql.checkData(2, 1, 10) + tdSql.checkData(3, 1, 10) + tdSql.checkData(4, 1, 10) + tdSql.checkData(5, 1, 10) + tdSql.checkData(6, 1, 10) + tdSql.checkData(7, 1, 10) + tdSql.checkData(8, 1, 10) + tdSql.checkData(9, 1, 10) + tdSql.checkData(10, 1, 10) + tdSql.checkData(11, 1, 10) + + sql = "select _wstart, count(*) from meters where ts >= '2018-09-20 00:00:00.000' and ts < '2018-09-20 01:00:00.000' interval(5m) fill(prev) order by _wstart desc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 1, 10) + tdSql.checkData(2, 1, 10) + tdSql.checkData(3, 1, 10) + tdSql.checkData(4, 1, 10) + tdSql.checkData(5, 1, 10) + tdSql.checkData(6, 1, 10) + tdSql.checkData(7, 1, 10) + tdSql.checkData(8, 1, 10) + tdSql.checkData(9, 1, 10) + tdSql.checkData(10, 1, 10) + tdSql.checkData(11, 1, 10) + + sql = "select _wstart, count(*) from meters where ts >= '2018-09-20 00:00:00.000' and ts < '2018-09-20 01:00:00.000' interval(5m) fill(linear) order by _wstart desc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 1, 10) + tdSql.checkData(2, 1, 10) + tdSql.checkData(3, 1, 10) + tdSql.checkData(4, 1, 10) + tdSql.checkData(5, 1, 10) + tdSql.checkData(6, 1, 10) + tdSql.checkData(7, 1, 10) + tdSql.checkData(8, 1, 10) + tdSql.checkData(9, 1, 10) + tdSql.checkData(10, 1, 10) + tdSql.checkData(11, 1, 10) + def run(self): self.prepareTestEnv() self.test_partition_by_with_interval_fill_prev_new_group_fill_error() self.test_fill_with_order_by() + self.test_fill_with_order_by2() def stop(self): tdSql.close() From 7cb12db8ce4b4eaa3e1446fdb0eca21c74376c8c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 29 Dec 2023 15:20:27 +0800 Subject: [PATCH 42/69] remove more useless code --- source/dnode/vnode/src/inc/tsdb.h | 18 - .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 796 ------------------ 2 files changed, 814 deletions(-) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index ebaf0851e6..75f90af378 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -258,18 +258,6 @@ int32_t tsdbFSOpen(STsdb *pTsdb, int8_t rollback); int32_t tsdbFSClose(STsdb *pTsdb); void tsdbGetCurrentFName(STsdb *pTsdb, char *current, char *current_t); // tsdbReaderWriter.c ============================================================================================== -// SDataFWriter -int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet); -int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, 
int8_t sync); -int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter); -int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx); -int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx); -int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk); -int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo, - int8_t cmprAlg, int8_t toLast); -int32_t tsdbWriteDiskData(SDataFWriter *pWriter, const SDiskData *pDiskData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo); - -int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo); // SDataFReader int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet); int32_t tsdbDataFReaderClose(SDataFReader **ppReader); @@ -281,12 +269,6 @@ int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pBlock, SBlockData *p int32_t tsdbReadDataBlockEx(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData *pBlockData); int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData); int32_t tsdbReadSttBlockEx(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData); -// SDelFWriter -int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb); -int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync); -int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx); -int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx); -int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter); // SDelFReader int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb); int32_t tsdbDelFReaderClose(SDelFReader **ppReader); diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index e6f419362c..babf8c75fb 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -433,635 +433,6 @@ _exit: return code; } -// SDataFWriter ==================================================== -int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet) { - int32_t code = 0; - int32_t flag; - int64_t n; - int32_t szPage = pTsdb->pVnode->config.tsdbPageSize; - SDataFWriter *pWriter = NULL; - char fname[TSDB_FILENAME_LEN]; - char hdr[TSDB_FHDR_SIZE] = {0}; - - // alloc - pWriter = taosMemoryCalloc(1, sizeof(*pWriter)); - if (pWriter == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - pWriter->pTsdb = pTsdb; - pWriter->wSet = (SDFileSet){.diskId = pSet->diskId, - .fid = pSet->fid, - .pHeadF = &pWriter->fHead, - .pDataF = &pWriter->fData, - .pSmaF = &pWriter->fSma, - .nSttF = pSet->nSttF}; - pWriter->fHead = *pSet->pHeadF; - pWriter->fData = *pSet->pDataF; - pWriter->fSma = *pSet->pSmaF; - for (int8_t iStt = 0; iStt < pSet->nSttF; iStt++) { - pWriter->wSet.aSttF[iStt] = &pWriter->fStt[iStt]; - pWriter->fStt[iStt] = *pSet->aSttF[iStt]; - } - - // head - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - tsdbHeadFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fHead, fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pHeadFD); - if (code) goto _err; - - code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - pWriter->fHead.size += TSDB_FHDR_SIZE; - - // data - if (pWriter->fData.size == 0) { - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - } else { - flag = TD_FILE_READ | 
TD_FILE_WRITE; - } - tsdbDataFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fData, fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pDataFD); - if (code) goto _err; - if (pWriter->fData.size == 0) { - code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - pWriter->fData.size += TSDB_FHDR_SIZE; - } - - // sma - if (pWriter->fSma.size == 0) { - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - } else { - flag = TD_FILE_READ | TD_FILE_WRITE; - } - tsdbSmaFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fSma, fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pSmaFD); - if (code) goto _err; - if (pWriter->fSma.size == 0) { - code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - pWriter->fSma.size += TSDB_FHDR_SIZE; - } - - // stt - ASSERT(pWriter->fStt[pSet->nSttF - 1].size == 0); - flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC; - tsdbSttFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fStt[pSet->nSttF - 1], fname); - code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pSttFD); - if (code) goto _err; - code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - pWriter->fStt[pWriter->wSet.nSttF - 1].size += TSDB_FHDR_SIZE; - - *ppWriter = pWriter; - return code; - -_err: - tsdbError("vgId:%d, tsdb data file writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - *ppWriter = NULL; - return code; -} - -int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) { - int32_t code = 0; - STsdb *pTsdb = NULL; - - if (*ppWriter == NULL) goto _exit; - - pTsdb = (*ppWriter)->pTsdb; - if (sync) { - code = tsdbFsyncFile((*ppWriter)->pHeadFD); - if (code) goto _err; - - code = tsdbFsyncFile((*ppWriter)->pDataFD); - if (code) goto _err; - - code = tsdbFsyncFile((*ppWriter)->pSmaFD); - if (code) goto _err; - - code = tsdbFsyncFile((*ppWriter)->pSttFD); - if (code) goto _err; - } - - tsdbCloseFile(&(*ppWriter)->pHeadFD); - tsdbCloseFile(&(*ppWriter)->pDataFD); - tsdbCloseFile(&(*ppWriter)->pSmaFD); - tsdbCloseFile(&(*ppWriter)->pSttFD); - - for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) { - tFree((*ppWriter)->aBuf[iBuf]); - } - taosMemoryFree(*ppWriter); -_exit: - *ppWriter = NULL; - return code; - -_err: - tsdbError("vgId:%d, data file writer close failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter) { - int32_t code = 0; - int64_t n; - char hdr[TSDB_FHDR_SIZE]; - - // head ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutHeadFile(hdr, &pWriter->fHead); - code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - // data ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutDataFile(hdr, &pWriter->fData); - code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - // sma ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutSmaFile(hdr, &pWriter->fSma); - code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - // stt ============== - memset(hdr, 0, TSDB_FHDR_SIZE); - tPutSttFile(hdr, &pWriter->fStt[pWriter->wSet.nSttF - 1]); - code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE); - if (code) goto _err; - - return code; - -_err: - tsdbError("vgId:%d, update DFileSet header failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); 
- return code; -} - -int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) { - int32_t code = 0; - SHeadFile *pHeadFile = &pWriter->fHead; - int64_t size; - int64_t n; - - // check - if (taosArrayGetSize(aBlockIdx) == 0) { - pHeadFile->offset = pHeadFile->size; - goto _exit; - } - - // prepare - size = 0; - for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) { - size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = 0; - for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) { - n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx)); - } - ASSERT(n == size); - - // write - code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pHeadFile->offset = pHeadFile->size; - pHeadFile->size += size; - -_exit: - // tsdbTrace("vgId:%d, write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d", - // TD_VID(pWriter->pTsdb->pVnode), - // pHeadFile->offset, size, taosArrayGetSize(aBlockIdx)); - return code; - -_err: - tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx) { - int32_t code = 0; - SHeadFile *pHeadFile = &pWriter->fHead; - int64_t size; - int64_t n; - - ASSERT(mDataBlk->nItem > 0); - - // alloc - size = tPutMapData(NULL, mDataBlk); - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = tPutMapData(pWriter->aBuf[0], mDataBlk); - - // write - code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pBlockIdx->offset = pHeadFile->size; - pBlockIdx->size = size; - pHeadFile->size += size; - - tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%" PRId64 " suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64 - " size:%" PRId64 " nItem:%d", - TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid, - pBlockIdx->offset, pBlockIdx->size, mDataBlk->nItem); - return code; - -_err: - tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk) { - int32_t code = 0; - SSttFile *pSttFile = &pWriter->fStt[pWriter->wSet.nSttF - 1]; - int64_t size = 0; - int64_t n; - - // check - if (taosArrayGetSize(aSttBlk) == 0) { - pSttFile->offset = pSttFile->size; - goto _exit; - } - - // size - size = 0; - for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) { - size += tPutSttBlk(NULL, taosArrayGet(aSttBlk, iBlockL)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // encode - n = 0; - for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) { - n += tPutSttBlk(pWriter->aBuf[0] + n, taosArrayGet(aSttBlk, iBlockL)); - } - - // write - code = tsdbWriteFile(pWriter->pSttFD, pSttFile->size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pSttFile->offset = pSttFile->size; - pSttFile->size += size; - -_exit: - tsdbTrace("vgId:%d, tsdb write stt block, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), - pSttFile->offset, size); - return code; - -_err: - tsdbError("vgId:%d, tsdb write blockl failed since %s", 
TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) { - int32_t code = 0; - - pSmaInfo->offset = 0; - pSmaInfo->size = 0; - - // encode - for (int32_t iColData = 0; iColData < pBlockData->nColData; iColData++) { - SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); - - if ((!pColData->smaOn) || ((pColData->flag & HAS_VALUE) == 0)) continue; - - SColumnDataAgg sma = {.colId = pColData->cid}; - tColDataCalcSMA[pColData->type](pColData, &sma.sum, &sma.max, &sma.min, &sma.numOfNull); - - code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma)); - if (code) goto _err; - pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma); - } - - // write - if (pSmaInfo->size) { - code = tsdbWriteFile(pWriter->pSmaFD, pWriter->fSma.size, pWriter->aBuf[0], pSmaInfo->size); - if (code) goto _err; - - pSmaInfo->offset = pWriter->fSma.size; - pWriter->fSma.size += pSmaInfo->size; - } - - return code; - -_err: - tsdbError("vgId:%d, tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo, - int8_t cmprAlg, int8_t toLast) { - int32_t code = 0; - - ASSERT(pBlockData->nRow > 0); - - if (toLast) { - pBlkInfo->offset = pWriter->fStt[pWriter->wSet.nSttF - 1].size; - } else { - pBlkInfo->offset = pWriter->fData.size; - } - pBlkInfo->szBlock = 0; - pBlkInfo->szKey = 0; - - int32_t aBufN[4] = {0}; - code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN); - if (code) goto _err; - - // write ================= - STsdbFD *pFD = toLast ? 
pWriter->pSttFD : pWriter->pDataFD; - - pBlkInfo->szKey = aBufN[3] + aBufN[2]; - pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3]; - - int64_t offset = pBlkInfo->offset; - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[3], aBufN[3]); - if (code) goto _err; - offset += aBufN[3]; - - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[2], aBufN[2]); - if (code) goto _err; - offset += aBufN[2]; - - if (aBufN[1]) { - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[1], aBufN[1]); - if (code) goto _err; - offset += aBufN[1]; - } - - if (aBufN[0]) { - code = tsdbWriteFile(pFD, offset, pWriter->aBuf[0], aBufN[0]); - if (code) goto _err; - } - - // update info - if (toLast) { - pWriter->fStt[pWriter->wSet.nSttF - 1].size += pBlkInfo->szBlock; - } else { - pWriter->fData.size += pBlkInfo->szBlock; - } - - // ================= SMA ==================== - if (pSmaInfo) { - code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo); - if (code) goto _err; - } - -_exit: - tsdbTrace("vgId:%d, tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d", - TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset, - pBlkInfo->szBlock); - return code; - -_err: - tsdbError("vgId:%d, tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDiskData(SDataFWriter *pWriter, const SDiskData *pDiskData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo) { - int32_t code = 0; - int32_t lino = 0; - - STsdbFD *pFD = NULL; - if (pSmaInfo) { - pFD = pWriter->pDataFD; - pBlkInfo->offset = pWriter->fData.size; - } else { - pFD = pWriter->pSttFD; - pBlkInfo->offset = pWriter->fStt[pWriter->wSet.nSttF - 1].size; - } - pBlkInfo->szBlock = 0; - pBlkInfo->szKey = 0; - - // hdr - int32_t n = tPutDiskDataHdr(NULL, &pDiskData->hdr); - code = tRealloc(&pWriter->aBuf[0], n); - TSDB_CHECK_CODE(code, lino, _exit); - - tPutDiskDataHdr(pWriter->aBuf[0], &pDiskData->hdr); - - code = tsdbWriteFile(pFD, pBlkInfo->offset, pWriter->aBuf[0], n); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += n; - pBlkInfo->szBlock += n; - - // uid + ver + key - if (pDiskData->pUid) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskData->pUid, pDiskData->hdr.szUid); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += pDiskData->hdr.szUid; - pBlkInfo->szBlock += pDiskData->hdr.szUid; - } - - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskData->pVer, pDiskData->hdr.szVer); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += pDiskData->hdr.szVer; - pBlkInfo->szBlock += pDiskData->hdr.szVer; - - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskData->pKey, pDiskData->hdr.szKey); - TSDB_CHECK_CODE(code, lino, _exit); - pBlkInfo->szKey += pDiskData->hdr.szKey; - pBlkInfo->szBlock += pDiskData->hdr.szKey; - - // aBlockCol - if (pDiskData->hdr.szBlkCol) { - code = tRealloc(&pWriter->aBuf[0], pDiskData->hdr.szBlkCol); - TSDB_CHECK_CODE(code, lino, _exit); - - n = 0; - for (int32_t iDiskCol = 0; iDiskCol < taosArrayGetSize(pDiskData->aDiskCol); iDiskCol++) { - SDiskCol *pDiskCol = (SDiskCol *)taosArrayGet(pDiskData->aDiskCol, iDiskCol); - n += tPutBlockCol(pWriter->aBuf[0] + n, pDiskCol); - } - ASSERT(n == pDiskData->hdr.szBlkCol); - - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pWriter->aBuf[0], pDiskData->hdr.szBlkCol); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskData->hdr.szBlkCol; - } - - 
// aDiskCol - for (int32_t iDiskCol = 0; iDiskCol < taosArrayGetSize(pDiskData->aDiskCol); iDiskCol++) { - SDiskCol *pDiskCol = (SDiskCol *)taosArrayGet(pDiskData->aDiskCol, iDiskCol); - - if (pDiskCol->pBit) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskCol->pBit, pDiskCol->bCol.szBitmap); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskCol->bCol.szBitmap; - } - - if (pDiskCol->pOff) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskCol->pOff, pDiskCol->bCol.szOffset); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskCol->bCol.szOffset; - } - - if (pDiskCol->pVal) { - code = tsdbWriteFile(pFD, pBlkInfo->offset + pBlkInfo->szBlock, pDiskCol->pVal, pDiskCol->bCol.szValue); - TSDB_CHECK_CODE(code, lino, _exit); - - pBlkInfo->szBlock += pDiskCol->bCol.szValue; - } - } - - if (pSmaInfo) { - pWriter->fData.size += pBlkInfo->szBlock; - } else { - pWriter->fStt[pWriter->wSet.nSttF - 1].size += pBlkInfo->szBlock; - goto _exit; - } - - pSmaInfo->offset = 0; - pSmaInfo->size = 0; - for (int32_t iDiskCol = 0; iDiskCol < taosArrayGetSize(pDiskData->aDiskCol); iDiskCol++) { - SDiskCol *pDiskCol = (SDiskCol *)taosArrayGet(pDiskData->aDiskCol, iDiskCol); - - if (IS_VAR_DATA_TYPE(pDiskCol->bCol.type)) continue; - if (pDiskCol->bCol.flag == HAS_NULL || pDiskCol->bCol.flag == (HAS_NULL | HAS_NONE)) continue; - if (!pDiskCol->bCol.smaOn) continue; - - code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &pDiskCol->agg)); - TSDB_CHECK_CODE(code, lino, _exit); - pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &pDiskCol->agg); - } - - if (pSmaInfo->size) { - pSmaInfo->offset = pWriter->fSma.size; - - code = tsdbWriteFile(pWriter->pSmaFD, pSmaInfo->offset, pWriter->aBuf[0], pSmaInfo->size); - TSDB_CHECK_CODE(code, lino, _exit); - - pWriter->fSma.size += pSmaInfo->size; - } - -_exit: - if (code) { - tsdbError("vgId:%d, %s failed at %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); - } - return code; -} - -int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) { - int32_t code = 0; - int64_t n; - int64_t size; - TdFilePtr pOutFD = NULL; - TdFilePtr PInFD = NULL; - int32_t szPage = pTsdb->pVnode->config.tsdbPageSize; - char fNameFrom[TSDB_FILENAME_LEN]; - char fNameTo[TSDB_FILENAME_LEN]; - - // head - tsdbHeadFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pHeadF, fNameFrom); - tsdbHeadFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pHeadF, fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pHeadF->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - - // data - tsdbDataFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pDataF, fNameFrom); - tsdbDataFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pDataF, fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = 
taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pDataF->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - - // sma - tsdbSmaFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pSmaF, fNameFrom); - tsdbSmaFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pSmaF, fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pSmaF->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - - // stt - for (int8_t iStt = 0; iStt < pSetFrom->nSttF; iStt++) { - tsdbSttFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->aSttF[iStt], fNameFrom); - tsdbSttFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->aSttF[iStt], fNameTo); - pOutFD = taosCreateFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC); - if (pOutFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - PInFD = taosOpenFile(fNameFrom, TD_FILE_READ); - if (PInFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->aSttF[iStt]->size, szPage)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - taosCloseFile(&pOutFD); - taosCloseFile(&PInFD); - } - - return code; - -_err: - tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - return code; -} - // SDataFReader ==================================================== int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) { int32_t code = 0; @@ -1478,173 +849,6 @@ _exit: return code; } -// SDelFWriter ==================================================== -int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) { - int32_t code = 0; - int32_t lino = 0; - char fname[TSDB_FILENAME_LEN]; - uint8_t hdr[TSDB_FHDR_SIZE] = {0}; - SDelFWriter *pDelFWriter = NULL; - int64_t n; - - // alloc - pDelFWriter = (SDelFWriter *)taosMemoryCalloc(1, sizeof(*pDelFWriter)); - if (pDelFWriter == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - pDelFWriter->pTsdb = pTsdb; - pDelFWriter->fDel = *pFile; - - tsdbDelFileName(pTsdb, pFile, fname); - code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE, &pDelFWriter->pWriteH); - TSDB_CHECK_CODE(code, lino, _exit); - - // update header - code = tsdbWriteFile(pDelFWriter->pWriteH, 0, hdr, TSDB_FHDR_SIZE); - TSDB_CHECK_CODE(code, lino, _exit); - - pDelFWriter->fDel.size = TSDB_FHDR_SIZE; - pDelFWriter->fDel.offset = 0; - - *ppWriter = pDelFWriter; - -_exit: - if (code) { - if (pDelFWriter) { - tsdbCloseFile(&pDelFWriter->pWriteH); - taosMemoryFree(pDelFWriter); - } - *ppWriter = NULL; - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(errno)); - } else { - *ppWriter = pDelFWriter; - } - return code; -} - -int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) { - int32_t code = 0; - SDelFWriter *pWriter = *ppWriter; - STsdb *pTsdb = pWriter->pTsdb; - - // sync - if (sync) { - code = tsdbFsyncFile(pWriter->pWriteH); - if (code) goto _err; - } - - // close - 
tsdbCloseFile(&pWriter->pWriteH); - - for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) { - tFree(pWriter->aBuf[iBuf]); - } - taosMemoryFree(pWriter); - - *ppWriter = NULL; - return code; - -_err: - tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) { - int32_t code = 0; - int64_t size; - int64_t n; - - // prepare - size = 0; - for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) { - size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = 0; - for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) { - n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData)); - } - ASSERT(n == size); - - // write - code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pDelIdx->offset = pWriter->fDel.size; - pDelIdx->size = size; - pWriter->fDel.size += size; - - return code; - -_err: - tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) { - int32_t code = 0; - int64_t size; - int64_t n; - SDelIdx *pDelIdx; - - // prepare - size = 0; - for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) { - size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx)); - } - - // alloc - code = tRealloc(&pWriter->aBuf[0], size); - if (code) goto _err; - - // build - n = 0; - for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) { - n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx)); - } - ASSERT(n == size); - - // write - code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size); - if (code) goto _err; - - // update - pWriter->fDel.offset = pWriter->fDel.size; - pWriter->fDel.size += size; - - return code; - -_err: - tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter) { - int32_t code = 0; - char hdr[TSDB_FHDR_SIZE] = {0}; - int64_t size = TSDB_FHDR_SIZE; - int64_t n; - - // build - tPutDelFile(hdr, &pWriter->fDel); - - // write - code = tsdbWriteFile(pWriter->pWriteH, 0, hdr, size); - if (code) goto _err; - - return code; - -_err: - tsdbError("vgId:%d, update del file hdr failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - return code; -} // SDelFReader ==================================================== struct SDelFReader { STsdb *pTsdb; From 7e1d59c565e408dc952b26ecdcd349c070cd3f9f Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 29 Dec 2023 16:22:41 +0800 Subject: [PATCH 43/69] sort --- source/libs/executor/inc/executorInt.h | 5 ++-- source/libs/executor/src/executil.c | 4 +++- source/libs/executor/src/operator.c | 2 +- source/libs/executor/src/scanoperator.c | 32 ++++++++++++++++++------- 4 files changed, 30 insertions(+), 13 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 9acef69f9c..c09f615735 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -269,8 +269,9 @@ typedef struct STableScanInfo { SSDataBlock* pResBlock; SHashObj* 
pIgnoreTables; SSampleExecInfo sample; // sample execution info - int32_t tableStartIndex; // current group scan start - int32_t tableEndIndex; // current group scan end + int32_t tableStartIndex; // current group scan start + int32_t tableEndIndex; // current group scan end + int32_t currentGroupIndex; // current group index of groupOffset int8_t scanMode; int8_t assignBlockUid; uint8_t countState; // empty table count state diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 377de99fc0..4b3b1ffead 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -2122,6 +2122,9 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* bool groupByTbname = groupbyTbname(group); size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); + if (!numOfTables) { + return code; + } if (group == NULL || groupByTbname) { for (int32_t i = 0; i < numOfTables; i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); @@ -2143,7 +2146,6 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* if (code != TSDB_CODE_SUCCESS) { return code; } - if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList); if (groupSort || pScanNode->groupOrderScan) { code = sortTableGroup(pTableListInfo); diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 69a8acb3d7..0b5e7f51ed 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -302,7 +302,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pTableListInfo->idInfo.suid = pTableScanNode->scan.suid; pTableListInfo->idInfo.tableType = pTableScanNode->scan.tableType; } else { - code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort, pHandle, + code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, true, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); if (code) { pTaskInfo->code = code; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index e990d3d975..27c9a76757 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -659,17 +659,30 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, int32_t* size) { pInfo->tableStartIndex = pInfo->tableEndIndex + 1; - int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); - STableKeyInfo* pStart = (STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableStartIndex); - int32_t i = pInfo->tableStartIndex + 1; - for (; i < numOfTables; ++i) { - STableKeyInfo* pCur = tableListGetInfo(pInfo->base.pTableListInfo, i); - if (pCur->groupId != pStart->groupId) { - break; + STableListInfo* pTableListInfo = pInfo->base.pTableListInfo; + int32_t numOfTables = tableListGetSize(pTableListInfo); + STableKeyInfo* pStart = (STableKeyInfo*)tableListGetInfo(pTableListInfo, pInfo->tableStartIndex); + + if (pTableListInfo->oneTableForEachGroup) { + pInfo->tableEndIndex = pInfo->tableStartIndex; + } else if (pTableListInfo->groupOffset) { + pInfo->currentGroupIndex++; + if (pInfo->currentGroupIndex + 1 < pTableListInfo->numOfOuputGroups) { + pInfo->tableEndIndex = pTableListInfo->groupOffset[pInfo->currentGroupIndex + 1] - 1; + } else 
{ + pInfo->tableEndIndex = numOfTables - 1; } + } else { + int32_t i = pInfo->tableStartIndex + 1; + for (; i < numOfTables; ++i) { + STableKeyInfo* pCur = tableListGetInfo(pTableListInfo, i); + if (pCur->groupId != pStart->groupId) { + break; + } + } + pInfo->tableEndIndex = i - 1; } - pInfo->tableEndIndex = i - 1; if (!pInfo->needCountEmptyTable) { pInfo->countState = TABLE_COUNT_STATE_END; } else { @@ -677,7 +690,7 @@ static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, i } *pKeyInfo = pStart; - *size = i - pInfo->tableStartIndex; + *size = pInfo->tableEndIndex - pInfo->tableStartIndex + 1; } static SSDataBlock* getOneRowResultBlock(SExecTaskInfo* pTaskInfo, STableScanBase* pBase, SSDataBlock* pBlock, @@ -1133,6 +1146,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, } pInfo->tableEndIndex = -1; + pInfo->currentGroupIndex = -1; pInfo->assignBlockUid = pTableScanNode->assignBlockUid; pInfo->hasGroupByTag = pTableScanNode->pGroupTags ? true : false; From d158ca74d841b0cf373725fed96010b8a7872f4a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 29 Dec 2023 17:22:27 +0800 Subject: [PATCH 44/69] fix:[TS-4391] rebalance cnt always 1 if msg lost --- source/dnode/mnode/impl/src/mndConsumer.c | 15 --------------- source/dnode/mnode/impl/src/mndSubscribe.c | 14 ++++++++------ 2 files changed, 8 insertions(+), 21 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index cf8b9e019a..9d9473d883 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -220,21 +220,6 @@ FAIL: return -1; } -static SMqRebInfo *mndGetOrCreateRebSub(SHashObj *pHash, const char *key) { - SMqRebInfo *pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); - if (pRebInfo == NULL) { - pRebInfo = tNewSMqRebSubscribe(key); - if (pRebInfo == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; - } - taosHashPut(pHash, key, strlen(key) + 1, pRebInfo, sizeof(SMqRebInfo)); - taosMemoryFree(pRebInfo); - pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); - } - return pRebInfo; -} - static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { int32_t code = 0; SMnode *pMnode = pMsg->info.node; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index b7958c1484..16ae7f2548 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -209,16 +209,18 @@ static int32_t mndSplitSubscribeKey(const char *key, char *topic, char *cgroup, } static SMqRebInfo *mndGetOrCreateRebSub(SHashObj *pHash, const char *key) { - SMqRebInfo *pRebSub = taosHashGet(pHash, key, strlen(key) + 1); - if (pRebSub == NULL) { - pRebSub = tNewSMqRebSubscribe(key); - if (pRebSub == NULL) { + SMqRebInfo *pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); + if (pRebInfo == NULL) { + pRebInfo = tNewSMqRebSubscribe(key); + if (pRebInfo == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - taosHashPut(pHash, key, strlen(key) + 1, pRebSub, sizeof(SMqRebInfo)); + taosHashPut(pHash, key, strlen(key) + 1, pRebInfo, sizeof(SMqRebInfo)); + taosMemoryFree(pRebInfo); + pRebInfo = taosHashGet(pHash, key, strlen(key) + 1); } - return pRebSub; + return pRebInfo; } static void doRemoveLostConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, const SMqRebInputObj *pInput) { From 8ac02adc0b94aae0e013c7914ec6182b24413a22 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 29 Dec 2023 17:42:23 +0800 Subject: [PATCH 
45/69] refactor: do some internal refactor. --- source/dnode/vnode/src/tq/tqRead.c | 3 ++- source/libs/wal/src/walRead.c | 5 ----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 41b1aa7bd1..bfa8cfdb53 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -195,7 +195,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t int64_t committedVer = walGetCommittedVer(pHandle->pWalReader->pWal); int64_t appliedVer = walGetAppliedVer(pHandle->pWalReader->pWal); - wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64", 0x%"PRIx64, + wDebug("vgId:%d, start to fetch wal, index:%" PRId64 ", last:%" PRId64 " commit:%" PRId64 ", applied:%" PRId64 + ", 0x%" PRIx64, vgId, offset, lastVer, committedVer, appliedVer, id); while (offset <= appliedVer) { diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index c0435ca774..1660a0ecf0 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -259,11 +259,6 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t contLen; bool seeked = false; - wDebug("vgId:%d, try to fetch ver %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 - ", applied ver:%" PRId64", 0x%"PRIx64, - pRead->pWal->cfg.vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer, - pRead->pWal->vers.appliedVer, pRead->readerId); - // TODO: valid ver if (ver > pRead->pWal->vers.commitVer) { return -1; From 87b7a9a980233880deda02ef05a0f3915560379c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 29 Dec 2023 17:51:35 +0800 Subject: [PATCH 46/69] fix: stream test case --- tests/script/tsim/stream/basic0.sim | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/script/tsim/stream/basic0.sim b/tests/script/tsim/stream/basic0.sim index 917a0a73ab..a525594861 100644 --- a/tests/script/tsim/stream/basic0.sim +++ b/tests/script/tsim/stream/basic0.sim @@ -33,6 +33,8 @@ if $rows != 3 then return -1 endi +sleep 1000 + sql create stream s1 trigger at_once into outstb as select _wstart, min(k), max(k), sum(k) as sum_alias from ct1 interval(10m) sql show stables From 673d3bc376ad8955bd4b3b9163853eebb983b915 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Sun, 31 Dec 2023 13:15:05 +0800 Subject: [PATCH 47/69] arrangeTableGroup --- source/libs/executor/src/executil.c | 70 +++++++++++++++++++++++++++++ source/libs/executor/src/operator.c | 2 +- 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 4b3b1ffead..f3e2e99332 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -2116,6 +2116,70 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) { return TSDB_CODE_SUCCESS; } +static int32_t arrangeTableGroup(STableListInfo* pTableListInfo) { + int32_t code = TSDB_CODE_SUCCESS; + size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); + SArray* pDup = NULL; + SHashObj* pHashObj = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + if (!pHashObj) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t num = 1; + // first: get the number of tables per group + for (int32_t i = 0; i < numOfTables; ++i) { + STableKeyInfo* pInfo = taosArrayGet(pTableListInfo->pTableList, i); + 
int32_t* pVal = taosHashGet(pHashObj, &pInfo->groupId, sizeof(pInfo->groupId)); + // update each group's table count + if (!pVal) { + taosHashPut(pHashObj, &pInfo->groupId, sizeof(pInfo->groupId), &num, sizeof(num)); + } else { + (*pVal)++; + } + } + pTableListInfo->numOfOuputGroups = taosHashGetSize(pHashObj); + pTableListInfo->oneTableForEachGroup = (pTableListInfo->numOfOuputGroups == numOfTables); + + if (pTableListInfo->numOfOuputGroups > 1 && pTableListInfo->numOfOuputGroups < numOfTables) { + pDup = taosArrayDup(pTableListInfo->pTableList, NULL); + pTableListInfo->groupOffset = taosMemoryMalloc(sizeof(int32_t) * pTableListInfo->numOfOuputGroups); + if (pDup == NULL || pTableListInfo->groupOffset == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + int32_t idx = 0, sum = 0; + + void* pIter = taosHashIterate(pHashObj, NULL); + while (pIter != NULL) { + // record each group's offset + pTableListInfo->groupOffset[idx] = sum; + int32_t* pData = (int32_t*)pIter; + sum += *pData; + // change value to record group item's first position + *pData = pTableListInfo->groupOffset[idx++]; + pIter = taosHashIterate(pHashObj, pIter); + } + + // second: arrange the tables and put the items with same groupId together + STableKeyInfo* pStart = taosArrayGet(pTableListInfo->pTableList, 0); + for (int32_t i = 0; i < numOfTables; ++i) { + STableKeyInfo* pInfo = taosArrayGet(pDup, i); + int32_t* pVal = taosHashGet(pHashObj, &pInfo->groupId, sizeof(pInfo->groupId)); + if (*pVal != i) { + pStart[*pVal] = *pInfo; + } + (*pVal)++; // update to next item's position + } + } + +_exit: + taosHashCleanup(pHashObj); + taosArrayDestroy(pDup); + if (code != TSDB_CODE_SUCCESS) { + taosMemoryFreeClear(pTableListInfo->groupOffset); + } + return code; +} + int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode, SNodeList* group, bool groupSort, uint8_t* digest, SStorageAPI* pAPI) { int32_t code = TSDB_CODE_SUCCESS; @@ -2149,6 +2213,12 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* if (groupSort || pScanNode->groupOrderScan) { code = sortTableGroup(pTableListInfo); + } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == nodeType(pScanNode)) { + STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pScanNode; + if (pTableScanNode->needCountEmptyTable) { + // only put together tables with the same groupid + arrangeTableGroup(pTableListInfo); + } } } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 0b5e7f51ed..69a8acb3d7 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -302,7 +302,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pTableListInfo->idInfo.suid = pTableScanNode->scan.suid; pTableListInfo->idInfo.tableType = pTableScanNode->scan.tableType; } else { - code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, true, pHandle, + code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); if (code) { pTaskInfo->code = code; From e79516ade302ddf508230fe950d7b48fd6dbba6d Mon Sep 17 00:00:00 2001 From: fullhonest Date: Tue, 2 Jan 2024 10:02:30 +0800 Subject: [PATCH 48/69] Fix TD-27939: error for sub query is empty when where 1=2 --- source/libs/parser/src/parCalcConst.c | 7 ++++++ tests/system-test/2-query/select_null.py | 31 +++++++++++++++++++++--- 2 files 
changed, 35 insertions(+), 3 deletions(-) diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 441f4da3b1..af24624067 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -334,6 +334,13 @@ static int32_t calcConstSelectWithoutFrom(SCalcConstContext* pCxt, SSelectStmt* static int32_t calcConstSelectFrom(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) { int32_t code = calcConstFromTable(pCxt, pSelect->pFromTable); + if (TSDB_CODE_SUCCESS == code && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && + ((STempTableNode*)pSelect->pFromTable)->pSubquery != NULL && + QUERY_NODE_SELECT_STMT == nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + ((SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery)->isEmptyResult){ + pSelect->isEmptyResult = true; + return code; + } if (TSDB_CODE_SUCCESS == code) { code = calcConstProjections(pCxt, pSelect, subquery); } diff --git a/tests/system-test/2-query/select_null.py b/tests/system-test/2-query/select_null.py index e20f6131c9..8411a33a1f 100755 --- a/tests/system-test/2-query/select_null.py +++ b/tests/system-test/2-query/select_null.py @@ -401,7 +401,29 @@ class TDTestCase: tdSql.execute(sql) sql = "select * from %s.`12345` order by `567` desc limit 2;"%(database) tdSql.error(sql) - + + def td_27939(self,database): + sql = "create table %s.`test1eq2` (`ts` timestamp, id int);"%(database) + tdSql.execute(sql) + + sql = "insert into %s.test1eq2 values (now,1);"%(database) + tdSql.execute(sql) + + sql = "insert into %s.`test1eq2` values (now,2);"%(database) + tdSql.execute(sql) + + sql = "select * from %s.`test1eq2` where 1=2;"%(database) + tdSql.query(sql) + tdSql.checkRows(0) + + sql = "select * from (select * from %s.`test1eq2` where 1=2);"%(database) + tdSql.query(sql) + tdSql.checkRows(0) + + sql = "drop table %s.`test1eq2` ;"%(database) + tdSql.execute(sql) + + def run(self): startTime = time.time() @@ -418,6 +440,8 @@ class TDTestCase: self.ts_3110("%s" %self.db) self.ts_23505("%s" %self.db) self.ts_3036("%s" %self.db) + + self.td_27939("%s" %self.db) tdSql.query("flush database %s" %self.db) @@ -430,8 +454,9 @@ class TDTestCase: self.ts_3110("%s" %self.db) self.ts_23505("%s" %self.db) self.ts_3036("%s" %self.db) - - + + self.td_27939("%s" %self.db) + self.test_select_as_chinese_characters(); endTime = time.time() print("total time %ds" % (endTime - startTime)) From 0e9a1b22edf3a71aab960b48f7d02b77b2336ec0 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 2 Jan 2024 10:10:44 +0800 Subject: [PATCH 49/69] fix:error msg is not specific in create stream --- source/libs/parser/src/parTranslater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index bdee4d54ac..c9345dd00e 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -7446,7 +7446,7 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream } } if (!found) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, ((SColumnDefNode*)pTag)->colName); + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, getTagNameForCreateStreamTag(pTag)); } } return TSDB_CODE_SUCCESS; From c787535693f3d2d411f2c92e28c59d3c421018c2 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Tue, 2 Jan 2024 10:11:22 +0800 Subject: [PATCH 50/69] fix session state issue --- 
source/libs/stream/src/streamSessionState.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index c71edccb99..765403b1aa 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -117,7 +117,7 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, void* pFileStore = getStateFileStore(pFileState); void* p = NULL; int32_t code_file = streamStateSessionAddIfNotExist_rocksdb(pFileStore, pKey, gap, &p, pVLen); - if (code_file == TSDB_CODE_SUCCESS) { + if (code_file == TSDB_CODE_SUCCESS || isFlushedState(pFileState, endTs, 0)) { (*pVal) = createSessionWinBuff(pFileState, pKey, p, pVLen); code = code_file; qDebug("===stream===0 get session win:%" PRId64 ",%" PRId64 " from disc, res %d", startTs, endTs, code_file); From be0d35a5af93f21e1ee774b440d0a093577432f3 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Tue, 2 Jan 2024 10:20:04 +0800 Subject: [PATCH 51/69] fix: last table scan reported invalid param when col was dropped which should report invalid col --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 +- tests/system-test/2-query/last_cache_scan.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 1f54f13592..4ca7881814 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -120,7 +120,7 @@ static int32_t updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInf } else if (pTCol->colId < pSupInfo->colId[j]) { // do nothing i += 1; } else { - return TSDB_CODE_INVALID_PARA; + return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER; } } diff --git a/tests/system-test/2-query/last_cache_scan.py b/tests/system-test/2-query/last_cache_scan.py index 39271318ba..23433277c2 100644 --- a/tests/system-test/2-query/last_cache_scan.py +++ b/tests/system-test/2-query/last_cache_scan.py @@ -413,8 +413,7 @@ class TDTestCase: tdSql.checkCols(3) tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) - - + tdSql.query_success_failed('select last(c2), c2, c3 from meters', queryTimes=10, expectErrInfo="Invalid column name: c2") def run(self): self.prepareTestEnv() From d21741269170f92deebd51684db588154ab1d7bb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 2 Jan 2024 10:33:25 +0800 Subject: [PATCH 52/69] fix(stream): avoid race condition in starting threads. --- source/dnode/vnode/src/tqCommon/tqCommon.c | 2 +- source/dnode/vnode/src/vnd/vnodeSync.c | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 2991e3cef5..f576345f64 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -545,7 +545,7 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId); if (p != NULL && restored && p->info.fillHistory == 0) { - EStreamTaskEvent event = /*(HAS_RELATED_FILLHISTORY_TASK(p)) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT; + EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(p)) ? 
TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; streamTaskHandleEvent(p->status.pSM, event); } else if (!restored) { tqWarn("s-task:%s not launched since vnode(vgId:%d) not ready", p->id.idStr, vgId); diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 048092131d..c41aea255d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -571,7 +571,20 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) } else { vInfo("vgId:%d sync restore finished, start to launch stream tasks", pVnode->config.vgId); tqStreamTaskResetStatus(pVnode->pTq->pStreamMeta); - tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false); + + { + streamMetaWLock(pMeta); + if (pMeta->startInfo.taskStarting == 1) { + pMeta->startInfo.restartCount += 1; + tqDebug("vgId:%d in start tasks procedure, inc restartCounter by 1, remaining restart:%d", vgId, + pMeta->startInfo.restartCount); + streamMetaWUnLock(pMeta); + } else { + pMeta->startInfo.taskStarting = 1; + streamMetaWUnLock(pMeta); + tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false); + } + } } } else { vInfo("vgId:%d, sync restore finished, not launch stream tasks since not leader", vgId); From b3d8f4e9d6914c6d8746da174a4bcd6dc2859104 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 2 Jan 2024 10:39:01 +0800 Subject: [PATCH 53/69] fix(stream): fix a typo. --- source/dnode/vnode/src/tqCommon/tqCommon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index f576345f64..a3d860fd78 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -545,7 +545,7 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId); if (p != NULL && restored && p->info.fillHistory == 0) { - EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(p)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT; + EStreamTaskEvent event = TASK_EVENT_INIT; streamTaskHandleEvent(p->status.pSM, event); } else if (!restored) { tqWarn("s-task:%s not launched since vnode(vgId:%d) not ready", p->id.idStr, vgId); From 4368bfb20a0fb4737b91011a0772dfe4ab841879 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 2 Jan 2024 11:40:29 +0800 Subject: [PATCH 54/69] fix(stream): fix dead lock. 
--- source/dnode/vnode/src/vnd/vnodeSync.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index c41aea255d..84ef36df74 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -587,10 +587,9 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) } } } else { + streamMetaWUnLock(pMeta); vInfo("vgId:%d, sync restore finished, not launch stream tasks since not leader", vgId); } - - streamMetaWUnLock(pMeta); } static void vnodeBecomeFollower(const SSyncFSM *pFsm) { From afc2a346e5719a9529a038b7d6a36d31461a623d Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Tue, 2 Jan 2024 14:16:34 +0800 Subject: [PATCH 55/69] reset update info --- source/libs/executor/inc/executorInt.h | 1 + .../libs/executor/src/streameventwindowoperator.c | 1 + .../libs/executor/src/streamtimewindowoperator.c | 15 +++++++++++++++ 3 files changed, 17 insertions(+) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index e3e504cdbc..a2ced573f7 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -845,6 +845,7 @@ void resetWinRange(STimeWindow* winRange); bool checkExpiredData(SStateStore* pAPI, SUpdateInfo* pUpdateInfo, STimeWindowAggSupp* pTwSup, uint64_t tableId, TSKEY ts); int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval); void resetUnCloseSessionWinInfo(SSHashObj* winMap); +void reloadAggSupFromDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup); int32_t encodeSSessionKey(void** buf, SSessionKey* key); void* decodeSSessionKey(void* buf, SSessionKey* key); diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index bd247eba07..85f15cc368 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -654,6 +654,7 @@ void streamEventReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index c9490e2c55..946b02f2d4 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -2759,6 +2759,18 @@ void getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, } } +void reloadAggSupFromDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup) { + SStateStore* pAPI = &downstream->pTaskInfo->storageAPI.stateStore; + + if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + reloadAggSupFromDownStream(downstream->pDownstream[0], pAggSup); + return; + } + + SStreamScanInfo* pScanInfo = downstream->info; + pAggSup->pUpdateInfo = pScanInfo->pUpdateInfo; +} + void streamSessionSemiReloadState(SOperatorInfo* pOperator) { SStreamSessionAggOperatorInfo* pInfo = pOperator->info; SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; @@ -2790,6 +2802,7 @@ void streamSessionSemiReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); } + 
reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } void streamSessionReloadState(SOperatorInfo* pOperator) { @@ -2842,6 +2855,7 @@ void streamSessionReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, @@ -3724,6 +3738,7 @@ void streamStateReloadState(SOperatorInfo* pOperator) { if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); } SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, From 85ba9a4a5fe2332dd3fc889ee71e8f4e0b96fc20 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Tue, 2 Jan 2024 15:45:03 +0800 Subject: [PATCH 56/69] remainGroups --- source/libs/executor/inc/executil.h | 1 + source/libs/executor/inc/executorInt.h | 7 +- source/libs/executor/src/executil.c | 103 +++++++----------------- source/libs/executor/src/scanoperator.c | 51 ++++++++---- 4 files changed, 72 insertions(+), 90 deletions(-) diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 946d3311a4..640ed2f2f9 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -105,6 +105,7 @@ typedef struct STableListInfo { int32_t* groupOffset; // keep the offset value for each group in the tableList SArray* pTableList; SHashObj* map; // speedup acquire the tableQueryInfo by table uid + SHashObj* remainGroups; // remaining group has not yet processed the empty group STableListIdInfo idInfo; // this maybe the super table or ordinary table } STableListInfo; diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index c09f615735..bee378effc 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -218,9 +218,10 @@ enum { }; typedef enum ETableCountState { - TABLE_COUNT_STATE_NONE = 0, // before start scan - TABLE_COUNT_STATE_SCAN = 1, // scanning - TABLE_COUNT_STATE_END = 2, // finish or noneed to process + TABLE_COUNT_STATE_NONE = 0, // before start scan + TABLE_COUNT_STATE_SCAN = 1, // cur group scanning + TABLE_COUNT_STATE_PROCESSED = 2, // cur group processed + TABLE_COUNT_STATE_END = 3, // finish or noneed to process } ETableCountState; typedef struct SAggSupporter { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index f3e2e99332..fe3528cce6 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -456,7 +456,7 @@ static void genTbGroupDigest(const SNode* pGroup, uint8_t* filterDigest, T_MD5_C } int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInfo* pTableListInfo, uint8_t* digest, - SStorageAPI* pAPI) { + SStorageAPI* pAPI, bool initRemainGroups) { int32_t code = TSDB_CODE_SUCCESS; SArray* pBlockList = NULL; SSDataBlock* pResBlock = NULL; @@ -590,6 +590,15 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf goto end; } + if (initRemainGroups) { + pTableListInfo->remainGroups = + taosHashInit(rows, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + if (pTableListInfo->remainGroups == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + } + for (int i = 0; i < rows; i++) { STableKeyInfo* info = 
taosArrayGet(pTableListInfo->pTableList, i); @@ -631,6 +640,14 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf int32_t len = (int32_t)(pStart - (char*)keyBuf); info->groupId = calcGroupId(keyBuf, len); + if (initRemainGroups) { + // groupId ~ table uid + taosHashPut(pTableListInfo->remainGroups, &(info->groupId), sizeof(info->groupId), &(info->uid), sizeof(info->uid)); + } + } + + if (initRemainGroups) { + pTableListInfo->numOfOuputGroups = taosHashGetSize(pTableListInfo->remainGroups); } if (tsTagFilterCache) { @@ -2025,6 +2042,7 @@ STableListInfo* tableListCreate() { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } + pListInfo->remainGroups = NULL; pListInfo->pTableList = taosArrayInit(4, sizeof(STableKeyInfo)); if (pListInfo->pTableList == NULL) { @@ -2054,7 +2072,7 @@ void* tableListDestroy(STableListInfo* pTableListInfo) { taosMemoryFreeClear(pTableListInfo->groupOffset); taosHashCleanup(pTableListInfo->map); - + taosHashCleanup(pTableListInfo->remainGroups); pTableListInfo->pTableList = NULL; pTableListInfo->map = NULL; taosMemoryFree(pTableListInfo); @@ -2068,6 +2086,7 @@ void tableListClear(STableListInfo* pTableListInfo) { taosArrayClear(pTableListInfo->pTableList); taosHashClear(pTableListInfo->map); + taosHashClear(pTableListInfo->remainGroups); taosMemoryFree(pTableListInfo->groupOffset); pTableListInfo->numOfOuputGroups = 1; pTableListInfo->oneTableForEachGroup = false; @@ -2116,70 +2135,6 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) { return TSDB_CODE_SUCCESS; } -static int32_t arrangeTableGroup(STableListInfo* pTableListInfo) { - int32_t code = TSDB_CODE_SUCCESS; - size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); - SArray* pDup = NULL; - SHashObj* pHashObj = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); - if (!pHashObj) { - return TSDB_CODE_OUT_OF_MEMORY; - } - int32_t num = 1; - // first: get the number of tables per group - for (int32_t i = 0; i < numOfTables; ++i) { - STableKeyInfo* pInfo = taosArrayGet(pTableListInfo->pTableList, i); - int32_t* pVal = taosHashGet(pHashObj, &pInfo->groupId, sizeof(pInfo->groupId)); - // update each group's table count - if (!pVal) { - taosHashPut(pHashObj, &pInfo->groupId, sizeof(pInfo->groupId), &num, sizeof(num)); - } else { - (*pVal)++; - } - } - pTableListInfo->numOfOuputGroups = taosHashGetSize(pHashObj); - pTableListInfo->oneTableForEachGroup = (pTableListInfo->numOfOuputGroups == numOfTables); - - if (pTableListInfo->numOfOuputGroups > 1 && pTableListInfo->numOfOuputGroups < numOfTables) { - pDup = taosArrayDup(pTableListInfo->pTableList, NULL); - pTableListInfo->groupOffset = taosMemoryMalloc(sizeof(int32_t) * pTableListInfo->numOfOuputGroups); - if (pDup == NULL || pTableListInfo->groupOffset == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - int32_t idx = 0, sum = 0; - - void* pIter = taosHashIterate(pHashObj, NULL); - while (pIter != NULL) { - // record each group's offset - pTableListInfo->groupOffset[idx] = sum; - int32_t* pData = (int32_t*)pIter; - sum += *pData; - // change value to record group item's first position - *pData = pTableListInfo->groupOffset[idx++]; - pIter = taosHashIterate(pHashObj, pIter); - } - - // second: arrange the tables and put the items with same groupId together - STableKeyInfo* pStart = taosArrayGet(pTableListInfo->pTableList, 0); - for (int32_t i = 0; i < numOfTables; ++i) { - STableKeyInfo* pInfo = taosArrayGet(pDup, i); - int32_t* pVal = 
taosHashGet(pHashObj, &pInfo->groupId, sizeof(pInfo->groupId)); - if (*pVal != i) { - pStart[*pVal] = *pInfo; - } - (*pVal)++; // update to next item's position - } - } - -_exit: - taosHashCleanup(pHashObj); - taosArrayDestroy(pDup); - if (code != TSDB_CODE_SUCCESS) { - taosMemoryFreeClear(pTableListInfo->groupOffset); - } - return code; -} - int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode, SNodeList* group, bool groupSort, uint8_t* digest, SStorageAPI* pAPI) { int32_t code = TSDB_CODE_SUCCESS; @@ -2206,19 +2161,21 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pTableListInfo->numOfOuputGroups = 1; } } else { - code = getColInfoResultForGroupby(pHandle->vnode, group, pTableListInfo, digest, pAPI); + bool initRemainGroups = false; + if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == nodeType(pScanNode)) { + STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pScanNode; + if (pTableScanNode->needCountEmptyTable) { + initRemainGroups = true; + } + } + + code = getColInfoResultForGroupby(pHandle->vnode, group, pTableListInfo, digest, pAPI, initRemainGroups); if (code != TSDB_CODE_SUCCESS) { return code; } if (groupSort || pScanNode->groupOrderScan) { code = sortTableGroup(pTableListInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == nodeType(pScanNode)) { - STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pScanNode; - if (pTableScanNode->needCountEmptyTable) { - // only put together tables with the same groupid - arrangeTableGroup(pTableListInfo); - } } } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 27c9a76757..8999340ddc 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -673,14 +673,7 @@ static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, i pInfo->tableEndIndex = numOfTables - 1; } } else { - int32_t i = pInfo->tableStartIndex + 1; - for (; i < numOfTables; ++i) { - STableKeyInfo* pCur = tableListGetInfo(pTableListInfo, i); - if (pCur->groupId != pStart->groupId) { - break; - } - } - pInfo->tableEndIndex = i - 1; + pInfo->tableEndIndex = numOfTables - 1; } if (!pInfo->needCountEmptyTable) { @@ -693,6 +686,17 @@ static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, i *size = pInfo->tableEndIndex - pInfo->tableStartIndex + 1; } +void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) { + if (pInfo->countState == TABLE_COUNT_STATE_END) { + return; + } + if (pInfo->base.pTableListInfo->oneTableForEachGroup) { + pInfo->countState = TABLE_COUNT_STATE_PROCESSED; + } else { + taosHashRemove(pInfo->base.pTableListInfo->remainGroups, &groupId, sizeof(groupId)); + } +} + static SSDataBlock* getOneRowResultBlock(SExecTaskInfo* pTaskInfo, STableScanBase* pBase, SSDataBlock* pBlock, const STableKeyInfo* tbInfo) { blockDataEmpty(pBlock); @@ -802,7 +806,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { - pTableScanInfo->countState = TABLE_COUNT_STATE_END; + markGroupProcessed(pTableScanInfo, p->info.id.groupId); return p; } @@ -831,7 +835,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { while (pTableScanInfo->scanTimes < total) { SSDataBlock* p = doTableScanImpl(pOperator); if (p != NULL) { - pTableScanInfo->countState = TABLE_COUNT_STATE_END; + 
markGroupProcessed(pTableScanInfo, p->info.id.groupId); return p; } @@ -849,11 +853,30 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { } if (pTableScanInfo->countState < TABLE_COUNT_STATE_END) { - // output once for this group + STableListInfo* pTableListInfo = pTableScanInfo->base.pTableListInfo; + if (pTableListInfo->oneTableForEachGroup) { // group by tbname + if (pTableScanInfo->countState < TABLE_COUNT_STATE_PROCESSED) { + pTableScanInfo->countState = TABLE_COUNT_STATE_PROCESSED; + STableKeyInfo* pStart = + (STableKeyInfo*)tableListGetInfo(pTableScanInfo->base.pTableListInfo, pTableScanInfo->tableStartIndex); + return getBlockForEmptyTable(pOperator, pStart); + } + } else { // group by tag + int32_t numOfTables = tableListGetSize(pTableListInfo); + if (pTableScanInfo->tableEndIndex + 1 >= numOfTables) { + // get empty group, mark processed & rm from hash + void* pIte = taosHashIterate(pTableListInfo->remainGroups, NULL); + if (pIte != NULL) { + size_t keySize = 0; + uint64_t* pGroupId = taosHashGetKey(pIte, &keySize); + STableKeyInfo info = {.uid = *(uint64_t*)pIte, .groupId = *pGroupId}; + taosHashCancelIterate(pTableListInfo->remainGroups, pIte); + markGroupProcessed(pTableScanInfo, *pGroupId); + return getBlockForEmptyTable(pOperator, &info); + } + } + } pTableScanInfo->countState = TABLE_COUNT_STATE_END; - STableKeyInfo* pStart = - (STableKeyInfo*)tableListGetInfo(pTableScanInfo->base.pTableListInfo, pTableScanInfo->tableStartIndex); - return getBlockForEmptyTable(pOperator, pStart); } return NULL; From 1a265a2e9dfbf1c38f4262b9f8e5b839bca71b6e Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 2 Jan 2024 16:26:46 +0800 Subject: [PATCH 57/69] test: modify testcase of hot refresh config --- tests/pytest/util/common.py | 16 ++++++++++++++++ .../test_hot_refresh_configurations.py | 18 ++++++++++++------ 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 9c45c09715..2e7bd2a89c 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -506,7 +506,23 @@ class TDCom: if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] break + if platform.system().lower() == 'windows': + win_sep = "\\" + buildPath = buildPath.replace(win_sep,'/') + return buildPath + + def getTaosdPath(self, dnodeID="dnode1"): + buildPath = self.getBuildPath() + print("123456",buildPath) + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + taosdPath = buildPath + "/../sim/" + dnodeID + tdLog.info("taosdPath: %s" % taosdPath) + return taosdPath + def getClientCfgPath(self): buildPath = self.getBuildPath() diff --git a/tests/system-test/0-others/test_hot_refresh_configurations.py b/tests/system-test/0-others/test_hot_refresh_configurations.py index 09e634974b..d281b9c1f7 100644 --- a/tests/system-test/0-others/test_hot_refresh_configurations.py +++ b/tests/system-test/0-others/test_hot_refresh_configurations.py @@ -201,6 +201,17 @@ class TDTestCase: tdSql.error(f'alter {dnode} "{name} {v}";') def run(self): + + # reset log + taosdPath = tdCom.getTaosdPath() + print(taosdPath,"123") + taosdLogAbsoluteFilename = taosdPath + "/log/" + "taosdlog*" + print(taosdLogAbsoluteFilename) + tdSql.execute("alter all dnodes 'resetlog';") + r = subprocess.Popen("cat {} | grep 'reset log file'".format(taosdLogAbsoluteFilename), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = r.communicate() + 
assert('reset log file' in stdout.decode()) + for key in self.configration_dic: if "cli" == key: for item in self.configration_dic[key]: @@ -214,12 +225,7 @@ class TDTestCase: self.svr_check(item["name"], item["alias"], item["except_values"], True) else: raise Exception(f"unknown key: {key}") - # reset log - path = os.sep.join([tdDnodes.getDnodesRootDir(), "dnode1", "log", "taosdlog.*"]) - tdSql.execute("alter all dnodes 'resetlog';") - r = subprocess.Popen("cat {} | grep 'reset log file'".format(path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = r.communicate() - assert('reset log file' in stdout.decode()) + def stop(self): tdSql.close() From 480f571c2bda07f02a7b77cd3b4d2beff8763b29 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 2 Jan 2024 16:29:01 +0800 Subject: [PATCH 58/69] test: modify testcase of hot refresh config --- tests/pytest/util/common.py | 1 - .../system-test/0-others/test_hot_refresh_configurations.py | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 2e7bd2a89c..010f45a573 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -514,7 +514,6 @@ class TDCom: def getTaosdPath(self, dnodeID="dnode1"): buildPath = self.getBuildPath() - print("123456",buildPath) if (buildPath == ""): tdLog.exit("taosd not found!") else: diff --git a/tests/system-test/0-others/test_hot_refresh_configurations.py b/tests/system-test/0-others/test_hot_refresh_configurations.py index d281b9c1f7..71f6290469 100644 --- a/tests/system-test/0-others/test_hot_refresh_configurations.py +++ b/tests/system-test/0-others/test_hot_refresh_configurations.py @@ -203,10 +203,7 @@ class TDTestCase: def run(self): # reset log - taosdPath = tdCom.getTaosdPath() - print(taosdPath,"123") - taosdLogAbsoluteFilename = taosdPath + "/log/" + "taosdlog*" - print(taosdLogAbsoluteFilename) + taosdLogAbsoluteFilename = tdCom.getTaosdPath() + "/log/" + "taosdlog*" tdSql.execute("alter all dnodes 'resetlog';") r = subprocess.Popen("cat {} | grep 'reset log file'".format(taosdLogAbsoluteFilename), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = r.communicate() From 2387d5bade30ab3630059ea237bc6197437fe14e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 2 Jan 2024 17:10:12 +0800 Subject: [PATCH 59/69] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 1 + source/libs/stream/inc/streamsm.h | 2 ++ source/libs/stream/src/streamExec.c | 13 ++++++++++--- source/libs/stream/src/streamTaskSm.c | 4 ++-- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 0c09e0e1ea..a6dac7f5ba 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -792,6 +792,7 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask); int32_t streamTaskClearHTaskAttr(SStreamTask* pTask); int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event); +int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, void* pFn); int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event); void streamTaskRestoreStatus(SStreamTask* pTask); diff --git a/source/libs/stream/inc/streamsm.h b/source/libs/stream/inc/streamsm.h index 7be655fbed..ea0522bd5a 100644 --- a/source/libs/stream/inc/streamsm.h +++ b/source/libs/stream/inc/streamsm.h @@ -34,6 +34,8 @@ typedef int32_t (*__state_trans_succ_fn)(SStreamTask*); typedef struct SAttachedEventInfo { ETaskStatus status; // required status that this event can be handled EStreamTaskEvent event; // the delayed handled event + void* pParam; + void* pFn; } SAttachedEventInfo; typedef struct STaskStateTrans { diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 1f7bb56ec1..b9839dfc0c 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -356,9 +356,16 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) { ASSERT(status == TASK_STATUS__HALT || status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP); } else { - ASSERT(status == TASK_STATUS__READY|| status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP); - streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT); - stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, id); + ASSERT(status == TASK_STATUS__READY || status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP); + int32_t code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT); + if (code != TSDB_CODE_SUCCESS) { + stError("s-task:%s halt stream task:%s failed, code:%s not transfer state to stream task", id, + pStreamTask->id.idStr, tstrerror(code)); + streamMetaReleaseTask(pMeta, pStreamTask); + return code; + } else { + stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, id); + } } // wait for the stream task to handle all in the inputQ, and to be idle diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index 4bd6483f7f..d785932109 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -278,10 +278,10 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskSt ETaskStatus s = streamTaskGetStatus(pTask, NULL); taosThreadMutexUnlock(&pTask->lock); - if ((s == pTrans->next.state) && (pSM->prev.evt == pTrans->event)) { + if ((s == pTrans->next.state) && (pSM->prev.evt == pTrans->event)) {// this event has been handled already stDebug("s-task:%s attached event:%s handled", id, GET_EVT_NAME(pTrans->event)); return TSDB_CODE_SUCCESS; - } else if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__STOP) { // this event has been handled already + } else if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__STOP 
&& s != TASK_STATUS__UNINIT) { stDebug("s-task:%s not handle event:%s yet, wait for 100ms and recheck", id, GET_EVT_NAME(event)); taosMsleep(100); } else { From 93fe9558289052437385d5b24fe339593dcfd944 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 2 Jan 2024 17:48:05 +0800 Subject: [PATCH 60/69] test: modify testcase of windows --- tests/script/win-test-file | 2 +- tests/system-test/win-test-file | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/script/win-test-file b/tests/script/win-test-file index d9ff09f468..ada8574165 100644 --- a/tests/script/win-test-file +++ b/tests/script/win-test-file @@ -15,7 +15,6 @@ ./test.sh -f tsim/dnode/balance2.sim ./test.sh -f tsim/vnode/replica3_repeat.sim ./test.sh -f tsim/parser/col_arithmetic_operation.sim -./test.sh -f tsim/trans/create_db.sim ./test.sh -f tsim/dnode/balance3.sim ./test.sh -f tsim/vnode/replica3_many.sim ./test.sh -f tsim/stable/metrics_idx.sim @@ -201,6 +200,7 @@ ./test.sh -f tsim/query/show_db_table_kind.sim ./test.sh -f tsim/query/bi_star_table.sim ./test.sh -f tsim/query/bi_tag_scan.sim +./test.sh -f tsim/query/bi_tbname_col.sim ./test.sh -f tsim/query/tag_scan.sim ./test.sh -f tsim/query/nullColSma.sim ./test.sh -f tsim/query/bug3398.sim diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index aefdb1e824..0db83e71a3 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -149,7 +149,7 @@ python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py -python3 ./test.py -f 7-tmq/tmq_taosx.py +# python3 ./test.py -f 7-tmq/tmq_taosx.py python3 ./test.py -f 7-tmq/tmq_replay.py python3 ./test.py -f 7-tmq/tmqSeekAndCommit.py python3 ./test.py -f 7-tmq/tmq_offset.py @@ -163,6 +163,7 @@ python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1 python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 2 -n 1 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py -N 3 -n 3 +python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-column.py -N 3 -n 3 @@ -196,6 +197,7 @@ python3 ./test.py -f 0-others/ttlChangeOnWrite.py python3 ./test.py -f 0-others/compress_tsz1.py python3 ./test.py -f 0-others/compress_tsz2.py python3 ./test.py -f 0-others/view/non_marterial_view/test_view.py +python3 ./test.py -f 0-others/test_show_table_distributed.py python3 ./test.py -f 0-others/compatibility.py python3 ./test.py -f 0-others/tag_index_basic.py python3 ./test.py -N 3 -f 0-others/walRetention.py @@ -204,6 +206,7 @@ python3 ./test.py -f 0-others/splitVGroupRep3.py -N 3 python3 ./test.py -f 0-others/timeRangeWise.py -N 3 python3 ./test.py -f 0-others/delete_check.py python3 ./test.py -f 0-others/test_hot_refresh_configurations.py +python3 ./test.py -f 1-insert/insert_double.py python3 ./test.py -f 1-insert/alter_database.py python3 ./test.py -f 1-insert/alter_replica.py -N 3 python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py @@ -252,6 +255,10 @@ python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4 python3 ./test.py -f 1-insert/precisionUS.py python3 ./test.py -f 1-insert/precisionNS.py python3 ./test.py -f 1-insert/test_ts4219.py +python3 ./test.py -f 
1-insert/ts-4272.py +python3 ./test.py -f 1-insert/test_ts4295.py +python3 ./test.py -f 1-insert/test_td27388.py +python3 ./test.py -f 1-insert/insert_timestamp.py python3 ./test.py -f 0-others/show.py python3 ./test.py -f 0-others/show_tag_index.py python3 ./test.py -f 0-others/information_schema.py @@ -795,6 +802,7 @@ python3 ./test.py -f 2-query/blockSMA.py -Q 4 python3 ./test.py -f 2-query/projectionDesc.py -Q 4 python3 ./test.py -f 2-query/odbc.py python3 ./test.py -f 2-query/fill_with_group.py +python3 ./test.py -f 2-query/state_window.py -Q 3 python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4 python3 ./test.py -f 99-TDcase/TD-20582.py python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 From 5bbf12c2c9240df4677fa3ba00325ce01b8d4b38 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 2 Jan 2024 18:35:17 +0800 Subject: [PATCH 61/69] fix(tsdb): check if stt blocks are clean before building composed data blocks. --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 1f54f13592..0aef1efc7b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1713,6 +1713,9 @@ static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader* } else if ((!dataInDataFile) && dataInSttFile) { // no data ile block exists return mergeRowsInSttBlocks(pSttBlockReader, pBlockScanInfo, pReader); + } else if (pBlockScanInfo->cleanSttBlocks && pReader->info.execMode == READER_EXEC_ROWS) { + // opt model for count data in stt file, which is not overlap with data blocks in files. + return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); } else { // row in both stt file blocks and data file blocks TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); From 866fac624fa9985bd3b0d90c371cae19bdc83bc4 Mon Sep 17 00:00:00 2001 From: zk66214 Date: Tue, 2 Jan 2024 19:26:59 +0800 Subject: [PATCH 62/69] Add test cases for TS-4348 and TD-27939 --- tests/system-test/2-query/ts-4348-td-27939.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 tests/system-test/2-query/ts-4348-td-27939.py diff --git a/tests/system-test/2-query/ts-4348-td-27939.py b/tests/system-test/2-query/ts-4348-td-27939.py new file mode 100644 index 0000000000..2a861899d5 --- /dev/null +++ b/tests/system-test/2-query/ts-4348-td-27939.py @@ -0,0 +1,46 @@ +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + + tdSql.execute("drop database if exists ts_4338;") + tdSql.execute("create database ts_4338;") + tdSql.execute("drop table if exists ts_4338.t;") + tdSql.execute("create database if not exists ts_4338;") + tdSql.execute("create table ts_4338.t (ts timestamp, i8 tinyint);") + tdSql.execute("insert into ts_4338.t (ts, i8) values (now(), 1) (now()+1s, 2);") + + def run(self): + # TS-4348 + tdSql.query(f'select i8 from ts_4338.t;') + tdSql.checkRows(2) + + tdSql.query(f'select i8 from ts_4338.t where 1 = 1;') + tdSql.checkRows(2) + + tdSql.query(f'select i8 from ts_4338.t where i8 = 1;') + tdSql.checkRows(1) + + tdSql.query(f'select * from (select * from ts_4338.t where i8 = 3);') + tdSql.checkRows(0) + + # TD-27939 + tdSql.query(f'select * from (select * from ts_4338.t where 1 = 100);') + tdSql.checkRows(0) + + tdSql.query(f'select * from (select * from 
(select * from ts_4338.t where 1 = 200));') + tdSql.checkRows(0) + + tdSql.execute("drop database if exists ts_4338;") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 453d3e8a3069f860b9aa0072986a0aa039084dd3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 3 Jan 2024 10:50:38 +0800 Subject: [PATCH 63/69] fix(stream): fix dead lock on windows. --- source/dnode/vnode/src/vnd/vnodeSync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 84ef36df74..34ae6623a7 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -567,13 +567,13 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) if (vnodeIsRoleLeader(pVnode)) { // start to restore all stream tasks if (tsDisableStream) { + streamMetaWUnLock(pMeta); vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId); } else { vInfo("vgId:%d sync restore finished, start to launch stream tasks", pVnode->config.vgId); tqStreamTaskResetStatus(pVnode->pTq->pStreamMeta); { - streamMetaWLock(pMeta); if (pMeta->startInfo.taskStarting == 1) { pMeta->startInfo.restartCount += 1; tqDebug("vgId:%d in start tasks procedure, inc restartCounter by 1, remaining restart:%d", vgId, From e67c6764664b8f37504b7cc86f61555bade36601 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 3 Jan 2024 15:19:27 +0800 Subject: [PATCH 64/69] TS-4423: add test case --- tests/parallel_test/cases.task | 1 + .../system-test/1-insert/stt_blocks_check.py | 68 +++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 tests/system-test/1-insert/stt_blocks_check.py diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 093fec78ab..0e826304c1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -109,6 +109,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/insert_stb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_stable.py +,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/stt_blocks_check.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py diff --git a/tests/system-test/1-insert/stt_blocks_check.py b/tests/system-test/1-insert/stt_blocks_check.py new file mode 100644 index 0000000000..5de6414453 --- /dev/null +++ b/tests/system-test/1-insert/stt_blocks_check.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from random import randint +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +from util.boundary import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def stt_block_check(self): + tdSql.prepare() + tdSql.execute('use db') + + tdSql.execute('create table meters (ts timestamp, c1 int, c2 float) tags(t1 int)') + tdSql.execute("create table d0 using meters tags(1)") + + ts = 1704261670000 + + sql = "insert into d0 values " + for i in range(100): + sql = sql + f"({ts + i}, 1, 0.1)" + tdSql.execute(sql) + tdSql.execute("flush database db") + + ts = 1704261670099 + + sql = "insert into d0 values " + for i in range(100): + sql = sql + f"({ts + i}, 1, 0.1)" + tdSql.execute(sql) + tdSql.execute("flush database db") + + tdSql.execute(f"insert into d0 values({ts + 100}, 2, 1.0)") + tdSql.execute("flush database db") + + time.sleep(2) + + tdSql.query("show table distributed db.meters") + tdSql.query("select count(*) from db.meters") + tdSql.checkData(0, 0, 200) + + def run(self): + self.stt_block_check() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 589b046ecb366471018830268defd73b2735ce93 Mon Sep 17 00:00:00 2001 From: charles Date: Wed, 3 Jan 2024 17:05:55 +0800 Subject: [PATCH 65/69] update alter_database test case for arm64 ci --- tests/system-test/1-insert/alter_database.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/system-test/1-insert/alter_database.py b/tests/system-test/1-insert/alter_database.py index 8de43b8ee8..6a831b88ff 100644 --- a/tests/system-test/1-insert/alter_database.py +++ b/tests/system-test/1-insert/alter_database.py @@ -4,7 +4,8 @@ import time import socket import os import threading - +import psutil +import platform from util.log import * from util.sql import * from util.cases import * @@ -17,6 +18,14 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) self.buffer_boundary = [3, 4097, 8193, 12289, 16384] + # remove the value > free_memory, 70% is the weight to calculate the max value + if platform.system() == "Linux" and platform.machine() == "aarch64": + mem = psutil.virtual_memory() + free_memory = mem.free * 0.7 / 1024 / 1024 + for item in self.buffer_boundary: + if item > free_memory: + self.buffer_boundary.remove(item) + self.buffer_error = [self.buffer_boundary[0] - 1, self.buffer_boundary[-1]+1] # pages_boundary >= 64 From b818faaa5eba3bf6fc28a4e059ea81d86a3b54c3 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 3 Jan 2024 17:51:27 +0800 Subject: [PATCH 66/69] remainGroups --- source/libs/executor/src/executil.c | 2 +- source/libs/executor/src/scanoperator.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index fe3528cce6..2e93dbd803 100644 --- a/source/libs/executor/src/executil.c +++ 
b/source/libs/executor/src/executil.c @@ -2164,7 +2164,7 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* bool initRemainGroups = false; if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == nodeType(pScanNode)) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pScanNode; - if (pTableScanNode->needCountEmptyTable) { + if (pTableScanNode->needCountEmptyTable && !(groupSort || pScanNode->groupOrderScan)) { initRemainGroups = true; } } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 8999340ddc..c2a194d150 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -690,7 +690,7 @@ void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) { if (pInfo->countState == TABLE_COUNT_STATE_END) { return; } - if (pInfo->base.pTableListInfo->oneTableForEachGroup) { + if (pInfo->base.pTableListInfo->oneTableForEachGroup || pInfo->base.pTableListInfo->groupOffset) { pInfo->countState = TABLE_COUNT_STATE_PROCESSED; } else { taosHashRemove(pInfo->base.pTableListInfo->remainGroups, &groupId, sizeof(groupId)); @@ -854,14 +854,14 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->countState < TABLE_COUNT_STATE_END) { STableListInfo* pTableListInfo = pTableScanInfo->base.pTableListInfo; - if (pTableListInfo->oneTableForEachGroup) { // group by tbname + if (pTableListInfo->oneTableForEachGroup || pTableListInfo->groupOffset) { // group by tbname, group by tag + sort if (pTableScanInfo->countState < TABLE_COUNT_STATE_PROCESSED) { pTableScanInfo->countState = TABLE_COUNT_STATE_PROCESSED; STableKeyInfo* pStart = (STableKeyInfo*)tableListGetInfo(pTableScanInfo->base.pTableListInfo, pTableScanInfo->tableStartIndex); return getBlockForEmptyTable(pOperator, pStart); } - } else { // group by tag + } else { // group by tag + no sort int32_t numOfTables = tableListGetSize(pTableListInfo); if (pTableScanInfo->tableEndIndex + 1 >= numOfTables) { // get empty group, mark processed & rm from hash From de631e20a5469ff80e638e2e99e742bd6b924aff Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Wed, 3 Jan 2024 19:57:22 +0800 Subject: [PATCH 67/69] fix: refactor logdebugs --- include/common/tglobal.h | 4 +- include/util/tconfig.h | 2 +- include/util/tdef.h | 7 ++- include/util/tlog.h | 1 + source/common/src/tglobal.c | 98 +++++++++++++++++++++------------- source/dnode/mgmt/exe/dmMain.c | 2 +- source/util/src/tconfig.c | 37 ++++++++++++- source/util/src/tlog.c | 1 + utils/tsim/inc/simInt.h | 1 - utils/tsim/src/simSystem.c | 2 - 10 files changed, 110 insertions(+), 45 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 887f4cb6dc..f006779a48 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -228,8 +228,8 @@ int32_t taosCfgDynamicOptions(SConfig *pCfg, char *name, bool forServer); struct SConfig *taosGetCfg(); -void taosSetAllDebugFlag(int32_t flag, bool rewrite); -void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite); +void taosSetAllDebugFlag(int32_t flag); +void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal); void taosLocalCfgForbiddenToChange(char *name, bool *forbidden); int8_t taosGranted(); diff --git a/include/util/tconfig.h b/include/util/tconfig.h index 07abdd223d..f2a9446700 100644 --- a/include/util/tconfig.h +++ b/include/util/tconfig.h @@ -94,7 +94,7 @@ typedef struct SConfigItem { int64_t imax; double fmax; }; - SArray 
*array; // SDiskCfg + SArray *array; // SDiskCfg/SLogVar } SConfigItem; typedef struct { diff --git a/include/util/tdef.h b/include/util/tdef.h index 51b0b63da2..875a6f5738 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -260,7 +260,7 @@ typedef enum ELogicConditionType { #define TSDB_PASSWORD_LEN 32 #define TSDB_USET_PASSWORD_LEN 129 #define TSDB_VERSION_LEN 32 -#define TSDB_LABEL_LEN 12 +#define TSDB_LABEL_LEN 12 #define TSDB_JOB_STATUS_LEN 32 #define TSDB_CLUSTER_ID_LEN 40 @@ -272,6 +272,7 @@ typedef enum ELogicConditionType { #define TSDB_SHOW_SCHEMA_JSON_LEN TSDB_MAX_COLUMNS * 256 #define TSDB_SLOW_QUERY_SQL_LEN 512 #define TSDB_SHOW_SUBQUERY_LEN 1000 +#define TSDB_LOG_VAR_LEN 32 #define TSDB_TRANS_STAGE_LEN 12 #define TSDB_TRANS_TYPE_LEN 16 @@ -503,6 +504,10 @@ typedef struct { int32_t primary; } SDiskCfg; +typedef struct { + char name[TSDB_LOG_VAR_LEN]; +} SLogVar; + #define TMQ_SEPARATOR ':' enum { diff --git a/include/util/tlog.h b/include/util/tlog.h index 6d393bfefb..11ac0e1fae 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -67,6 +67,7 @@ extern int32_t smaDebugFlag; extern int32_t idxDebugFlag; extern int32_t tdbDebugFlag; extern int32_t sndDebugFlag; +extern int32_t simDebugFlag; int32_t taosInitLog(const char *logName, int32_t maxFiles); void taosCloseLog(); diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d6d1e1810d..79d21955d4 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -423,7 +423,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "asyncLog", tsAsyncLog, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "logKeepDays", 0, -365000, 365000, CFG_SCOPE_BOTH, CFG_DYN_ENT_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; - if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_NONE) != 0) return -1; + if (cfgAddInt32(pCfg, "simDebugFlag", simDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "tmrDebugFlag", tmrDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "uDebugFlag", uDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "rpcDebugFlag", rpcDebugFlag, 0, 255, CFG_SCOPE_BOTH, CFG_DYN_BOTH) != 0) return -1; @@ -497,7 +497,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH, CFG_DYN_CLIENT) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT) != 0) return -1; - if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_SERVER, CFG_DYN_CLIENT) != 0) return -1; + if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT) != 0) return -1; if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, CFG_SCOPE_CLIENT, CFG_DYN_NONE) != 0) return -1; @@ -962,6 +962,7 @@ static void taosSetClientLogCfg(SConfig *pCfg) { rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32; qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32; cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32; + simDebugFlag = cfgGetItem(pCfg, "simDebugFlag")->i32; } static void taosSetServerLogCfg(SConfig *pCfg) { @@ -1278,7 +1279,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi taosSetServerLogCfg(pCfg); } - taosSetAllDebugFlag(cfgGetItem(pCfg, 
"debugFlag")->i32, false); + taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32); if (taosMulModeMkDir(tsLogDir, 0777, true) != 0) { terrno = TAOS_SYSTEM_ERROR(errno); @@ -1356,6 +1357,8 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile taosSetSystemCfg(tsCfg); if (taosSetFileHandlesLimit() != 0) return -1; + taosSetAllDebugFlag(cfgGetItem(tsCfg, "debugFlag")->i32); + cfgDumpCfg(tsCfg, tsc, false); if (taosCheckGlobalCfg() != 0) { @@ -1399,7 +1402,7 @@ static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize, *pVar = flag; if (isDebugflag) { - taosSetDebugFlag(pOptions[d].optionVar, optName, flag, true); + taosSetDebugFlag(pOptions[d].optionVar, optName, flag); } terrno = TSDB_CODE_SUCCESS; } break; @@ -1447,8 +1450,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, char *name) { } if (strncasecmp(name, "debugFlag", 9) == 0) { - int32_t flag = pItem->i32; - taosSetAllDebugFlag(flag, true); + taosSetAllDebugFlag(pItem->i32); return 0; } @@ -1460,7 +1462,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, char *name) { {"smaDebugFlag", &smaDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, - {"jniDebugFlag", &jniDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, + {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, }; static OptionNameAndVar options[] = { @@ -1524,8 +1526,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) { switch (lowcaseName[0]) { case 'd': { if (strcasecmp("debugFlag", name) == 0) { - int32_t flag = pItem->i32; - taosSetAllDebugFlag(flag, true); + taosSetAllDebugFlag(pItem->i32); matched = true; } break; @@ -1691,7 +1692,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) { {"cDebugFlag", &cDebugFlag}, {"dDebugFlag", &dDebugFlag}, {"fsDebugFlag", &fsDebugFlag}, {"idxDebugFlag", &idxDebugFlag}, {"jniDebugFlag", &jniDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, - {"uDebugFlag", &uDebugFlag}, + {"uDebugFlag", &uDebugFlag}, {"simDebugFlag", &simDebugFlag}, }; static OptionNameAndVar options[] = { @@ -1736,9 +1737,9 @@ int32_t taosCfgDynamicOptions(SConfig *pCfg, char *name, bool forServer) { return taosCfgDynamicOptionsForClient(pCfg, name); } -void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) { +void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) { SConfigItem *pItem = cfgGetItem(tsCfg, flagName); - if (pItem != NULL && (rewrite || pItem->i32 == 0)) { + if (pItem != NULL) { pItem->i32 = flagVal; } if (pFlagPtr != NULL) { @@ -1746,33 +1747,58 @@ void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, } } -void taosSetAllDebugFlag(int32_t flag, bool rewrite) { +static int taosLogVarComp(void const *lp, void const *rp) { + SLogVar *lpVar = (SLogVar *)lp; + SLogVar *rpVar = (SLogVar *)rp; + return strcasecmp(lpVar->name, rpVar->name); +} + +static void taosCheckAndSetDebugFlag(int32_t *pFlagPtr, char *name, int32_t flag, SArray *noNeedToSetVars) { + if (noNeedToSetVars != NULL && taosArraySearch(noNeedToSetVars, name, taosLogVarComp, TD_EQ) != NULL) { + return; + } + taosSetDebugFlag(pFlagPtr, name, 
flag); +} + +void taosSetAllDebugFlag(int32_t flag) { if (flag <= 0) return; - taosSetDebugFlag(NULL, "debugFlag", flag, rewrite); - taosSetDebugFlag(NULL, "simDebugFlag", flag, rewrite); - taosSetDebugFlag(NULL, "tmrDebugFlag", flag, rewrite); - taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite); - taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite); - taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite); - taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite); - taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite); - taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite); - taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite); - taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite); - taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite); - taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite); - taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite); - taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite); - taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite); - taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite); - taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite); - taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite); - taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite); - taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite); - taosSetDebugFlag(&stDebugFlag, "stDebugFlag", flag, rewrite); - taosSetDebugFlag(&sndDebugFlag, "sndDebugFlag", flag, rewrite); + SArray *noNeedToSetVars = NULL; + SConfigItem *pItem = cfgGetItem(tsCfg, "debugFlag"); + if (pItem != NULL) { + pItem->i32 = flag; + noNeedToSetVars = pItem->array; + } + + taosCheckAndSetDebugFlag(&simDebugFlag, "simDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tmrDebugFlag, "tmrDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, noNeedToSetVars); + + taosCheckAndSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, noNeedToSetVars); + + taosCheckAndSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&stDebugFlag, "stDebugFlag", flag, noNeedToSetVars); + taosCheckAndSetDebugFlag(&sndDebugFlag, "sndDebugFlag", flag, noNeedToSetVars); + + taosArrayClear(noNeedToSetVars); // reset array + uInfo("all debug flag 
are set to %d", flag); + if (terrno == TSDB_CODE_CFG_NOT_FOUND) terrno = TSDB_CODE_SUCCESS; // ignore not exist } int8_t taosGranted() { return atomic_load_8(&tsGrant); } diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 3c08714218..756ac8167e 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -68,7 +68,7 @@ static struct { int64_t startTime; } global = {0}; -static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143, true); } +static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143); } static void dmSetAssert(int32_t signum, void *sigInfo, void *context) { tsAssert = 1; } static void dmStopDnode(int signum, void *sigInfo, void *context) { diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index a78d930326..d656f0c14e 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -307,6 +307,34 @@ static int32_t cfgSetTfsItem(SConfig *pCfg, const char *name, const char *value, return 0; } +static int32_t cfgUpdateDebugFlagItem(SConfig *pCfg, const char *name, bool resetArray) { + SConfigItem *pDebugFlagItem = cfgGetItem(pCfg, "debugFlag"); + if (resetArray) { + // reset + if (pDebugFlagItem == NULL) return -1; + + // logflag names that should 'not' be set by 'debugFlag' + if (pDebugFlagItem->array == NULL) { + pDebugFlagItem->array = taosArrayInit(16, sizeof(SLogVar)); + if (pDebugFlagItem->array == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + } + taosArrayClear(pDebugFlagItem->array); + return 0; + } + + // update + if (pDebugFlagItem == NULL) return -1; + if (pDebugFlagItem->array != NULL) { + SLogVar logVar = {0}; + strncpy(logVar.name, name, TSDB_LOG_VAR_LEN - 1); + taosArrayPush(pDebugFlagItem->array, &logVar); + } + return 0; +} + int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype) { GRANT_CFG_SET; SConfigItem *pItem = cfgGetItem(pCfg, name); @@ -661,7 +689,6 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) { SConfigItem *pItem = taosArrayGet(pCfg->array, i); if (tsc && pItem->scope == CFG_SCOPE_SERVER) continue; if (dump && strcmp(pItem->name, "scriptDir") == 0) continue; - if (dump && strcmp(pItem->name, "simDebugFlag") == 0) continue; tstrncpy(src, cfgStypeStr(pItem->stype), CFG_SRC_PRINT_LEN); for (int32_t j = 0; j < CFG_SRC_PRINT_LEN; ++j) { if (src[j] == 0) src[j] = ' '; @@ -931,6 +958,14 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) { code = cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_CFG_FILE); if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break; } + + size_t len = strlen(name); + const char *debugFlagStr = "debugFlag"; + const size_t debugFlagLen = strlen(debugFlagStr); + if (len >= debugFlagLen && strcasecmp(name + len - debugFlagLen, debugFlagStr) == 0) { + code = cfgUpdateDebugFlagItem(pConfig, name, len == debugFlagLen); + if (code != 0 && terrno != TSDB_CODE_CFG_NOT_FOUND) break; + } } taosCloseFile(&pFile); diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index e113a95fcc..bd6c37a7b5 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -111,6 +111,7 @@ int32_t udfDebugFlag = 131; int32_t smaDebugFlag = 131; int32_t idxDebugFlag = 131; int32_t sndDebugFlag = 131; +int32_t simDebugFlag = 131; int64_t dbgEmptyW = 0; int64_t dbgWN = 0; diff --git a/utils/tsim/inc/simInt.h b/utils/tsim/inc/simInt.h index 02c7540cde..f2360277e0 100644 --- a/utils/tsim/inc/simInt.h +++ 
b/utils/tsim/inc/simInt.h @@ -184,7 +184,6 @@ extern SScript *simScriptList[MAX_MAIN_SCRIPT_NUM]; extern SCommand simCmdList[]; extern int32_t simScriptPos; extern int32_t simScriptSucceed; -extern int32_t simDebugFlag; extern char simScriptDir[]; extern bool abortExecution; extern bool useValgrind; diff --git a/utils/tsim/src/simSystem.c b/utils/tsim/src/simSystem.c index 98f9217fd6..dcf5d6ab12 100644 --- a/utils/tsim/src/simSystem.c +++ b/utils/tsim/src/simSystem.c @@ -21,7 +21,6 @@ SScript *simScriptList[MAX_MAIN_SCRIPT_NUM]; SCommand simCmdList[SIM_CMD_END]; int32_t simScriptPos = -1; int32_t simScriptSucceed = 0; -int32_t simDebugFlag = 143; void simCloseTaosdConnect(SScript *script); char simScriptDir[PATH_MAX] = {0}; @@ -32,7 +31,6 @@ int32_t simInitCfg() { taosInitCfg(configDir, NULL, NULL, NULL, NULL, 1); SConfig *pCfg = taosGetCfg(); - simDebugFlag = cfgGetItem(pCfg, "simDebugFlag")->i32; tstrncpy(simScriptDir, cfgGetItem(pCfg, "scriptDir")->str, PATH_MAX); return 0; } From 860146d425dcea53420439ee2f2f591d7fb56400 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 3 Jan 2024 23:59:48 +0800 Subject: [PATCH 68/69] initRemainGroups --- source/libs/executor/src/executil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 2e93dbd803..e3f634f406 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -2164,7 +2164,7 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* bool initRemainGroups = false; if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == nodeType(pScanNode)) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pScanNode; - if (pTableScanNode->needCountEmptyTable && !(groupSort || pScanNode->groupOrderScan)) { + if (tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable && !(groupSort || pScanNode->groupOrderScan)) { initRemainGroups = true; } } From 4ea2cad0bb6824e315c68be890e52422b818fceb Mon Sep 17 00:00:00 2001 From: haoranchen Date: Thu, 4 Jan 2024 18:59:55 +0800 Subject: [PATCH 69/69] comment snode_restart_with_checkpoint.py --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1761189b19..ac21f0fbb2 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -33,7 +33,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py #,,n,system-test,python3 ./test.py -f 8-stream/vnode_restart.py -N 4 #,,n,system-test,python3 ./test.py -f 8-stream/snode_restart.py -N 4 -,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 +#,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tbname_vgroup.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/compact-col.py
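
The remainGroups work in patches 56, 66 and 68 converges on one user-visible behavior: with countAlwaysReturnValue enabled, a count(*) that is partitioned by tbname or grouped by a tag should still emit a row (with value 0) for child tables or tag groups that hold no data, instead of silently dropping them. Below is a minimal system-test-style sketch of that expectation, not part of the patch series itself: the database and table names are illustrative only, it assumes countAlwaysReturnValue keeps its enabled default, and the checked row counts express the series' intent rather than output captured from an actual run.

from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def init(self, conn, logSql, replicaVar=1):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), True)

    def run(self):
        # illustrative database/table names only
        tdSql.execute("drop database if exists empty_cnt;")
        tdSql.execute("create database empty_cnt;")
        tdSql.execute("use empty_cnt;")
        tdSql.execute("create stable st (ts timestamp, c1 int) tags (t1 int);")

        # three child tables, only the first one ever receives rows
        tdSql.execute("create table ct1 using st tags(1);")
        tdSql.execute("create table ct2 using st tags(2);")
        tdSql.execute("create table ct3 using st tags(3);")
        tdSql.execute("insert into ct1 values (now, 1) (now+1s, 2);")

        # every child table should contribute a row, the empty ones counting 0
        tdSql.query("select count(*) from st partition by tbname;")
        tdSql.checkRows(3)

        # same expectation for tag-based grouping: empty groups are reported
        tdSql.query("select count(*) from st group by t1;")
        tdSql.checkRows(3)

        tdSql.execute("drop database if exists empty_cnt;")

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

The design choice behind remainGroups follows from this expectation: when a whole group never yields a scanned block, the scan operator still needs a concrete table from that group to attribute the synthesized result row to (getBlockForEmptyTable takes an STableKeyInfo), which is why the hash maps each pending groupId to a representative table uid and entries are removed as groups are marked processed.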