From eea32f5f0ae23b5c649f1367bf1f5034a00daaf1 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 25 May 2022 22:21:04 +0800 Subject: [PATCH 01/66] feat: add tag condition --- source/dnode/vnode/inc/vnode.h | 4 +- source/dnode/vnode/src/tsdb/tsdbRead.c | 70 ++++--------------------- source/libs/executor/src/executorimpl.c | 26 ++++----- 3 files changed, 25 insertions(+), 75 deletions(-) diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 9e33973c05..c106dbfb68 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -103,9 +103,7 @@ tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STab void *pMemRef); int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); bool isTsdbCacheLastRow(tsdbReaderT *pReader); -int32_t tsdbQuerySTableByTagCond(void *pMeta, uint64_t uid, TSKEY skey, const char *pTagCond, size_t len, - int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupInfo, - SColIndex *pColIndex, int32_t numOfCols, uint64_t reqId, uint64_t taskId); +int32_t tsdbQueryAllTable(void* pMeta, uint64_t uid, STableGroupInfo* pGroupInfo, SNode* pTagCond); int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index ee216cb2ab..d390b59153 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -14,6 +14,7 @@ */ #include "tsdb.h" +#include "index.h" #define EXTRA_BYTES 2 #define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) @@ -3875,74 +3876,25 @@ SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColInd // return TSDB_CODE_SUCCESS; //} -int32_t tsdbQuerySTableByTagCond(void* pMeta, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, - int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo, - SColIndex* pColIndex, int32_t numOfCols, uint64_t reqId, uint64_t taskId) { - SMetaReader mr = {0}; - - metaReaderInit(&mr, (SMeta*)pMeta, 0); - - if (metaGetTableEntryByUid(&mr, uid) < 0) { - tsdbError("%p failed to get stable, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, reqId); - metaReaderClear(&mr); - terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST; - goto _error; - } else { - tsdbDebug("%p succeed to get stable, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, reqId); - } - - if (mr.me.type != TSDB_SUPER_TABLE) { - tsdbError("%p query normal tag not allowed, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, - reqId); - terrno = TSDB_CODE_OPS_NOT_SUPPORT; // basically, this error is caused by invalid sql issued by client - metaReaderClear(&mr); - goto _error; - } - - metaReaderClear(&mr); - +int32_t tsdbQueryAllTable(void* pMeta, uint64_t uid, STableGroupInfo* pGroupInfo, SNode* pTagCond) { // NOTE: not add ref count for super table - SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); - SSchemaWrapper* pTagSchema = metaGetTableSchema(pMeta, uid, 1, true); - - // no tags and tbname condition, all child tables of this stable are involved - if (tbnameCond == NULL && (pTagCond == NULL || len == 0)) { - int32_t ret = getAllTableList(pMeta, uid, res); - if (ret != TSDB_CODE_SUCCESS) { - goto _error; - } - - pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); - 
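/* tag-based grouping is removed here; the new code path below returns one flat group of child tables */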
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); - - tsdbDebug("%p no table name/tag condition, all tables qualified, numOfTables:%u, group:%zu, TID:0x%" PRIx64 - " QID:0x%" PRIx64, - pMeta, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList), taskId, reqId); - - taosArrayDestroy(res); - return ret; - } + SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); int32_t ret = TSDB_CODE_SUCCESS; - - SFilterInfo* filterInfo = NULL; - ret = filterInitFromNode((SNode*)pTagCond, &filterInfo, 0); + if(pTagCond){ + ret = doFilterTag(pTagCond, res); + }else{ + ret = getAllTableList(pMeta, uid, res); + } if (ret != TSDB_CODE_SUCCESS) { - terrno = ret; return ret; } - ret = tsdbQueryTableList(pMeta, res, filterInfo); + pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); - pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); + pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); + taosArrayPush(pGroupInfo->pGroupList, &res); - // tsdbDebug("%p stable tid:%d, uid:%" PRIu64 " query, numOfTables:%u, belong to %" PRIzu " groups", tsdb, - // pTable->tableId, pTable->uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList)); - - taosArrayDestroy(res); return ret; - -_error: - return terrno; } int32_t tsdbQueryTableList(void* pMeta, SArray* pRes, void* filterInfo) { diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 581cf5cacd..25c807d3c4 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4438,10 +4438,10 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT } static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId); + STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode); static int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo, - uint64_t queryId, uint64_t taskId); + uint64_t queryId, uint64_t taskId, SNode* pTagCond); static SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo); static SArray* extractColumnInfo(SNodeList* pNodeList); @@ -4471,14 +4471,14 @@ void extractTableSchemaVersion(SReadHandle* pHandle, uint64_t uid, SExecTaskInfo } SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, - uint64_t queryId, uint64_t taskId, STableGroupInfo* pTableGroupInfo) { + uint64_t queryId, uint64_t taskId, STableGroupInfo* pTableGroupInfo, SNode* pTagCond) { int32_t type = nodeType(pPhyNode); if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) { if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId); + tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId, pTagCond); if (pDataReader == NULL && terrno != 0) { return NULL; } @@ -4502,9 +4502,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo tsdbReaderT pDataReader = NULL; if (pHandle->vnode) { - pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId); + pDataReader = doCreateDataReader(pTableScanNode, pHandle, 
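/* pTagCond is threaded through to table-group creation */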
pTableGroupInfo, (uint64_t)queryId, taskId, pTagCond); } else { - doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, queryId, taskId); + doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, queryId, taskId, pTagCond); } if (pDataReader == NULL && terrno != 0) { @@ -4551,7 +4551,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SSDataBlock* pResBlock = createResDataBlock(pDescNode); int32_t code = doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, - queryId, taskId); + queryId, taskId, pTagCond); if (code != TSDB_CODE_SUCCESS) { return NULL; } @@ -4577,7 +4577,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES); for (int32_t i = 0; i < size; ++i) { SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i); - ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableGroupInfo); + ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableGroupInfo, pTagCond); if (ops[i] == NULL) { return NULL; } @@ -4893,10 +4893,10 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod } int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo, - uint64_t queryId, uint64_t taskId) { + uint64_t queryId, uint64_t taskId, SNode* pTagCond) { int32_t code = 0; if (tableType == TSDB_SUPER_TABLE) { - code = tsdbQuerySTableByTagCond(metaHandle, tableUid, 0, NULL, 0, 0, NULL, pGroupInfo, NULL, 0, queryId, taskId); + code = tsdbQueryAllTable(metaHandle, tableUid, pGroupInfo, pTagCond); } else { // Create one table group. 
code = tsdbGetOneTableGroup(metaHandle, tableUid, 0, pGroupInfo); } @@ -4923,10 +4923,10 @@ SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo) { } tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId) { + STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode) { uint64_t uid = pTableScanNode->scan.uid; int32_t code = - doCreateTableGroup(pHandle->meta, pTableScanNode->scan.tableType, uid, pTableGroupInfo, queryId, taskId); + doCreateTableGroup(pHandle->meta, pTableScanNode->scan.tableType, uid, pTableGroupInfo, queryId, taskId, pTagNode); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -4962,7 +4962,7 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead } (*pTaskInfo)->pRoot = - createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoGroupInfo); + createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoGroupInfo, pPlan->pTagCond); if (NULL == (*pTaskInfo)->pRoot) { code = terrno; goto _complete; From 71ec636c3c505b34dc51cef69bf9f4110c869eba Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 25 May 2022 22:29:24 +0800 Subject: [PATCH 02/66] feat: add tag condition --- source/dnode/vnode/inc/vnode.h | 2 +- source/dnode/vnode/src/tsdb/tsdbRead.c | 3 +-- source/libs/executor/src/executorimpl.c | 19 +++++++++++++++++-- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index c106dbfb68..88629e6c2a 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -103,7 +103,7 @@ tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STab void *pMemRef); int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); bool isTsdbCacheLastRow(tsdbReaderT *pReader); -int32_t tsdbQueryAllTable(void* pMeta, uint64_t uid, STableGroupInfo* pGroupInfo, SNode* pTagCond); +int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list); int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index d390b59153..0ccfbf82c8 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -14,7 +14,6 @@ */ #include "tsdb.h" -#include "index.h" #define EXTRA_BYTES 2 #define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) @@ -2804,7 +2803,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } -static int32_t getAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { +int32_t getAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid); while (1) { diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 25c807d3c4..1e7f737ab4 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -34,6 +34,7 @@ #include "thash.h" #include "ttypes.h" #include "vnode.h" +#include "index.h" #define IS_MAIN_SCAN(runtime) ((runtime)->scanFlag == MAIN_SCAN) #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN) @@ -4894,9 +4895,23 @@ SArray* 
extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { - int32_t code = 0; + int32_t code = TSDB_CODE_SUCCESS; if (tableType == TSDB_SUPER_TABLE) { - code = tsdbQueryAllTable(metaHandle, tableUid, pGroupInfo, pTagCond); + SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); + + int32_t ret = ; + if(pTagCond){ + code = doFilterTag(pTagCond, res); + }else{ + code = tsdbGetAllTableList(metaHandle, tableUid, res); + } + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); + pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); + taosArrayPush(pGroupInfo->pGroupList, &res); } else { // Create one table group. code = tsdbGetOneTableGroup(metaHandle, tableUid, 0, pGroupInfo); } From 4825d48019d998cdfb40e9d082efe80778c3e773 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 25 May 2022 22:34:34 +0800 Subject: [PATCH 03/66] feat: add tag condition --- source/dnode/vnode/src/tsdb/tsdbRead.c | 28 -------------------------- 1 file changed, 28 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0ccfbf82c8..a5e3f7f73b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -146,8 +146,6 @@ typedef struct STableGroupSupporter { SSchema* pTagSchema; } STableGroupSupporter; -int32_t tsdbQueryTableList(void* pMeta, SArray* pRes, void* filterInfo); - static STimeWindow updateLastrowForEachGroup(STableGroupInfo* groupList); static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* groupList); static int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle); @@ -3875,32 +3873,6 @@ SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColInd // return TSDB_CODE_SUCCESS; //} -int32_t tsdbQueryAllTable(void* pMeta, uint64_t uid, STableGroupInfo* pGroupInfo, SNode* pTagCond) { - // NOTE: not add ref count for super table - SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); - - int32_t ret = TSDB_CODE_SUCCESS; - if(pTagCond){ - ret = doFilterTag(pTagCond, res); - }else{ - ret = getAllTableList(pMeta, uid, res); - } - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); - pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); - taosArrayPush(pGroupInfo->pGroupList, &res); - - return ret; -} - -int32_t tsdbQueryTableList(void* pMeta, SArray* pRes, void* filterInfo) { - // impl later - - return TSDB_CODE_SUCCESS; -} int32_t tsdbGetOneTableGroup(void* pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) { SMetaReader mr = {0}; From 8db0315341307357536661086fa985c17f7ba0e8 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 25 May 2022 22:35:24 +0800 Subject: [PATCH 04/66] feat: add tag condition --- source/libs/executor/src/executorimpl.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 1e7f737ab4..1683ab6eb4 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4899,7 +4899,6 @@ int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUi if (tableType == TSDB_SUPER_TABLE) { SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); - int32_t ret = ; if(pTagCond){ code = 
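/* a tag condition is present: let the index module resolve the qualifying child tables */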
doFilterTag(pTagCond, res); }else{ From 29db3bad7952f793ea9573589664ad3026fc5d61 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 25 May 2022 22:42:44 +0800 Subject: [PATCH 05/66] feat: add tag condition --- source/dnode/vnode/src/tsdb/tsdbRead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index a5e3f7f73b..28677fd589 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2801,7 +2801,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } -int32_t getAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { +int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid); while (1) { From 3f3772594781ddf79fecdbbb3521f7a322b8e5c0 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 09:46:25 +0800 Subject: [PATCH 06/66] test: case for alter stable --- tests/script/jenkins/basic.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index a8f96cccf1..cec52ee40f 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -97,7 +97,7 @@ ./test.sh -f tsim/stable/values.sim ./test.sh -f tsim/stable/vnode3.sim ./test.sh -f tsim/stable/column_add.sim -#./test.sh -f tsim/stable/column_drop.sim +./test.sh -f tsim/stable/column_drop.sim #./test.sh -f tsim/stable/column_modify.sim From 9a2eddda4c0fd49e275d8d272899d377fcc99e7a Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Thu, 26 May 2022 11:02:35 +0800 Subject: [PATCH 07/66] fix(sync), call FpCommitCb twice when FOLLOWER --- include/libs/sync/sync.h | 1 + source/libs/sync/src/syncAppendEntries.c | 2 +- source/libs/sync/src/syncCommit.c | 1 + source/libs/sync/src/syncMain.c | 7 ++--- .../libs/sync/test/syncConfigChangeTest.cpp | 27 +++++++++++-------- 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index b43eafc918..6635bdbb96 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -80,6 +80,7 @@ typedef struct SFsmCbMeta { uint64_t seqNum; SyncTerm term; SyncTerm currentTerm; + uint64_t flag; } SFsmCbMeta; typedef struct SReConfigCbMeta { diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index c9e16c53c8..4899839b29 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -333,7 +333,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { cbMeta.seqNum = pEntry->seqNum; cbMeta.term = pEntry->term; cbMeta.currentTerm = ths->pRaftStore->currentTerm; - ths->pFsm->FpCommitCb(ths->pFsm, &rpcMsg, cbMeta); + cbMeta.flag = 9; bool needExecute = true; if (ths->pSnapshot != NULL && cbMeta.index <= ths->pSnapshot->lastApplyIndex) { diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index a3d480956e..d8f693da6e 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -111,6 +111,7 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { cbMeta.seqNum = pEntry->seqNum; cbMeta.term = pEntry->term; cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; + cbMeta.flag = 7; bool needExecute = true; if (pSyncNode->pSnapshot != NULL && cbMeta.index <= pSyncNode->pSnapshot->lastApplyIndex) { diff --git 
a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 914ce68245..9b7440e48b 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -941,12 +941,13 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { int len = 256; char* s = (char*)taosMemoryMalloc(len); snprintf(s, len, - "syncNode2SimpleStr vgId:%d currentTerm:%lu, commitIndex:%ld, state:%d %s, electTimerLogicClock:%lu, " + "syncNode2SimpleStr vgId:%d currentTerm:%lu, commitIndex:%ld, state:%d %s, isStandBy:%d, " + "electTimerLogicClock:%lu, " "electTimerLogicClockUser:%lu, " "electTimerMS:%d", pSyncNode->vgId, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, pSyncNode->state, - syncUtilState2String(pSyncNode->state), pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser, - pSyncNode->electTimerMS); + syncUtilState2String(pSyncNode->state), pSyncNode->pRaftCfg->isStandBy, pSyncNode->electTimerLogicClock, + pSyncNode->electTimerLogicClockUser, pSyncNode->electTimerMS); return s; } diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp index f52fef0019..fa453577e7 100644 --- a/source/libs/sync/test/syncConfigChangeTest.cpp +++ b/source/libs/sync/test/syncConfigChangeTest.cpp @@ -43,8 +43,8 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { if (cbMeta.index > beginIndex) { char logBuf[256]; - snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", - pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); + snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", + pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } else { sTrace("==callback== ==CommitCb== do not apply again %ld", cbMeta.index); @@ -54,15 +54,15 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { char logBuf[256]; snprintf(logBuf, sizeof(logBuf), - "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, - cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); + "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, cbMeta.index, + cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } void RollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { char logBuf[256]; - snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", - pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); + snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", + pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } @@ -109,6 +109,7 @@ int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* syncInfo.pFsm = createFsm(); snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex); syncInfo.pWal = pWal; + 
syncInfo.isStandBy = isStandBy; SSyncCfg* pCfg = &syncInfo.syncCfg; @@ -212,11 +213,15 @@ int main(int argc, char** argv) { int64_t rid = createSyncNode(replicaNum, myIndex, gVgId, pWal, (char*)gDir, isStandBy); assert(rid > 0); - if (isStandBy) { - syncStartStandBy(rid); - } else { - syncStart(rid); - } + syncStart(rid); + + /* + if (isStandBy) { + syncStartStandBy(rid); + } else { + syncStart(rid); + } + */ SSyncNode* pSyncNode = (SSyncNode*)syncNodeAcquire(rid); assert(pSyncNode != NULL); From 72d9b078946377053d7c45ec254a3b1756438193 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 26 May 2022 11:36:07 +0800 Subject: [PATCH 08/66] fix(query): fix avg/sum function result overflown output inf/nan TD-15923 --- source/libs/function/src/builtinsimpl.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index da842877dc..659c7447d6 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -502,6 +502,11 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) { } } + //check for overflow + if (isinf(pSumRes->dsum) || isnan(pSumRes->dsum)) { + GET_RES_INFO(pCtx)->isNullRes = 1; + } + _sum_over: // data in the check operation are all null, not output SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1); @@ -813,6 +818,9 @@ int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { if (IS_INTEGER_TYPE(type)) { pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count); } else { + if (isinf(pAvgRes->sum.dsum) || isnan(pAvgRes->sum.dsum)) { + GET_RES_INFO(pCtx)->isNullRes = 1; + } pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count); } From 381ff01f522d0df3d6a899faeee05ab46bf95813 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 13:18:39 +0800 Subject: [PATCH 09/66] feat: add vgroup for tsma --- include/common/tmsg.h | 4 + source/common/src/systable.c | 2 + source/common/src/tmsg.c | 4 + source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 2 +- source/dnode/mnode/impl/inc/mndDef.h | 1 + source/dnode/mnode/impl/inc/mndVgroup.h | 1 + source/dnode/mnode/impl/src/mndDb.c | 2 +- source/dnode/mnode/impl/src/mndSma.c | 115 +++++++++++++++++++- source/dnode/mnode/impl/src/mndVgroup.c | 25 +++++ 9 files changed, 152 insertions(+), 4 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index acf2587d9d..9e95e530f3 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1022,6 +1022,10 @@ typedef struct { SReplica replicas[TSDB_MAX_REPLICA]; int32_t numOfRetensions; SArray* pRetensions; // SRetention + + // for tsma + int8_t isTsma; + } SCreateVnodeReq; int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 9fe7645e2b..4126ee5c98 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -197,12 +197,14 @@ static const SSysDbTableSchema vgroupsSchema[] = { {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, }; static const SSysDbTableSchema smaSchema[] = { {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = 
TSDB_DATA_TYPE_VARCHAR}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, }; static const SSysDbTableSchema transSchema[] = { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 1108ea1e9f..c429b0481e 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -2908,6 +2908,8 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1; if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1; } + + if (tEncodeI8(&encoder, pReq->isTsma) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2970,6 +2972,8 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * } } + if (tDecodeI8(&decoder, &pReq->isTsma) < 0) return -1; + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index f28209f982..81c3ad8932 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -183,7 +183,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return -1; } - dDebug("vgId:%d, create vnode req is received", createReq.vgId); + dDebug("vgId:%d, create vnode req is received, tsma:%d", createReq.vgId, createReq.isTsma); SVnodeCfg vnodeCfg = {0}; vmGenerateVnodeCfg(&createReq, &vnodeCfg); diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 81f4c5ed1e..97ee6ad162 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -328,6 +328,7 @@ typedef struct { int64_t compStorage; int64_t pointsWritten; int8_t compact; + int8_t isTsma; int8_t replica; SVnodeGid vnodeGid[TSDB_MAX_REPLICA]; } SVgObj; diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h index 9bf7b6eb89..c9099b6b05 100644 --- a/source/dnode/mnode/impl/inc/mndVgroup.h +++ b/source/dnode/mnode/impl/inc/mndVgroup.h @@ -30,6 +30,7 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup); SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup); int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId); +int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup); int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups); SArray *mndBuildDnodesArray(SMnode *pMnode); int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray); diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 6921235f8b..95d3383ee1 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1155,7 +1155,7 @@ static void mndBuildDBVgroupInfo(SDbObj *pDb, SMnode *pMnode, SArray *pVgList) { pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); if (pIter == NULL) break; - if (NULL == pDb || pVgroup->dbUid == pDb->uid) { + if ((NULL == pDb || pVgroup->dbUid == pDb->uid) && !pVgroup->isTsma) { SVgroupInfo vgInfo = {0}; vgInfo.vgId = pVgroup->vgId; vgInfo.hashBegin = pVgroup->hashBegin; diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index b38e901d49..ce3a4b1a74 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -354,6 +354,22 @@ static int32_t mndSetCreateSmaCommitLogs(SMnode *pMnode, STrans *pTrans, SSmaObj return 0; } +static int32_t mndSetCreateSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw 
*pVgRaw = mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1; + return 0; +} + +static int32_t mndSetCreateSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1; + return 0; +} + static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) { SSdb *pSdb = pMnode->pSdb; SVgObj *pVgroup = NULL; @@ -393,6 +409,34 @@ static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj return 0; } +static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) { + SVnodeGid *pVgid = pVgroup->vnodeGid + 0; + SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId); + if (pDnode == NULL) return -1; + + STransAction action = {0}; + action.epSet = mndGetDnodeEpset(pDnode); + mndReleaseDnode(pMnode, pDnode); + + // todo add sma info here + + int32_t contLen = 0; + void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen); + if (pReq == NULL) return -1; + + action.pCont = pReq; + action.contLen = contLen; + action.msgType = TDMT_DND_CREATE_VNODE; + action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED; + + if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFree(pReq); + return -1; + } + + return 0; +} + static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCreate, SDbObj *pDb, SStbObj *pStb) { SSmaObj smaObj = {0}; memcpy(smaObj.name, pCreate->name, TSDB_TABLE_FNAME_LEN); @@ -438,6 +482,14 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea memcpy(smaObj.ast, pCreate->ast, smaObj.astLen); } + SVgObj smaVgObj = {0}; + if (mndAllocSmaVgroup(pMnode, pDb, &smaVgObj) != 0) { + mError("sma:%s, failed to create since %s", smaObj.name, terrstr()); + return -1; + } + + smaObj.dstVgId = smaVgObj.vgId; + SStreamObj streamObj = {0}; tstrncpy(streamObj.name, pCreate->name, TSDB_STREAM_FNAME_LEN); tstrncpy(streamObj.sourceDb, pDb->name, TSDB_DB_FNAME_LEN); @@ -460,8 +512,11 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea mndTransSetDbInfo(pTrans, pDb); if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER; + if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &smaVgObj) != 0) goto _OVER; if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER; + if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &smaVgObj) != 0) goto _OVER; if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER; + if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &smaVgObj) != 0) goto _OVER; if (mndAddStreamToTrans(pMnode, &streamObj, pCreate->ast, STREAM_TRIGGER_AT_ONCE, 0, pTrans) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; @@ -480,7 +535,6 @@ static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) { if (pCreate->intervalUnit < 0) return -1; if (pCreate->slidingUnit < 0) return -1; if (pCreate->timezone < 0) return -1; - if (pCreate->dstVgId < 0) return -1; if (pCreate->interval < 0) return -1; if (pCreate->offset < 0) return -1; if (pCreate->sliding < 0) return -1; @@ -602,6 +656,24 @@ static int32_t mndSetDropSmaCommitLogs(SMnode *pMnode, STrans *pTrans, SSmaObj * 
return 0; } +static int32_t mndSetDropSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_DROPPING) != 0) return -1; + + return 0; +} + +static int32_t mndSetDropSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_DROPPED) != 0) return -1; + + return 0; +} + static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) { SSdb *pSdb = pMnode->pSdb; SVgObj *pVgroup = NULL; @@ -643,23 +715,59 @@ static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj * return 0; } +static int32_t mndSetDropSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) { + SVnodeGid *pVgid = pVgroup->vnodeGid + 0; + SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId); + if (pDnode == NULL) return -1; + + STransAction action = {0}; + action.epSet = mndGetDnodeEpset(pDnode); + mndReleaseDnode(pMnode, pDnode); + + int32_t contLen = 0; + void *pReq = mndBuildDropVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen); + if (pReq == NULL) return -1; + + action.pCont = pReq; + action.contLen = contLen; + action.msgType = TDMT_DND_DROP_VNODE; + action.acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED; + + if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFree(pReq); + return -1; + } + + return 0; +} + static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *pSma) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_SMA, pReq); + SVgObj *pVgroup = NULL; + STrans *pTrans = NULL; + + pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId); + if (pVgroup == NULL) goto _OVER; + + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_SMA, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); mndTransSetDbInfo(pTrans, pDb); if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER; + if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER; + if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER; + if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; code = 0; _OVER: mndTransDrop(pTrans); + mndReleaseVgroup(pMnode, pVgroup); return code; } @@ -846,6 +954,9 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)n1, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&pSma->dstVgId, false); + numOfRows++; sdbRelease(pSdb, pSma); } diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 62021c6a7e..ba1d4508a5 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -80,6 +80,7 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup) { SDB_SET_INT32(pRaw, dataPos, 
pVgroup->hashEnd, _OVER) SDB_SET_BINARY(pRaw, dataPos, pVgroup->dbName, TSDB_DB_FNAME_LEN, _OVER) SDB_SET_INT64(pRaw, dataPos, pVgroup->dbUid, _OVER) + SDB_SET_INT8(pRaw, dataPos, pVgroup->isTsma, _OVER) SDB_SET_INT8(pRaw, dataPos, pVgroup->replica, _OVER) for (int8_t i = 0; i < pVgroup->replica; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; @@ -127,6 +128,7 @@ SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw) { SDB_GET_INT32(pRaw, dataPos, &pVgroup->hashEnd, _OVER) SDB_GET_BINARY(pRaw, dataPos, pVgroup->dbName, TSDB_DB_FNAME_LEN, _OVER) SDB_GET_INT64(pRaw, dataPos, &pVgroup->dbUid, _OVER) + SDB_GET_INT8(pRaw, dataPos, &pVgroup->isTsma, _OVER) SDB_GET_INT8(pRaw, dataPos, &pVgroup->replica, _OVER) for (int8_t i = 0; i < pVgroup->replica; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; @@ -167,6 +169,7 @@ static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew) { pOld->hashBegin = pNew->hashBegin; pOld->hashEnd = pNew->hashEnd; pOld->replica = pNew->replica; + pOld->isTsma = pNew->isTsma; memcpy(pOld->vnodeGid, pNew->vnodeGid, TSDB_MAX_REPLICA * sizeof(SVnodeGid)); return 0; } @@ -426,6 +429,25 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pAr return 0; } +int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) { + SArray *pArray = mndBuildDnodesArray(pMnode); + if (pArray == NULL) return -1; + + pVgroup->vgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP); + pVgroup->isTsma = 1; + pVgroup->createdTime = taosGetTimestampMs(); + pVgroup->updateTime = pVgroup->createdTime; + pVgroup->version = 1; + memcpy(pVgroup->dbName, pDb->name, TSDB_DB_FNAME_LEN); + pVgroup->dbUid = pDb->uid; + pVgroup->replica = 1; + + if (mndGetAvailableDnode(pMnode, pVgroup, pArray) != 0) return -1; + + mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId); + return 0; +} + int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) { int32_t code = -1; SArray *pArray = NULL; @@ -705,6 +727,9 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p pColInfo = taosArrayGet(pBlock->pDataBlock, cols); colDataAppendNULL(pColInfo, numOfRows); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->isTsma, false); + numOfRows++; sdbRelease(pSdb, pVgroup); } From 0dcd7ab854847c66eb9e43ea978ce8cc1ab38ea4 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 13:19:48 +0800 Subject: [PATCH 10/66] feat: add vgroup for tsma --- source/dnode/mnode/impl/src/mndSma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index ce3a4b1a74..5c3c4d66a2 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -358,7 +358,7 @@ static int32_t mndSetCreateSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVg SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); if (pVgRaw == NULL) return -1; if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1; - if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1; return 0; } From 4e1aa6b5c5fdca03ad2a593a1b30e29be0d1d3da Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Thu, 26 May 2022 14:21:24 +0800 Subject: [PATCH 11/66] refactor: config change --- include/libs/sync/sync.h | 3 ++ source/libs/sync/inc/syncInt.h | 2 +- source/libs/sync/src/syncAppendEntries.c | 41 +++++++++++++++---- source/libs/sync/src/syncCommit.c | 
41 +++++++++++++++---- source/libs/sync/src/syncMain.c | 33 +++++++-------- .../libs/sync/test/syncConfigChangeTest.cpp | 14 ++++--- 6 files changed, 97 insertions(+), 37 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 6635bdbb96..08640e25c2 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -88,6 +88,9 @@ typedef struct SReConfigCbMeta { SyncIndex index; SyncTerm term; SyncTerm currentTerm; + SSyncCfg oldCfg; + bool isDrop; + uint64_t flag; } SReConfigCbMeta; typedef struct SSyncFSM { diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 69549d2a7e..2e71745f61 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -182,7 +182,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S cJSON* syncNode2Json(const SSyncNode* pSyncNode); char* syncNode2Str(const SSyncNode* pSyncNode); char* syncNode2SimpleStr(const SSyncNode* pSyncNode); -void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig); +void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop); SSyncNode* syncNodeAcquire(int64_t rid); void syncNodeRelease(SSyncNode* pNode); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 4899839b29..b0c33ba347 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -333,7 +333,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { cbMeta.seqNum = pEntry->seqNum; cbMeta.term = pEntry->term; cbMeta.currentTerm = ths->pRaftStore->currentTerm; - cbMeta.flag = 9; + cbMeta.flag = 0x11; bool needExecute = true; if (ths->pSnapshot != NULL && cbMeta.index <= ths->pSnapshot->lastApplyIndex) { @@ -347,24 +347,51 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { // config change if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) { + SSyncCfg oldSyncCfg = ths->pRaftCfg->cfg; + SSyncCfg newSyncCfg; int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg); ASSERT(ret == 0); - syncNodeUpdateConfig(ths, &newSyncCfg); - if (ths->state == TAOS_SYNC_STATE_LEADER) { - syncNodeBecomeLeader(ths); - } else { - syncNodeBecomeFollower(ths); + // update new config myIndex + bool hit = false; + for (int i = 0; i < newSyncCfg.replicaNum; ++i) { + if (strcmp(ths->myNodeInfo.nodeFqdn, (newSyncCfg.nodeInfo)[i].nodeFqdn) == 0 && + ths->myNodeInfo.nodePort == (newSyncCfg.nodeInfo)[i].nodePort) { + newSyncCfg.myIndex = i; + hit = true; + break; + } + } + ASSERT(hit == true); + + bool isDrop; + syncNodeUpdateConfig(ths, &newSyncCfg, &isDrop); + + // change isStandBy to normal + if (!isDrop) { + if (ths->state == TAOS_SYNC_STATE_LEADER) { + syncNodeBecomeLeader(ths); + } else { + syncNodeBecomeFollower(ths); + } } - // maybe newSyncCfg.myIndex is updated in syncNodeUpdateConfig + char* sOld = syncCfg2Str(&oldSyncCfg); + char* sNew = syncCfg2Str(&newSyncCfg); + sInfo("==config change== 0x11 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop); + taosMemoryFree(sOld); + taosMemoryFree(sNew); + if (ths->pFsm->FpReConfigCb != NULL) { SReConfigCbMeta cbMeta = {0}; cbMeta.code = 0; cbMeta.currentTerm = ths->pRaftStore->currentTerm; cbMeta.index = pEntry->index; cbMeta.term = pEntry->term; + cbMeta.oldCfg = oldSyncCfg; + cbMeta.flag = 0x11; + cbMeta.isDrop = isDrop; ths->pFsm->FpReConfigCb(ths->pFsm, newSyncCfg, cbMeta); } } diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c 
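The hunk above enriches the follower commit path: it recomputes myIndex against the new membership, lets syncNodeUpdateConfig report whether this node was dropped, and hands the old config to FpReConfigCb. A consumer of the enriched callback might look like the minimal sketch below (the function name and log wording are illustrative, not part of this patch series; the types and fields come from the hunks above). The syncCommit.c hunk that follows applies the same bookkeeping on the leader commit path.

void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
  // report what changed, using the fields added by this patch
  sInfo("==callback== ==ReConfigCb== index:%ld, term:%lu, flag:%lu, replicas:%d -> %d, isDrop:%d", cbMeta.index,
        cbMeta.term, cbMeta.flag, cbMeta.oldCfg.replicaNum, newCfg.replicaNum, cbMeta.isDrop);
  if (cbMeta.isDrop) {
    // this node is absent from newCfg: release local state rather than rejoin the group
  }
}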
index d8f693da6e..a9b055820b 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -111,7 +111,7 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { cbMeta.seqNum = pEntry->seqNum; cbMeta.term = pEntry->term; cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; - cbMeta.flag = 7; + cbMeta.flag = 0x1; bool needExecute = true; if (pSyncNode->pSnapshot != NULL && cbMeta.index <= pSyncNode->pSnapshot->lastApplyIndex) { @@ -125,24 +125,51 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { // config change if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) { + SSyncCfg oldSyncCfg = pSyncNode->pRaftCfg->cfg; + SSyncCfg newSyncCfg; int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg); ASSERT(ret == 0); - syncNodeUpdateConfig(pSyncNode, &newSyncCfg); - if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { - syncNodeBecomeLeader(pSyncNode); - } else { - syncNodeBecomeFollower(pSyncNode); + // update new config myIndex + bool hit = false; + for (int i = 0; i < newSyncCfg.replicaNum; ++i) { + if (strcmp(pSyncNode->myNodeInfo.nodeFqdn, (newSyncCfg.nodeInfo)[i].nodeFqdn) == 0 && + pSyncNode->myNodeInfo.nodePort == (newSyncCfg.nodeInfo)[i].nodePort) { + newSyncCfg.myIndex = i; + hit = true; + break; + } + } + ASSERT(hit == true); + + bool isDrop; + syncNodeUpdateConfig(pSyncNode, &newSyncCfg, &isDrop); + + // change isStandBy to normal + if (!isDrop) { + if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { + syncNodeBecomeLeader(pSyncNode); + } else { + syncNodeBecomeFollower(pSyncNode); + } } - // maybe newSyncCfg.myIndex is updated in syncNodeUpdateConfig + char* sOld = syncCfg2Str(&oldSyncCfg); + char* sNew = syncCfg2Str(&newSyncCfg); + sInfo("==config change== 0x1 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop); + taosMemoryFree(sOld); + taosMemoryFree(sNew); + if (pSyncNode->pFsm->FpReConfigCb != NULL) { SReConfigCbMeta cbMeta = {0}; cbMeta.code = 0; cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; cbMeta.index = pEntry->index; cbMeta.term = pEntry->term; + cbMeta.oldCfg = oldSyncCfg; + cbMeta.flag = 0x1; + cbMeta.isDrop = isDrop; pSyncNode->pFsm->FpReConfigCb(pSyncNode->pFsm, newSyncCfg, cbMeta); } } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 9b7440e48b..9e6765e1d5 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -951,21 +951,9 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { return s; } -void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig) { - bool hit = false; - for (int i = 0; i < newConfig->replicaNum; ++i) { - if (strcmp(pSyncNode->myNodeInfo.nodeFqdn, (newConfig->nodeInfo)[i].nodeFqdn) == 0 && - pSyncNode->myNodeInfo.nodePort == (newConfig->nodeInfo)[i].nodePort) { - newConfig->myIndex = i; - hit = true; - break; - } - } - ASSERT(hit == true); - +void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop) { pSyncNode->pRaftCfg->cfg = *newConfig; - int32_t ret = raftCfgPersist(pSyncNode->pRaftCfg); - ASSERT(ret == 0); + int32_t ret = 0; // init internal pSyncNode->myNodeInfo = pSyncNode->pRaftCfg->cfg.nodeInfo[pSyncNode->pRaftCfg->cfg.myIndex]; @@ -995,9 +983,22 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig) { voteGrantedUpdate(pSyncNode->pVotesGranted, pSyncNode); votesRespondUpdate(pSyncNode->pVotesRespond, pSyncNode); - pSyncNode->pRaftCfg->isStandBy = 0; - raftCfgPersist(pSyncNode->pRaftCfg); + // isDrop + *isDrop = true; + for (int i = 0; i < newConfig->replicaNum; ++i) 
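/* if this node is found in the new config it survives the reconfig, i.e. it is not being dropped */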
{ + if (strcmp((newConfig->nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 && + (newConfig->nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) { + *isDrop = false; + break; + } + } + if (!(*isDrop)) { + // change isStandBy to normal + pSyncNode->pRaftCfg->isStandBy = 0; + } + + raftCfgPersist(pSyncNode->pRaftCfg); syncNodeLog2("==syncNodeUpdateConfig==", pSyncNode); } diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp index fa453577e7..580c4248a3 100644 --- a/source/libs/sync/test/syncConfigChangeTest.cpp +++ b/source/libs/sync/test/syncConfigChangeTest.cpp @@ -43,8 +43,9 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { if (cbMeta.index > beginIndex) { char logBuf[256]; - snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", - pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); + snprintf(logBuf, sizeof(logBuf), + "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, + cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } else { sTrace("==callback== ==CommitCb== do not apply again %ld", cbMeta.index); @@ -54,15 +55,16 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { char logBuf[256]; snprintf(logBuf, sizeof(logBuf), - "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, cbMeta.index, - cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); + "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, + cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } void RollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { char logBuf[256]; - snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", - pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); + snprintf(logBuf, sizeof(logBuf), + "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, + cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } From 22470344e05e672885d67902ef974a2712990a7a Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 26 May 2022 14:23:07 +0800 Subject: [PATCH 12/66] scheduler async api test --- include/libs/scheduler/scheduler.h | 26 +++---- source/client/src/clientImpl.c | 97 ++++++++++++++++++++++++++ source/libs/parser/src/parTranslater.c | 1 + source/libs/scheduler/src/schDbg.c | 42 +++++++++++ source/libs/scheduler/src/schJob.c | 7 +- source/libs/scheduler/src/schUtil.c | 1 + source/libs/scheduler/src/scheduler.c | 2 +- 7 files changed, 157 insertions(+), 19 deletions(-) create mode 100644 source/libs/scheduler/src/schDbg.c diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h index a72489f338..0d32cce20b 100644 --- a/include/libs/scheduler/scheduler.h +++ b/include/libs/scheduler/scheduler.h @@ -23,6 
+23,8 @@ extern "C" { #include "catalog.h" #include "planner.h" +extern tsem_t schdRspSem; + typedef struct SSchedulerCfg { uint32_t maxJobNum; int32_t maxNodeTableNum; @@ -62,6 +64,11 @@ typedef struct STaskInfo { SSubQueryMsg *msg; } STaskInfo; +typedef struct SSchdFetchParam { + void **pData; + int32_t* code; +} SSchdFetchParam; + typedef void (*schedulerExecCallback)(SQueryResult* pResult, void* param, int32_t code); typedef void (*schedulerFetchCallback)(void* pResult, void* param, int32_t code); @@ -113,23 +120,8 @@ void schedulerFreeJob(int64_t job); void schedulerDestroy(void); -/** - * convert dag to task list - * @param pDag - * @param pTasks SArray** - * @return - */ -int32_t schedulerConvertDagToTaskList(SQueryPlan* pDag, SArray **pTasks); - -/** - * make one task info's multiple copies - * @param src - * @param dst SArray** - * @return - */ -int32_t schedulerCopyTask(STaskInfo *src, SArray **dst, int32_t copyNum); - -void schedulerFreeTaskList(SArray *taskList); +void schdExecCallback(SQueryResult* pResult, void* param, int32_t code); +void schdFetchCallback(void* pResult, void* param, int32_t code); #ifdef __cplusplus diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index f6ba72db52..96adba9cfc 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -289,6 +289,52 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) { pResInfo->precision = precision; } +int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) { + void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter; + + tsem_init(&schdRspSem, 0, 0); + + SQueryResult res = {.code = 0, .numOfRows = 0}; + int32_t code = schedulerAsyncExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr, + pRequest->metric.start, schdExecCallback, &res); + while (true) { + if (code != TSDB_CODE_SUCCESS) { + if (pRequest->body.queryJob != 0) { + schedulerFreeJob(pRequest->body.queryJob); + } + + *pRes = res.res; + + pRequest->code = code; + terrno = code; + return pRequest->code; + } else { + tsem_wait(&schdRspSem); + + if (res.code) { + code = res.code; + } else { + break; + } + } + } + + if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_CREATE_TABLE == pRequest->type) { + pRequest->body.resInfo.numOfRows = res.numOfRows; + + if (pRequest->body.queryJob != 0) { + schedulerFreeJob(pRequest->body.queryJob); + } + } + + *pRes = res.res; + + pRequest->code = res.code; + terrno = res.code; + return pRequest->code; +} + + int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) { void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter; @@ -796,7 +842,58 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) { } } +void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { + assert(pRequest != NULL); + + SReqResultInfo* pResultInfo = &pRequest->body.resInfo; + if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) { + // All data has returned to App already, no need to try again + if (pResultInfo->completed) { + pResultInfo->numOfRows = 0; + return NULL; + } + + tsem_init(&schdRspSem, 0, 0); + + SReqResultInfo* pResInfo = &pRequest->body.resInfo; + SSchdFetchParam param = {.pData = (void**)&pResInfo->pData, .code = &pRequest->code}; + pRequest->code = schedulerAsyncFetchRows(pRequest->body.queryJob, schdFetchCallback, ¶m); + if (pRequest->code != TSDB_CODE_SUCCESS) { + pResultInfo->numOfRows = 0; + return 
NULL;
+    }
+
+    tsem_wait(&schdRspSem);
+    if (pRequest->code != TSDB_CODE_SUCCESS) {
+      pResultInfo->numOfRows = 0;
+      return NULL;
+    }
+
+    pRequest->code = setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4);
+    if (pRequest->code != TSDB_CODE_SUCCESS) {
+      pResultInfo->numOfRows = 0;
+      return NULL;
+    }
+
+    tscDebug("0x%" PRIx64 " fetch results, numOfRows:%d total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
+             pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId);
+
+    if (pResultInfo->numOfRows == 0) {
+      return NULL;
+    }
+  }
+
+  if (setupOneRowPtr) {
+    doSetOneRowPtr(pResultInfo);
+    pResultInfo->current += 1;
+  }
+
+  return pResultInfo->row;
+}
+
+
 void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
+  //return doAsyncFetchRows(pRequest, setupOneRowPtr, convertUcs4);
   assert(pRequest != NULL);
 
   SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 559fe3e85a..a239151809 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -2799,6 +2799,7 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt,
   int32_t code = getDBCfg(pCxt, pStmt->dbName, &dbCfg);
   int32_t num = taosArrayGetSize(dbCfg.pRetensions);
   if (TSDB_CODE_SUCCESS != code || num < 2) {
+    taosArrayDestroy(dbCfg.pRetensions);
     return code;
   }
   for (int32_t i = 1; i < num; ++i) {
diff --git a/source/libs/scheduler/src/schDbg.c b/source/libs/scheduler/src/schDbg.c
new file mode 100644
index 0000000000..4b5f74114d
--- /dev/null
+++ b/source/libs/scheduler/src/schDbg.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/source/libs/scheduler/src/schDbg.c b/source/libs/scheduler/src/schDbg.c
new file mode 100644
index 0000000000..4b5f74114d
--- /dev/null
+++ b/source/libs/scheduler/src/schDbg.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "query.h"
+#include "schedulerInt.h"
+
+tsem_t schdRspSem;
+
+void schdExecCallback(SQueryResult* pResult, void* param, int32_t code) {
+  if (code) {
+    pResult->code = code;
+  }
+
+  *(SQueryResult*)param = *pResult;
+
+  taosMemoryFree(pResult);
+
+  tsem_post(&schdRspSem);
+}
+
+void schdFetchCallback(void* pResult, void* param, int32_t code) {
+  SSchdFetchParam* fParam = (SSchdFetchParam*)param;
+
+  *fParam->pData = pResult;
+  *fParam->code = code;
+
+  tsem_post(&schdRspSem);
+}
+
+
diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c
index d64c944994..af249334b7 100644
--- a/source/libs/scheduler/src/schJob.c
+++ b/source/libs/scheduler/src/schJob.c
@@ -856,7 +856,12 @@ _return:
 
 void schProcessOnDataFetched(SSchJob *job) {
   atomic_val_compare_exchange_32(&job->remoteFetch, 1, 0);
-  tsem_post(&job->rspSem);
+
+  if (job->attr.syncSchedule) {
+    tsem_post(&job->rspSem);
+  } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&job->userCb, SCH_FETCH_CB, 0)) {
+    schNotifyUserFetchRes(job);
+  }
 }
 
 // Note: no more task error processing, handled in function internal
diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c
index 3862ba76f6..cec754bdcd 100644
--- a/source/libs/scheduler/src/schUtil.c
+++ b/source/libs/scheduler/src/schUtil.c
@@ -66,6 +66,7 @@ void schFreeRpcCtxVal(const void *arg) {
 
   SMsgSendInfo *pMsgSendInfo = (SMsgSendInfo *)arg;
   taosMemoryFreeClear(pMsgSendInfo->param);
+  taosMemoryFreeClear(pMsgSendInfo->msgInfo.pData);
   taosMemoryFreeClear(pMsgSendInfo);
 }
 
diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c
index 8d802980ea..3ecc4f4a30 100644
--- a/source/libs/scheduler/src/scheduler.c
+++ b/source/libs/scheduler/src/scheduler.c
@@ -124,7 +124,7 @@ int32_t schedulerAsyncFetchRows(int64_t job, schedulerFetchCallback fp, void* pa
   pJob->userRes.fetchFp = fp;
   pJob->userRes.userParam = param;
 
-  code = schFetchRows(pJob);
+  code = schAsyncFetchRows(pJob);
 
   schReleaseJob(job);
 
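Aside: schProcessOnDataFetched above guards the user fetch callback with a compare-and-swap on job->userCb, so the callback fires at most once even if several threads observe the fetched data. A minimal illustration of that one-shot dispatch with C11 atomics (the names below are hypothetical stand-ins, not scheduler APIs):

// Sketch: CAS-guarded one-shot callback dispatch.
#include <stdatomic.h>
#include <stdio.h>

#define FETCH_CB_PENDING 1  // stands in for SCH_FETCH_CB

static atomic_int user_cb = FETCH_CB_PENDING;

static void notify_user(void) { printf("fetch result delivered\n"); }

// Every observer calls this; only the thread that wins the CAS notifies.
static void on_data_fetched(void) {
  int expected = FETCH_CB_PENDING;
  if (atomic_compare_exchange_strong(&user_cb, &expected, 0)) {
    notify_user();  // runs exactly once, like schNotifyUserFetchRes(job)
  }
}

int main(void) {
  on_data_fetched();  // delivers
  on_data_fetched();  // no-op: the flag was already consumed
  return 0;
}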
From 27af0747d64ddc771bb213b0c07bddcad558700e Mon Sep 17 00:00:00 2001
From: Liu Jicong
Date: Thu, 26 May 2022 14:34:05 +0800
Subject: [PATCH 13/66] feat(sma): using separate vg

---
 source/dnode/mnode/impl/inc/mndDef.h       |  3 ++-
 source/dnode/mnode/impl/src/mndScheduler.c |  6 ++++-
 source/dnode/mnode/impl/src/mndSma.c       | 29 ++++++++++------------
 3 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 97ee6ad162..9af4eced72 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -588,7 +588,8 @@ typedef struct {
   int8_t  status;
   int8_t  createdBy;      // STREAM_CREATED_BY__USER or SMA
   int32_t fixedSinkVgId;  // 0 for shuffle
-  int64_t smaId;          // 0 for unused
+  SVgObj  fixedSinkVg;
+  int64_t smaId;  // 0 for unused
   int8_t  trigger;
   int32_t triggerParam;
   int64_t waterMark;
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 22a5f37334..d42610efa5 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -229,11 +229,14 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr
   taosArrayPush(tasks, &pTask);
 
   pTask->nodeId = pStream->fixedSinkVgId;
+#if 0
   SVgObj* pVgroup = mndAcquireVgroup(pMnode, pStream->fixedSinkVgId);
   if (pVgroup == NULL) {
     return -1;
   }
   pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup);
+#endif
+  pTask->epSet = mndGetVgroupEpset(pMnode, &pStream->fixedSinkVg);
 
   // source
   pTask->sourceType = TASK_SOURCE__MERGE;
   pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
@@ -254,7 +257,8 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr
 
   // dispatch
   pTask->dispatchType = TASK_DISPATCH__NONE;
-  mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pVgroup->vgId);
+  /*mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pVgroup->vgId);*/
+  mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pStream->fixedSinkVg.vgId);
 
   return 0;
 }
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 5c3c4d66a2..7aeac146dd 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -295,9 +295,9 @@ static void *mndBuildVCreateSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSm
 }
 
 static void *mndBuildVDropSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma, int32_t *pContLen) {
-  SEncoder encoder = {0};
-  int32_t  contLen;
-  SName name = {0};
+  SEncoder encoder = {0};
+  int32_t  contLen;
+  SName    name = {0};
   tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
 
   SVDropTSmaReq req = {0};
@@ -482,14 +482,6 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
     memcpy(smaObj.ast, pCreate->ast, smaObj.astLen);
   }
 
-  SVgObj smaVgObj = {0};
-  if (mndAllocSmaVgroup(pMnode, pDb, &smaVgObj) != 0) {
-    mError("sma:%s, failed to create since %s", smaObj.name, terrstr());
-    return -1;
-  }
-
-  smaObj.dstVgId = smaVgObj.vgId;
-
   SStreamObj streamObj = {0};
   tstrncpy(streamObj.name, pCreate->name, TSDB_STREAM_FNAME_LEN);
   tstrncpy(streamObj.sourceDb, pDb->name, TSDB_DB_FNAME_LEN);
@@ -502,7 +494,12 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
   streamObj.createdBy = STREAM_CREATED_BY__SMA;
   streamObj.fixedSinkVgId = smaObj.dstVgId;
   streamObj.smaId = smaObj.uid;
-  /*streamObj.physicalPlan = "";*/
+
+  if (mndAllocSmaVgroup(pMnode, pDb, &streamObj.fixedSinkVg) != 0) {
+    mError("sma:%s, failed to create since %s", smaObj.name, terrstr());
+    return -1;
+  }
+  smaObj.dstVgId = streamObj.fixedSinkVg.vgId;
 
   int32_t code = -1;
   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_SMA, pReq);
@@ -512,11 +509,11 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
   mndTransSetDbInfo(pTrans, pDb);
 
   if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
-  if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &smaVgObj) != 0) goto _OVER;
+  if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
   if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
-  if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &smaVgObj) != 0) goto _OVER;
+  if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
   if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER;
-  if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &smaVgObj) != 0) goto _OVER;
+  if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg) != 0) goto _OVER;
   if (mndAddStreamToTrans(pMnode, &streamObj, pCreate->ast, STREAM_TRIGGER_AT_ONCE, 0, pTrans) != 0) goto _OVER;
   if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
 
@@ -656,7 +653,7 @@ static int32_t mndSetDropSmaCommitLogs(SMnode *pMnode, STrans *pTrans, SSmaObj *
   return 0;
 }
 
-static int32_t mndSetDropSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) {
+static int32_t mndSetDropSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) {
   SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
   if (pVgRaw == NULL) return -1;
   if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;

From f078f5b30016821d400105acb0520dcabb0b9c52 Mon Sep 17 00:00:00 2001
From: Minghao Li
Date: Thu, 26 May 2022 15:08:20 +0800
Subject: [PATCH 14/66] fix(sync): syncSetStandby

---
 include/libs/sync/sync.h                      |  1 +
 source/libs/sync/src/syncMain.c               | 29 +++++++++++++++++++
 .../libs/sync/test/syncConfigChangeTest.cpp   | 14 +++++++--
 3 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index 08640e25c2..2e04afdbdc 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -166,6 +166,7 @@ void     syncCleanUp();
 int64_t  syncOpen(const SSyncInfo* pSyncInfo);
 void     syncStart(int64_t rid);
 void     syncStop(int64_t rid);
+int32_t  syncSetStandby(int64_t rid);
 int32_t  syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg);
 ESyncState  syncGetMyRole(int64_t rid);
 const char* syncGetMyRoleStr(int64_t rid);
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 9e6765e1d5..99aac7991a 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -141,9 +141,38 @@ void syncStop(int64_t rid) {
   taosRemoveRef(tsNodeRefId, rid);
 }
 
+int32_t syncSetStandby(int64_t rid) {
+  SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+  if (pSyncNode == NULL) {
+    return -1;
+  }
+
+  if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
+    taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+    return -1;
+  }
+
+  // state change
+  pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER;
+  syncNodeStopHeartbeatTimer(pSyncNode);
+
+  // reset elect timer, long enough
+  int32_t electMS = TIMER_MAX_MS;
+  int32_t ret = syncNodeRestartElectTimer(pSyncNode, electMS);
+  ASSERT(ret == 0);
+
+  pSyncNode->pRaftCfg->isStandBy = 1;
+  raftCfgPersist(pSyncNode->pRaftCfg);
+
+  taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+  return 0;
+}
+
 int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg) {
   int32_t ret = 0;
   char*   configChange = syncCfg2Str((SSyncCfg*)pSyncCfg);
+
+  sInfo("==syncReconfig== newconfig:%s", configChange);
+
   SRpcMsg rpcMsg = {0};
   rpcMsg.msgType = TDMT_VND_SYNC_CONFIG_CHANGE;
   rpcMsg.info.noResp = 1;
diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp
index 580c4248a3..7efc3f50c0 100644
--- a/source/libs/sync/test/syncConfigChangeTest.cpp
+++ b/source/libs/sync/test/syncConfigChangeTest.cpp
@@ -77,13 +77,23 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) {
 
 void RestoreFinishCb(struct SSyncFSM* pFsm) { sTrace("==callback== ==RestoreFinishCb=="); }
 
+void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
+  sTrace("==callback== ==ReConfigCb== flag:0x%lX, isDrop:%d, index:%ld, code:%d, currentTerm:%lu, term:%lu", cbMeta.flag, cbMeta.isDrop, cbMeta.index, cbMeta.code, cbMeta.currentTerm, cbMeta.term);
+}
+
 SSyncFSM* createFsm() {
   SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM));
   pFsm->FpCommitCb = CommitCb;
   pFsm->FpPreCommitCb = PreCommitCb;
   pFsm->FpRollBackCb = RollBackCb;
+
   pFsm->FpGetSnapshot = GetSnapshotCb;
   pFsm->FpRestoreFinishCb = RestoreFinishCb;
+  pFsm->FpSnapshotApply = NULL;
+  pFsm->FpSnapshotRead = NULL;
+
+  pFsm->FpReConfigCb = ReConfigCb;
+
   return pFsm;
 }
 
@@ -183,7 +193,7 @@ SRpcMsg* createRpcMsg(int i, int count, int myIndex) {
 int main(int argc, char** argv) {
   tsAsyncLog = 0;
-  sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+  sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_INFO;
   if (argc != 7) {
     usage(argv[0]);
     exit(-1);
@@ -229,7 +239,7 @@ int main(int argc, char** argv) {
   assert(pSyncNode != NULL);
 
   if (isConfigChange) {
-    configChange(rid, 3, myIndex);
+    configChange(rid, 2, myIndex);
   }
 
   //---------------------------

From 1af3a5c01764072fa9130b79225f5e7f50510c2c Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Thu, 26 May 2022 15:46:31 +0800
Subject: [PATCH 15/66] feat: add vgroup for tsma

---
 source/dnode/mnode/impl/src/mndSma.c    | 2 +-
 source/dnode/mnode/impl/src/mndTrans.c  | 2 +-
 source/dnode/mnode/impl/src/mndVgroup.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 7aeac146dd..cb7d3e81f6 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -492,7 +492,6 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
   streamObj.version = 1;
   streamObj.sql = pCreate->sql;
   streamObj.createdBy = STREAM_CREATED_BY__SMA;
-  streamObj.fixedSinkVgId = smaObj.dstVgId;
   streamObj.smaId = smaObj.uid;
 
   if (mndAllocSmaVgroup(pMnode, pDb, &streamObj.fixedSinkVg) != 0) {
@@ -500,6 +499,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
     return -1;
   }
   smaObj.dstVgId = streamObj.fixedSinkVg.vgId;
+  streamObj.fixedSinkVgId = smaObj.dstVgId;
 
   int32_t code = -1;
   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_SMA, pReq);
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index 2a500829e5..3d512c6dba 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -835,7 +835,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
       sendRsp = true;
     }
   } else {
-    if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 0) {
+    if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 6) {
       if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
       sendRsp = true;
     }
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index ba1d4508a5..e05b38a7c0 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -724,7 +724,7 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
       pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
       colDataAppendNULL(pColInfo, numOfRows);
 
-      pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
+      pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
       colDataAppendNULL(pColInfo, numOfRows);
 
       pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);

From 051020f9326e6926e12871bbed993283743b52a0 Mon Sep 17 00:00:00 2001
From: wangmm0220
Date: Thu, 26 May 2022 16:05:27 +0800
Subject: [PATCH 16/66] fix: remove table group code in 2.x

---
 include/common/tcommon.h                      |   5 +-
 source/dnode/vnode/inc/vnode.h                |   7 +-
 source/dnode/vnode/src/inc/vnodeInt.h         |   5 +-
 source/dnode/vnode/src/tsdb/tsdbRead.c        | 392 +++++++-----------
 source/dnode/vnode/src/vnd/vnodeQuery.c       |   8 +-
 source/libs/executor/inc/executorimpl.h       |  29 +-
 source/libs/executor/src/executorimpl.c       | 213 ++++------
 source/libs/executor/src/groupoperator.c      |   4 +-
 source/libs/executor/src/scanoperator.c       |  15 +-
 source/libs/executor/src/timewindowoperator.c |  15 +-
 10 files changed, 267 insertions(+), 426 deletions(-)

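Aside: this patch collapses the 2.x STableGroupInfo — an SArray of per-group SArrays plus a separate numOfTables counter that had to be kept in sync — into the flat STableListInfo introduced in tcommon.h below. The sketch that follows only illustrates why the flat shape is simpler for consumers; key_info_t and the literal uids are hypothetical stand-ins, not code from the patch.

// Sketch: nested table groups vs. the flat table list introduced here.
#include <stdio.h>

typedef struct { unsigned long uid; long lastKey; } key_info_t;  // mirrors STableKeyInfo

int main(void) {
  // 2.x shape: groups of tables -> a double loop plus an external count.
  key_info_t  g0[] = {{1001, 0}, {1002, 0}};
  key_info_t  g1[] = {{2001, 0}};
  key_info_t *groups[] = {g0, g1};
  int         sizes[] = {2, 1};
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < sizes[i]; ++j) printf("grouped uid=%lu\n", groups[i][j].uid);

  // 3.0 shape (STableListInfo::pTableList): one array, one loop, size derived from the list.
  key_info_t flat[] = {{1001, 0}, {1002, 0}, {2001, 0}};
  int        n = sizeof(flat) / sizeof(flat[0]);
  for (int i = 0; i < n; ++i) printf("flat uid=%lu\n", flat[i].uid);
  return 0;
}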
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index 0ff13963c0..45745403f3 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -53,10 +53,9 @@ typedef enum EStreamType {
 } EStreamType;
 
 typedef struct {
-  uint32_t  numOfTables;
-  SArray*   pGroupList;
+  SArray*   pTableList;
   SHashObj* map;  // speedup acquire the tableQueryInfo by table uid
-} STableGroupInfo;
+} STableListInfo;
 
 typedef struct SColumnDataAgg {
   int16_t colId;
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index e8de74e639..28c9b8518f 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -99,9 +99,9 @@ typedef void *tsdbReaderT;
 #define BLOCK_LOAD_TABLE_SEQ_ORDER 2
 #define BLOCK_LOAD_TABLE_RR_ORDER  3
 
-tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId,
+tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *tableInfoGroup, uint64_t qId,
                              uint64_t taskId);
-tsdbReaderT  tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId,
+tsdbReaderT  tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *groupList, uint64_t qId,
                                 void *pMemRef);
 int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo);
 bool    isTsdbCacheLastRow(tsdbReaderT *pReader);
@@ -112,9 +112,6 @@ void    tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockI
 int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave);
 SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList);
 void    tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond);
-void    tsdbDestroyTableGroup(STableGroupInfo *pGroupList);
-int32_t tsdbGetOneTableGroup(void *pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo);
-int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo);
 void    tsdbCleanupReadHandle(tsdbReaderT queryHandle);
 
 // tq
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index faf0ddcd4a..d38ff716ab 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -114,11 +114,10 @@ int     tsdbCommit(STsdb* pTsdb);
 int     tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq* pMsg);
 int     tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
 int     tsdbInsertTableData(STsdb* pTsdb, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkRsp* pRsp);
-tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
                              uint64_t taskId);
-tsdbReaderT  tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+tsdbReaderT  tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
                                  void* pMemRef);
-int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo);
 int32_t tsdbSnapshotReaderOpen(STsdb* pTsdb, STsdbSnapshotReader** ppReader, int64_t sver, int64_t ever);
 int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader);
 int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData);
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 28677fd589..c0b97f7536 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -146,8 +146,8 @@ typedef struct STableGroupSupporter {
   SSchema*    pTagSchema;
 } STableGroupSupporter;
 
-static STimeWindow updateLastrowForEachGroup(STableGroupInfo* groupList);
-static int32_t     checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* groupList);
+static STimeWindow updateLastrowForEachGroup(STableListInfo* pList);
+static int32_t     checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pList);
 static int32_t     checkForCachedLast(STsdbReadHandle* pTsdbReadHandle);
 // static int32_t tsdbGetCachedLastRow(STable* pTable, STSRow** pRes, TSKEY* lastKey);
 
@@ -233,41 +233,34 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) {
   return rows;
 }
 
-static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* pGroupList) {
-  size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
-  assert(numOfGroup >= 1);
+static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pTableList) {
+  size_t tableSize = taosArrayGetSize(pTableList->pTableList);
+  assert(tableSize >= 1);
 
   // allocate buffer in order to load data blocks from file
-  SArray* pTableCheckInfo = taosArrayInit(pGroupList->numOfTables, sizeof(STableCheckInfo));
+  SArray* pTableCheckInfo = taosArrayInit(tableSize, sizeof(STableCheckInfo));
   if (pTableCheckInfo == NULL) {
     return NULL;
   }
 
   // todo apply the lastkey of table check to avoid to load header file
-  for (int32_t i = 0; i < numOfGroup; ++i) {
-    SArray* group = *(SArray**)taosArrayGet(pGroupList->pGroupList, i);
+  for (int32_t j = 0; j < tableSize; ++j) {
+    STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(pTableList->pTableList, j);
 
-    size_t gsize = taosArrayGetSize(group);
-    assert(gsize > 0);
-
-    for (int32_t j = 0; j < gsize; ++j) {
-      STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(group, j);
-
-      STableCheckInfo info = {.lastKey = pKeyInfo->lastKey, .tableId = pKeyInfo->uid};
-      if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
-        if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReadHandle->window.skey) {
-          info.lastKey = pTsdbReadHandle->window.skey;
-        }
-
-        assert(info.lastKey >= pTsdbReadHandle->window.skey && info.lastKey <= pTsdbReadHandle->window.ekey);
-      } else {
+    STableCheckInfo info = {.lastKey = pKeyInfo->lastKey, .tableId = pKeyInfo->uid};
+    if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
+      if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReadHandle->window.skey) {
        info.lastKey = pTsdbReadHandle->window.skey;
      }
 
-      taosArrayPush(pTableCheckInfo, &info);
-      tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId,
-                info.lastKey, pTsdbReadHandle->idStr);
+      assert(info.lastKey >= pTsdbReadHandle->window.skey && info.lastKey <= pTsdbReadHandle->window.ekey);
+    } else {
+      info.lastKey = pTsdbReadHandle->window.skey;
    }
+
+    taosArrayPush(pTableCheckInfo, &info);
+    tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId,
+              info.lastKey, pTsdbReadHandle->idStr);
   }
 
   // TODO group table according to the tag value.
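Aside: with the groups gone, createCheckInfoFromTableGroup above walks one flat list and clamps each table's resume key into the query window before the scan starts. A compact sketch of that clamp rule in plain C (hypothetical function name; TSKEY is int64_t in TDengine, and INT64_MIN marks "never read"):

// Sketch: clamp a table's last-read key into the scan window.
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

static TSKEY clamp_last_key(TSKEY lastKey, TSKEY skey, int ascending) {
  if (ascending) {
    // resume no earlier than the window start
    if (lastKey == INT64_MIN || lastKey < skey) return skey;
    return lastKey;
  }
  // descending scans always restart from the window boundary
  return skey;
}

int main(void) {
  printf("%lld\n", (long long)clamp_last_key(INT64_MIN, 1000, 1));  // -> 1000
  printf("%lld\n", (long long)clamp_last_key(1500, 1000, 1));       // -> 1500
  return 0;
}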
@@ -478,7 +471,7 @@ _end:
   return NULL;
 }
 
-tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
                              uint64_t taskId) {
   STsdbReadHandle* pTsdbReadHandle = tsdbQueryTablesImpl(pVnode, pCond, qId, taskId);
   if (pTsdbReadHandle == NULL) {
@@ -490,7 +483,7 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG
   }
 
   // todo apply the lastkey of table check to avoid to load header file
-  pTsdbReadHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pTsdbReadHandle, groupList);
+  pTsdbReadHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pTsdbReadHandle, tableList);
   if (pTsdbReadHandle->pTableCheckInfo == NULL) {
     //    tsdbCleanupReadHandle(pTsdbReadHandle);
     terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -520,8 +513,8 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG
     }
   }
 
-  tsdbDebug("%p total numOfTable:%" PRIzu " in this query, group %" PRIzu " %s", pTsdbReadHandle,
-            taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo), taosArrayGetSize(groupList->pGroupList),
+  tsdbDebug("%p total numOfTable:%" PRIzu " in this query, table %" PRIzu " %s", pTsdbReadHandle,
+            taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo), taosArrayGetSize(tableList->pTableList),
             pTsdbReadHandle->idStr);
 
   return (tsdbReaderT)pTsdbReadHandle;
@@ -565,7 +558,7 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) {
   resetCheckInfo(pTsdbReadHandle);
 }
 
-void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableGroupInfo* groupList) {
+void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList) {
   STsdbReadHandle* pTsdbReadHandle = queryHandle;
 
   pTsdbReadHandle->order = pCond->order;
@@ -607,21 +600,21 @@ void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCon
   // pTsdbReadHandle->next = doFreeColumnInfoData(pTsdbReadHandle->next);
 }
 
-tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
+tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* pList, uint64_t qId,
                              uint64_t taskId) {
-  pCond->twindow = updateLastrowForEachGroup(groupList);
+  pCond->twindow = updateLastrowForEachGroup(pList);
 
   // no qualified table
-  if (groupList->numOfTables == 0) {
+  if (taosArrayGetSize(pList->pTableList) == 0) {
     return NULL;
   }
 
-  STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, groupList, qId, taskId);
+  STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pList, qId, taskId);
   if (pTsdbReadHandle == NULL) {
     return NULL;
   }
 
-  int32_t code = checkForCachedLastRow(pTsdbReadHandle, groupList);
+  int32_t code = checkForCachedLastRow(pTsdbReadHandle, pList);
   if (code != TSDB_CODE_SUCCESS) {  // set the numOfTables to be 0
     terrno = code;
     return NULL;
@@ -667,60 +660,60 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) {
 }
 
 // leave only one table for each group
-static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
-  assert(pGroupList);
-  size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
+//static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
+//  assert(pGroupList);
+//  size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
+//
+//  STableGroupInfo* pNew = taosMemoryCalloc(1, sizeof(STableGroupInfo));
+//  pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);
+//
+//  for (int32_t i = 0; i < numOfGroup; ++i) {
+//    SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
+//    size_t  numOfTables = taosArrayGetSize(oneGroup);
+//
+//    SArray* px = taosArrayInit(4, sizeof(STableKeyInfo));
+//    for (int32_t j = 0; j < numOfTables; ++j) {
+//      STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(oneGroup, j);
+//      //      if (window->skey <= pInfo->lastKey && ((STable*)pInfo->pTable)->lastKey != TSKEY_INITIAL_VAL) {
+//      //        taosArrayPush(px, pInfo);
+//      //        pNew->numOfTables += 1;
+//      //        break;
+//      //      }
+//    }
+//
+//    // there are no data in this group
+//    if (taosArrayGetSize(px) == 0) {
+//      taosArrayDestroy(px);
+//    } else {
+//      taosArrayPush(pNew->pGroupList, &px);
+//    }
+//  }
+//
+//  return pNew;
+//}
 
-  STableGroupInfo* pNew = taosMemoryCalloc(1, sizeof(STableGroupInfo));
-  pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);
-
-  for (int32_t i = 0; i < numOfGroup; ++i) {
-    SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
-    size_t  numOfTables = taosArrayGetSize(oneGroup);
-
-    SArray* px = taosArrayInit(4, sizeof(STableKeyInfo));
-    for (int32_t j = 0; j < numOfTables; ++j) {
-      STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(oneGroup, j);
-      //      if (window->skey <= pInfo->lastKey && ((STable*)pInfo->pTable)->lastKey != TSKEY_INITIAL_VAL) {
-      //        taosArrayPush(px, pInfo);
-      //        pNew->numOfTables += 1;
-      //        break;
-      //      }
-    }
-
-    // there are no data in this group
-    if (taosArrayGetSize(px) == 0) {
-      taosArrayDestroy(px);
-    } else {
-      taosArrayPush(pNew->pGroupList, &px);
-    }
-  }
-
-  return pNew;
-}
-
-tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
-                                          uint64_t qId, uint64_t taskId) {
-  STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
-
-  if (pNew->numOfTables == 0) {
-    tsdbDebug("update query time range to invalidate time window");
-
-    assert(taosArrayGetSize(pNew->pGroupList) == 0);
-    bool asc = ASCENDING_TRAVERSE(pCond->order);
-    if (asc) {
-      pCond->twindow.ekey = pCond->twindow.skey - 1;
-    } else {
-      pCond->twindow.skey = pCond->twindow.ekey - 1;
-    }
-  }
-
-  STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pNew, qId, taskId);
-  pTsdbReadHandle->loadExternalRow = true;
-  pTsdbReadHandle->currentLoadExternalRows = true;
-
-  return pTsdbReadHandle;
-}
+//tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
+//                                          uint64_t qId, uint64_t taskId) {
+//  STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
+//
+//  if (pNew->numOfTables == 0) {
+//    tsdbDebug("update query time range to invalidate time window");
+//
+//    assert(taosArrayGetSize(pNew->pGroupList) == 0);
+//    bool asc = ASCENDING_TRAVERSE(pCond->order);
+//    if (asc) {
+//      pCond->twindow.ekey = pCond->twindow.skey - 1;
+//    } else {
+//      pCond->twindow.skey = pCond->twindow.ekey - 1;
+//    }
+//  }
+//
+//  STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pNew, qId, taskId);
+//  pTsdbReadHandle->loadExternalRow = true;
+//  pTsdbReadHandle->currentLoadExternalRows = true;
+//
+//  return pTsdbReadHandle;
+//}
 
 static bool initTableMemIterator(STsdbReadHandle* pHandle, STableCheckInfo* pCheckInfo) {
   if (pCheckInfo->initBuf) {
@@ -3338,8 +3331,8 @@ bool isTsdbCacheLastRow(tsdbReaderT* pReader) {
   return ((STsdbReadHandle*)pReader)->cachelastrow > TSDB_CACHED_TYPE_NONE;
 }
 
-int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* groupList) {
-  assert(pTsdbReadHandle != NULL && groupList != NULL);
+int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* tableList) {
+  assert(pTsdbReadHandle != NULL && tableList != NULL);
 
   //  TSKEY key = TSKEY_INITIAL_VAL;
   //
@@ -3386,68 +3379,68 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) {
   return code;
 }
 
-STimeWindow updateLastrowForEachGroup(STableGroupInfo* groupList) {
+STimeWindow updateLastrowForEachGroup(STableListInfo* pList) {
   STimeWindow window = {INT64_MAX, INT64_MIN};
 
-  int32_t totalNumOfTable = 0;
-  SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t));
-
-  // NOTE: starts from the buffer in case of descending timestamp order check data blocks
-  size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
-  for (int32_t j = 0; j < numOfGroups; ++j) {
-    SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
-    TSKEY   key = TSKEY_INITIAL_VAL;
-
-    STableKeyInfo keyInfo = {0};
-
-    size_t numOfTables = taosArrayGetSize(pGroup);
-    for (int32_t i = 0; i < numOfTables; ++i) {
-      STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i);
-
-      // if the lastKey equals to INT64_MIN, there is no data in this table
-      TSKEY lastKey = 0;  //((STable*)(pInfo->pTable))->lastKey;
-      if (key < lastKey) {
-        key = lastKey;
-
-        // keyInfo.pTable = pInfo->pTable;
-        keyInfo.lastKey = key;
-        pInfo->lastKey = key;
-
-        if (key < window.skey) {
-          window.skey = key;
-        }
-
-        if (key > window.ekey) {
-          window.ekey = key;
-        }
-      }
-    }
-
-    // more than one table in each group, only one table left for each group
-    //    if (keyInfo.pTable != NULL) {
-    //      totalNumOfTable++;
-    //      if (taosArrayGetSize(pGroup) == 1) {
-    //        // do nothing
-    //      } else {
-    //        taosArrayClear(pGroup);
-    //        taosArrayPush(pGroup, &keyInfo);
-    //      }
-    //    } else {  // mark all the empty groups, and remove it later
-    //      taosArrayDestroy(pGroup);
-    //      taosArrayPush(emptyGroup, &j);
-    //    }
-  }
-
-  // window does not being updated, so set the original
-  if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
-    window = TSWINDOW_INITIALIZER;
-    assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups);
-  }
-
-  taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup));
-  taosArrayDestroy(emptyGroup);
-
-  groupList->numOfTables = totalNumOfTable;
+//  int32_t totalNumOfTable = 0;
+//  SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t));
+//
+//  // NOTE: starts from the buffer in case of descending timestamp order check data blocks
+//  size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
+//  for (int32_t j = 0; j < numOfGroups; ++j) {
+//    SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
+//    TSKEY   key = TSKEY_INITIAL_VAL;
+//
+//    STableKeyInfo keyInfo = {0};
+//
+//    size_t numOfTables = taosArrayGetSize(pGroup);
+//    for (int32_t i = 0; i < numOfTables; ++i) {
+//      STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i);
+//
+//      // if the lastKey equals to INT64_MIN, there is no data in this table
+//      TSKEY lastKey = 0;  //((STable*)(pInfo->pTable))->lastKey;
+//      if (key < lastKey) {
+//        key = lastKey;
+//
+//        // keyInfo.pTable = pInfo->pTable;
+//        keyInfo.lastKey = key;
+//        pInfo->lastKey = key;
+//
+//        if (key < window.skey) {
+//          window.skey = key;
+//        }
+//
+//        if (key > window.ekey) {
+//          window.ekey = key;
+//        }
+//      }
+//    }
+//
+//    //
+//    // more than one table in each group, only one table left for each group
+//    //    if (keyInfo.pTable != NULL) {
+//    //      totalNumOfTable++;
+//    //      if (taosArrayGetSize(pGroup) == 1) {
+//    //        // do nothing
+//    //      } else {
+//    //        taosArrayClear(pGroup);
+//    //        taosArrayPush(pGroup, &keyInfo);
+//    //      }
+//    //    } else {  // mark all the empty groups, and remove it later
+//    //      taosArrayDestroy(pGroup);
+//    //      taosArrayPush(emptyGroup, &j);
+//    //    }
+//  }
+//
+//  // window does not being updated, so set the original
+//  if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
+//    window = TSWINDOW_INITIALIZER;
+//    assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups);
+//  }
+//
+//  taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup));
+//  taosArrayDestroy(emptyGroup);
+//
+//  groupList->numOfTables = totalNumOfTable;
 
   return window;
 }
@@ -3873,81 +3866,6 @@ SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColInd
 //  return TSDB_CODE_SUCCESS;
 //}
 
-int32_t tsdbGetOneTableGroup(void* pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) {
-  SMetaReader mr = {0};
-
-  metaReaderInit(&mr, (SMeta*)pMeta, 0);
-
-  if (metaGetTableEntryByUid(&mr, uid) < 0) {
-    terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
-    goto _error;
-  }
-
-  metaReaderClear(&mr);
-
-  pGroupInfo->numOfTables = 1;
-  pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES);
-
-  SArray* group = taosArrayInit(1, sizeof(STableKeyInfo));
-
-  STableKeyInfo info = {.lastKey = startKey, .uid = uid};
-  taosArrayPush(group, &info);
-
-  taosArrayPush(pGroupInfo->pGroupList, &group);
-  return TSDB_CODE_SUCCESS;
-
-_error:
-  metaReaderClear(&mr);
-  return terrno;
-}
-
-#if 0
-int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) {
-  if (tsdbRLockRepoMeta(tsdb) < 0) {
-    return terrno;
-  }
-
-  assert(pTableIdList != NULL);
-  size_t size = taosArrayGetSize(pTableIdList);
-  pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES);
-  SArray* group = taosArrayInit(1, sizeof(STableKeyInfo));
-
-  for(int32_t i = 0; i < size; ++i) {
-    STableIdInfo *id = taosArrayGet(pTableIdList, i);
-
-    STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), id->uid);
-    if (pTable == NULL) {
-      tsdbWarn("table uid:%"PRIu64", tid:%d has been drop already", id->uid, id->tid);
-      continue;
-    }
-
-    if (pTable->type == TSDB_SUPER_TABLE) {
-      tsdbError("direct query on super tale is not allowed, table uid:%"PRIu64", tid:%d", id->uid, id->tid);
-      terrno = TSDB_CODE_QRY_INVALID_MSG;
-      tsdbUnlockRepoMeta(tsdb);
-      taosArrayDestroy(group);
-      return terrno;
-    }
-
-    STableKeyInfo info = {.pTable = pTable, .lastKey = id->key};
-    taosArrayPush(group, &info);
-  }
-
-  if (tsdbUnlockRepoMeta(tsdb) < 0) {
-    taosArrayDestroy(group);
-    return terrno;
-  }
-
-  pGroupInfo->numOfTables = (uint32_t) taosArrayGetSize(group);
-  if (pGroupInfo->numOfTables > 0) {
-    taosArrayPush(pGroupInfo->pGroupList, &group);
-  } else {
-    taosArrayDestroy(group);
-  }
-
-  return TSDB_CODE_SUCCESS;
-}
-#endif
 
 static void* doFreeColumnInfoData(SArray* pColumnInfoData) {
   if (pColumnInfoData == NULL) {
     return NULL;
@@ -4018,30 +3936,6 @@ void tsdbCleanupReadHandle(tsdbReaderT queryHandle) {
 }
 
 #if 0
-void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
-  assert(pGroupList != NULL);
-
-  size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
-
-  for(int32_t i = 0; i < numOfGroup; ++i) {
-    SArray* p = taosArrayGetP(pGroupList->pGroupList, i);
-
-    size_t numOfTables = taosArrayGetSize(p);
-    for(int32_t j = 0; j < numOfTables; ++j) {
-      STable* pTable = taosArrayGetP(p, j);
-      if (pTable != NULL) { // in case of handling retrieve data from tsdb
-        tsdbUnRefTable(pTable);
-      }
-      //assert(pTable != NULL);
-    }
-
-    taosArrayDestroy(p);
-  }
-
-  taosHashCleanup(pGroupList->map);
-  taosArrayDestroy(pGroupList->pGroupList);
-  pGroupList->numOfTables = 0;
-}
 
 static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) {
   SSkipListIterator* iter = tSkipListCreateIter(pSkipList);
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 3b47b90254..8317834316 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -147,16 +147,10 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId) {
 }
 
 // wrapper of tsdb read interface
-tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId,
+tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo* tableList, uint64_t qId,
                                void *pMemRef) {
 #if 0
   return tsdbQueryCacheLastT(pVnode->pTsdb, pCond, groupList, qId, pMemRef);
 #endif
   return 0;
-}
-int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo) {
-#if 0
-  return tsdbGetTableGroupFromIdListT(pVnode->pTsdb, pTableIdList, pGroupInfo);
-#endif
-  return 0;
 }
\ No newline at end of file
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 53dddd9c22..e4daae4e7c 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -49,7 +49,7 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
 #define Q_STATUS_EQUAL(p, s)  (((p) & (s)) != 0u)
 #define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
 
-#define GET_TABLEGROUP(q, _index) ((SArray*)taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
+//#define GET_TABLEGROUP(q, _index) ((SArray*)taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
 
 #define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0)
 
@@ -153,7 +153,7 @@ typedef struct STaskAttr {
   int32_t numOfFilterCols;
   int64_t* fillVal;
   void*   tsdb;
-  STableGroupInfo tableGroupInfo;  // table list SArray
+//  STableListInfo tableGroupInfo;  // table list
   int32_t vgId;
 } STaskAttr;
 
@@ -193,7 +193,7 @@ typedef struct SExecTaskInfo {
     int32_t tversion;
   } schemaVer;
 
-  STableGroupInfo tableqinfoGroupInfo;  // this is a group array list, including SArray structure
+  STableListInfo  tableqinfoList;  // this is a table list
   char*           sql;             // query sql string
   jmp_buf         env;             // jump to this position when error happens.
   EOPTR_EXEC_MODEL execModel;  // operator execution model [batch model|stream model]
@@ -215,7 +215,7 @@ typedef struct STaskRuntimeEnv {
   STSCursor    cur;
 
   char*           tagVal;  // tag value of current data block
-  STableGroupInfo tableqinfoGroupInfo;  // this is a group array list, including SArray structure
+//  STableGroupInfo tableqinfoGroupInfo;  // this is a table list
   struct SOperatorInfo* proot;
   SGroupResInfo         groupResInfo;
   int64_t               currentOffset;  // dynamic offset value
@@ -344,7 +344,7 @@ typedef struct STagScanInfo {
   SArray      *pColMatchInfo;
   int32_t      curPos;
   SReadHandle  readHandle;
-  STableGroupInfo *pTableGroups;
+  STableListInfo  *pTableList;
 } STagScanInfo;
 
 typedef enum EStreamScanMode {
@@ -707,7 +707,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
 SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                            SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo,
-                                           int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo);
+                                           int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SExecTaskInfo* pTaskInfo);
 SOperatorInfo *createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,
@@ -720,21 +720,19 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* pSysTableReadHandle, SSDataB
                                               SExecTaskInfo* pTaskInfo, bool showRewrite, int32_t accountId);
 SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                           SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
-                                          STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+                                          STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                           SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
-                                          STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+                                          STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
-                                          SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
-                                          STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+                                          STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                             SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp,
                                             SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                        SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition,
-                                       SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo,
-                                       const STableGroupInfo* pTableGroupInfo);
+                                       SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle,
                                             uint64_t uid, SSDataBlock* pResBlock, SArray* pColList,
@@ -748,14 +746,13 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
                                              SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId,
                                              SColumn* pStateKeyCol, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
-                                           SSDataBlock* pResultBlock, SArray* pGroupColList, SExecTaskInfo* pTaskInfo,
-                                           const STableGroupInfo* pTableGroupInfo);
+                                           SSDataBlock* pResultBlock, SArray* pGroupColList, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                            SSDataBlock* pResultBlock, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo,
                                            int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo);
 
-SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableListInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
 
 SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo,
                                                   int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap,
@@ -771,8 +768,6 @@ void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlo
 
 void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput);
 
-STableQueryInfo* createTableQueryInfo(void* buf, STimeWindow win);
-
 bool    isTaskKilled(SExecTaskInfo* pTaskInfo);
 int32_t checkForQueryBuf(size_t numOfTables);
 
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 8c6c6f0f64..f0291e3c5b 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -88,7 +88,7 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
 #endif
 
 #define CLEAR_QUERY_STATUS(q, st)   ((q)->status &= (~(st)))
-#define GET_NUM_OF_TABLEGROUP(q)    taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
+//#define GET_NUM_OF_TABLEGROUP(q)    taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
 #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
 
 int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; }
@@ -1846,12 +1846,6 @@ void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
   }
 }
 
-STableQueryInfo* createTableQueryInfo(void* buf, STimeWindow win) {
-  STableQueryInfo* pTableQueryInfo = buf;
-  pTableQueryInfo->lastKey = win.skey;
-  return pTableQueryInfo;
-}
-
 void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
   if (pTableQueryInfo == NULL) {
     return;
@@ -2431,7 +2425,7 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
   return TSDB_CODE_SUCCESS;
 }
 
-static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo);
+static void doDestroyTableList(STableListInfo* pTableqinfoList);
 
 static void doTableQueryInfoTimeWindowCheck(SExecTaskInfo* pTaskInfo, STableQueryInfo* pTableQueryInfo, int32_t order) {
 #if 0
@@ -3999,35 +3993,30 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) {
   }
 }
 
-static STableQueryInfo* initTableQueryInfo(const STableGroupInfo* pTableGroupInfo) {
-  if (pTableGroupInfo->numOfTables == 0) {
-    return NULL;
-  }
-
-  STableQueryInfo* pTableQueryInfo = taosMemoryCalloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
-  if (pTableQueryInfo == NULL) {
-    return NULL;
-  }
-
-  int32_t index = 0;
-  for (int32_t i = 0; i < taosArrayGetSize(pTableGroupInfo->pGroupList); ++i) {
-    SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i);
-    for (int32_t j = 0; j < taosArrayGetSize(pa); ++j) {
-      STableKeyInfo*   pk = taosArrayGet(pa, j);
-      STableQueryInfo* pTQueryInfo = &pTableQueryInfo[index++];
-      pTQueryInfo->lastKey = pk->lastKey;
-    }
-  }
-
-  STimeWindow win = {0, INT64_MAX};
-  createTableQueryInfo(pTableQueryInfo, win);
-  return pTableQueryInfo;
-}
+//static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) {
+//  int32_t size = taosArrayGetSize(pTableListInfo->pTableList);
+//  if (size == 0) {
+//    return NULL;
+//  }
+//
+//  STableQueryInfo* pTableQueryInfo = taosMemoryCalloc(size, sizeof(STableQueryInfo));
+//  if (pTableQueryInfo == NULL) {
+//    return NULL;
+//  }
+//
+//  for (int32_t j = 0; j < size; ++j) {
+//    STableKeyInfo*   pk = taosArrayGet(pTableListInfo->pTableList, j);
+//    STableQueryInfo* pTQueryInfo = &pTableQueryInfo[j];
+//    pTQueryInfo->lastKey = pk->lastKey;
+//  }
+//
+//  pTableQueryInfo->lastKey = 0;
+//  return pTableQueryInfo;
+//}
 
 SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                            SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo,
-                                           int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo,
-                                           const STableGroupInfo* pTableGroupInfo) {
+                                           int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) {
   SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo));
   SOperatorInfo*    pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
@@ -4040,7 +4029,6 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
   initResultSizeInfo(pOperator, numOfRows);
 
   int32_t code = initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResultBlock, keyBufSize, pTaskInfo->id.str);
-  pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo);
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
   }
@@ -4438,11 +4426,10 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
 }
 
 static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
-                                      STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode);
+                                      STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode);
 
-static int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo,
-                                  uint64_t queryId, uint64_t taskId, SNode* pTagCond);
-static SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo);
+static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond);
+static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo);
 static SArray* extractColumnInfo(SNodeList* pNodeList);
 
 static SArray* createSortInfo(SNodeList* pNodeList);
@@ -4471,14 +4458,24 @@ void extractTableSchemaVersion(SReadHandle* pHandle, uint64_t uid, SExecTaskInfo
 }
 
 SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle,
-                                  uint64_t queryId, uint64_t taskId, STableGroupInfo* pTableGroupInfo, SNode* pTagCond) {
+                                  uint64_t queryId, uint64_t taskId, STableListInfo* pTableListInfo, SNode* pTagCond) {
   int32_t type = nodeType(pPhyNode);
 
   if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
     if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
       STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
 
-      tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId, pTagCond);
+      int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond);
+      if (code != TSDB_CODE_SUCCESS) {
+        return NULL;
+      }
+
+      if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
+        qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId);
+        return NULL;
+      }
+
+      tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
       if (pDataReader == NULL && terrno != 0) {
         return NULL;
       }
@@ -4500,11 +4497,19 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
 
       int32_t numOfCols = 0;
 
+      int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond);
+      if (code != TSDB_CODE_SUCCESS) {
+        return NULL;
+      }
+
+      if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
+        qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId);
+        return NULL;
+      }
+
       tsdbReaderT pDataReader = NULL;
       if (pHandle->vnode) {
-        pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId, pTagCond);
-      } else {
-        doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, queryId, taskId, pTagCond);
+        pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
       }
 
       if (pDataReader == NULL && terrno != 0) {
@@ -4517,7 +4522,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
 
       SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo);
 
-      SArray* tableIdList = extractTableIdList(pTableGroupInfo);
+      SArray* tableIdList = extractTableIdList(pTableListInfo);
 
       SSDataBlock* pResBlock = createResDataBlock(pDescNode);
       SArray*      pCols =
@@ -4550,8 +4555,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
 
       SSDataBlock* pResBlock = createResDataBlock(pDescNode);
 
-      int32_t code = doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo,
-                                        queryId, taskId, pTagCond);
+      int32_t code = getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo, pTagCond);
       if (code != TSDB_CODE_SUCCESS) {
         return NULL;
       }
@@ -4564,7 +4568,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
                                            COL_MATCH_FROM_COL_ID);
 
       SOperatorInfo* pOperator =
-          createTagScanOperatorInfo(pHandle, pExprInfo, num, pResBlock, colList, pTableGroupInfo, pTaskInfo);
+          createTagScanOperatorInfo(pHandle, pExprInfo, num, pResBlock, colList, pTableListInfo, pTaskInfo);
       return pOperator;
     } else {
       ASSERT(0);
@@ -4577,7 +4581,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
 
   SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES);
   for (int32_t i = 0; i < size; ++i) {
     SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i);
-    ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableGroupInfo, pTagCond);
+    ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableListInfo, pTagCond);
     if (ops[i] == NULL) {
       return NULL;
     }
@@ -4606,10 +4610,10 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
     if (pAggNode->pGroupKeys != NULL) {
       SArray* pColList = extractColumnInfo(pAggNode->pGroupKeys);
       pOptr = createGroupOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pAggNode->node.pConditions,
-                                      pScalarExprInfo, numOfScalarExpr, pTaskInfo, NULL);
+                                      pScalarExprInfo, numOfScalarExpr, pTaskInfo);
     } else {
       pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr,
-                                          pTaskInfo, pTableGroupInfo);
+                                          pTaskInfo);
     }
   } else if (QUERY_NODE_PHYSICAL_PLAN_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
     SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@@ -4628,8 +4632,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
         .calTrigger = pIntervalPhyNode->window.triggerType};
 
     int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
-    pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTableGroupInfo,
-                                       pTaskInfo);
+    pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo);
 
   } else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
     SSortPhysiNode* pSortPhyNode = (SSortPhysiNode*)pPhyNode;
@@ -4678,7 +4681,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
     SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
 
     SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &num);
-    pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo, NULL);
+    pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW == type) {
     SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
 
@@ -4892,75 +4895,57 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
   return pList;
 }
 
-int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo,
-                           uint64_t queryId, uint64_t taskId, SNode* pTagCond) {
+int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid,
+                     STableListInfo* pListInfo, SNode* pTagCond) {
   int32_t code = TSDB_CODE_SUCCESS;
 
+  pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
+
   if (tableType == TSDB_SUPER_TABLE) {
-    SArray* res = taosArrayInit(8, sizeof(STableKeyInfo));
-    if(pTagCond){
+    if(pTagCond){
+      SArray* res = taosArrayInit(8, sizeof(uint64_t));
       code = doFilterTag(pTagCond, res);
+      if (code != TSDB_CODE_SUCCESS) {
+        qError("doFilterTag error:%d", code);
+        taosArrayDestroy(res);
+        return code;
+      }
+      for(int i = 0; i < taosArrayGetSize(res); i++){
+        STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
+        taosArrayPush(pListInfo->pTableList, &info);
+      }
+      taosArrayDestroy(res);
    }else{
-      code = tsdbGetAllTableList(metaHandle, tableUid, res);
+      code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
    }
-
-    if (code != TSDB_CODE_SUCCESS) {
-      return code;
-    }
-
-    pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
-    pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES);
-    taosArrayPush(pGroupInfo->pGroupList, &res);
   } else {  // Create one table group.
-    code = tsdbGetOneTableGroup(metaHandle, tableUid, 0, pGroupInfo);
+    STableKeyInfo info = {.lastKey = 0, .uid = tableUid};
+    taosArrayPush(pListInfo->pTableList, &info);
   }
 
   return code;
 }
 
-SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo) {
+SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
   SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t));
 
-  if (pTableGroupInfo->numOfTables > 0) {
-    SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, 0);
-    ASSERT(taosArrayGetSize(pTableGroupInfo->pGroupList) == 1);
-
-    // Transfer the Array of STableKeyInfo into uid list.
-    size_t numOfTables = taosArrayGetSize(pa);
-    for (int32_t i = 0; i < numOfTables; ++i) {
-      STableKeyInfo* pkeyInfo = taosArrayGet(pa, i);
-      taosArrayPush(tableIdList, &pkeyInfo->uid);
-    }
+  // Transfer the Array of STableKeyInfo into uid list.
+  for (int32_t i = 0; i < taosArrayGetSize(pTableGroupInfo->pTableList); ++i) {
+    STableKeyInfo* pkeyInfo = taosArrayGet(pTableGroupInfo->pTableList, i);
+    taosArrayPush(tableIdList, &pkeyInfo->uid);
   }
 
   return tableIdList;
 }
 
 tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
-                               STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode) {
-  uint64_t uid = pTableScanNode->scan.uid;
-  int32_t code =
-      doCreateTableGroup(pHandle->meta, pTableScanNode->scan.tableType, uid, pTableGroupInfo, queryId, taskId, pTagNode);
-  if (code != TSDB_CODE_SUCCESS) {
-    goto _error;
-  }
-
-  if (pTableGroupInfo->numOfTables == 0) {
-    code = 0;
-    qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId);
-    goto _error;
-  }
-
+                               STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode) {
   SQueryTableDataCond cond = {0};
-  code = initQueryTableDataCond(&cond, pTableScanNode);
+  int32_t code = initQueryTableDataCond(&cond, pTableScanNode);
   if (code != TSDB_CODE_SUCCESS) {
-    goto _error;
+    return NULL;
   }
 
-  return tsdbQueryTables(pHandle->vnode, &cond, pTableGroupInfo, queryId, taskId);
-
-_error:
-  terrno = code;
-  return NULL;
+  return tsdbQueryTables(pHandle->vnode, &cond, pTableListInfo, queryId, taskId);
 }
 
 int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
@@ -4975,7 +4960,7 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
   }
 
   (*pTaskInfo)->pRoot =
-      createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoGroupInfo, pPlan->pTagCond);
+      createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond);
   if (NULL == (*pTaskInfo)->pRoot) {
     code = terrno;
     goto _complete;
@@ -5034,34 +5019,18 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) {
   taosMemoryFree(pFilter);
 }
 
-static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo) {
-  if (pTableqinfoGroupInfo->pGroupList != NULL) {
-    int32_t numOfGroups = (int32_t)taosArrayGetSize(pTableqinfoGroupInfo->pGroupList);
-    for (int32_t i = 0; i < numOfGroups; ++i) {
-      SArray* p = taosArrayGetP(pTableqinfoGroupInfo->pGroupList, i);
+static void doDestroyTableList(STableListInfo* pTableqinfoList) {
+  taosArrayDestroy(pTableqinfoList->pTableList);
+  taosHashCleanup(pTableqinfoList->map);
 
-      size_t num = taosArrayGetSize(p);
-      for (int32_t j = 0; j < num; ++j) {
-        STableQueryInfo* item = taosArrayGetP(p, j);
-        destroyTableQueryInfoImpl(item);
-      }
-
-      taosArrayDestroy(p);
-    }
-  }
-
-  taosArrayDestroy(pTableqinfoGroupInfo->pGroupList);
-  taosHashCleanup(pTableqinfoGroupInfo->map);
-
-  pTableqinfoGroupInfo->pGroupList = NULL;
-  pTableqinfoGroupInfo->map = NULL;
-  pTableqinfoGroupInfo->numOfTables = 0;
+  pTableqinfoList->pTableList = NULL;
+  pTableqinfoList->map = NULL;
 }
 
 void doDestroyTask(SExecTaskInfo* pTaskInfo) {
   qDebug("%s execTask is freed", GET_TASKID(pTaskInfo));
 
-  doDestroyTableQueryInfo(&pTaskInfo->tableqinfoGroupInfo);
+  doDestroyTableList(&pTaskInfo->tableqinfoList);
   destroyOperatorInfo(pTaskInfo->pRoot);
   //  taosArrayDestroy(pTaskInfo->summary.queryProfEvents);
   //  taosHashCleanup(pTaskInfo->summary.operatorProfResults);
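Aside: getTableList above is now the single place where the scan target set is built — a tag-filtered uid list for super tables, every child table otherwise, and a single entry for plain tables. A reduced sketch of the uid-to-STableKeyInfo conversion it performs in the filtered branch (plain C stand-ins, with hypothetical names, so the shape is visible without the executor context):

// Sketch: turning a filtered uid array into scan-ready key infos.
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t uid; int64_t lastKey; } key_info_t;  // like STableKeyInfo

#define TSKEY_INITIAL_VAL INT64_MIN

// Mirrors the super-table branch: one key info per uid that passed the tag filter.
static int build_table_list(const uint64_t *uids, int n, key_info_t *out) {
  for (int i = 0; i < n; ++i) {
    out[i].uid = uids[i];
    out[i].lastKey = TSKEY_INITIAL_VAL;  // "nothing read yet"; clamped later by the reader
  }
  return n;
}

int main(void) {
  uint64_t   filtered[] = {3001, 3005, 3007};  // pretend doFilterTag() kept these
  key_info_t list[3];
  int        n = build_table_list(filtered, 3, list);
  for (int i = 0; i < n; ++i) printf("uid=%llu\n", (unsigned long long)list[i].uid);
  return 0;
}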
item = taosArrayGetP(p, j); - destroyTableQueryInfoImpl(item); - } - - taosArrayDestroy(p); - } - } - - taosArrayDestroy(pTableqinfoGroupInfo->pGroupList); - taosHashCleanup(pTableqinfoGroupInfo->map); - - pTableqinfoGroupInfo->pGroupList = NULL; - pTableqinfoGroupInfo->map = NULL; - pTableqinfoGroupInfo->numOfTables = 0; + pTableqinfoList->pTableList = NULL; + pTableqinfoList->map = NULL; } void doDestroyTask(SExecTaskInfo* pTaskInfo) { qDebug("%s execTask is freed", GET_TASKID(pTaskInfo)); - doDestroyTableQueryInfo(&pTaskInfo->tableqinfoGroupInfo); + doDestroyTableList(&pTaskInfo->tableqinfoList); destroyOperatorInfo(pTaskInfo->pRoot); // taosArrayDestroy(pTaskInfo->summary.queryProfEvents); // taosHashCleanup(pTaskInfo->summary.operatorProfResults); diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 2600c17060..dfcd494144 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -346,7 +346,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { } SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList, - SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo) { + SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) { SGroupbyOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SGroupbyOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -616,7 +616,7 @@ static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) { } SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList, - SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo) { + SExecTaskInfo* pTaskInfo) { SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 17238bbd9b..2833c54e2e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1610,20 +1610,19 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { SExprInfo* pExprInfo = &pOperator->pExpr[0]; SSDataBlock* pRes = pInfo->pRes; - if (taosArrayGetSize(pInfo->pTableGroups->pGroupList) == 0) { + int32_t size = taosArrayGetSize(pInfo->pTableList->pTableList); + if (size == 0) { setTaskStatus(pTaskInfo, TASK_COMPLETED); return NULL; } - SArray* pa = taosArrayGetP(pInfo->pTableGroups->pGroupList, 0); - char str[512] = {0}; int32_t count = 0; SMetaReader mr = {0}; metaReaderInit(&mr, pInfo->readHandle.meta, 0); - while (pInfo->curPos < pInfo->pTableGroups->numOfTables && count < pOperator->resultInfo.capacity) { - STableKeyInfo* item = taosArrayGet(pa, pInfo->curPos); + while (pInfo->curPos < size && count < pOperator->resultInfo.capacity) { + STableKeyInfo* item = taosArrayGet(pInfo->pTableList->pTableList, pInfo->curPos); metaGetTableEntryByUid(&mr, item->uid); for (int32_t j = 0; j < pOperator->numOfExprs; ++j) { @@ -1655,7 +1654,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { } count += 1; - if (++pInfo->curPos >= 
pInfo->pTableGroups->numOfTables) { + if (++pInfo->curPos >= size) { doSetOperatorCompleted(pOperator); } } @@ -1680,14 +1679,14 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, - STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) { + STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->pTableGroups = pTableGroupInfo; + pInfo->pTableList = pTableListInfo; pInfo->pColMatchInfo = pColMatchInfo; pInfo->pRes = pResBlock; pInfo->readHandle = *pReadHandle; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 9346dbf54a..9e4db1870b 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1167,8 +1167,7 @@ bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, - SExecTaskInfo* pTaskInfo) { + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -1192,8 +1191,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pInfo->invertible = allInvertible(pInfo->binfo.pCtx, numOfCols); pInfo->invertible = false; // Todo(liuyao): Dependent TSDB API - // pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo); - if (code != TSDB_CODE_SUCCESS /* || pInfo->pTableQueryInfo == NULL*/) { + if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -1228,8 +1226,7 @@ _error: SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, - SExecTaskInfo* pTaskInfo) { + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -1285,8 +1282,7 @@ _error: SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, - SExecTaskInfo* pTaskInfo) { + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -1308,8 +1304,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, 
pTaskInfo->id.str); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); - // pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo); - if (code != TSDB_CODE_SUCCESS /* || pInfo->pTableQueryInfo == NULL*/) { + if (code != TSDB_CODE_SUCCESS) { goto _error; } From 998980079f2da2fc0ec60f247f603beb6f55af35 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 26 May 2022 16:23:29 +0800 Subject: [PATCH 17/66] fix(query): fix sum/avg function floating type overflow --- source/libs/function/src/builtinsimpl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 659c7447d6..e4ef0d1d39 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -503,7 +503,7 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) { } //check for overflow - if (isinf(pSumRes->dsum) || isnan(pSumRes->dsum)) { + if (IS_FLOAT_TYPE(type) && (isinf(pSumRes->dsum) || isnan(pSumRes->dsum))) { GET_RES_INFO(pCtx)->isNullRes = 1; } From f5e3b409ab0b1f0cff28d2000b826d23744261c4 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 16:47:37 +0800 Subject: [PATCH 18/66] refactor: add some log --- source/dnode/mnode/impl/src/mndSync.c | 1 + tests/script/tsim/mnode/basic2.sim | 3 + tests/script/tsim/mnode/basic3.sim | 107 ++++++++++++++++++++++++++ 3 files changed, 111 insertions(+) create mode 100644 tests/script/tsim/mnode/basic3.sim diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index aa391ad0d3..f34ab28cce 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -51,6 +51,7 @@ int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) { void mndRestoreFinish(struct SSyncFSM *pFsm) { SMnode *pMnode = pFsm->data; if (!pMnode->deploy) { + mInfo("mnode sync restore finished"); mndTransPullup(pMnode); pMnode->syncMgmt.restored = true; } diff --git a/tests/script/tsim/mnode/basic2.sim b/tests/script/tsim/mnode/basic2.sim index f1a3a8c251..4be8fe9962 100644 --- a/tests/script/tsim/mnode/basic2.sim +++ b/tests/script/tsim/mnode/basic2.sim @@ -77,9 +77,12 @@ endi # return -1 #endi +sleep 5000 + system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode2 -s stop sleep 100 +return system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode2 -s start diff --git a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim new file mode 100644 index 0000000000..42a702bee7 --- /dev/null +++ b/tests/script/tsim/mnode/basic3.sim @@ -0,0 +1,107 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sql connect + +print =============== step1: create dnodes +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi + +sql show dnodes +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi +if $data(3)[4] != ready then + goto step1 +endi + +print =============== step2: create mnode 2 +sql create mnode on dnode 2 +sql create mnode on dnode 3 + +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi + +sql show mnodes +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] 
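+# each mnode row prints as id / endpoint / role; the checks below expect one LEADER and two FOLLOWERs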
+print $data(3)[0] $data(3)[1] $data(3)[2] + +if $data(1)[2] != LEADER then + goto step2 +endi +if $data(2)[2] != FOLLOWER then + goto step2 +endi +if $data(3)[2] != FOLLOWER then + goto step2 +endi + +print =============== step3: create user +sql create user user1 PASS 'user1' +sql show users +if $rows != 2 then + return -1 +endi + +sleep 10000 + +print =============== step4: stop dnode1 +system sh/exec.sh -n dnode1 -s stop + + +return +system sh/exec.sh -n dnode2 -s stop +sleep 100 +return +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start + +sleep 10000 +sql connect + +sql show mnodes +if $rows != 2 then + return -1 +endi +if $data(1)[0] != 1 then + return -1 +endi +if $data(1)[2] != LEADER then + return -1 +endi + +sql show users +if $rows != 2 then + return -1 +endi + +#sql show databases +#if $rows != 3 then +# return -1 +#endi + +return + +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode2 -s stop \ No newline at end of file From 270771e39ef6685d9157da49065faf017b02e9ea Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 26 May 2022 14:06:06 +0800 Subject: [PATCH 19/66] feat(stream): stream final session operator --- .gitignore | 1 + include/libs/nodes/nodes.h | 1 + source/libs/executor/inc/executorimpl.h | 6 +- source/libs/executor/src/executorimpl.c | 2 +- source/libs/executor/src/scanoperator.c | 23 +- source/libs/executor/src/timewindowoperator.c | 227 +++++++++++++++--- 6 files changed, 205 insertions(+), 55 deletions(-) diff --git a/.gitignore b/.gitignore index f70f5987b2..d5c7f763cf 100644 --- a/.gitignore +++ b/.gitignore @@ -110,3 +110,4 @@ contrib/* !contrib/CMakeLists.txt !contrib/test sql +debug*/ diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 3c5278011a..3860266725 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -214,6 +214,7 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_FILL, QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW, QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW, + QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW, QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW, QUERY_NODE_PHYSICAL_PLAN_PARTITION, QUERY_NODE_PHYSICAL_PLAN_DISPATCH, diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index ec204a8f60..b36d3566bf 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -465,6 +465,7 @@ typedef struct SStreamFinalIntervalOperatorInfo { SAggSupporter aggSup; // aggregate supporter int32_t order; // current SSDataBlock scan order STimeWindowAggSupp twAggSup; + SArray* pChildren; } SStreamFinalIntervalOperatorInfo; typedef struct SAggOperatorInfo { @@ -581,6 +582,7 @@ typedef struct SStreamSessionAggOperatorInfo { SSDataBlock* pDelRes; SHashObj* pStDeleted; void* pDelIterator; + SArray* pChildren; // cache for children's result; } SStreamSessionAggOperatorInfo; typedef struct STimeSliceOperatorInfo { @@ -722,7 +724,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); + STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); SOperatorInfo* 
createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, @@ -797,7 +799,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order); int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); -int32_t initCatchSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, +int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir); int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey); SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 593b79ecc8..c983dd5c2c 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -5162,7 +5162,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo return TSDB_CODE_SUCCESS; } -int32_t initCatchSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, +int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir) { pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY); pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6c9ad4579c..75907d1a0d 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -833,15 +833,6 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { size_t total = taosArrayGetSize(pInfo->pBlockLists); if (pInfo->blockType == STREAM_DATA_TYPE_SSDATA_BLOCK) { - if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) { - SSDataBlock* pDB = getDataFromCatch(pInfo); - if (pDB != NULL) { - return pDB; - } else { - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - } - } - if (pInfo->validBlockIndex >= total) { doClearBufferedBlocks(pInfo); pOperator->status = OP_EXEC_DONE; @@ -849,17 +840,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } int32_t current = pInfo->validBlockIndex++; - SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current); - if (pBlock->info.type == STREAM_REPROCESS) { - pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES; - } else { - int32_t code = catchDatablock(pBlock, &pInfo->childAggSup, pInfo->primaryTsIndex, 0); - if (code != TDB_CODE_SUCCESS) { - pTaskInfo->code = code; - longjmp(pTaskInfo->env, code); - } - } - return pBlock; + return taosArrayGetP(pInfo->pBlockLists, current); } else { if (pInfo->scanMode == STREAM_SCAN_FROM_RES) { blockDataDestroy(pInfo->pUpdateRes); @@ -1023,7 +1004,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR pInfo->interval = pSTInfo->interval; pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; - initCatchSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval", "/tmp/"); // TODO(liuyao) get row size from phy plan + initCacheSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval", "/tmp/"); // TODO(liuyao) get row size from phy plan pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; diff --git a/source/libs/executor/src/timewindowoperator.c 
b/source/libs/executor/src/timewindowoperator.c index 9346dbf54a..6965771c73 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1067,7 +1067,8 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, } static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, - SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock) { + SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, + SArray* pUpWins) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; int32_t step = 0; @@ -1079,6 +1080,9 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); doClearWindow(pSup, pBinfo, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput); + if (pUpWins) { + taosArrayPush(pUpWins, &win); + } } } @@ -1119,7 +1123,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { if (pBlock->info.type == STREAM_REPROCESS) { doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, 0, - pOperator->numOfExprs, pBlock); + pOperator->numOfExprs, pBlock, NULL); qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo)); continue; } @@ -1154,6 +1158,15 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo *)param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); cleanupAggSup(&pInfo->aggSup); + if (pInfo->pChildren) { + int32_t size = taosArrayGetSize(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i); + destroyIntervalOperatorInfo(pChildOp->info, numOfOutput); + taosMemoryFreeClear(pChildOp->info); + taosMemoryFreeClear(pChildOp); + } + } } bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { @@ -1228,32 +1241,38 @@ _error: SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, - SExecTaskInfo* pTaskInfo) { + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->order = TSDB_ORDER_ASC; pInfo->interval = *pInterval; pInfo->twAggSup = *pTwAggSupp; pInfo->primaryTsIndex = primaryTsSlotId; - size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(pOperator, 4096); - int32_t code = initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); - initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); if (code != TSDB_CODE_SUCCESS) { goto _error; } - initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); + int32_t numOfChild = 8;// Todo(liuyao) get it from phy plan + pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo)); + for (int32_t i = 0; i < numOfChild; i++) { + SSDataBlock* chRes = createOneDataBlock(pResBlock, false); + SOperatorInfo* pChildOp = 
createIntervalOperatorInfo(NULL, pExprInfo, numOfCols, + chRes, pInterval, primaryTsSlotId, pTwAggSupp, NULL, pTaskInfo); + if (pChildOp && chRes) { + taosArrayPush(pInfo->pChildren, &pChildOp); + continue; + } + goto _error; + } pOperator->name = "StreamFinalIntervalOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL; @@ -1703,6 +1722,51 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB return pUpdated; } +bool isFinalInterval(SStreamFinalIntervalOperatorInfo* pInfo) { + return pInfo->pChildren != NULL; +} + +void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, + int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { + for (int32_t k = 0; k < numOfOutput; ++k) { + if (fmIsWindowPseudoColumnFunc(pDestCtx[k].functionId)) { + continue; + } + int32_t code = TSDB_CODE_SUCCESS; + if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) { + code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]); + if (code != TSDB_CODE_SUCCESS) { + qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code)); + pTaskInfo->code = code; + longjmp(pTaskInfo->env, code); + } + } + } +} + +static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SArray *pWinArray, + int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { + int32_t size = taosArrayGetSize(pWinArray); + ASSERT(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + STimeWindow* pParentWin = taosArrayGet(pWinArray, i); + SResultRow* pCurResult = NULL; + setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, pParentWin, true, &pCurResult, 0, + pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, + pTaskInfo); + int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren); + for (int32_t j = 0; j < numOfChildren; j++) { + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, j); + SIntervalAggOperatorInfo* pChInfo = pChildOp->info; + SResultRow* pChResult = NULL; + setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, pParentWin, true, &pChResult, + 0, pChInfo->binfo.pCtx, pChildOp->numOfExprs, pChInfo->binfo.rowCellInfoOffset, + &pChInfo->aggSup, pTaskInfo); + compactFunctions(pInfo->binfo.pCtx, pChInfo->binfo.pCtx, numOfOutput, pTaskInfo); + } + } +} + static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info; SOperatorInfo* downstream = pOperator->pDownstream[0]; @@ -1726,10 +1790,26 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pInfo->order, MAIN_SCAN, true); if (pBlock->info.type == STREAM_REPROCESS) { + SArray *pUpWins = taosArrayInit(8, sizeof(STimeWindow)); doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, - pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock); + pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock, pUpWins); + if (isFinalInterval(pInfo)) { + int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); + SIntervalAggOperatorInfo* pChildInfo = pChildOp->info; + doClearWindows(&pChildInfo->aggSup, &pChildInfo->binfo, &pChildInfo->interval, + pChildInfo->primaryTsIndex, pChildOp->numOfExprs, pBlock, NULL); + rebuildIntervalWindow(pInfo, pUpWins, pInfo->binfo.pRes->info.groupId, + pOperator->numOfExprs, pOperator->pTaskInfo); + } + taosArrayDestroy(pUpWins); continue; } + if 
(isFinalInterval(pInfo)) { + int32_t chIndex = 1; //Todo(liuyao) get it from SSDataBlock + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex); + doStreamIntervalAgg(pChildOp); + } pUpdated = doHashInterval(pOperator, pBlock, 0); } @@ -1752,6 +1832,16 @@ void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) { doDestroyBasicInfo(&pInfo->binfo, numOfOutput); destroyStreamAggSupporter(&pInfo->streamAggSup); cleanupGroupResInfo(&pInfo->groupResInfo); + if (pInfo->pChildren != NULL) { + int32_t size = taosArrayGetSize(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + SOperatorInfo *pChild = taosArrayGetP(pInfo->pChildren, i); + SStreamSessionAggOperatorInfo* pChInfo = pChild->info; + destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput); + taosMemoryFreeClear(pChild); + taosMemoryFreeClear(pChInfo); + } + } } int32_t initBiasicInfo(SOptrBasicInfo* pBasicInfo, SExprInfo* pExprInfo, @@ -1780,6 +1870,7 @@ void initDownStream(SOperatorInfo* downstream, SStreamSessionAggOperatorInfo* pI SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { + int32_t code = TSDB_CODE_OUT_OF_MEMORY; SStreamSessionAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamSessionAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); @@ -1789,7 +1880,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, initResultSizeInfo(pOperator, 4096); - int32_t code = initStreamAggSupporter(&pInfo->streamAggSup, "StreamSessionAggOperatorInfo"); + code = initStreamAggSupporter(&pInfo->streamAggSup, "StreamSessionAggOperatorInfo"); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -1820,6 +1911,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, pInfo->pDelIterator = NULL; pInfo->pDelRes = createOneDataBlock(pResBlock, false); blockDataEnsureCapacity(pInfo->pDelRes, 64); + pInfo->pChildren = NULL; pOperator->name = "StreamSessionWindowAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW; @@ -2068,24 +2160,6 @@ int32_t getNumCompactWindow(SArray* pWinInfos, int32_t startIndex, int64_t gap) return size - startIndex - 1; } -void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, - int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { - for (int32_t k = 0; k < numOfOutput; ++k) { - if (fmIsWindowPseudoColumnFunc(pDestCtx[k].functionId)) { - continue; - } - int32_t code = TSDB_CODE_SUCCESS; - if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) { - code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]); - if (code != TSDB_CODE_SUCCESS) { - qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code)); - pTaskInfo->code = code; - longjmp(pTaskInfo->env, code); - } - } - } -} - void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex, int32_t num, int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SHashObj* pStUpdated, SHashObj* pStDeleted) { SResultWindowInfo* pCurWin = taosArrayGet(pInfo->streamAggSup.pResultRows, startIndex); @@ -2164,7 +2238,7 @@ static void doStreamSessionWindowAggImpl(SOperatorInfo* pOperator, } static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SOptrBasicInfo* pBinfo, - SSDataBlock* pBlock, int32_t tsIndex, int32_t numOfOutput, int64_t gap) { + 
SSDataBlock* pBlock, int32_t tsIndex, int32_t numOfOutput, int64_t gap, SArray* result) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; int32_t step = 0; @@ -2173,7 +2247,11 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SOptrBasicInfo* SResultWindowInfo* pCurWin = getSessionTimeWindow(pAggSup->pResultRows, tsCols[i], gap, &winIndex); step = updateSessionWindowInfo(pCurWin, tsCols, pBlock->info.rows, i, gap, NULL); + ASSERT(isInWindow(pCurWin, tsCols[i], gap)); doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pBinfo, numOfOutput); + if (result) { + taosArrayPush(result, pCurWin); + } } } @@ -2215,6 +2293,42 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It } } +static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray *pWinArray, + int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { + int32_t size = taosArrayGetSize(pWinArray); + ASSERT(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + SResultWindowInfo* pParentWin = taosArrayGet(pWinArray, i); + SResultRow* pCurResult = NULL; + setWindowOutputBuf(pParentWin, &pCurResult, pInfo->binfo.pCtx, groupId, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo); + int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren); + for (int32_t j = 0; j < numOfChildren; j++) { + SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, j); + SStreamSessionAggOperatorInfo* pChInfo = pChild->info; + SArray* pChWins = pChInfo->streamAggSup.pResultRows; + int32_t chWinSize = taosArrayGetSize(pChWins); + int32_t index = binarySearch(pChWins, chWinSize, pParentWin->win.skey, + TSDB_ORDER_DESC, getSessionWindowEndkey); + for (int32_t k = index; k > 0 && k < chWinSize; k++) { + SResultWindowInfo* pcw = taosArrayGet(pChWins, k); + if (pParentWin->win.skey <= pcw->win.skey && pcw->win.ekey <= pParentWin->win.ekey) { + SResultRow* pChResult = NULL; + setWindowOutputBuf(pcw, &pChResult, pChInfo->binfo.pCtx, groupId, + numOfOutput, pChInfo->binfo.rowCellInfoOffset, &pChInfo->streamAggSup, pTaskInfo); + compactFunctions(pInfo->binfo.pCtx, pChInfo->binfo.pCtx, numOfOutput, pTaskInfo); + continue; + } + break; + } + } + } +} + +bool isFinalSession(SStreamSessionAggOperatorInfo* pInfo) { + return pInfo->pChildren != NULL; +} + static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -2247,10 +2361,25 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); if (pBlock->info.type == STREAM_REPROCESS) { + SArray *pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); doClearSessionWindows(&pInfo->streamAggSup, &pInfo->binfo, pBlock, 0, - pOperator->numOfExprs, pInfo->gap); + pOperator->numOfExprs, pInfo->gap, pWins); + if (isFinalSession(pInfo)) { + int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); + SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info; + doClearSessionWindows(&pChildInfo->streamAggSup, &pChildInfo->binfo, pBlock, 0, + pChildOp->numOfExprs, pChildInfo->gap, NULL); + rebuildTimeWindow(pInfo, pWins, pInfo->binfo.pRes->info.groupId, pOperator->numOfExprs, pOperator->pTaskInfo); + } + taosArrayDestroy(pWins); continue; } + if 
(isFinalSession(pInfo)) {
+      int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock
+      SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
+      doStreamSessionWindowAggImpl(pChildOp, pBlock, NULL, NULL);
+    }
     doStreamSessionWindowAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted);
   }
 
@@ -2271,3 +2400,39 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) {
                                pInfo->streamAggSup.pResultBuf);
   return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
 }
+
+SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream,
+    SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap,
+    int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
+  int32_t code = TSDB_CODE_OUT_OF_MEMORY;
+  SStreamSessionAggOperatorInfo* pInfo = NULL;
+  SOperatorInfo* pOperator = createStreamSessionAggOperatorInfo(downstream, pExprInfo,
+      numOfCols, pResBlock, gap, tsSlotId, pTwAggSupp, pTaskInfo);
+  if (pOperator == NULL) {
+    goto _error;
+  }
+  pOperator->name = "StreamFinalSessionWindowAggOperator";
+  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW;
+  int32_t numOfChild = 1; //Todo(liuyao) get it from phy plan
+  pInfo = pOperator->info;
+  pInfo->pChildren = taosArrayInit(8, sizeof(void *));
+  for (int32_t i = 0; i < numOfChild; i++) {
+    SOperatorInfo* pChild = createStreamSessionAggOperatorInfo(NULL, pExprInfo,
+        numOfCols, NULL, gap, tsSlotId, pTwAggSupp, pTaskInfo);
+    if (pChild == NULL) {
+      goto _error;
+    }
+    taosArrayPush(pInfo->pChildren, &pChild);
+  }
+  return pOperator;
+
+_error:
+  if (pInfo != NULL) {
+    destroyStreamSessionAggOperatorInfo(pInfo, numOfCols);
+  }
+
+  taosMemoryFreeClear(pInfo);
+  taosMemoryFreeClear(pOperator);
+  pTaskInfo->code = code;
+  return NULL;
+}

From e99cb305ffbf34b89c4ac9cfd2cd07ac9352f572 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Thu, 26 May 2022 16:58:57 +0800
Subject: [PATCH 20/66] fix: add retry

---
 source/libs/transport/src/transCli.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 159b0cdd07..395938c9d1 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -926,11 +926,11 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
    */
   tmsg_t msgType = pCtx->msgType;
   if ((pTransInst->retry != NULL && (pTransInst->retry(pResp->code))) ||
-      ((pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) && msgType == TDMT_MND_CONNECT)) {
+      (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL)) {
     pMsg->sent = 0;
     pMsg->st = taosGetTimestampUs();
     pCtx->retryCount += 1;
-    if (msgType == TDMT_MND_CONNECT && pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+    if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
       if (pCtx->retryCount < pEpSet->numOfEps) {
         pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
 

From 14c95a0b31901d6508217226a35f20ef77b151b4 Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Thu, 26 May 2022 17:12:07 +0800
Subject: [PATCH 21/66] fix: return correct mnode epset

---
 source/dnode/mnode/impl/src/mndMnode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index afdc27a96a..82e6256295 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -233,7 +233,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
     if (pObj->pDnode == NULL) {
       mError("mnode:%d, 
no corresponding dnode exists", pObj->id); } else { - if (pObj->state == TAOS_SYNC_STATE_LEADER) { + if (pObj->id == pMnode->selfDnodeId || pObj->state == TAOS_SYNC_STATE_LEADER) { pEpSet->inUse = pEpSet->numOfEps; } addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port); From ed6eae4d5894295ecded05b828c6fd729b0a8e13 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 26 May 2022 17:13:13 +0800 Subject: [PATCH 22/66] fix(query): fix elapsed function time_unit cannot be smaller than db precision TD-16018 --- source/libs/function/src/builtins.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 8914dca639..f5fa7b8855 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -318,6 +318,11 @@ static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len if (!IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + + if (pValue->datum.i == 0) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "ELAPSED function time unit parameter should be greater than db precision"); + } } pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; From b137be98982ff222f5fb775c2375585358b23425 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 26 May 2022 17:17:56 +0800 Subject: [PATCH 23/66] fix(query): prepare enough buffer before convert string. --- source/libs/scalar/src/sclfunc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 12496eec55..0d47595b3e 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -707,6 +707,7 @@ int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int16_t inputType = GET_PARAM_TYPE(&pInput[0]); + int16_t inputLen = GET_PARAM_BYTES(&pInput[0]); int16_t outputType = GET_PARAM_TYPE(&pOutput[0]); int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]); @@ -718,15 +719,15 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp colDataAppendNULL(pOutput->columnData, i); continue; } + char *input = colDataGetData(pInput[0].columnData, i); switch(outputType) { case TSDB_DATA_TYPE_BIGINT: { if (inputType == TSDB_DATA_TYPE_BINARY) { - memcpy(output, varDataVal(input), varDataLen(input)); - *(int64_t *)output = taosStr2Int64(output, NULL, 10); + *(int64_t *)output = taosStr2Int64(varDataVal(input), NULL, 10); } else if (inputType == TSDB_DATA_TYPE_NCHAR) { - char *newBuf = taosMemoryCalloc(1, outputLen * TSDB_NCHAR_SIZE + 1); + char *newBuf = taosMemoryCalloc(1, inputLen); int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(input), varDataLen(input), newBuf); if (len < 0) { taosMemoryFree(newBuf); @@ -742,10 +743,9 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp } case TSDB_DATA_TYPE_UBIGINT: { if (inputType == TSDB_DATA_TYPE_BINARY) { - memcpy(output, varDataVal(input), varDataLen(input)); - *(uint64_t *)output = taosStr2UInt64(output, NULL, 10); + *(uint64_t *)output = taosStr2UInt64(varDataVal(input), NULL, 10); } else if (inputType == TSDB_DATA_TYPE_NCHAR) { - char *newBuf = taosMemoryCalloc(1, outputLen * TSDB_NCHAR_SIZE + 1); + char *newBuf = taosMemoryCalloc(1, inputLen); int32_t len = taosUcs4ToMbs((TdUcs4 
*)varDataVal(input), varDataLen(input), newBuf); if (len < 0) { taosMemoryFree(newBuf); From 48bb336dd67f569149edef88d8d10932dff05068 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 26 May 2022 17:18:25 +0800 Subject: [PATCH 24/66] fix(query): fix memory leak. --- source/dnode/mnode/impl/src/mndShow.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index def6c06896..6b70825ed4 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -257,6 +257,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { terrno = rowsRead; mDebug("show:0x%" PRIx64 ", retrieve completed", pShow->id); mndReleaseShowObj(pShow, true); + blockDataDestroy(pBlock); return -1; } From a2a76740212338764decee939313a76ee2e917ff Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 26 May 2022 17:18:56 +0800 Subject: [PATCH 25/66] fix(query): do some internal refactor. --- source/libs/executor/inc/executorimpl.h | 8 +++----- source/libs/executor/src/executorMain.c | 14 +++++++------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index ec204a8f60..1b26d666df 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -94,10 +94,8 @@ typedef struct SLimit { typedef struct STableScanAnalyzeInfo SFileBlockLoadRecorder; typedef struct STaskCostInfo { - int64_t created; - int64_t start; - int64_t end; - + int64_t created; + int64_t start; uint64_t loadStatisTime; uint64_t loadFileBlockTime; uint64_t loadDataInCacheTime; @@ -185,7 +183,7 @@ typedef struct SExecTaskInfo { STaskCostInfo cost; int64_t owner; // if it is in execution int32_t code; - uint64_t totalRows; // total number of rows +// uint64_t totalRows; // total number of rows struct { char *tablename; char *dbname; diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c index 6689def7a7..7757825733 100644 --- a/source/libs/executor/src/executorMain.c +++ b/source/libs/executor/src/executorMain.c @@ -126,8 +126,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds) { if (ret != TSDB_CODE_SUCCESS) { pTaskInfo->code = ret; cleanUpUdfs(); - qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), - tstrerror(pTaskInfo->code)); + qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code)); return pTaskInfo->code; } @@ -142,12 +141,13 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds) { *useconds = pTaskInfo->cost.elapsedTime; } - int32_t current = (*pRes != NULL)? (*pRes)->info.rows:0; - pTaskInfo->totalRows += current; - cleanUpUdfs(); + + int32_t current = (*pRes != NULL)? 
(*pRes)->info.rows:0;
+  uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
+
   qDebug("%s task suspended, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
-         GET_TASKID(pTaskInfo), current, pTaskInfo->totalRows, 0, el/1000.0);
+         GET_TASKID(pTaskInfo), current, total, 0, el/1000.0);
 
   atomic_store_64(&pTaskInfo->owner, 0);
   return pTaskInfo->code;
@@ -197,7 +197,7 @@ int32_t qIsTaskCompleted(qTaskInfo_t qinfo) {
 
 void qDestroyTask(qTaskInfo_t qTaskHandle) {
   SExecTaskInfo* pTaskInfo = (SExecTaskInfo*) qTaskHandle;
-  qDebug("%s execTask completed, numOfRows:%"PRId64, GET_TASKID(pTaskInfo), pTaskInfo->totalRows);
+  qDebug("%s execTask completed, numOfRows:%"PRId64, GET_TASKID(pTaskInfo), pTaskInfo->pRoot->resultInfo.totalRows);
 
   queryCostStatis(pTaskInfo);   // print the query cost summary
   doDestroyTask(pTaskInfo);

From 510b5fce63c7dc3bb6105283983dcedd2cc4e036 Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Thu, 26 May 2022 17:24:32 +0800
Subject: [PATCH 26/66] refactor: use tagver and colver instead of sversion

---
 include/common/tmsg.h                    |  3 ++-
 source/common/src/tmsg.c                 |  6 ++++--
 source/dnode/mnode/impl/inc/mndDef.h     |  1 -
 source/dnode/mnode/impl/src/mndStb.c     | 22 ++++++----------------
 source/dnode/mnode/impl/test/stb/stb.cpp |  3 ++-
 tests/test/c/sdbDump.c                   |  1 -
 6 files changed, 14 insertions(+), 22 deletions(-)

diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 88c66df52e..d27e274eba 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -455,7 +455,8 @@ int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
 typedef struct {
   char    name[TSDB_TABLE_FNAME_LEN];
   int8_t  alterType;
-  int32_t verInBlock;
+  int32_t tagVer;
+  int32_t colVer;
   int32_t numOfFields;
   SArray* pFields;
   int32_t ttl;
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 23efb8ef9d..6066a5fbfb 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -600,7 +600,8 @@ int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq)
   if (tStartEncode(&encoder) < 0) return -1;
   if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
   if (tEncodeI8(&encoder, pReq->alterType) < 0) return -1;
-  if (tEncodeI32(&encoder, pReq->verInBlock) < 0) return -1;
+  if (tEncodeI32(&encoder, pReq->tagVer) < 0) return -1;
+  if (tEncodeI32(&encoder, pReq->colVer) < 0) return -1;
   if (tEncodeI32(&encoder, pReq->numOfFields) < 0) return -1;
   for (int32_t i = 0; i < pReq->numOfFields; ++i) {
     SField *pField = taosArrayGet(pReq->pFields, i);
@@ -627,7 +628,8 @@ int32_t tDeserializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq
   if (tStartDecode(&decoder) < 0) return -1;
   if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
   if (tDecodeI8(&decoder, &pReq->alterType) < 0) return -1;
-  if (tDecodeI32(&decoder, &pReq->verInBlock) < 0) return -1;
+  if (tDecodeI32(&decoder, &pReq->tagVer) < 0) return -1;
+  if (tDecodeI32(&decoder, &pReq->colVer) < 0) return -1;
   if (tDecodeI32(&decoder, &pReq->numOfFields) < 0) return -1;
   pReq->pFields = taosArrayInit(pReq->numOfFields, sizeof(SField));
   if (pReq->pFields == NULL) {
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 26cfaa62ff..8ad1131bc3 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -366,7 +366,6 @@ typedef struct {
   int64_t updateTime;
   int64_t uid;
   int64_t dbUid;
-  int32_t version;
   int32_t tagVer;
   int32_t colVer;
   int32_t nextColId;
diff --git a/source/dnode/mnode/impl/src/mndStb.c 
b/source/dnode/mnode/impl/src/mndStb.c index a9408f8bab..6bb1097e85 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -87,7 +87,6 @@ SSdbRaw *mndStbActionEncode(SStbObj *pStb) { SDB_SET_INT64(pRaw, dataPos, pStb->updateTime, _OVER) SDB_SET_INT64(pRaw, dataPos, pStb->uid, _OVER) SDB_SET_INT64(pRaw, dataPos, pStb->dbUid, _OVER) - SDB_SET_INT32(pRaw, dataPos, pStb->version, _OVER) SDB_SET_INT32(pRaw, dataPos, pStb->tagVer, _OVER) SDB_SET_INT32(pRaw, dataPos, pStb->colVer, _OVER) SDB_SET_INT32(pRaw, dataPos, pStb->nextColId, _OVER) @@ -167,7 +166,6 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw) { SDB_GET_INT64(pRaw, dataPos, &pStb->updateTime, _OVER) SDB_GET_INT64(pRaw, dataPos, &pStb->uid, _OVER) SDB_GET_INT64(pRaw, dataPos, &pStb->dbUid, _OVER) - SDB_GET_INT32(pRaw, dataPos, &pStb->version, _OVER) SDB_GET_INT32(pRaw, dataPos, &pStb->tagVer, _OVER) SDB_GET_INT32(pRaw, dataPos, &pStb->colVer, _OVER) SDB_GET_INT32(pRaw, dataPos, &pStb->nextColId, _OVER) @@ -320,7 +318,6 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) { } pOld->updateTime = pNew->updateTime; - pOld->version = pNew->version; pOld->tagVer = pNew->tagVer; pOld->colVer = pNew->colVer; pOld->nextColId = pNew->nextColId; @@ -390,7 +387,7 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt req.rollup = pStb->ast1Len > 0 ? 1 : 0; // todo req.schemaRow.nCols = pStb->numOfColumns; - req.schemaRow.version = pStb->version; + req.schemaRow.version = pStb->colVer; req.schemaRow.pSchema = pStb->pColumns; req.schemaTag.nCols = pStb->numOfTags; req.schemaTag.version = pStb->tagVer; @@ -665,7 +662,6 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat pDst->updateTime = pDst->createdTime; pDst->uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN); pDst->dbUid = pDb->uid; - pDst->version = 1; pDst->tagVer = 1; pDst->colVer = 1; pDst->nextColId = 1; @@ -957,7 +953,6 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p mDebug("stb:%s, start to add tag %s", pNew->name, pSchema->name); } - pNew->version++; pNew->tagVer++; return 0; } @@ -976,7 +971,6 @@ static int32_t mndDropSuperTableTag(const SStbObj *pOld, SStbObj *pNew, const ch memmove(pNew->pTags + tag, pNew->pTags + tag + 1, sizeof(SSchema) * (pNew->numOfTags - tag - 1)); pNew->numOfTags--; - pNew->version++; pNew->tagVer++; mDebug("stb:%s, start to drop tag %s", pNew->name, tagName); return 0; @@ -1017,7 +1011,6 @@ static int32_t mndAlterStbTagName(const SStbObj *pOld, SStbObj *pNew, SArray *pF SSchema *pSchema = (SSchema *)(pNew->pTags + tag); memcpy(pSchema->name, newTagName, TSDB_COL_NAME_LEN); - pNew->version++; pNew->tagVer++; mDebug("stb:%s, start to modify tag %s to %s", pNew->name, oldTagName, newTagName); return 0; @@ -1047,7 +1040,6 @@ static int32_t mndAlterStbTagBytes(const SStbObj *pOld, SStbObj *pNew, const SFi } pTag->bytes = pField->bytes; - pNew->version++; pNew->tagVer++; mDebug("stb:%s, start to modify tag len %s to %d", pNew->name, pField->name, pField->bytes); @@ -1087,7 +1079,6 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray mDebug("stb:%s, start to add column %s", pNew->name, pSchema->name); } - pNew->version++; pNew->colVer++; return 0; } @@ -1116,7 +1107,6 @@ static int32_t mndDropSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, const memmove(pNew->pColumns + col, pNew->pColumns + col + 1, sizeof(SSchema) * (pNew->numOfColumns - col - 1)); 
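+  // close the gap left by the dropped column; after this refactor only colVer is bumped, the shared version counter is gone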
pNew->numOfColumns--; - pNew->version++; pNew->colVer++; mDebug("stb:%s, start to drop col %s", pNew->name, colName); return 0; @@ -1155,7 +1145,6 @@ static int32_t mndAlterStbColumnBytes(const SStbObj *pOld, SStbObj *pNew, const } pCol->bytes = pField->bytes; - pNew->version++; pNew->colVer++; mDebug("stb:%s, start to modify col len %s to %d", pNew->name, pField->name, pField->bytes); @@ -1316,9 +1305,10 @@ static int32_t mndProcessMAlterStbReq(SRpcMsg *pReq) { goto _OVER; } - if (alterReq.verInBlock > 0 && alterReq.verInBlock <= pStb->version) { - mDebug("stb:%s, already exist, verInBlock:%d smaller than verInStb:%d, alter success", alterReq.name, - alterReq.verInBlock, pStb->version); + if ((alterReq.tagVer > 0 && alterReq.colVer > 0) && + (alterReq.tagVer <= pStb->tagVer || alterReq.colVer <= pStb->colVer)) { + mDebug("stb:%s, already exist, tagVer:%d colVer:%d smaller than in mnode, tagVer:%d colVer:%d, alter success", + alterReq.name, alterReq.tagVer, alterReq.colVer, pStb->tagVer, pStb->colVer); code = 0; goto _OVER; } @@ -1512,7 +1502,7 @@ static int32_t mndBuildStbSchemaImp(SDbObj *pDb, SStbObj *pStb, const char *tbNa pRsp->numOfColumns = pStb->numOfColumns; pRsp->precision = pDb->cfg.precision; pRsp->tableType = TSDB_SUPER_TABLE; - pRsp->sversion = pStb->version; + pRsp->sversion = pStb->colVer; pRsp->suid = pStb->uid; pRsp->tuid = pStb->uid; diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp b/source/dnode/mnode/impl/test/stb/stb.cpp index 56f1b8240d..1d98199103 100644 --- a/source/dnode/mnode/impl/test/stb/stb.cpp +++ b/source/dnode/mnode/impl/test/stb/stb.cpp @@ -277,7 +277,8 @@ void* MndTestStb::BuildAlterStbUpdateColumnBytesReq(const char* stbname, const c req.numOfFields = 1; req.pFields = taosArrayInit(1, sizeof(SField)); req.alterType = TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES; - req.verInBlock = verInBlock; + req.tagVer = verInBlock; + req.colVer = verInBlock; SField field = {0}; field.bytes = bytes; diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c index d7f50a2fae..5641587c56 100644 --- a/tests/test/c/sdbDump.c +++ b/tests/test/c/sdbDump.c @@ -110,7 +110,6 @@ void dumpStb(SSdb *pSdb, SJson *json) { tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime)); tjsonAddStringToObject(item, "uid", i642str(pObj->uid)); tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid)); - tjsonAddIntegerToObject(item, "version", pObj->version); tjsonAddIntegerToObject(item, "tagVer", pObj->tagVer); tjsonAddIntegerToObject(item, "colVer", pObj->colVer); tjsonAddIntegerToObject(item, "nextColId", pObj->nextColId); From f08365e2b00d02ac8135d3124f1f5ea1fab75e96 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 26 May 2022 17:30:00 +0800 Subject: [PATCH 27/66] fix(query): fix cast function error msg TD-16016 --- source/libs/function/src/builtins.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index f5fa7b8855..64b4c48a35 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -808,9 +808,14 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { (para2Type == TSDB_DATA_TYPE_BINARY && para1Type == TSDB_DATA_TYPE_NCHAR)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + int32_t para2Bytes = pFunc->node.resType.bytes; + if (IS_VAR_DATA_TYPE(para2Type)) { + para2Bytes -= VARSTR_HEADER_SIZE; + } if (para2Bytes <= 0 || para2Bytes > 1000) { // cast dst var type length limits to 1000 
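+    // para2Bytes is the usable payload width here: VARSTR_HEADER_SIZE was already subtracted above for var-length targets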
- return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "CAST function converted length should be in range [0, 1000]"); } return TSDB_CODE_SUCCESS; } From e87ba7d526644596ebb8796c61aab63895d13535 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 26 May 2022 17:46:52 +0800 Subject: [PATCH 28/66] fix(stream): function error --- source/libs/executor/src/executorimpl.c | 2 +- source/libs/executor/src/scanoperator.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index fbcb473636..a4a4b2b506 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -5162,7 +5162,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo return TSDB_CODE_SUCCESS; } -int32_t initCatchSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir) { +int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir) { pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY); pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 998d5eb02d..ceece4f9b5 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1000,7 +1000,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR pInfo->interval = pSTInfo->interval; pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; - initCatchSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval", + initCacheSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval", "/tmp/"); // TODO(liuyao) get row size from phy plan pOperator->name = "StreamBlockScanOperator"; From 2ced335b04c2a75da8ddfb539e90e3e6c21798bc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 26 May 2022 17:48:07 +0800 Subject: [PATCH 29/66] fix(query): set the proper buffer page size. 
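
The page size of the query result buffer must scale with the width of a
result row: a page has to hold at least four rows, and the in-memory
buffer at least four pages, otherwise wide rows (for example a partition
result block with a large rowSize) cannot be paged in at all. A condensed
restatement of the new getBufferPgSize() helper added below, as a minimal
sketch (the standalone framing and wrapper name are illustrative only):

    // sketch: derive page/buffer sizes from the widest result row
    static void deriveBufSizes(int32_t rowSize, uint32_t* pgsz, uint32_t* bufsz) {
      *pgsz = 4096;                  // start from the 4KB default page
      while (*pgsz < rowSize * 4) {  // a page must hold at least 4 rows
        *pgsz <<= 1u;                // keep the page size a power of two
      }
      *bufsz = 4096 * 256;           // default 1MB in-memory buffer
      if (*bufsz <= *pgsz) {
        *bufsz = (*pgsz) * 4;        // keep at least 4 pages resident
      }
    }

With rowSize = 3000 this yields a 16384-byte page and leaves the 1MB
buffer unchanged; only rows wider than about 128KB enlarge the buffer.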
--- source/common/src/tdatablock.c | 2 +- source/libs/executor/inc/executorimpl.h | 1 + source/libs/executor/src/executorimpl.c | 30 +++++++++++++++--------- source/libs/executor/src/groupoperator.c | 17 +++++++------- 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5216922643..2d77905d86 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1246,7 +1246,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) { } int32_t newRows = (payloadSize - additional) / rowSize; - ASSERT(newRows <= nRows && newRows > 1); + ASSERT(newRows <= nRows && newRows >= 1); return newRows; } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 1b26d666df..1c9a49b679 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -672,6 +672,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI SArray* pColList); void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win); int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag); +int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz); void doSetOperatorCompleted(SOperatorInfo* pOperator); void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, SArray* pColMatchInfo); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 593b79ecc8..07e774c7c4 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3938,6 +3938,21 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) { taosMemoryFreeClear(pOperator); } +int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) { + *defaultPgsz = 4096; + while (*defaultPgsz < rowSize * 4) { + *defaultPgsz <<= 1u; + } + + // at least four pages need to be in buffer + *defaultBufsz = 4096 * 256; + if ((*defaultBufsz) <= (*defaultPgsz)) { + (*defaultBufsz) = (*defaultPgsz) * 4; + } + + return 0; +} + int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey) { _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); @@ -3950,18 +3965,11 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n return TSDB_CODE_OUT_OF_MEMORY; } - uint32_t defaultPgsz = 4096; - while (defaultPgsz < pAggSup->resultRowSize * 4) { - defaultPgsz <<= 1u; - } + uint32_t defaultPgsz = 0; + uint32_t defaultBufsz = 0; + getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); - // at least four pages need to be in buffer - int32_t defaultBufsz = 4096 * 256; - if (defaultBufsz <= defaultPgsz) { - defaultBufsz = defaultPgsz * 4; - } - - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); + int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 2600c17060..3ab296f62e 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -439,7 +439,6 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) { memcpy(data + (*columnLen), src, 
varDataTLen(src)); int32_t v = (data + (*columnLen) + varDataTLen(src) - (char*)pPage); ASSERT(v > 0); - printf("len:%d\n", v); contentLen = varDataTLen(src); } @@ -490,16 +489,13 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf int32_t *rows = (int32_t*) pPage; if (*rows >= pInfo->rowCapacity) { + // release buffer + releaseBufPage(pInfo->pBuf, pPage); + // add a new page for current group int32_t pageId = 0; pPage = getNewBufPage(pInfo->pBuf, 0, &pageId); taosArrayPush(p->pPageList, &pageId); - -// // number of rows -// *(int32_t*) pPage = 0; -// -// uint64_t* groupId = (pPage + sizeof(int32_t)); -// *groupId = 0; memset(pPage, 0, getBufPageSize(pInfo->pBuf)); } } @@ -566,6 +562,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) { blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity); pInfo->pageIndex += 1; + releaseBufPage(pInfo->pBuf, page); blockDataUpdateTsWindow(pInfo->binfo.pRes, 0); pInfo->binfo.pRes->info.groupId = pGroupInfo->groupId; @@ -631,7 +628,11 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* goto _error; } - int32_t code = createDiskbasedBuf(&pInfo->pBuf, 4096, 4096 * 256, pTaskInfo->id.str, TD_TMP_DIR_PATH); + uint32_t defaultPgsz = 0; + uint32_t defaultBufsz = 0; + getBufferPgSize(pResultBlock->info.rowSize, &defaultPgsz, &defaultBufsz); + + int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, TD_TMP_DIR_PATH); if (code != TSDB_CODE_SUCCESS) { goto _error; } From b7729cf6e31ca14f9c38a1907f6b745756ace5fe Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Thu, 26 May 2022 17:58:46 +0800 Subject: [PATCH 30/66] fix(query): fix first(*) result incorrect for all NULL entry tables; TD-15957 --- source/libs/function/inc/builtinsimpl.h | 4 ++-- source/libs/function/src/builtins.c | 4 ++-- source/libs/function/src/builtinsimpl.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index a8eccb57e9..cac86be917 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -84,9 +84,9 @@ int32_t diffFunction(SqlFunctionCtx *pCtx); bool getFirstLastFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t firstFunction(SqlFunctionCtx *pCtx); -int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); int32_t lastFunction(SqlFunctionCtx *pCtx); -int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool getTopBotFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 8914dca639..d3bea535d4 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1063,7 +1063,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = firstFunction, - .finalizeFunc = functionFinalize, + .finalizeFunc = firstLastFinalize, .combineFunc = firstCombine, }, { @@ -1074,7 +1074,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = lastFunction, - .finalizeFunc = lastFinalize, + .finalizeFunc = 
firstLastFinalize, .combineFunc = lastCombine, }, { diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index fe5737220b..e273ba4cb1 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2219,7 +2219,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } -int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { +int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); From c444574b2ec08ba9a3a1af284918df05d30a6001 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 26 May 2022 18:00:44 +0800 Subject: [PATCH 31/66] add column length error retry --- include/libs/qcom/query.h | 2 +- include/util/taoserror.h | 1 + source/common/src/ttime.c | 9 +++++++++ source/libs/parser/src/parInsert.c | 9 ++++++++- source/libs/parser/src/parUtil.c | 2 ++ source/libs/scheduler/src/schRemote.c | 1 + 6 files changed, 22 insertions(+), 2 deletions(-) diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 6b1f2903a3..a30f3be7a1 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -191,7 +191,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t #define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \ ((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \ (_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \ - (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED) + (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED || (_code == TSDB_CODE_PAR_VALUE_TOO_LONG)) #define NEED_CLIENT_REFRESH_VG_ERROR(_code) \ ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID) #define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 0ba1d0c0f2..a924719cf9 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -641,6 +641,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY TAOS_DEF_ERROR_CODE(0, 0x2650) #define TSDB_CODE_PAR_INVALID_DROP_COL TAOS_DEF_ERROR_CODE(0, 0x2651) #define TSDB_CODE_PAR_INVALID_COL_JSON TAOS_DEF_ERROR_CODE(0, 0x2652) +#define TSDB_CODE_PAR_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2653) //planner #define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700) diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 69ba964187..2345a55afb 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -184,6 +184,15 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) { i++; + while (str[i]) { + if ((str[i] >= '0' && str[i] <= '9') || str[i] == ':') { + ++i; + continue; + } + + return -1; + } + char* sep = strchr(&str[i], ':'); if (sep != NULL) { int32_t len = (int32_t)(sep - &str[i]); diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index ad36e06117..33bb5acfb0 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -610,7 +610,7 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int case TSDB_DATA_TYPE_BINARY: { // Too long values will raise the invalid sql error message if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { - return buildSyntaxErrMsg(pMsgBuf, "string data overflow", pToken->z); + return generateSyntaxErrMsg(pMsgBuf, 
TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); } return func(pMsgBuf, pToken->z, pToken->n, param); @@ -658,6 +658,9 @@ static FORCE_INLINE int32_t MemRowAppend(SMsgBuf* pMsgBuf, const void* value, in int32_t output = 0; const char* rowEnd = tdRowEnd(rb->pBuf); if (!taosMbsToUcs4(value, len, (TdUcs4*)varDataVal(rowEnd), pa->schema->bytes - VARSTR_HEADER_SIZE, &output)) { + if (errno == E2BIG) { + return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pa->schema->name); + } char buf[512] = {0}; snprintf(buf, tListLen(buf), "%s", strerror(errno)); return buildSyntaxErrMsg(pMsgBuf, buf, value); @@ -771,6 +774,10 @@ static int32_t KvRowAppend(SMsgBuf* pMsgBuf, const void* value, int32_t len, voi // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' int32_t output = 0; if (!taosMbsToUcs4(value, len, (TdUcs4*)varDataVal(pa->buf), pa->schema->bytes - VARSTR_HEADER_SIZE, &output)) { + if (errno == E2BIG) { + return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pa->schema->name); + } + char buf[512] = {0}; snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno)); return buildSyntaxErrMsg(pMsgBuf, buf, value); diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 652ed10ce8..ff206c1a76 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -173,6 +173,8 @@ static char* getSyntaxErrFormat(int32_t errCode) { return "No columns can be dropped"; case TSDB_CODE_PAR_INVALID_COL_JSON: return "Only tag can be json type"; + case TSDB_CODE_PAR_VALUE_TOO_LONG: + return "Value too long for column/tag: %s"; case TSDB_CODE_OUT_OF_MEMORY: return "Out of memory"; default: diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 947a4fd1f2..e9017b937d 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -440,6 +440,7 @@ int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code int32_t schHandleDropCallback(void *param, const SDataBuf *pMsg, int32_t code) { SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; qDebug("QID:%" PRIx64 ",TID:%" PRIx64 " drop task rsp received, code:%x", pParam->queryId, pParam->taskId, code); + taosMemoryFreeClear(param); return TSDB_CODE_SUCCESS; } From 1a22de765cff422ce3d7fc2b1535ecaab8abc978 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Thu, 26 May 2022 18:03:38 +0800 Subject: [PATCH 32/66] fix(sync) add multi mnodes --- source/libs/sync/src/syncAppendEntries.c | 36 +++++++++++++----------- source/libs/sync/src/syncCommit.c | 5 +++- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index b0c33ba347..008bc00dbc 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -363,28 +363,32 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { break; } } - ASSERT(hit == true); + SReConfigCbMeta cbMeta = {0}; bool isDrop; - syncNodeUpdateConfig(ths, &newSyncCfg, &isDrop); - // change isStandBy to normal - if (!isDrop) { - if (ths->state == TAOS_SYNC_STATE_LEADER) { - syncNodeBecomeLeader(ths); - } else { - syncNodeBecomeFollower(ths); + // I am in newConfig + if (hit) { + syncNodeUpdateConfig(ths, &newSyncCfg, &isDrop); + + // change isStandBy to normal + if (!isDrop) { + if (ths->state == TAOS_SYNC_STATE_LEADER) { + syncNodeBecomeLeader(ths); + } else { 
+        syncNodeBecomeFollower(ths);
+      }
     }
+
+    char* sOld = syncCfg2Str(&oldSyncCfg);
+    char* sNew = syncCfg2Str(&newSyncCfg);
+    sInfo("==config change== 0x11 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop);
+    taosMemoryFree(sOld);
+    taosMemoryFree(sNew);
   }
 
-  char* sOld = syncCfg2Str(&oldSyncCfg);
-  char* sNew = syncCfg2Str(&newSyncCfg);
-  sInfo("==config change== 0x11 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop);
-  taosMemoryFree(sOld);
-  taosMemoryFree(sNew);
-
-  if (ths->pFsm->FpReConfigCb != NULL) {
-    SReConfigCbMeta cbMeta = {0};
+  // always call FpReConfigCb
+  if (ths->pFsm->FpReConfigCb != NULL) {
     cbMeta.code = 0;
     cbMeta.currentTerm = ths->pRaftStore->currentTerm;
     cbMeta.index = pEntry->index;
diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c
index a9b055820b..4a1a40a2d7 100644
--- a/source/libs/sync/src/syncCommit.c
+++ b/source/libs/sync/src/syncCommit.c
@@ -141,7 +141,10 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
       break;
     }
   }
-  ASSERT(hit == true);
+
+  if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
+    ASSERT(hit == true);
+  }
 
   bool isDrop;
   syncNodeUpdateConfig(pSyncNode, &newSyncCfg, &isDrop);

From a5a57c3c993f40244e87a0fed8aa043c88339a77 Mon Sep 17 00:00:00 2001
From: plum-lihui
Date: Thu, 26 May 2022 18:10:26 +0800
Subject: [PATCH 33/66] test: add error test case for rerun

---
 tests/system-test/99-TDcase/TD-16025.py | 481 ++++++++++++++++++++++++
 1 file changed, 481 insertions(+)
 create mode 100644 tests/system-test/99-TDcase/TD-16025.py

diff --git a/tests/system-test/99-TDcase/TD-16025.py b/tests/system-test/99-TDcase/TD-16025.py
new file mode 100644
index 0000000000..3a70eaf71b
--- /dev/null
+++ b/tests/system-test/99-TDcase/TD-16025.py
@@ -0,0 +1,481 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class actionType(Enum):
+    CREATE_DATABASE = 0
+    CREATE_STABLE = 1
+    CREATE_CTABLE = 2
+    INSERT_DATA = 3
+
+class TDTestCase:
+    hostname = socket.gethostname()
+    #rpcDebugFlagVal = '143'
+    #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #print ("===================: ", updatecfgDict)
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        #tdSql.init(conn.cursor())
+        tdSql.init(conn.cursor(), logSql)  # output sql.txt file
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def newcur(self,cfg,host,port):
+        user = "root"
+        password = "taosdata"
+        con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+        cur=con.cursor()
+        print(cur)
+        return cur
+
+    def initConsumerTable(self,cdbName='cdb'):
+        tdLog.info("create consume database, and consume info table, and consume result table")
+        tdSql.query("create database if not exists %s vgroups 
1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname))
+        for i in range(ctbNum):
+            sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1)
+            if (i > 0) and (i%100 == 0):
+                tsql.execute(sql)
+                sql = pre_create
+        if sql != pre_create:
+            tsql.execute(sql)
+
+        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        if startTs == 0:
+            t = time.time()
+            startTs = int(round(t * 1000))
+
+        ctbDict = {}
+        for i in range(ctbNum):
+            ctbDict[i] = 0
+
+        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+        rowsOfCtb = 0
+        while rowsOfCtb < rowsPerTbl:
+            for i in range(ctbNum):
+                sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
+                for k in range(batchNum):
+                    sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i])
+                    ctbDict[i] += 1
+                    if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl):
+                        tsql.execute(sql)
+                        sql = "insert into "
+                        break
+            rowsOfCtb = ctbDict[0]
+
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        if startTs == 0:
+            t = time.time()
+            startTs = int(round(t * 1000))
+
+        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+        rowsOfSql = 0
+        for i in range(ctbNum):
+            sql += " %s_%d values "%(ctbPrefix,i)
+            for j in range(rowsPerTbl):
+                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+                rowsOfSql += 1
+                if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsOfSql = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s_%d values " %(ctbPrefix,i)
+                    else:
+                        sql = "insert into "
+        #end sql
+        if sql != pre_insert:
+            #print("insert sql:%s"%sql)
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+        tdLog.debug("start to insert data with auto create child table ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        if startTs == 0:
+            t = time.time()
+            startTs = int(round(t * 1000))
+
+        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+        rowsOfSql = 0
+        for i in range(ctbNum):
+            sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
+            for j in range(rowsPerTbl):
+                sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j)
+                rowsOfSql += 1
+                if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsOfSql = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i)
+                    else:
+                        sql = "insert into "
+        #end sql
+        if sql != pre_insert:
+            #print("insert sql:%s"%sql)
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 23, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + time.sleep(3) + tdLog.info("================= restart dnode ===========================") + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2 + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 50 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("create some new child table and insert data ") + parameterDict['batchNum'] = 100 + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("================= restart dnode ===========================") + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... 
") + + + + + # 自动建表完成数据插入,启动消费 + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db3', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + #self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + #self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # tdLog.info("================= restart dnode ===========================") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 3 end ...... ") + + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + # self.tmqCase1(cfgPath, buildPath) + # self.tmqCase2(cfgPath, buildPath) + self.tmqCase3(cfgPath, buildPath) + # self.tmqCase4(cfgPath, buildPath) + # self.tmqCase5(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 593c79d512e09d02180f5aa389a340fe437aad5a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 26 May 2022 18:15:20 +0800 Subject: [PATCH 34/66] fix(query): add finalize for first function. 
--- source/libs/function/inc/builtinsimpl.h | 2 +- source/libs/function/src/builtins.c | 4 ++-- source/libs/function/src/builtinsimpl.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index a8eccb57e9..37f469f644 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -86,7 +86,7 @@ bool getFirstLastFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t firstFunction(SqlFunctionCtx *pCtx); int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); int32_t lastFunction(SqlFunctionCtx *pCtx); -int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t firstlastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool getTopBotFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 8914dca639..7526722c7c 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1063,7 +1063,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = firstFunction, - .finalizeFunc = functionFinalize, + .finalizeFunc = firstlastFinalize, .combineFunc = firstCombine, }, { @@ -1074,7 +1074,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = lastFunction, - .finalizeFunc = lastFinalize, + .finalizeFunc = firstlastFinalize, .combineFunc = lastCombine, }, { diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index fe5737220b..d4dfc2d636 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2219,7 +2219,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } -int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { +int32_t firstlastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); From 62f915c1ab52b0673a08351a5228173aa354d712 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 26 May 2022 19:08:55 +0800 Subject: [PATCH 35/66] fix(query): remove redundant definition of stream table in mnode. 
--- include/common/systable.h | 1 - source/common/src/systable.c | 30 +++++++++++++++----------- source/libs/parser/src/parTranslater.c | 2 +- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/include/common/systable.h b/include/common/systable.h index e36beb13f2..8b0bb4a3fb 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -34,7 +34,6 @@ extern "C" { #define TSDB_INS_TABLE_USER_FUNCTIONS "user_functions" #define TSDB_INS_TABLE_USER_INDEXES "user_indexes" #define TSDB_INS_TABLE_USER_STABLES "user_stables" -#define TSDB_INS_TABLE_USER_STREAMS "user_streams" #define TSDB_INS_TABLE_USER_TABLES "user_tables" #define TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED "user_table_distributed" #define TSDB_INS_TABLE_USER_USERS "user_users" diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 708e1b2df4..971c75e41b 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -123,7 +123,21 @@ static const SSysDbTableSchema userStbsSchema[] = { {.name = "table_comment", .bytes = 1024 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; -static const SSysDbTableSchema userStreamsSchema[] = { +/** + * static const SSysDbTableSchema streamSchema[] = { + {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, + {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, +}; + */ + +static const SSysDbTableSchema streamSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, @@ -234,7 +248,7 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)}, {TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)}, {TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)}, - {TSDB_INS_TABLE_USER_STREAMS, userStreamsSchema, tListLen(userStreamsSchema)}, + {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)}, {TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)}, {TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)}, {TSDB_INS_TABLE_USER_USERS, userUsersSchema, tListLen(userUsersSchema)}, @@ -307,17 +321,7 @@ static const SSysDbTableSchema querySchema[] = { {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; -static const SSysDbTableSchema streamSchema[] = { - {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "source_db", .bytes = 
SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, -}; + static const SSysTableMeta perfsMeta[] = { {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)}, diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index b26aeb35bd..d3cda950d8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3800,7 +3800,7 @@ static const char* getSysTableName(ENodeType type) { case QUERY_NODE_SHOW_INDEXES_STMT: return TSDB_INS_TABLE_USER_INDEXES; case QUERY_NODE_SHOW_STREAMS_STMT: - return TSDB_INS_TABLE_USER_STREAMS; + return TSDB_PERFS_TABLE_STREAMS; case QUERY_NODE_SHOW_BNODES_STMT: return TSDB_INS_TABLE_BNODES; case QUERY_NODE_SHOW_SNODES_STMT: From 985fde731292cf74d6a95f399c1119a4288b6d44 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 26 May 2022 19:11:08 +0800 Subject: [PATCH 36/66] fix time issue --- source/common/src/ttime.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 2345a55afb..38ad948981 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -184,9 +184,10 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) { i++; - while (str[i]) { - if ((str[i] >= '0' && str[i] <= '9') || str[i] == ':') { - ++i; + int32_t j = i; + while (str[j]) { + if ((str[j] >= '0' && str[j] <= '9') || str[j] == ':') { + ++j; continue; } From c8d55750306d86aedb4fa7ed2f0739e8ff8cb275 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 19:29:29 +0800 Subject: [PATCH 37/66] fix: status msg --- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 6 +++++- source/dnode/mnode/impl/src/mndCluster.c | 1 + source/dnode/mnode/impl/src/mndDnode.c | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index bb2c069eaa..44958d4312 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -92,7 +92,11 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { SEpSet epSet = {0}; dmGetMnodeEpSet(pMgmt->pData, &epSet); rpcSendRecv(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp); - dmProcessStatusRsp(pMgmt, &rpcRsp); + if (rpcRsp.code != 0) { + dError("failed to send status msg since %s", tstrerror(rpcRsp.code)); + } else { + dmProcessStatusRsp(pMgmt, &rpcRsp); + } } int32_t dmProcessAuthRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index 6266f22f39..a421be5c06 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -144,6 +144,7 @@ _OVER: static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) { mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster); + pSdb->pMnode->clusterId = pCluster->id; return 0; } diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 047562ec02..22f858c60b 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -441,7 +441,7 @@ static int32_t 
mndProcessStatusReq(SRpcMsg *pReq) { pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes; SStatusRsp statusRsp = {0}; - statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE); + statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE); statusRsp.dnodeCfg.dnodeId = pDnode->id; statusRsp.dnodeCfg.clusterId = pMnode->clusterId; statusRsp.pDnodeEps = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(SDnodeEp)); From 6229ba0a199aba944ec6c501e5be4c59953f040b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 26 May 2022 20:06:22 +0800 Subject: [PATCH 38/66] test: update unit test case. --- source/common/src/systable.c | 14 -------------- source/libs/parser/src/parTranslater.c | 2 +- source/libs/parser/test/mockCatalog.cpp | 10 +++++----- tests/script/tsim/show/basic.sim | 6 +++--- 4 files changed, 9 insertions(+), 23 deletions(-) diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 971c75e41b..1517684ccd 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -123,20 +123,6 @@ static const SSysDbTableSchema userStbsSchema[] = { {.name = "table_comment", .bytes = 1024 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; -/** - * static const SSysDbTableSchema streamSchema[] = { - {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, -}; - */ - static const SSysDbTableSchema streamSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR}, diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index d3cda950d8..4b4560122f 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3758,7 +3758,6 @@ static const char* getSysDbName(ENodeType type) { case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_FUNCTIONS_STMT: case QUERY_NODE_SHOW_INDEXES_STMT: - case QUERY_NODE_SHOW_STREAMS_STMT: case QUERY_NODE_SHOW_BNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_LICENCE_STMT: @@ -3767,6 +3766,7 @@ static const char* getSysDbName(ENodeType type) { case QUERY_NODE_SHOW_CONNECTIONS_STMT: case QUERY_NODE_SHOW_QUERIES_STMT: case QUERY_NODE_SHOW_TOPICS_STMT: + case QUERY_NODE_SHOW_STREAMS_STMT: case QUERY_NODE_SHOW_TRANSACTIONS_STMT: return TSDB_PERFORMANCE_SCHEMA_DB; default: diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index a0acb76ae9..f27a4aa896 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -71,11 +71,6 @@ void generateInformationSchema(MockCatalogService* mcs) { .addColumn("stable_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } - { - ITableBuilder& builder = 
mcs->createTableBuilder("information_schema", "user_streams", TSDB_SYSTEM_TABLE, 1) - .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); - builder.done(); - } { ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_tables", TSDB_SYSTEM_TABLE, 2) .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN) @@ -106,6 +101,11 @@ void generatePerformanceSchema(MockCatalogService* mcs) { .addColumn("id", TSDB_DATA_TYPE_INT); builder.done(); } + { + ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "streams", TSDB_SYSTEM_TABLE, 1) + .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); + builder.done(); + } } /* diff --git a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim index e171d1abb9..95201bc48e 100644 --- a/tests/script/tsim/show/basic.sim +++ b/tests/script/tsim/show/basic.sim @@ -25,7 +25,7 @@ sql connect # select */column from information_schema.xxxx; xxxx include: # dnodes, mnodes, modules, qnodes, -# user_databases, user_functions, user_indexes, user_stables, user_streams, +# user_databases, user_functions, user_indexes, user_stables, streams, # user_tables, user_table_distributed, user_users, vgroups, print =============== add dnode2 into cluster @@ -96,7 +96,7 @@ sql select * from information_schema.user_stables if $rows != 1 then return -1 endi -#sql select * from information_schema.user_streams +#sql select * from information_schema.`streams` sql select * from information_schema.user_tables if $rows != 28 then return -1 @@ -194,7 +194,7 @@ sql select * from information_schema.user_stables if $rows != 1 then return -1 endi -#sql select * from information_schema.user_streams +#sql select * from performance_schema.`streams` sql select * from information_schema.user_tables if $rows != 28 then return -1 From 5687135b708705d143c691e74c938982aa4508f7 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 20:21:00 +0800 Subject: [PATCH 39/66] fix: status msg --- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 6 +- tests/script/tsim/mnode/basic2.sim | 82 +++++++-------- tests/script/tsim/mnode/basic3.sim | 104 +++++++++++++------- 3 files changed, 115 insertions(+), 77 deletions(-) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 44958d4312..f98d7a6fd5 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -93,7 +93,11 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { dmGetMnodeEpSet(pMgmt->pData, &epSet); rpcSendRecv(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp); if (rpcRsp.code != 0) { - dError("failed to send status msg since %s", tstrerror(rpcRsp.code)); + dError("failed to send status msg since %s, numOfEps:%d inUse:%d", tstrerror(rpcRsp.code), epSet.numOfEps, + epSet.inUse); + for (int32_t i = 0; i < epSet.numOfEps; ++i) { + dDebug("index:%d, mnode ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port); + } } else { dmProcessStatusRsp(pMgmt, &rpcRsp); } diff --git a/tests/script/tsim/mnode/basic2.sim b/tests/script/tsim/mnode/basic2.sim index 4be8fe9962..66dee512eb 100644 --- a/tests/script/tsim/mnode/basic2.sim +++ b/tests/script/tsim/mnode/basic2.sim @@ -21,29 +21,31 @@ endi print =============== create dnodes sql create dnode $hostname port 7200 -sql create dnode $hostname port 7300 -sleep 2000 - -sql show dnodes; -if $rows != 3 then - return -1 +$x = 0 +step1: + $x = $x + 1 + sleep 500 + if $x == 20 then + return -1 + endi +sql show dnodes -x step1 +if 
$data(1)[4] != ready then + goto step1 endi - -sql show mnodes; -if $rows != 1 then - return -1 -endi - -if $data00 != 1 then - return -1 -endi - -if $data02 != LEADER then - return -1 +if $data(2)[4] != ready then + goto step1 endi print =============== create mnode 2 sql create mnode on dnode 2 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi sql show mnodes print $data(1)[0] $data(1)[1] $data(1)[2] print $data(2)[0] $data(2)[1] $data(2)[2] @@ -60,8 +62,8 @@ endi if $data(2)[0] != 2 then return -1 endi -if $data(2)[2] == LEADER then - return -1 +if $data(2)[2] != FOLLOWER then + goto step1 endi print =============== create user @@ -71,45 +73,47 @@ if $rows != 2 then return -1 endi -#sql create database db -#sql show databases -#if $rows != 3 then -# return -1 -#endi +sql create database db +sql show databases +if $rows != 3 then + return -1 +endi sleep 5000 +print =============== restart system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode2 -s stop -sleep 100 -return system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode2 -s start sql connect - sql show mnodes if $rows != 2 then return -1 endi -if $data(1)[0] != 1 then - return -1 -endi -if $data(1)[2] != LEADER then - return -1 -endi sql show users if $rows != 2 then return -1 endi -#sql show databases -#if $rows != 3 then -# return -1 -#endi +sql show databases +if $rows != 3 then + return -1 +endi -return +sql show dnodes +if $data(1)[4] != ready then + return -1 +endi +if $data(2)[4] != ready then + return -1 +endi + +print =============== insert data +sql create table db.stb (ts timestamp, i int) tags (j int) +sql create table db.ctb using db.stb tags(1); system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode2 -s stop \ No newline at end of file diff --git a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim index 42a702bee7..40c0f01229 100644 --- a/tests/script/tsim/mnode/basic3.sim +++ b/tests/script/tsim/mnode/basic3.sim @@ -15,11 +15,10 @@ $x = 0 step1: $x = $x + 1 sleep 1000 - if $x == 10 then + if $x == 20 then return -1 endi - -sql show dnodes +sql show dnodes -x step1 if $data(1)[4] != ready then goto step1 endi @@ -38,15 +37,10 @@ $x = 0 step2: $x = $x + 1 sleep 1000 - if $x == 10 then + if $x == 20 then return -1 endi - -sql show mnodes -print $data(1)[0] $data(1)[1] $data(1)[2] -print $data(2)[0] $data(2)[1] $data(2)[2] -print $data(3)[0] $data(3)[1] $data(3)[2] - +sql show mnodes -x step2 if $data(1)[2] != LEADER then goto step2 endi @@ -64,44 +58,80 @@ if $rows != 2 then return -1 endi +# wait mnode2 mnode3 recv data finish sleep 10000 print =============== step4: stop dnode1 system sh/exec.sh -n dnode1 -s stop - -return -system sh/exec.sh -n dnode2 -s stop -sleep 100 -return -system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start - -sleep 10000 -sql connect - -sql show mnodes -if $rows != 2 then - return -1 -endi -if $data(1)[0] != 1 then - return -1 -endi -if $data(1)[2] != LEADER then - return -1 -endi +$x = 0 +step4: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show mnodes -x step4 +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] +print $data(3)[0] $data(3)[1] $data(3)[2] sql show users if $rows != 2 then return -1 endi -#sql show databases -#if $rows != 3 then -# return -1 -#endi +sleep 1000 +sql show dnodes +if $data(2)[4] != ready then + return -1 +endi +if $data(3)[4] != ready then + return -1 +endi -return +print =============== step5: stop dnode1 
+system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s stop + +$x = 0 +step5: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show mnodes -x step5 +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] +print $data(3)[0] $data(3)[1] $data(3)[2] + +sql show users +if $rows != 2 then + return -1 +endi + +print =============== step6: stop dnode1 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s stop + +$x = 0 +step6: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show mnodes -x step6 +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] +print $data(3)[0] $data(3)[1] $data(3)[2] + +sql show users +if $rows != 2 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop \ No newline at end of file +system sh/exec.sh -n dnode2 -s stop +system sh/exec.sh -n dnode3 -s stop \ No newline at end of file From e40dd52100ff65196a767bdbd2c3f5554190af25 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 20:34:03 +0800 Subject: [PATCH 40/66] fix: drop mnode --- tests/script/tsim/mnode/basic1.sim | 149 +++++++++++++++++------------ 1 file changed, 88 insertions(+), 61 deletions(-) diff --git a/tests/script/tsim/mnode/basic1.sim b/tests/script/tsim/mnode/basic1.sim index 235889ece6..198f36cdd2 100644 --- a/tests/script/tsim/mnode/basic1.sim +++ b/tests/script/tsim/mnode/basic1.sim @@ -6,15 +6,6 @@ system sh/exec.sh -n dnode2 -s start sql connect print =============== show dnodes -sql show dnodes; -if $rows != 1 then - return -1 -endi - -if $data00 != 1 then - return -1 -endi - sql show mnodes; if $rows != 1 then return -1 @@ -30,63 +21,55 @@ endi print =============== create dnodes sql create dnode $hostname port 7200 -sleep 2000 - -sql show dnodes; -if $rows != 2 then - return -1 +$x = 0 +step1: + $x = $x + 1 + sleep 500 + if $x == 20 then + return -1 + endi +sql show dnodes -x step1 +if $data(1)[4] != ready then + goto step1 endi - -if $data00 != 1 then - return -1 -endi - -if $data10 != 2 then - return -1 -endi - -print $data02 -if $data02 != 0 then - return -1 -endi - -if $data12 != 0 then - return -1 -endi - -if $data04 != ready then - return -1 -endi - -if $data14 != ready then - return -1 -endi - -sql show mnodes; -if $rows != 1 then - return -1 -endi - -if $data00 != 1 then - return -1 -endi - -if $data02 != LEADER then - return -1 +if $data(2)[4] != ready then + goto step1 endi print =============== create drop mnode 1 sql_error create mnode on dnode 1 sql_error drop mnode on dnode 1 - -print =============== create drop mnode 2 sql create mnode on dnode 2 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi sql show mnodes +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] + if $rows != 2 then return -1 endi -sql_error create mnode on dnode 2 +if $data(1)[0] != 1 then + return -1 +endi +if $data(1)[2] != LEADER then + return -1 +endi +if $data(2)[0] != 2 then + return -1 +endi +if $data(2)[2] != FOLLOWER then + goto step1 +endi +sleep 2000 +print ============ drop mnodes sql drop mnode on dnode 2 sql show mnodes if $rows != 1 then @@ -94,6 +77,35 @@ if $rows != 1 then endi sql_error drop mnode on dnode 2 +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show mnodes +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] + +if $rows != 2 then + return -1 +endi +if $data(1)[0] 
!= 1 then + return -1 +endi +if $data(1)[2] != LEADER then + return -1 +endi +if $data(2)[0] != NULL then + goto step2 +endi +if $data(2)[2] != NULL then + goto step2 +endi + +sleep 2000 + print =============== create drop mnodes sql create mnode on dnode 2 sql show mnodes @@ -101,17 +113,32 @@ if $rows != 2 then return -1 endi -print =============== restart -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start - -sleep 2000 +$x = 0 +step3: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi sql show mnodes +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] + if $rows != 2 then return -1 endi +if $data(1)[0] != 1 then + return -1 +endi +if $data(1)[2] != LEADER then + return -1 +endi +if $data(2)[0] != 2 then + return -1 +endi +if $data(2)[2] != FOLLOWER then + goto step3 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT From 7c46aea979e8d248f9f710d9896670d800789854 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 26 May 2022 20:37:55 +0800 Subject: [PATCH 41/66] add tversion --- include/libs/catalog/catalog.h | 1 + source/client/src/clientImpl.c | 2 +- source/dnode/mnode/impl/src/mndStb.c | 3 ++- source/libs/catalog/inc/catalogInt.h | 2 +- source/libs/catalog/src/catalog.c | 5 +++-- source/libs/catalog/src/ctgCache.c | 9 ++++++--- 6 files changed, 14 insertions(+), 8 deletions(-) diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index e68d799dc1..5b746015e3 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -101,6 +101,7 @@ typedef struct SDbVgVersion { typedef struct STbSVersion { char* tbFName; int32_t sver; + int32_t tver; } STbSVersion; typedef struct SUserAuthVersion { diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 96adba9cfc..a23b587698 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -413,7 +413,7 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) { for (int32_t i = 0; i < tbNum; ++i) { STbVerInfo* tbInfo = taosArrayGet(pTbArray, i); - STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion}; + STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion, .tver = tbInfo->tversion}; taosArrayPush(pArray, &tbSver); } } diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 6bb1097e85..b33c09a0f9 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -1503,6 +1503,7 @@ static int32_t mndBuildStbSchemaImp(SDbObj *pDb, SStbObj *pStb, const char *tbNa pRsp->precision = pDb->cfg.precision; pRsp->tableType = TSDB_SUPER_TABLE; pRsp->sversion = pStb->colVer; + pRsp->tversion = pStb->tagVer; pRsp->suid = pStb->uid; pRsp->tuid = pStb->uid; @@ -1629,7 +1630,7 @@ int32_t mndValidateStbInfo(SMnode *pMnode, SSTableMetaVersion *pStbVersions, int metaRsp.suid = pStbVersion->suid; } - if (pStbVersion->sversion != metaRsp.sversion) { + if (pStbVersion->sversion != metaRsp.sversion || pStbVersion->tversion != metaRsp.tversion) { taosArrayPush(batchMetaRsp.pArray, &metaRsp); } else { tFreeSTableMetaRsp(&metaRsp); diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index d59bd1c50b..40bd3659a3 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -447,7 +447,7 @@ 
void ctgReleaseVgInfo(SCtgDBCache *dbCache); int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache); int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist); int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); -int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tbType, uint64_t *suid, char *stbName); +int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName); int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass); int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index bbb8983713..861de1ab60 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -812,6 +812,7 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgm SName name; int32_t sver = 0; + int32_t tver = 0; int32_t tbNum = taosArrayGetSize(pTables); for (int32_t i = 0; i < tbNum; ++i) { STbSVersion* pTb = (STbSVersion*)taosArrayGet(pTables, i); @@ -828,8 +829,8 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgm int32_t tbType = 0; uint64_t suid = 0; char stbName[TSDB_TABLE_FNAME_LEN]; - ctgReadTbSverFromCache(pCtg, &name, &sver, &tbType, &suid, stbName); - if (sver >= 0 && sver < pTb->sver) { + ctgReadTbVerFromCache(pCtg, &name, &sver, &tver, &tbType, &suid, stbName); + if ((sver >= 0 && sver < pTb->sver) || (tver >= 0 && tver < pTb->tver)) { switch (tbType) { case TSDB_CHILD_TABLE: { SName stb = name; diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 0cda4a0482..9161c7cb32 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -322,9 +322,10 @@ _return: CTG_RET(code); } -int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tbType, uint64_t *suid, +int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName) { *sver = -1; + *tver = -1; if (NULL == pCtg->dbCache) { ctgDebug("empty tbmeta cache, tbName:%s", pTableName->tname); @@ -348,6 +349,7 @@ int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *suid = tbMeta->suid; if (*tbType != TSDB_CHILD_TABLE) { *sver = tbMeta->sversion; + *tver = tbMeta->tversion; } } CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); @@ -359,7 +361,7 @@ int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t if (*tbType != TSDB_CHILD_TABLE) { ctgReleaseDBCache(pCtg, dbCache); - ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname); + ctgDebug("Got sver %d tver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tver, *tbType, dbFName, pTableName->tname); return TSDB_CODE_SUCCESS; } @@ -391,12 +393,13 @@ int32_t ctgReadTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t stbName[nameLen] = 0; *sver = (*stbMeta)->sversion; + *tver = (*stbMeta)->tversion; CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); ctgReleaseDBCache(pCtg, dbCache); - 
ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname); + ctgDebug("Got sver %d tver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tver, *tbType, dbFName, pTableName->tname); return TSDB_CODE_SUCCESS; } From 3ecdebc4e84e289de85d0ce9373d20c89090602f Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 20:51:35 +0800 Subject: [PATCH 42/66] fix: status msg --- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index f98d7a6fd5..2533f268e5 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -98,9 +98,8 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { for (int32_t i = 0; i < epSet.numOfEps; ++i) { dDebug("index:%d, mnode ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port); } - } else { - dmProcessStatusRsp(pMgmt, &rpcRsp); } + dmProcessStatusRsp(pMgmt, &rpcRsp); } int32_t dmProcessAuthRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { From d3a2e98d605b0e2dbc1504b7fc2ce34ba592a609 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 26 May 2022 20:52:27 +0800 Subject: [PATCH 43/66] fix(stream): double free --- include/libs/stream/tstream.h | 10 +- source/dnode/vnode/src/tq/tq.c | 38 ++----- source/libs/stream/src/tstream.c | 103 +++++-------------- tests/pytest/cq.py | 169 ------------------------------- tests/pytest/stream/test2.py | 93 +++++++++++++++++ 5 files changed, 137 insertions(+), 276 deletions(-) delete mode 100644 tests/pytest/cq.py create mode 100644 tests/pytest/stream/test2.py diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index ae25e1bffd..8aaf9a79dc 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -84,15 +84,16 @@ typedef struct { } SStreamCheckpoint; static FORCE_INLINE SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) { - SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosMemoryCalloc(1, sizeof(SStreamDataSubmit)); + SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); if (pDataSubmit == NULL) return NULL; - pDataSubmit->data = pReq; pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t)); - if (pDataSubmit->data == NULL) goto FAIL; + if (pDataSubmit->dataRef == NULL) goto FAIL; + pDataSubmit->data = pReq; *pDataSubmit->dataRef = 1; + pDataSubmit->type = STREAM_INPUT__DATA_SUBMIT; return pDataSubmit; FAIL: - taosMemoryFree(pDataSubmit); + taosFreeQitem(pDataSubmit); return NULL; } @@ -107,7 +108,6 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) if (ref == 0) { taosMemoryFree(pDataSubmit->data); taosMemoryFree(pDataSubmit->dataRef); - taosFreeQitem(pDataSubmit); } } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 7bfb43bfb1..9941b00ff7 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -829,27 +829,15 @@ FAIL: } int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq) { - void* pIter = NULL; - bool failed = false; + void* pIter = NULL; + bool failed = false; + SStreamDataSubmit* pSubmit = NULL; - SStreamDataSubmit* pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); + pSubmit = streamDataSubmitNew(pReq); if (pSubmit == NULL) { failed = true; - goto SET_TASK_FAIL; - } - pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t)); - if (pSubmit->dataRef == 
NULL) { - failed = true; - goto SET_TASK_FAIL; } - pSubmit->type = STREAM_INPUT__DATA_SUBMIT; - /*pSubmit->sourceVer = ver;*/ - /*pSubmit->sourceVg = pTq->pVnode->config.vgId;*/ - pSubmit->data = pReq; - *pSubmit->dataRef = 1; - -SET_TASK_FAIL: while (1) { pIter = taosHashIterate(pTq->pStreamTasks, pIter); if (pIter == NULL) break; @@ -864,7 +852,9 @@ SET_TASK_FAIL: } streamDataSubmitRefInc(pSubmit); - taosWriteQitem(pTask->inputQ, pSubmit); + SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); + memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit)); + taosWriteQitem(pTask->inputQ, pSubmitClone); int8_t execStatus = atomic_load_8(&pTask->status); if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) { @@ -887,18 +877,12 @@ SET_TASK_FAIL: } } - if (!failed) { + if (pSubmit) { streamDataSubmitRefDec(pSubmit); - return 0; - } else { - if (pSubmit) { - if (pSubmit->dataRef) { - taosMemoryFree(pSubmit->dataRef); - } - taosFreeQitem(pSubmit); - } - return -1; + taosFreeQitem(pSubmit); } + + return failed ? -1 : 0; } int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index 775a185da7..67f7a44a71 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ -166,6 +166,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) // destroy if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) { streamDataSubmitRefDec((SStreamDataSubmit*)data); + taosFreeQitem(data); } else { taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock); taosFreeQitem(data); @@ -173,6 +174,25 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) return 0; } +static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) { + while (1) { + void* data = NULL; + taosGetQitem(pTask->inputQAll, &data); + if (data == NULL) break; + + streamTaskExecImpl(pTask, data, pRes); + + if (taosArrayGetSize(pRes) != 0) { + SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); + qRes->type = STREAM_INPUT__DATA_BLOCK; + qRes->blocks = pRes; + taosWriteQitem(pTask->outputQ, qRes); + return taosArrayInit(0, sizeof(SSDataBlock)); + } + } + return pRes; +} + // TODO: handle version int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) { SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); @@ -182,88 +202,21 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) { void* exec = pTask->exec.executor; if (execStatus == TASK_STATUS__IDLE) { // first run, from qall, handle failure from last exec - while (1) { - void* data = NULL; - taosGetQitem(pTask->inputQAll, &data); - if (data == NULL) break; + pRes = streamExecForQall(pTask, pRes); + if (pRes == NULL) goto FAIL; - streamTaskExecImpl(pTask, data, pRes); - - /*taosFreeQitem(data);*/ - - if (taosArrayGetSize(pRes) != 0) { - SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); - resQ->type = STREAM_INPUT__DATA_BLOCK; - resQ->blocks = pRes; - taosWriteQitem(pTask->outputQ, resQ); - pRes = taosArrayInit(0, sizeof(SSDataBlock)); - if (pRes == NULL) goto FAIL; - } - } // second run, from inputQ taosReadAllQitems(pTask->inputQ, pTask->inputQAll); - while (1) { - void* data = NULL; - taosGetQitem(pTask->inputQAll, &data); - if (data == NULL) break; + pRes = streamExecForQall(pTask, pRes); + if (pRes == NULL) goto FAIL; - streamTaskExecImpl(pTask, data, pRes); - - /*taosFreeQitem(data);*/ - - if 
(taosArrayGetSize(pRes) != 0) { - SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); - resQ->type = STREAM_INPUT__DATA_BLOCK; - resQ->blocks = pRes; - taosWriteQitem(pTask->outputQ, resQ); - pRes = taosArrayInit(0, sizeof(SSDataBlock)); - if (pRes == NULL) goto FAIL; - } - } // set status closing atomic_store_8(&pTask->status, TASK_STATUS__CLOSING); - // third run, make sure all inputQ is cleared + + // third run, make sure inputQ and qall are cleared taosReadAllQitems(pTask->inputQ, pTask->inputQAll); - while (1) { - void* data = NULL; - taosGetQitem(pTask->inputQAll, &data); - if (data == NULL) break; - - streamTaskExecImpl(pTask, data, pRes); - - /*taosFreeQitem(data);*/ - - if (taosArrayGetSize(pRes) != 0) { - SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); - resQ->type = STREAM_INPUT__DATA_BLOCK; - resQ->blocks = pRes; - taosWriteQitem(pTask->outputQ, resQ); - pRes = taosArrayInit(0, sizeof(SSDataBlock)); - if (pRes == NULL) goto FAIL; - } - } - // set status closing - atomic_store_8(&pTask->status, TASK_STATUS__CLOSING); - // third run, make sure all inputQ is cleared - taosReadAllQitems(pTask->inputQ, pTask->inputQAll); - while (1) { - void* data = NULL; - taosGetQitem(pTask->inputQAll, &data); - if (data == NULL) break; - - streamTaskExecImpl(pTask, data, pRes); - - taosFreeQitem(data); - - if (taosArrayGetSize(pRes) != 0) { - SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM); - resQ->type = STREAM_INPUT__DATA_BLOCK; - resQ->blocks = pRes; - taosWriteQitem(pTask->outputQ, resQ); - pRes = taosArrayInit(0, sizeof(SSDataBlock)); - if (pRes == NULL) goto FAIL; - } - } + pRes = streamExecForQall(pTask, pRes); + if (pRes == NULL) goto FAIL; atomic_store_8(&pTask->status, TASK_STATUS__IDLE); break; diff --git a/tests/pytest/cq.py b/tests/pytest/cq.py deleted file mode 100644 index 7778969619..0000000000 --- a/tests/pytest/cq.py +++ /dev/null @@ -1,169 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- -import threading -import taos -import sys -import json -import time -import random -# query sql -query_sql = [ -# first supertable -"select count(*) from test.meters ;", -"select count(*) from test.meters where t3 > 2;", -"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters interval(1n) order by ts desc;", -#"select max(c0) from test.meters group by tbname", -"select first(ts) from test.meters where t5 >5000 and t5<5100;", -"select last(ts) from test.meters where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters;", -"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.t1;", -"select diff(c1) from test.t1;", -"select leastsquares(c1, 1, 1) from test.t1 ;", -"select max(c1) from test.meters where t5 >5000 and t5<5100;", -"select min(c1) from test.meters where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;", -"select percentile(c1, 50) from test.t1;", -"select spread(c1) from test.t1 ;", -"select stddev(c1) from test.t1;", -"select sum(c1) from test.meters where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;" -"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c4) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;", -"select diff(c4) from test.t1 where t5 >5000 and t5<5100;", -"select leastsquares(c4, 1, 1) from test.t1 ;", -"select max(c4) from test.meters where t5 >5000 and t5<5100;", -"select min(c4) from test.meters where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;", -"select percentile(c5, 50) from test.t1;", -"select spread(c5) from test.t1 ;", -"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;", -"select sum(c5) from test.meters where t5 >5000 and t5<5100;", -"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;", -#all vnode -"select count(*) from test.meters where t5 >5000 and t5<5100", -"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100", -"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100", -# second supertable -"select count(*) from test.meters1 where t3 > 2;", -"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters1 interval(1n) order by ts desc;", -#"select max(c0) from test.meters1 group by tbname", -"select first(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters1 ;", -"select twa(c1) from 
test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;", -"select diff(c1) from test.m1 ;", -"select leastsquares(c1, 1, 1) from test.m1 ;", -"select max(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c0 + c2 from test.m1 ;", -"select percentile(c1, 50) from test.m1;", -"select spread(c1) from test.m1 ;", -"select stddev(c1) from test.m1;", -"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;", -"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c5, 2) from test.m1;", -"select diff(c5) from test.m1;", -"select leastsquares(c5, 1, 1) from test.m1 ;", -"select max(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c0 from test.m1;", -"select percentile(c4, 50) from test.m1;", -"select spread(c4) from test.m1 ;", -"select stddev(c4) from test.m1;", -"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;", -"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;", -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -#all vnode -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100", -"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100", -#join -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8", -# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8" -] - -class ConcurrentInquiry: - def initConnection(self): - self.numOfTherads = 50 - self.ts=1500000001000 - - def SetThreadsNum(self,num): - self.numOfTherads=num - def query_thread(self,threadID): - host = "10.211.55.14" - user = "root" - password = "taosdata" - conn = taos.connect( - host, - user, - password, - ) - cl = conn.cursor() - cl.execute("use test;") - - print("Thread %d: starting" % threadID) - - while True: - ran_query_sql=query_sql - random.shuffle(ran_query_sql) - for i in ran_query_sql: - print("Thread %d : %s"% (threadID,i)) - try: - start = time.time() - cl.execute(i) - cl.fetchall() - end = time.time() - print("time cost :",end-start) - except Exception as e: - print( - "Failure thread%d, sql: %s,exception: %s" % - (threadID, str(i),str(e))) - exit(-1) - - - print("Thread %d: finishing" % threadID) - - - - def run(self): - - threads = [] - for i in range(self.numOfTherads): - thread = threading.Thread(target=self.query_thread, args=(i,)) - threads.append(thread) - thread.start() - -q = ConcurrentInquiry() -q.initConnection() -q.run() diff --git a/tests/pytest/stream/test2.py b/tests/pytest/stream/test2.py new file mode 100644 index 0000000000..a441174722 --- /dev/null +++ b/tests/pytest/stream/test2.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from 
util.cases import * +from util.sql import * +from util.common import tdCom +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + #for i in range(100): + tdSql.prepare() + dbname = tdCom.getLongName(10, "letters") + tdSql.execute('show databases') + tdSql.execute('drop database if exists ttxkbrzmpo') + tdSql.execute('create database if not exists ttxkbrzmpo vgroups 1') + tdSql.execute('use ttxkbrzmpo') + tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);') + tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);') + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create stream downsampling_stream into output_downsampling_stb as select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591, 100, 100.1, "Beijing", True);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+1s, -100, -100.1, "Tianjin", False);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+2s, 50, 50.3, "HeBei", False);') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;') + tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+10m, 60, 60.3, "heilongjiang", True);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+11m, 70, 70.3, "JiLin", True);') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;') + tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+21m, 70, 70.3, "JiLin", True);') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;') + tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('create stream abs_stream into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb;') + tdSql.query('describe output_abs_stb') + tdSql.execute('create stream acos_stream into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb;') + tdSql.query('describe output_acos_stb') + tdSql.execute('create stream asin_stream into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb;') + tdSql.query('describe output_asin_stb') + tdSql.execute('create stream atan_stream into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb;') + tdSql.query('describe output_atan_stb') + tdSql.execute('create stream ceil_stream into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb;') + tdSql.query('describe output_ceil_stb') + tdSql.execute('create stream cos_stream into output_cos_stb as select ts, cos(c1), cos(c2), c3 from 
scalar_stb;') + tdSql.query('describe output_cos_stb') + tdSql.execute('create stream floor_stream into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb;') + tdSql.query('describe output_floor_stb') + tdSql.execute('create stream log_stream into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb;') + tdSql.query('describe output_log_stb') + tdSql.execute('create stream pow_stream into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb;') + tdSql.query('describe output_pow_stb') + tdSql.execute('create stream round_stream into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb;') + tdSql.query('describe output_round_stb') + tdSql.execute('create stream sin_stream into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb;') + tdSql.query('describe output_sin_stb') + tdSql.execute('create stream sqrt_stream into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb;') + tdSql.query('describe output_sqrt_stb') + tdSql.execute('create stream tan_stream into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb;') + tdSql.query('describe output_tan_stb') + tdSql.execute('create stream char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb;') + tdSql.query('describe output_char_length_stb') + tdSql.execute('create stream concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb;') + tdSql.execute('create stream concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb;') + tdSql.execute('create stream length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb;') + tdSql.query('describe output_length_stb') + tdSql.execute('create stream lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb;') + tdSql.query('describe output_lower_stb') + tdSql.execute('create stream ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb;') + tdSql.query('describe output_ltrim_stb') + tdSql.execute('create stream rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb;') + tdSql.query('describe output_rtrim_stb') + tdSql.execute('create stream substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb;') + tdSql.query('describe output_substr_stb') + tdSql.execute('create stream upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb;') + tdSql.query('describe output_upper_stb') + tdSql.execute('insert into scalar_ct1 values (1653560440733, 100, 100.1, "beijing", "taos", "Taos");') + tdSql.execute('insert into scalar_ct1 values (1653560440733+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");') + tdSql.execute('insert into scalar_ct1 values (1653560440733+2s, 0, Null, "hebei", "TDengine", Null);') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 782ad93dc9751c30cfab4102e95c5f1f53a639b4 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 26 May 2022 21:16:06 +0800 Subject: [PATCH 44/66] fix update mgmt epset issue 
--- source/client/src/clientImpl.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index a23b587698..8819e3763c 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -745,12 +745,12 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { pRequest->metric.rsp = taosGetTimestampUs(); - STscObj* pTscObj = pRequest->pTscObj; - if (pEpSet) { - if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) { - updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); - } - } + //STscObj* pTscObj = pRequest->pTscObj; + //if (pEpSet) { + // if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) { + // updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); + // } + //} /* * There is not response callback function for submit response. From ddbcaee1ff1a0d64f26a70f17eedf3f8e98cecec Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 26 May 2022 21:23:19 +0800 Subject: [PATCH 45/66] fix: add debug info --- source/libs/transport/inc/transComm.h | 1 + source/libs/transport/src/transCli.c | 8 +++++++- source/libs/transport/src/transComm.c | 12 ++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 683f6c88c6..18a85865df 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -318,6 +318,7 @@ void transDQDestroy(SDelayQueue* queue); int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_t timeoutMs); +void transPrintEpSet(SEpSet* pEpSet); /* * init global func */ diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 395938c9d1..ba0da63dc1 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -921,6 +921,8 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { STransConnCtx* pCtx = pMsg->ctx; SEpSet* pEpSet = &pCtx->epSet; + transPrintEpSet(pEpSet); + /* * upper layer handle retry if code equal TSDB_CODE_RPC_NETWORK_UNAVAIL */ @@ -972,7 +974,11 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { pCtx->pRsp = NULL; } else { tTrace("%s cli conn %p handle resp", pTransInst->label, pConn); - pTransInst->cfp(pTransInst->parent, pResp, pEpSet); + if (pResp->code != 0) { + pTransInst->cfp(pTransInst->parent, pResp, NULL); + } else { + pTransInst->cfp(pTransInst->parent, pResp, pEpSet); + } } return 0; } diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 1ea03083b2..526f896ad2 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -446,4 +446,16 @@ int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_ uv_timer_start(queue->timer, transDQTimeout, timeoutMs, 0); return 0; } + +void transPrintEpSet(SEpSet* pEpSet) { + if (pEpSet == NULL) { + tTrace("NULL epset"); + return; + } + tTrace("epset begin: inUse: %d", pEpSet->inUse); + for (int i = 0; i < pEpSet->numOfEps; i++) { + tTrace("ip: %s, port: %d", pEpSet->eps[i].fqdn, pEpSet->eps[i].port); + } + tTrace("epset end"); +} #endif From 750b7ccec98a6da4add836a8011715d61de67fe4 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 26 May 2022 21:45:49 +0800 Subject: [PATCH 46/66] fix: remove table group code in 2.x --- source/libs/executor/src/timewindowoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 6ba8f08a26..47dda8fc2b 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1264,7 +1264,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, for (int32_t i = 0; i < numOfChild; i++) { SSDataBlock* chRes = createOneDataBlock(pResBlock, false); SOperatorInfo* pChildOp = createIntervalOperatorInfo(NULL, pExprInfo, numOfCols, - chRes, pInterval, primaryTsSlotId, pTwAggSupp, NULL, pTaskInfo); + chRes, pInterval, primaryTsSlotId, pTwAggSupp, pTaskInfo); if (pChildOp && chRes) { taosArrayPush(pInfo->pChildren, &pChildOp); continue; From d5d8581bcffcb291b2332ecc3298ee29a66524a9 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 26 May 2022 21:45:59 +0800 Subject: [PATCH 47/66] refactor(stream): remove out-of-date msg --- include/common/tmsgdef.h | 15 ++++++++------- source/dnode/mgmt/mgmt_snode/src/smHandle.c | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 3 --- source/dnode/mnode/impl/src/mndScheduler.c | 9 ++++++--- source/dnode/snode/src/snode.c | 14 +++++++------- source/libs/stream/src/tstream.c | 12 ++++++------ 6 files changed, 28 insertions(+), 27 deletions(-) diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 4bbb3a4a48..4687062b18 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -195,11 +195,8 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_EXPLAIN, "vnode-explain", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBSCRIBE, "vnode-subscribe", SMVSubscribeReq, SMVSubscribeRsp) - TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqCVConsumeReq, SMqCVConsumeRsp) + TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqPollReq, SMqDataBlkRsp) TD_DEF_MSG_TYPE(TDMT_VND_TASK_DEPLOY, "vnode-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp) - TD_DEF_MSG_TYPE(TDMT_VND_TASK_PIPE_EXEC, "vnode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_VND_TASK_MERGE_EXEC, "vnode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_VND_TASK_WRITE_EXEC, "vnode-task-write-exec", SStreamTaskExecReq, SStreamTaskExecRsp) TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TASK_RUN, "vnode-stream-task-run", NULL, NULL) @@ -236,9 +233,13 @@ enum { // Requests handled by SNODE TD_NEW_MSG_SEG(TDMT_SND_MSG) TD_DEF_MSG_TYPE(TDMT_SND_TASK_DEPLOY, "snode-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp) - TD_DEF_MSG_TYPE(TDMT_SND_TASK_EXEC, "snode-task-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_SND_TASK_PIPE_EXEC, "snode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_SND_TASK_MERGE_EXEC, "snode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + //TD_DEF_MSG_TYPE(TDMT_SND_TASK_EXEC, "snode-task-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + //TD_DEF_MSG_TYPE(TDMT_SND_TASK_PIPE_EXEC, "snode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + //TD_DEF_MSG_TYPE(TDMT_SND_TASK_MERGE_EXEC, "snode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + + TD_DEF_MSG_TYPE(TDMT_SND_TASK_RUN, "snode-stream-task-run", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_SND_TASK_DISPATCH, "snode-stream-task-dispatch", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_SND_TASK_RECOVER, "snode-stream-task-recover", NULL, NULL) // Requests handled by SCHEDULER TD_NEW_MSG_SEG(TDMT_SCH_MSG) diff --git a/source/dnode/mgmt/mgmt_snode/src/smHandle.c 
b/source/dnode/mgmt/mgmt_snode/src/smHandle.c index bf1bb145b7..a3aab439de 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smHandle.c +++ b/source/dnode/mgmt/mgmt_snode/src/smHandle.c @@ -96,7 +96,7 @@ SArray *smGetMsgHandles() { // Requests handled by SNODE if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_DEPLOY, smPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_EXEC, smPutNodeMsgToExecQueue, 0) == NULL) goto _OVER; + /*if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_EXEC, smPutNodeMsgToExecQueue, 0) == NULL) goto _OVER;*/ code = 0; _OVER: diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 0af503bbcf..ac56a9ba3d 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -310,9 +310,6 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DEPLOY, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_PIPE_EXEC, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_MERGE_EXEC, vmPutNodeMsgToMergeQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_WRITE_EXEC, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 204c6c16a3..58b51e4c54 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -359,7 +359,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { // one merge only ASSERT(taosArrayGetSize(pArray) == 1); SStreamTask* lastLevelTask = taosArrayGetP(pArray, 0); - pTask->dispatchMsgType = TDMT_VND_TASK_MERGE_EXEC; + /*pTask->dispatchMsgType = TDMT_VND_TASK_MERGE_EXEC;*/ + pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; pTask->dispatchType = TASK_DISPATCH__FIXED; pTask->fixedEpDispatcher.taskId = lastLevelTask->taskId; @@ -404,7 +405,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { if (pStream->fixedSinkVgId == 0) { pTask->dispatchType = TASK_DISPATCH__SHUFFLE; - pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC; + /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/ + pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; SDbObj* pDb = mndAcquireDb(pMnode, pStream->sourceDb); ASSERT(pDb); if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) { @@ -434,7 +436,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { } } else { pTask->dispatchType = TASK_DISPATCH__FIXED; - pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC; + /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/ + pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; SArray* pArray = taosArrayGetP(pStream->tasks, 0); // one sink only ASSERT(taosArrayGetSize(pArray) == 1); diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index ec75ffcae1..37b406466d 100644 --- 
a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -103,8 +103,8 @@ void sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg) { tDecoderClear(&decoder); sndMetaDeployTask(pSnode->pMeta, pTask); - } else if (pMsg->msgType == TDMT_SND_TASK_EXEC) { - sndProcessTaskExecReq(pSnode, pMsg); + /*} else if (pMsg->msgType == TDMT_SND_TASK_EXEC) {*/ + /*sndProcessTaskExecReq(pSnode, pMsg);*/ } else { ASSERT(0); } @@ -112,9 +112,9 @@ void sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg) { void sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg) { // operator exec - if (pMsg->msgType == TDMT_SND_TASK_EXEC) { - sndProcessTaskExecReq(pSnode, pMsg); - } else { - ASSERT(0); - } + /*if (pMsg->msgType == TDMT_SND_TASK_EXEC) {*/ + /*sndProcessTaskExecReq(pSnode, pMsg);*/ + /*} else {*/ + ASSERT(0); + /*}*/ } diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index 67f7a44a71..dc0fbf2bbe 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ -278,13 +278,13 @@ int32_t streamSink(SStreamTask* pTask, SMsgCb* pMsgCb) { } int32_t qType; - if (pTask->dispatchMsgType == TDMT_VND_TASK_PIPE_EXEC || pTask->dispatchMsgType == TDMT_SND_TASK_PIPE_EXEC) { + if (pTask->dispatchMsgType == TDMT_VND_TASK_DISPATCH || pTask->dispatchMsgType == TDMT_SND_TASK_DISPATCH) { qType = FETCH_QUEUE; - } else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC || - pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) { - qType = MERGE_QUEUE; - } else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) { - qType = WRITE_QUEUE; + /*} else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||*/ + /*pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {*/ + /*qType = MERGE_QUEUE;*/ + /*} else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {*/ + /*qType = WRITE_QUEUE;*/ } else { ASSERT(0); } From 823bfcb32b50c2131d06a2921cbd06e23e98abeb Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 26 May 2022 22:16:38 +0800 Subject: [PATCH 48/66] fix case issue --- source/dnode/mnode/impl/test/stb/stb.cpp | 2 +- source/libs/qworker/inc/qworkerInt.h | 1 + source/libs/qworker/src/qworker.c | 2 +- source/libs/scheduler/src/schRemote.c | 9 +++++++-- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp b/source/dnode/mnode/impl/test/stb/stb.cpp index 1d98199103..56b8936cf4 100644 --- a/source/dnode/mnode/impl/test/stb/stb.cpp +++ b/source/dnode/mnode/impl/test/stb/stb.cpp @@ -344,7 +344,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) { EXPECT_EQ(metaRsp.precision, TSDB_TIME_PRECISION_MILLI); EXPECT_EQ(metaRsp.tableType, TSDB_SUPER_TABLE); EXPECT_EQ(metaRsp.sversion, 1); - EXPECT_EQ(metaRsp.tversion, 0); + EXPECT_EQ(metaRsp.tversion, 1); EXPECT_GT(metaRsp.suid, 0); EXPECT_GT(metaRsp.tuid, 0); EXPECT_EQ(metaRsp.vgId, 0); diff --git a/source/libs/qworker/inc/qworkerInt.h b/source/libs/qworker/inc/qworkerInt.h index 511327658f..48ad737334 100644 --- a/source/libs/qworker/inc/qworkerInt.h +++ b/source/libs/qworker/inc/qworkerInt.h @@ -227,6 +227,7 @@ typedef struct SQWorkerMgmt { #define QW_ELOG(_param, ...) qError("QW:%p " _param, mgmt, __VA_ARGS__) #define QW_DLOG(_param, ...) qDebug("QW:%p " _param, mgmt, __VA_ARGS__) +#define QW_TLOG(_param, ...) qTrace("QW:%p " _param, mgmt, __VA_ARGS__) #define QW_DUMP(_param, ...) 
\ do { \ diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index e7a680de3c..5cf9f62fd8 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -1409,7 +1409,7 @@ void qwProcessHbTimerEvent(void *param, void *tmrId) { SQWSchStatus *sch = (SQWSchStatus *)pIter; if (NULL == sch->hbConnInfo.handle) { uint64_t *sId = taosHashGetKey(pIter, NULL); - QW_DLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId); + QW_TLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId); pIter = taosHashIterate(mgmt->schHash, pIter); continue; } diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index e9017b937d..666c24cf01 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -557,7 +557,9 @@ int32_t schMakeHbCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) { SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); param->nodeEpId.nodeId = addr->nodeId; - memcpy(¶m->nodeEpId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp)); + SEp* pEp = SCH_GET_CUR_EP(addr); + strcpy(param->nodeEpId.ep.fqdn, pEp->fqdn); + param->nodeEpId.ep.port = pEp->port; param->pTrans = pJob->pTrans; *pParam = param; @@ -788,7 +790,10 @@ int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask) { SQueryNodeEpId epId = {0}; epId.nodeId = addr->nodeId; - memcpy(&epId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp)); + + SEp* pEp = SCH_GET_CUR_EP(addr); + strcpy(epId.ep.fqdn, pEp->fqdn); + epId.ep.port = pEp->port; SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, &epId, sizeof(SQueryNodeEpId)); if (NULL == hb) { From 1ed7a4be11770db381de9ab0974bb63db5d08c53 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 26 May 2022 22:38:31 +0800 Subject: [PATCH 49/66] fix: msg redirect while mnode replica large than 0 --- source/libs/transport/src/transCli.c | 3 ++- tests/script/jenkins/basic.txt | 2 +- tests/script/tsim/mnode/basic2.sim | 22 +++++++++++++++------- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index ba0da63dc1..0313ea5832 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -928,7 +928,8 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { */ tmsg_t msgType = pCtx->msgType; if ((pTransInst->retry != NULL && (pTransInst->retry(pResp->code))) || - (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL)) { + (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pResp->code == TSDB_CODE_APP_NOT_READY || + pResp->code == TSDB_CODE_NODE_NOT_DEPLOYED || pResp->code == TSDB_CODE_SYN_NOT_LEADER)) { pMsg->sent = 0; pMsg->st = taosGetTimestampUs(); pCtx->retryCount += 1; diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index cec52ee40f..623182fddf 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -56,7 +56,7 @@ # ---- mnode #./test.sh -f tsim/mnode/basic1.sim -#./test.sh -f tsim/mnode/basic2.sim +./test.sh -f tsim/mnode/basic2.sim # ---- show ./test.sh -f tsim/show/basic.sim diff --git a/tests/script/tsim/mnode/basic2.sim b/tests/script/tsim/mnode/basic2.sim index 66dee512eb..024e2b2406 100644 --- a/tests/script/tsim/mnode/basic2.sim +++ b/tests/script/tsim/mnode/basic2.sim @@ -40,7 +40,7 @@ print =============== create mnode 2 sql create mnode on dnode 2 $x = 0 -step1: +step2: $x = $x + 1 sleep 1000 if $x == 20 then @@ -63,7 +63,7 @@ 
if $data(2)[0] != 2 then return -1 endi if $data(2)[2] != FOLLOWER then - goto step1 + goto step2 endi print =============== create user @@ -103,17 +103,25 @@ if $rows != 3 then return -1 endi -sql show dnodes +$x = 0 +step3: + $x = $x + 1 + sleep 500 + if $x == 20 then + return -1 + endi +sql show dnodes -x step3 if $data(1)[4] != ready then - return -1 + goto step3 endi if $data(2)[4] != ready then - return -1 + goto step3 endi print =============== insert data -sql create table db.stb (ts timestamp, i int) tags (j int) -sql create table db.ctb using db.stb tags(1); +#sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +#sql create table db.ctb using db.stb tags(101, 102, "103") +#sql insert into db.ctb values(now, 1, "2") system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode2 -s stop \ No newline at end of file From 3e94a856297902e517ef7b0053ae703d117ebb53 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 26 May 2022 23:05:06 +0800 Subject: [PATCH 50/66] fix: remove table group code in 2.x --- source/libs/executor/src/executorimpl.c | 48 ++++++++++++------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 13f7ff8024..684c657d17 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4434,7 +4434,7 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT } static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode); + STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond); static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond); static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo); @@ -4473,16 +4473,6 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); - if (code != TSDB_CODE_SUCCESS) { - return NULL; - } - - if (taosArrayGetSize(pTableListInfo->pTableList) == 0) { - qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId); - return NULL; - } - tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); if (pDataReader == NULL && terrno != 0) { return NULL; @@ -4505,19 +4495,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo int32_t numOfCols = 0; - int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); - if (code != TSDB_CODE_SUCCESS) { - return NULL; - } - - if (taosArrayGetSize(pTableListInfo->pTableList) == 0) { - qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId); - return NULL; - } - tsdbReaderT pDataReader = NULL; if (pHandle->vnode) { pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); + } else { + getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo, pTagCond); } if 
(pDataReader == NULL && terrno != 0) { @@ -4915,6 +4897,7 @@ int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, if (code != TSDB_CODE_SUCCESS) { qError("doFilterTag error:%d", code); taosArrayDestroy(res); + terrno = code; return code; } for(int i = 0; i < taosArrayGetSize(res); i++){ @@ -4946,14 +4929,29 @@ SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { } tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagNode) { - SQueryTableDataCond cond = {0}; - int32_t code = initQueryTableDataCond(&cond, pTableScanNode); + STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { + int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); if (code != TSDB_CODE_SUCCESS) { - return NULL; + goto _error; + } + + if (taosArrayGetSize(pTableListInfo->pTableList) == 0) { + code = 0; + qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId); + goto _error; + } + + SQueryTableDataCond cond = {0}; + code = initQueryTableDataCond(&cond, pTableScanNode); + if (code != TSDB_CODE_SUCCESS) { + goto _error; } return tsdbQueryTables(pHandle->vnode, &cond, pTableListInfo, queryId, taskId); + +_error: + terrno = code; + return NULL; } int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, From 9a67d1f41acab754a761e29cf17447637b88ecce Mon Sep 17 00:00:00 2001 From: Sean Ely <105326513+sean-tdengine@users.noreply.github.com> Date: Thu, 26 May 2022 06:32:14 -0700 Subject: [PATCH 51/66] docs: grammar and formatting: fault tolerance Minor grammar and formatting for the Fault Tolerance & Disaster Recovery Page --- docs-en/13-operation/03-tolerance.md | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/docs-en/13-operation/03-tolerance.md b/docs-en/13-operation/03-tolerance.md index 367474cddb..9f74760278 100644 --- a/docs-en/13-operation/03-tolerance.md +++ b/docs-en/13-operation/03-tolerance.md @@ -7,12 +7,15 @@ title: Fault Tolerance & Disaster Recovery TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability. -When a data block is received by TDengine, the original data block is firstly written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted. +When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted. There are 2 configuration parameters related to WAL: -- walLevel:0:wal is disabled; 1:wal is enabled without fsync; 2:wal is enabled with fsync. -- fsync:only valid when walLevel is set to 2, it specified the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. +- walLevel: + - 0:wal is disabled; + - 1:wal is enabled without fsync; + - 2:wal is enabled with fsync. +- fsync:only valid when walLevel is set to 2, it specifies the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. 
 To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is that data ingestion performance degrades. However, if the number of concurrent data insertion threads on the client side is big enough, for example 50, the data ingestion performance is still good enough; our verification shows that the drop is only 30% compared with setting fsync to 3,000 milliseconds.
 
@@ -20,10 +23,10 @@ To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync need
 
 TDengine uses replications to provide high availability and disaster recovery capability.
 
-TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by system parameter `numOfMnodes`. The data replication between mnode replicas is in synchronous way to guarantee the metadata consistency.
+A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
 
-The number of replicas for time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
+The number of replicas for the time series data in TDengine is associated with each database; there can be many databases in a cluster, and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
 
-The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create table.
+The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
 
-As long as the dnodes of a TDengine cluster are deployed on different physical machines and replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too.
+As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is set to greater than 1, high availability can be achieved without any other assistance. If the dnodes of a TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too.
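As a companion to the replication discussion in the patch above, here is a minimal sketch of how the `replica` database option it mentions is used. The database name `demo` is hypothetical, and `walLevel`/`fsync` are assumed to be server-side settings in the configuration file rather than SQL options:

```sql
-- Sketch: keep three copies of all time series data in this database.
-- Per the constraint above, the cluster must have at least 3 dnodes.
CREATE DATABASE demo REPLICA 3;

-- Verify the setting afterwards; the replica count appears in the output.
SHOW DATABASES;
```
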
From 50dcf04e591c69087438e01bd4c201d658c67fb8 Mon Sep 17 00:00:00 2001
From: Sean Ely <105326513+sean-tdengine@users.noreply.github.com>
Date: Thu, 26 May 2022 06:40:50 -0700
Subject: [PATCH 52/66] docs: grammar: user management

Minor grammar changes for the User Management page
---
 docs-en/13-operation/06-admin.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs-en/13-operation/06-admin.md b/docs-en/13-operation/06-admin.md
index 1ca0dfeaf4..458a91b88c 100644
--- a/docs-en/13-operation/06-admin.md
+++ b/docs-en/13-operation/06-admin.md
@@ -2,7 +2,7 @@
 title: User Management
 ---
 
-System operator can use TDengine CLI `taos` to create or remove user or change password. The SQL command is as low:
+A system operator can use TDengine CLI `taos` to create or remove users or change passwords. The SQL commands are documented below:
 
 ## Create User
 
@@ -10,7 +10,7 @@ System operator can use TDengine CLI `taos` to create or remove user or change p
 CREATE USER <user_name> PASS <'password'>;
 ```
 
-When creating a user and specifying the user name and password, password needs to be quoted using single quotes.
+When creating a user and specifying the user name and password, the password needs to be quoted using single quotes.
 
 ## Drop User
 
@@ -18,7 +18,7 @@ When creating a user and specifying the user name and password, password needs t
 DROP USER <user_name>;
 ```
 
-Drop a user can only be performed by root.
+Dropping a user can only be performed by root.
 
 ## Change Password
 
@@ -26,7 +26,7 @@ Drop a user can only be performed by root.
 ALTER USER <user_name> PASS <'password'>;
 ```
 
-To keep the case of the password when changing password, password needs to be quoted using single quotes.
+To keep the case of the password when changing the password, the password needs to be quoted using single quotes.
 
 ## Change Privilege
 
@@ -36,7 +36,7 @@ ALTER USER <user_name> PRIVILEGE <privilege>;
 
 The privileges that can be changed to are `read` or `write` without single quotes.
 
-Note:there is another privilege `super`, which not allowed to be authorized to any user.
+Note: there is another privilege `super`, which is not allowed to be authorized to any user.
 
 ## Show Users
 
@@ -45,6 +45,6 @@ SHOW USERS;
 ```
 
 :::note
-In SQL syntax, `< >` means the part that needs to be input by user, excluding the `< >` itself.
+In SQL syntax, `< >` means the part that needs to be input by the user, excluding the `< >` itself.
 
 :::

From c37d13b285c8e4da43c28b62061ec5f58531801b Mon Sep 17 00:00:00 2001
From: Sean Ely <105326513+sean-tdengine@users.noreply.github.com>
Date: Thu, 26 May 2022 10:17:12 -0700
Subject: [PATCH 53/66] docs: grammar: admin import

Minor grammar updates to the Admin > Data Import page.
---
 docs-en/13-operation/07-import.md | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs-en/13-operation/07-import.md b/docs-en/13-operation/07-import.md
index befca38652..8362cec1ab 100644
--- a/docs-en/13-operation/07-import.md
+++ b/docs-en/13-operation/07-import.md
@@ -2,26 +2,26 @@
 title: Data Import
 ---
 
-There are multiple ways of importing data provided byTDengine: import with script, import from data file, import using `taosdump`.
+There are multiple ways of importing data provided by TDengine: import with script, import from data file, import using `taosdump`.
 
 ## Import Using Script
 
-TDengine CLI `taos` supports `source <filepath>` command for executing the SQL statements in the file in batch.
The SQL statements for creating databases, creating tables, and inserting rows can be written in single file with one statement on each line, then the file can be executed using `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently. +TDengine CLI `taos` supports `source ` command for executing the SQL statements in the file in batch. The SQL statements for creating databases, creating tables, and inserting rows can be written in a single file with one statement on each line, then the file can be executed using the `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently. ## Import from Data File -In TDengine CLI, data can be imported from a CSV file into an existing table. The data in single CSV must belong to same table and must be consistent with the schema of that table. The SQL statement is as below: +In TDengine CLI, data can be imported from a CSV file into an existing table. The data in a single CSV must belong to the same table and must be consistent with the schema of that table. The SQL statement is as below: ```sql insert into tb1 file 'path/data.csv'; ``` :::note -If there is description in the first line of a CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes. +If there is a description in the first line of the CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes. ::: -For example, there is a sub table d1001 whose schema is as below: +For example, there is a subtable d1001 whose schema is as below: ```sql taos> DESCRIBE d1001 @@ -49,7 +49,7 @@ The format of the CSV file to be imported, data.csv, is as below: '2018-10-12 06:38:05.000',18.30000,219,0.31000 ``` -Then, below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of current Linux user. +Then, the below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of the current Linux user. ```sql taos> insert into d1001 file '~/data.csv'; @@ -58,4 +58,4 @@ Query OK, 9 row(s) affected (0.004763s) ## Import using taosdump -A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). +A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can be used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). 
From bda7cbbbb26cec76be5f8b4e92913ba34e0ff54c Mon Sep 17 00:00:00 2001 From: Sean Ely <105326513+sean-tdengine@users.noreply.github.com> Date: Thu, 26 May 2022 10:21:56 -0700 Subject: [PATCH 54/66] Docs: grammar: manage connections Minor grammar edits to the Admin > Manage connections page --- docs-en/13-operation/09-status.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs-en/13-operation/09-status.md b/docs-en/13-operation/09-status.md index 3f3c6c9f1e..ca8974bb8f 100644 --- a/docs-en/13-operation/09-status.md +++ b/docs-en/13-operation/09-status.md @@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks title: Manage Connections and Query Tasks --- -System operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing. +A system operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing. ## Show Connections @@ -51,4 +51,4 @@ The first column of the output is stream ID, which is composed of the connection KILL STREAM ; ``` -The the above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`. +The above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`. From b61b1d556ea75f3a98300ed91904b78360913e56 Mon Sep 17 00:00:00 2001 From: Sean Ely <105326513+sean-tdengine@users.noreply.github.com> Date: Thu, 26 May 2022 10:31:46 -0700 Subject: [PATCH 55/66] docs: phrasing: admin monitor Phrasing for the Admin > TDengine Monitoring page --- docs-en/13-operation/10-monitor.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs-en/13-operation/10-monitor.md b/docs-en/13-operation/10-monitor.md index 019cf4f294..615f79ca73 100644 --- a/docs-en/13-operation/10-monitor.md +++ b/docs-en/13-operation/10-monitor.md @@ -2,19 +2,19 @@ title: TDengine Monitoring --- -After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Besides, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into `log` database too. System operator can view the data in `log` database from TDengine CLI or from a web console. +After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console. -Collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in configuration file. +The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file. ## TDinsight -TDinsight is a total solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster. 
+TDinsight is a complete solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster. From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine. -A script `TDinsight.sh` is provided to deploy TDinsight in automatic way. +A script `TDinsight.sh` is provided to deploy TDinsight automatically. -Download `TDinsight.sh` with below command: +Download `TDinsight.sh` with the below command: ```bash wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh @@ -38,7 +38,7 @@ There are two ways to setup Grafana alert notification. sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E ``` -- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of this way are as follows: +- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of enabling this plugin are listed below: - `-I`: AliCloud SMS Key ID - `-K`: AliCloud SMS Key Secret @@ -47,7 +47,7 @@ There are two ways to setup Grafana alert notification. - `-T`: Input parameters in JSON format for the SMS notification template, for example`{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}` - `-B`: List of mobile numbers to be notified - Below is an example of the full command using this way. + Below is an example of the full command using the AliCloud SMS alert. ```bash sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ @@ -55,6 +55,6 @@ There are two ways to setup Grafana alert notification. -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' ``` -Launch `TDinsight.sh` as above command and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`. +Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`. For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/). From 0c1ed45d48a59df8dc54deee151249eb80cc5687 Mon Sep 17 00:00:00 2001 From: Sean Ely <105326513+sean-tdengine@users.noreply.github.com> Date: Thu, 26 May 2022 11:07:30 -0700 Subject: [PATCH 56/66] docs: grammar and phrasing: admin diagnostics Minor grammar, edits, and phrasing for the Admin > Problem Diagnostics page. --- docs-en/13-operation/17-diagnose.md | 42 ++++++++++++++--------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/docs-en/13-operation/17-diagnose.md b/docs-en/13-operation/17-diagnose.md index b140d925c0..53d808ef51 100644 --- a/docs-en/13-operation/17-diagnose.md +++ b/docs-en/13-operation/17-diagnose.md @@ -10,13 +10,13 @@ The diagnostic for network connection can be executed between Linux and Linux or Diagnostic steps: -1. If the port range to be diagnosed are being occupied by a `taosd` server process, please firstly stop `taosd. -2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server. -3. On the client side, execute command `taos -n client -h -P -l ` to send testing package to the specified server and port. +1. If the port range to be diagnosed are being occupied by a `taosd` server process, please first stop `taosd. +2. 
On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server". +3. On the client side, execute command `taos -n client -h -P -l ` to send a testing package to the specified server and port. --l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please be noted that the package length must be same in the above 2 commands executed on server side and client side respectively. +-l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please note that the package length must be same in the above 2 commands executed on server side and client side respectively. -Output of the server side is as below for example: +Output of the server side for the example is below: ```bash # taos -n server -P 6000 @@ -47,7 +47,7 @@ Output of the server side is as below for example: 12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011 ``` -Output of the client side is as below for example: +Output of the client side for the example is below: ```bash # taos -n client -h 172.27.0.7 -P 6000 @@ -65,13 +65,13 @@ Output of the client side is as below for example: 12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011 ``` -The output needs to be checked carefully for the system operator to find out root cause and solve the problem. +The output needs to be checked carefully for the system operator to find out the root cause and solve the problem. ## Startup Status and RPC Diagnostic `taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster. -`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or work abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's network problem or `taosd` is abnormal. +`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or `taosd` is abnormal. ## Sync and Arbitrator Diagnostic @@ -80,7 +80,7 @@ taos -n sync -P 6040 -h taos -n sync -P 6042 -h ``` -The above commands can be executed on Linux Shell to check whether the port for sync works well and whether the sync module of the server side works well. Besides, `-P 6042` is used to check whether the arbitrator is configured properly and works well. +The above commands can be executed on Linux Shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well. ## Network Speed Diagnostic @@ -88,12 +88,12 @@ The above commands can be executed on Linux Shell to check whether the port for From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. 
Parameters can be used when testing network speed are as below: --n:When set to "speed", it means testing network speed --h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used --P:The port of the server process to connect to, the default value is 6030 --N:The number of packages that will be sent in the test, range is [1,10000], default value is 100 --l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024 --S:The type of network packages to send, can be either TCP or UDP, default value is +-n:When set to "speed", it means testing network speed. +-h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used. +-P:The port of the server process to connect to, the default value is 6030. +-N:The number of packages that will be sent in the test, range is [1,10000], default value is 100. +-l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024. +-S:The type of network packages to send, can be either TCP or UDP, default value is TCP. ## FQDN Resolution Diagnostic @@ -101,22 +101,22 @@ From version 2.2.0.0, the above command can be executed on Linux Shell to test t From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: --n:When set to "fqdn", it means testing the speed of resolving FQDN --h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. +-n:When set to "fqdn", it means testing the speed of resolving FQDN. +-h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. ## Server Log -The parameter `debugFlag` is used to control the log level of `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143. +The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143. -Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs. +Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs. - The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information - The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog` ## Client Log -An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only log at level of INFO/ERROR/WARNING is recorded, it and needs to be changed to 135 or 143 so that log at DEBUG or TRACE level can be recorded for debugging purpose. 
+An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded, for debugging purposes it needs to be changed to 135 or 143 so that logs at DEBUG or TRACE level can be recorded. The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process. -log file is written in async way to minimize the workload on disk, bu the penalty is that a few log lines may be lost in some extreme conditions. +Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions. From 7b86c581ef71fab405b764cf8bda86ee96d24676 Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Thu, 26 May 2022 11:59:44 -0700 Subject: [PATCH 57/66] docs:cdiwadkar16-patch-4 - added best practices and explicit clarifications Added best practices for firewall and data protections. Added explicit hostnames (fqdn) in cluster set up examples for clarity. --- docs-en/10-cluster/01-deploy.md | 46 ++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/docs-en/10-cluster/01-deploy.md b/docs-en/10-cluster/01-deploy.md index 844a026ff6..200da1be3f 100644 --- a/docs-en/10-cluster/01-deploy.md +++ b/docs-en/10-cluster/01-deploy.md @@ -6,29 +6,35 @@ title: Deployment ### Step 1 -The FQDN of all hosts needs to be setup properly, all the FQDNs need to be configured in the /etc/hosts of each host. It must be confirmed that each FQDN can be accessed (by ping, for example) from any other hosts. +The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host. For e.g. you can do this by using the `ping` command. -On each host the command `hostname -f` can be executed to get the hostname. `ping` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, need to be checked and revised to make any two hosts accessible to each other. +To get the hostname on any host, the command `hostname -f` can be executed. `ping ` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, needs to be checked and revised, to make any two hosts accessible to each other. :::note - The host where the client program runs also needs to be configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster. -- It's suggested to disable the firewall for all hosts in the cluster. At least TCP/UDP for port 6030~6042 need to be open if a firewall is enabled. +- Please ensure that your firewall rules do not block TCP/UDP on ports 6030-6042 on all hosts in the cluster. ::: ### Step 2 -If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). 
To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`. +If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`. + +:::note + +As a best practice, before cleaning up any data files or directories, please ensure that your data has been backed up correctly, if required by your data integrity, backup, security, or other standard operating protocols (SOP). + +::: ### Step 3 -Now it's time to install TDengine on all hosts without starting `taosd`, the versions on all hosts should be same. If it's prompted to input the existing TDengine cluster, simply press carriage return to ignore it. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install). +Now it's time to install TDengine on all hosts but without starting `taosd`. Note that the versions on all hosts should be same. If you are prompted to input the existing TDengine cluster, simply press carriage return to ignore the prompt. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install). ### Step 4 -Now each physical node (referred to as `dnode` hereinafter, it's abbreviation for "data node") of TDengine needs to be configured properly. Please note that one dnode doesn't stand for one host, multiple TDengine nodes can be started on single host as long as they are configured properly without conflicting. More specifically each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following. +Now each physical node (referred to, hereinafter, as `dnode` which is an abbreviation for "data node") of TDengine needs to be configured properly. Please note that one dnode doesn't stand for one host. Multiple TDengine dnodes can be started on a single host as long as they are configured properly without conflicting. More specifically each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following. ```c // firstEp is the end point to connect to when any dnode starts @@ -67,9 +73,11 @@ Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` ## Start Cluster +In the following example we assume that first dnode has FQDN h1.taosdata.com and the second dnode has FQDN h2.taosdata.com. + ### Start The First DNODE -The first dnode can be started following the instructions in [Get Started](/get-started/), for example h1.taosdata.com. Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example: +The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example: ``` Welcome to the TDengine shell from Linux, Client Version:2.0.0.0 @@ -80,27 +88,41 @@ Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
taos> show dnodes; id | end_point | vnodes | cores | status | role | create_time | ===================================================================================== - 1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 | + 1 | h1.taosdata.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 | Query OK, 1 row(s) in set (0.006385s) taos> ``` -From the above output, it is shown that the end point of the started dnode is "h1.taos.com:6030", which is the `firstEp` of the cluster. +From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster. ### Start Other DNODEs There are a few steps necessary to add other dnodes in the cluster. -First, start `taosd` as instructed in [Get Started](/get-started/), assuming it's for the second dnode. Before starting `taosd`, please making sure the configuration is correct, especially `firstEp`, `FQDN` and `serverPort`, `firstEp` must be same as the dnode shown in the section "Start First DNODE", i.e. "h1.taosdata.com" in this example. +Let's assume we are starting the second dnode with FQDN, h2.taosdata.com. First we make sure the configuration is correct. -Then, on the first dnode, use TDengine CLI `taos` to execute below command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes. +```c +// firstEp is the end point to connect to when any dnode starts +firstEp h1.taosdata.com:6030 + +// must be configured to the FQDN of the host where the dnode is launched +fqdn h2.taosdata.com + +// the port used by the dnode, default is 6030 +serverPort 6030 + +``` + +Second, we can start `taosd` as instructed in [Get Started](/get-started/). + +Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes. ```sql CREATE DNODE "h2.taos.com:6030"; ``` -Then on the first dnode, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not. +Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not. ```sql SHOW DNODES; From 76868b76bc99320938282f9f6f30eff06ee00f18 Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Thu, 26 May 2022 12:46:55 -0700 Subject: [PATCH 58/66] docs:cdiwadkar16-patch-4 - several changes Some stylistic changes. Some clarifying changes. --- docs-en/07-develop/02-model/index.mdx | 35 +++++++++++++++++---------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/docs-en/07-develop/02-model/index.mdx b/docs-en/07-develop/02-model/index.mdx index bdeca37ec1..86853aaaa3 100644 --- a/docs-en/07-develop/02-model/index.mdx +++ b/docs-en/07-develop/02-model/index.mdx @@ -2,19 +2,26 @@ title: Data Model --- -The data model employed by TDengine is similar to a relational database, you need to create databases and tables. Design the data model based on your own application scenarios and you should design the STable (abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntax details. +The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. 
You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. ## Create Database -The characteristics of data from different data collection points may be different, such as collection frequency, days to keep, number of replicas, data block size, whether it's allowed to update data, etc. For TDengine to operate with the best performance, it's strongly suggested to put the data with different characteristics into different databases because different storage policies can be set for each database. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, compress or not, the time range of the data in single data file, etc. Below is an example of the SQL statement for creating a database. +The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as, the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. Below is an example of the SQL statement to create a database. ```sql CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; ``` -In the above SQL statement, a database named "power" will be created, the data in it will be kept for 365 days, which means the data older than 365 days will be deleted automatically, a new data file will be created every 10 days, the number of memory blocks is 6, data is allowed to be updated. For more details please refer to [Database](/taos-sql/database). +In the above SQL statement: +- a database named "power" will be created +- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically +- a new data file will be created every 10 days +- the number of memory blocks is 6 +- data is allowed to be updated -After creating a database, the current database in use can be switched using SQL command `USE`, for example below SQL statement switches the current database to `power`. Without the current database specified, table name must be preceded with the corresponding database name. +For more details please refer to [Database](/taos-sql/database). + +After creating a database, the current database in use can be switched using SQL command `USE`. For example the SQL statement below switches the current database to `power`. 
Without the current database specified, table name must be preceded with the corresponding database name. ```sql USE power; @@ -30,7 +37,7 @@ USE power; ## Create STable -In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), the below SQL statement can be used to create the super table. +In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), the SQL statement below can be used to create the super table. ```sql CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); @@ -41,15 +48,15 @@ If you are using versions prior to 2.0.15, the `STable` keyword needs to be repl ::: -Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must be timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The column type can be integer, float, double, string ,etc. Besides, the schema for tags need to be provided, like location and groupId in the example. The tag type can be integer, float, string, etc. The static properties of a data collection point can be defined as tags, like the location, device type, device group ID, manager ID, etc. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details. +Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details. -For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another point for environmental data like temperature, humidity and wind direction, multiple STables are required for such kind of device. +For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. 
For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices. At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 of metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, while one or more STables can exist in a database. ## Create Table -A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Beside, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement. +A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement. ```sql CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); @@ -57,17 +64,17 @@ CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details. -In TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables. +In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables. :::warning It's not recommended to create a table in a database while using a STable from another database as template. :::tip -It's suggested to use the global unique ID of a data collection point as the table name, for example the device serial number. If there isn't such a unique ID, multiple IDs that are not global unique can be combined to form a global unique ID. It's not recommended to use a global unique ID as tag value. +It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value. ## Create Table Automatically -In some circumstances, it's unknown whether the table already exists when inserting rows. 
The table can be created automatically using the SQL statement below, and nothing will happen if the table already exist. +In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists. ```sql INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32); @@ -79,6 +86,8 @@ For more details please refer to [Create Table Automatically](/taos-sql/insert#a ## Single Column vs Multiple Column -A multiple columns data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamp are identical, these metrics can be put in a single STable as columns. However, there is another kind of design, i.e. single column data model, a table is created for each metric, which means a STable is required for each kind of metric. For example, 3 STables are required for current, voltage and phase. +A multiple columns data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns. -It's recommended to use a multiple column data model as much as possible because it's better in the performance of inserting or querying rows. In some cases, however, the metrics to be collected vary frequently and correspondingly the STable schema needs to be changed frequently too. In such case, it's more convenient to use single column data model. +However, there is another kind of design, i.e. single column data model in which a table is created for each metric. This means that a STable is required for each kind of metric. For example in a single column model, 3 STables would be required for current, voltage and phase. + +It's recommended to use a multiple column data model as much as possible because insert and query performance is higher. In some cases, however, the collected metrics may vary frequently and so the corresponding STable schema needs to be changed frequently too. In such cases, it's more convenient to use single column data model. From fe5ebe38bafa533a4ed0dc3c872bd623097de8fc Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Thu, 26 May 2022 13:24:10 -0700 Subject: [PATCH 59/66] docs:cdiwadkar16-patch-4-8 - minor updates Spelling correction Stylistic changes --- docs-en/07-develop/03-insert-data/02-influxdb-line.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx index 06f6387b8a..be46ebf0c9 100644 --- a/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx +++ b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx @@ -15,13 +15,13 @@ import CLine from "./_c_line.mdx"; ## Introduction -A single line of text is used in InfluxDB Line protocol format represents one row of data, each line contains 4 parts as shown below. +In the InfluxDB Line protocol format, a single line of text is used to represent one row of data. Each line contains 4 parts as shown below. 
``` measurement,tag_set field_set timestamp ``` -- `measurement` will be used as the STable name +- `measurement` will be used as the name of the STable - `tag_set` will be used as tags, with format like `=,=` - `field_set`will be used as data columns, with format like `=,=` - `timestamp` is the primary key timestamp corresponding to this row of data @@ -34,8 +34,8 @@ meters,location=California.LoSangeles,groupid=2 current=13.4,voltage=223,phase=0 :::note -- All the data in `tag_set` will be converted to ncahr type automatically . -- Each data in `field_set` must be self-description for its data type. For example 1.2f32 means a value 1.2 of float type, it will be treated as double without the "f" type suffix. +- All the data in `tag_set` will be converted to nchar type automatically . +- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double. - Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h). ::: From 751cd8a8547ba5be15505abd722dfb95a2f03def Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Thu, 26 May 2022 13:29:38 -0700 Subject: [PATCH 60/66] docs:cdiwadkar16-patch-4-9 - spelling change Spelling correction. --- docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx index b83bbdf61e..18a695cda8 100644 --- a/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx +++ b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx @@ -15,7 +15,7 @@ import CTelnet from "./_c_opts_telnet.mdx"; ## Introduction -A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs single column data model, so one line can only contain a single data column. There can be multiple tags. Each line contains 4 parts as below: +A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs a single column data model, so each line can only contain a single data column. There can be multiple tags. Each line contains 4 parts as below: ``` =[ =] @@ -60,7 +60,7 @@ Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_te -2 STables will be crated automatically while each STable has 4 rows of data in the above sample code. +2 STables will be created automatically and each STable has 4 rows of data in the above sample code. ```cmd taos> use test; From 6b861ad00320f0c22ffc14912fbda804448d64d4 Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Thu, 26 May 2022 13:33:27 -0700 Subject: [PATCH 61/66] docs:cdiwadkar16-patch-4-10 - grammar change Grammar changes. 
--- docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx index 74267a344b..3a23944031 100644 --- a/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx +++ b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx @@ -47,7 +47,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http :::note - In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type. -- Only data in array format is accepted, array must be used even there is only one row. +- Only data in array format is accepted and so an array must be used even if there is only one row. ::: From 13c916684bc00c2737cf6c1d9be10e33fd691f9c Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Thu, 26 May 2022 14:37:46 -0700 Subject: [PATCH 62/66] docs/cdiwadkar16-patch-4-11 - minor changes Stylistic and grammar changes. --- docs-en/07-develop/04-query-data/index.mdx | 24 +++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs-en/07-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx index 761fe1889b..74562c8823 100644 --- a/docs-en/07-develop/04-query-data/index.mdx +++ b/docs-en/07-develop/04-query-data/index.mdx @@ -20,7 +20,7 @@ import CAsync from "./_c_async.mdx"; ## Introduction -SQL is used by TDengine as the query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine CLI `taos` can also be used to execute SQL Ad-Hoc queries. Here is the list of major query functionalities supported by TDengine: +SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: - Query on single column or multiple columns - Filter on tags or data columns:>, <, =, <\>, like @@ -31,7 +31,7 @@ SQL is used by TDengine as the query language. Application programs can send SQL - Join query with timestamp alignment - Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff -For example, the SQL statement below can be executed in TDengine CLI `taos` to select the rows whose voltage column is bigger than 215 and limit the output to only 2 rows. +For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows. ```sql select * from d1001 where voltage > 215 order by ts desc limit 2; @@ -46,46 +46,46 @@ taos> select * from d1001 where voltage > 215 order by ts desc limit 2; Query OK, 2 row(s) in set (0.001100s) ``` -To meet the requirements of many use cases, some special functions have been added in TDengine, for example `twa` (Time Weighted Average), `spared` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. +To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). 
Furthermore, continuous query is also supported in TDengine. For detailed query syntax please refer to [Select](/taos-sql/select). ## Aggregation among Tables -In many use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviated for super table), is used in TDengine to represent a kind of data collection point, and a subtable is used to represent a specific data collection point. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same kind of data collection points. Aggregate functions applicable for tables can be used directly on STables, the syntax is exactly the same. +In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. -In summary, for a STable, its subtables can be aggregated by a simple query on the STable, it's a kind of join operation. But tables belong to different STables can not be aggregated. +In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. ### Example 1 -In TDengine CLI `taos`, use below SQL to get the average voltage of all the meters in California grouped by location. +In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. ``` taos> SELECT AVG(voltage) FROM meters GROUP BY location; avg(voltage) | location | ============================================================= - 222.000000000 | California.LoSangeles | + 222.000000000 | California.LosAngeles | 219.200000000 | California.SanFrancisco | Query OK, 2 row(s) in set (0.002136s) ``` ### Example 2 -In TDengine CLI `taos`, use below SQL to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. +In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. ``` taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; - cunt(*) | max(current) | + count(*) | max(current) | ================================== 5 | 13.4 | Query OK, 1 row(s) in set (0.002136s) ``` -Join queries are only allowed between the subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked as to whether they supports STables or not. +Join queries are only allowed between subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked as to whether they support STables or not. 
## Down Sampling and Interpolation -In IoT use cases, down sampling is widely used to aggregate the data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. +In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. ``` taos> SELECT sum(current) FROM d1001 INTERVAL(10s); @@ -169,7 +169,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database ### Asynchronous Query -Besides synchronous queries, an asynchronous query API is also provided by TDengine to insert or query data more efficiently. With a similar hardware and software environment, the async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other works to improve the performance of the whole application system. Async APIs perform especially better in the case of poor networks. +Besides synchronous queries, an asynchronous query API is also provided by TDengine to insert or query data more efficiently. With a similar hardware and software environment, the async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other work to improve the performance of the whole application system. Async APIs perform especially better in the case of poor networks. Please note that async query can only be used with a native connection. From 3103b55bc3b00eed1399acfe18ce1471ccd01c13 Mon Sep 17 00:00:00 2001 From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com> Date: Thu, 26 May 2022 14:47:02 -0700 Subject: [PATCH 63/66] docs:cdiwadkar16-patch-4-12 - minor changes Minor changes for spelling and style. --- docs-en/07-develop/05-continuous-query.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs-en/07-develop/05-continuous-query.mdx b/docs-en/07-develop/05-continuous-query.mdx index f233deba31..1aea5783fc 100644 --- a/docs-en/07-develop/05-continuous-query.mdx +++ b/docs-en/07-develop/05-continuous-query.mdx @@ -1,12 +1,12 @@ --- sidebar_label: Continuous Query -description: "Continuous query is a query that's executed automatically according to predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing." +description: "Continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing." title: "Continuous Query" --- -Continuous query is a query that's executed automatically according to a predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing. Continuous query can be performed on a table or STable in TDengine. The result of continuous query can be pushed to clients or written back to TDengine. Each query is executed on a time window, which moves forward with time. 
The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively. +A continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing. A continuous query can be performed on a table or STable in TDengine. The results of a continuous query can be pushed to clients or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively. -Continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With continuous query, the result can be generated according to a time window to achieve down sampling of the original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to clients or written to TDengine. +A continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With a continuous query, the result can be generated based on a time window to achieve down sampling of the original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to clients or written to TDengine. There are some differences between continuous query in TDengine and time window computation in stream computing: @@ -35,7 +35,7 @@ In this section the use case of meters will be used to introduce how to use cont ```sql create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int); create table D1001 using meters tags ("California.SanFrancisco", 2); -create table D1002 using meters tags ("California.LoSangeles", 2); +create table D1002 using meters tags ("California.LosAngeles", 2); ``` The SQL statement below retrieves the average voltage for a one minute time window, with each time window moving forward by 30 seconds. @@ -68,7 +68,7 @@ taos> select * from avg_vol; 2020-07-29 13:39:00.000 | 223.0800000 | ``` -Please note that the minimum allowed time window is 10 milliseconds, and no upper limit. +Please note that the minimum allowed time window is 10 milliseconds, and there is no upper limit. It's possible to specify the start and end time of a continuous query. If the start time is not specified, the timestamp of the first row will be considered as the start time; if the end time is not specified, the continuous query will be performed indefinitely, otherwise it will be terminated once the end time is reached. For example, the continuous query in the SQL statement below will be started from now and terminated one hour later. 
From d29a31efe6ef46a5657940b44db0128ba21eaee4 Mon Sep 17 00:00:00 2001
From: Zhengmao Zhu <70138133+fenghuazzm@users.noreply.github.com>
Date: Sun, 22 May 2022 20:26:09 +0800
Subject: [PATCH 64/66] Update index.md

---
 docs-cn/14-reference/12-config/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs-cn/14-reference/12-config/index.md b/docs-cn/14-reference/12-config/index.md
index cbb3833b5b..65c0e6bcac 100644
--- a/docs-cn/14-reference/12-config/index.md
+++ b/docs-cn/14-reference/12-config/index.md
@@ -590,7 +590,7 @@ charset 的有效值是 UTF-8。
 | 适用范围 | 仅服务端适用 |
 | 含义 | 每个 DB 中 能够使用的最大 vnode 个数 |
 | 取值范围 | 0-8192 |
-| 缺省值 | |
+| 缺省值 | 0 |

 ### maxTablesPerVnode

From 91a2687bc4d26dcc225bdf34e0a7f5943e913ff7 Mon Sep 17 00:00:00 2001
From: gccgdb1234
Date: Fri, 27 May 2022 08:40:23 +0800
Subject: [PATCH 65/66] docs: refine description about firewall

---
 docs-cn/10-cluster/01-deploy.md         | 2 +-
 docs-cn/14-reference/12-config/index.md | 2 +-
 docs-cn/21-tdinternal/01-arch.md        | 2 +-
 docs-cn/27-train-faq/01-faq.md          | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs-cn/10-cluster/01-deploy.md b/docs-cn/10-cluster/01-deploy.md
index cee140c0ec..b44d2942f2 100644
--- a/docs-cn/10-cluster/01-deploy.md
+++ b/docs-cn/10-cluster/01-deploy.md
@@ -22,7 +22,7 @@ title: 集群部署

 ### 第二步

-建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042 的 TCP 和 UDP 端口都是开放的。强烈建议先关闭防火墙,集群搭建完毕之后,再来配置端口;
+确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。

 ### 第三步

diff --git a/docs-cn/14-reference/12-config/index.md b/docs-cn/14-reference/12-config/index.md
index 65c0e6bcac..89c414a5b8 100644
--- a/docs-cn/14-reference/12-config/index.md
+++ b/docs-cn/14-reference/12-config/index.md
@@ -80,7 +80,7 @@ taos --dump-config
 | 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter,默认端口为 6041 |

 :::note
-对于端口,TDengine 会使用从 serverPort 起 13 个连续的 TCP 和 UDP 端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从 6030 到 6042 共 13 个端口,而且必须 TCP 和 UDP 都打开。(详细的端口情况请参见下表)
+确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。(详细的端口情况请参见下表)
 :::
 | 协议 | 默认端口 | 用途说明 | 修改方法 |
 | :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |

diff --git a/docs-cn/21-tdinternal/01-arch.md b/docs-cn/21-tdinternal/01-arch.md
index 456d4bea91..b9b13468fc 100644
--- a/docs-cn/21-tdinternal/01-arch.md
+++ b/docs-cn/21-tdinternal/01-arch.md
@@ -41,7 +41,7 @@ TDengine 分布式架构的逻辑结构图如下:

 - 集群数据节点对外提供 RESTful 服务占用一个 TCP 端口,是 serverPort+11。
 - 集群内数据节点与 Arbitrator 节点之间通讯占用一个 TCP 端口,是 serverPort+12。

-因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的 serverPort。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port)
+因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port)

 **集群对外连接:**TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的 End Point(FQDN 加配置的端口号)。通过命令行 CLI 启动应用 taos 时,可以通过选项-h 来指定数据节点的 FQDN,-P 来指定其配置的端口号,如果端口不配置,将采用 TDengine 的系统配置参数 serverPort。

diff --git a/docs-cn/27-train-faq/01-faq.md b/docs-cn/27-train-faq/01-faq.md
index 0ceae1fa49..a657a95e8d 100644
--- a/docs-cn/27-train-faq/01-faq.md
+++ b/docs-cn/27-train-faq/01-faq.md
@@ -61,7 +61,7 @@ title: 常见问题及反馈

 5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。

-6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确认 TCP/UDP 端口 6030-6042 是打开的
+6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。
 7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里

From bcb6f4d1689c64e97f8f17611291f6af45b537f2 Mon Sep 17 00:00:00 2001
From: Chait Diwadkar <94201190+cdiwadkar16@users.noreply.github.com>
Date: Thu, 26 May 2022 18:44:41 -0700
Subject: [PATCH 66/66] docs:cdiwadkar16-patch-4-13 - several

Stylistic changes.
Clarifying changes.
Removed reference to taos_use_result since it is not mentioned in code
snippet.
---
 docs-en/07-develop/06-subscribe.mdx | 38 +++++++++++++++--------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/docs-en/07-develop/06-subscribe.mdx b/docs-en/07-develop/06-subscribe.mdx
index 3fa2d1280f..66c8f51290 100644
--- a/docs-en/07-develop/06-subscribe.mdx
+++ b/docs-en/07-develop/06-subscribe.mdx
@@ -1,6 +1,6 @@
 ---
 sidebar_label: Subscription
-description: "Lightweight service for data subscription and pushing, the time series data inserted into TDengine continuously can be pushed automatically to the subscribing clients."
+description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
 title: Data Subscription
 ---

@@ -16,9 +16,9 @@ import CDemo from "./_sub_c.mdx";

 ## Introduction

-Due to the nature of time series data, data inserting in TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, so each table in TDengine can essentially be considered as a message queue.
+Due to the nature of time series data, data insertion into TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, and so each table in TDengine can essentially be considered as a message queue.

-A lightweight service for data subscription and pushing is built in TDengine. With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance is performed on the client side, the client programs poll the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start for retrieving new data is up to the client side.
+A lightweight service for data subscription and publishing is built into TDengine. With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance is performed on the client side. The client programs poll the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start retrieving new data is up to the client side.

 There are 3 major APIs related to subscription provided in the TDengine client driver.

@@ -32,7 +32,7 @@ For more details about these APIs please refer to [C/C++ Connector](/reference/c

 If we want to get a notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways:

-The first way is to query on each sub table and record the last timestamp matching the criteria, then after some time query on the data later than recorded timestamp and repeat this process. The SQL statements for this way are as below.
+The first way is to query each sub table and record the last timestamp matching the criteria. Then after some time, query the data later than the recorded timestamp, and repeat this process. The SQL statements for this way are as below.

```sql
select * from D1001 where ts > {last_timestamp1} and current > 10;
@@ -50,7 +50,7 @@ select * from meters where ts > {last_timestamp} and current > 10;

However, this presents a new problem in how to choose `last_timestamp`. First, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Second, the time when the data from different meters arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fastest" meter is used as `last_timestamp`, some data from other meters may be missed.

-All the problems mentioned above can be resolved thoroughly using subscription provided by TDengine.
+All the problems mentioned above can be resolved easily using the subscription functionality provided by TDengine.

The first step is to create subscription using `taos_subscribe`.

@@ -65,31 +65,33 @@ if (async) {
 }
 ```

-The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing, `subscribe_callback` is a call back function provided by the client program and it's suggested not to do time consuming operation in the call back function.
+The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing. `subscribe_callback` is a callback function provided by the client program. You should not perform time consuming operations in the callback function.

-The parameter `taos` is an established connection. There is nothing special in sync subscription mode. In async subscription, it should be exclusively by current thread, otherwise unpredictable error may occur.
+The parameter `taos` is an established connection. Nothing special needs to be done for thread safety for synchronous subscription. For asynchronous subscription, the taos_subscribe function should be called exclusively by the current thread, to avoid unpredictable errors.

-The parameter `sql` is a `select` statement in which `where` clause can be used to specify filter conditions. In our example, the data whose current exceeds 10A needs to be subscribed like below SQL statement:
+The parameter `sql` is a `select` statement in which the `where` clause can be used to specify filter conditions. In our example, we can subscribe to the records in which the current exceeds 10A, with the following SQL statement:
```sql
select * from meters where current > 10;
```

-Please note that, all the data will be processed because no start time is specified. If only the data from one day ago needs to be processed, a time related condition can be added:
+Please note that all the data will be processed because no start time is specified. If we only want to process data for the past day, a time related condition can be added:

```sql
select * from meters where ts > now - 1d and current > 10;
```

-The parameter `topic` is the name of the subscription, it needs to be guaranteed unique in the client program, but it's not necessary to be globally unique because subscription is implemented in the APIs on the client side.
+The parameter `topic` is the name of the subscription. The client application must guarantee that the name is unique. However, it doesn't have to be globally unique because subscription is implemented in the APIs on the client side.

-If the subscription named as `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named as `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken. If the value of `restart` is **true** (i.e. a non-zero value), the data will be retrieved from beginning, or if it is **false** (i.e. zero), the data already consumed before will not be processed again.
+If the subscription named as `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named as `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken.
+
+If the value of `restart` is **true** (i.e. a non-zero value), data will be retrieved from the beginning. If it is **false** (i.e. zero), the data already consumed before will not be processed again.

-The last parameter of `taos_subscribe` is the polling interval in unit of millisecond. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function.
+The last parameter of `taos_subscribe` is the polling interval in units of milliseconds. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function.

 The second to last parameter of `taos_subscribe` is used to pass arguments to the call back function. `taos_subscribe` doesn't process this parameter and simply passes it to the call back function. This parameter is simply ignored in sync mode.
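Putting these parameters together, a minimal sketch of the two kinds of `taos_subscribe` calls may help. The variable names `taos`, `async` and `blockFetch`, the topic string, and the 1000 ms interval are illustrative assumptions based on the surrounding discussion, not the exact sample code in the repository:

```c
// Sketch only: assumes an established connection `taos` and the
// subscribe_callback function shown further below in this file.
const char* topic = "current-monitor";  // assumed name; must be unique within this client
const char* sql = "select * from meters where current > 10;";
int restart = 1;     // non-zero: consume from the very beginning
int blockFetch = 0;  // handed through to the callback as `param` in async mode

TAOS_SUB* tsub = NULL;
if (async) {
  // Async: subscribe_callback is invoked no more often than every 1000 ms.
  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
} else {
  // Sync: no callback is registered; taos_consume may block so that two
  // consecutive invocations are at least 1000 ms apart.
  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 1000);
}
```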
-After a subscription is created, its data can be consumed and processed, below is the sample code of how to consume data in sync mode, in the else part if `if (async)`.
+After a subscription is created, its data can be consumed and processed. Shown below is the sample code to consume data in sync mode, in the else condition of `if (async)`.

 ```c
 if (async) {
@@ -106,7 +108,7 @@ if (async) {
 }
 ```

-In the above sample code, there is an infinite loop, each time carriage return is entered `taos_consume` is invoked, the return value of `taos_consume` is the selected result set, exactly as the input of `taos_use_result`, in the above sample `print_result` is used instead to simplify the sample. Below is the implementation of `print_result`.
+In the above sample code in the else condition, there is an infinite loop. Each time carriage return is entered, `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. Below is the implementation of `print_result`.

 ```c
 void print_result(TAOS_RES* res, int blockFetch) {
@@ -133,9 +135,9 @@ void print_result(TAOS_RES* res, int blockFetch) {
 }
 ```

-In the above code `taos_print_row` is used to process the data consumed. All the matching rows will be printed.
+In the above code `taos_print_row` is used to process the data consumed. All matching rows are printed.

-In async mode, the data consuming is simpler as below.
+In async mode, consuming data is simpler as shown below.

 ```c
 void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
@@ -175,7 +177,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {

 Then, this row of data will be shown by the example program on the first terminal.

 ## Examples

-Below example program demonstrates how to subscribe the data rows whose current exceeds 10A using connectors.
+The example program below demonstrates how to subscribe, using connectors, to data rows in which current exceeds 10A.

 ### Prepare Data

@@ -250,7 +252,7 @@ taos> use power;
 taos> insert into d1001 values(now, 12.4, 220, 1);
 ```

-Because the current in inserted row exceeds 10A, it will be consumed by the example program.
+Because the current in the inserted row exceeds 10A, it will be consumed by the example program.

```
ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2