From 4dca6df296a3e3882f5cbd7a1e04a867377dc900 Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Mon, 9 Jan 2023 11:50:34 +0800
Subject: [PATCH 01/45] enh: optimize the error message for cases that the percentile function does not support

---
 source/libs/parser/src/parTranslater.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 05d49bb027..69a39b956f 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1550,11 +1550,14 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p
   // select percentile() without from clause is also valid
   if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) ||
                           (TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
-                           TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType))) ||
-      NULL != pSelect->pPartitionByList) {
+                           TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) {
     return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
                                    "%s is only supported in single table query", pFunc->functionName);
   }
+  if (NULL != pSelect->pPartitionByList) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
+                                   "%s function is not supported in fill query", pFunc->functionName);
+  }
   return TSDB_CODE_SUCCESS;
 }

From 7466b5042d2249f70d4429605e4c068e81c5fac5 Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Thu, 12 Jan 2023 13:50:31 +0800
Subject: [PATCH 02/45] fix: stmt memory leak

---
 source/libs/parser/src/parInsertStmt.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c
index 4ed72e6c14..f5c362edf9 100644
--- a/source/libs/parser/src/parInsertStmt.c
+++ b/source/libs/parser/src/parInsertStmt.c
@@ -47,7 +47,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
                            TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen) {
   STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock;
   SMsgBuf           pBuf = {.buf = msgBuf, .len = msgBufLen};
-  int32_t     code = TSDB_CODE_SUCCESS;
+  int32_t           code = TSDB_CODE_SUCCESS;
   SParsedDataColInfo* tags = (SParsedDataColInfo*)boundTags;
   if (NULL == tags) {
     return TSDB_CODE_APP_ERROR;
   }
@@ -137,7 +137,8 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
   }

   SVCreateTbReq tbReq = {0};
-  insBuildCreateTbReq(&tbReq, tName, pTag, suid, sTableName, tagName, pDataBlock->pTableMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL);
+  insBuildCreateTbReq(&tbReq, tName, pTag, suid, sTableName, tagName, pDataBlock->pTableMeta->tableInfo.numOfTags,
+                      TSDB_DEFAULT_TABLE_TTL);
   code = insBuildCreateTbMsg(pDataBlock, &tbReq);
   tdDestroySVCreateTbReq(&tbReq);
@@ -460,9 +461,7 @@ void qFreeStmtDataBlock(void* pDataBlock) {
     return;
   }

-  taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pTableMeta);
-  taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pData);
-  taosMemoryFreeClear(pDataBlock);
+  insDestroyDataBlock((STableDataBlocks*)pDataBlock);
 }

 void qDestroyStmtDataBlock(void* pBlock) {

From 0e061fb53cb44117f5dcb5342b45f540625c7b58 Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Thu, 12 Jan 2023 13:57:47 +0800
Subject: [PATCH 03/45] fix: stmt memory leak

---
 source/libs/parser/src/parTranslater.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 69a39b956f..9991c2c6ae 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1556,7 +1556,7 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p
   }
   if (NULL != pSelect->pPartitionByList) {
     return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
-                                   "%s function is not supported in fill query", pFunc->functionName);
+                                   "%s function is not supported in partition query", pFunc->functionName);
   }
   return TSDB_CODE_SUCCESS;
 }

From 4359562e9cde38df77ce722abc8c16794ea29913 Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Thu, 12 Jan 2023 15:01:19 +0800
Subject: [PATCH 04/45] fix: stmt memory leak

---
 source/client/src/clientStmt.c         | 15 ++++++++-------
 source/libs/parser/src/parInsertStmt.c |  4 +++-
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c
index 82ea9e0d8f..c040d389cd 100644
--- a/source/client/src/clientStmt.c
+++ b/source/client/src/clientStmt.c
@@ -152,9 +152,10 @@ int32_t stmtRestoreQueryFields(STscStmt* pStmt) {
   return TSDB_CODE_SUCCESS;
 }

-int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName, bool autoCreateTbl) {
+int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName,
+                           bool autoCreateTbl) {
   STscStmt* pStmt = (STscStmt*)stmt;
-  char tbFName[TSDB_TABLE_FNAME_LEN];
+  char      tbFName[TSDB_TABLE_FNAME_LEN];

   tNameExtractFullName(tbName, tbFName);
   memcpy(&pStmt->bInfo.sname, tbName, sizeof(*tbName));
@@ -300,7 +301,7 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
       continue;
     }

-    if (STMT_TYPE_MULTI_INSERT == pStmt->sql.type) {
+    if (pBlocks->cloned) {
       qFreeStmtDataBlock(pBlocks);
     } else {
       qDestroyStmtDataBlock(pBlocks);
     }
@@ -776,9 +777,9 @@ int stmtAddBatch(TAOS_STMT* stmt) {
 int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
   tscDebug("stmt start to update tbUid, blockNum: %d", pRsp->nBlocks);

-  int32_t code = 0;
-  int32_t finalCode = 0;
-  size_t keyLen = 0;
+  int32_t            code = 0;
+  int32_t            finalCode = 0;
+  size_t             keyLen = 0;
   STableDataBlocks** pIter = taosHashIterate(pStmt->exec.pBlockHash, NULL);
   while (pIter) {
     STableDataBlocks* pBlock = *pIter;
@@ -848,7 +849,7 @@ int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
       pMeta->uid = pTableMeta->uid;
       pStmt->bInfo.tbUid = pTableMeta->uid;

-       taosMemoryFree(pTableMeta);
+      taosMemoryFree(pTableMeta);
     }

     pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c
index f5c362edf9..2466e784c1 100644
--- a/source/libs/parser/src/parInsertStmt.c
+++ b/source/libs/parser/src/parInsertStmt.c
@@ -461,7 +461,9 @@ void qFreeStmtDataBlock(void* pDataBlock) {
     return;
   }

-  insDestroyDataBlock((STableDataBlocks*)pDataBlock);
+  taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pTableMeta);
+  taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pData);
+  taosMemoryFreeClear(pDataBlock);
 }
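Note on patches 04-06: the stmt leak fixes all turn on a single ownership rule — a cloned data block only borrows the table meta and row buffer of the original, so it must be shallow-freed, while a non-cloned block owns them and needs a deep destroy; freeing the wrong way either leaks or double-frees. Below is a rough, self-contained sketch of that rule (hypothetical names, not the actual TDengine API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  bool  cloned; /* true: borrows meta/data from the original block */
  void *pMeta;  /* owned only when cloned == false */
  void *pData;  /* owned only when cloned == false */
} DataBlock;

/* Shallow free: release the wrapper, leave borrowed buffers alone. */
static void freeBlockShallow(DataBlock *b) { free(b); }

/* Deep free: the block owns its buffers, release everything. */
static void destroyBlockDeep(DataBlock *b) {
  free(b->pMeta);
  free(b->pData);
  free(b);
}

/* Dispatch on ownership, the same branch these patches move around. */
static void destroyBlock(DataBlock *b) {
  if (b == NULL) return;
  if (b->cloned) {
    freeBlockShallow(b);
  } else {
    destroyBlockDeep(b);
  }
}

int main(void) {
  DataBlock *orig = calloc(1, sizeof(DataBlock));
  orig->pMeta = malloc(16);
  orig->pData = malloc(16);

  DataBlock *clone = calloc(1, sizeof(DataBlock));
  clone->cloned = true;
  clone->pMeta = orig->pMeta; /* borrowed */
  clone->pData = orig->pData; /* borrowed */

  destroyBlock(clone); /* shallow: buffers survive */
  destroyBlock(orig);  /* deep: buffers released exactly once */
  printf("no double free, no leak\n");
  return 0;
}

Patch 05 centralizes this dispatch in a helper; patch 06 rolls the helper back while keeping an equivalent branch inline at the call site.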
From 4c8a2caa1238afeb57e49806efac6d82c6bf56e8 Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Thu, 12 Jan 2023 17:24:04 +0800
Subject: [PATCH 05/45] fix: stmt memory leak

---
 include/libs/parser/parser.h           |  4 +++-
 source/client/src/clientStmt.c         |  6 +-----
 source/libs/parser/src/parInsertStmt.c | 13 +++++++++++++
 3 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 9be79a539f..e6240f64b5 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -90,6 +90,7 @@ int32_t qCloneStmtDataBlock(void** pDst, void* pSrc);
 void    qFreeStmtDataBlock(void* pDataBlock);
 int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgId);
 void    qDestroyStmtDataBlock(void* pBlock);
+void    qDestroyStmtDataBlockExt(void* pBlock);
 STableMeta* qGetTableMetaInDataBlock(void* pDataBlock);

 int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx);
@@ -108,7 +109,8 @@ int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char*
 void*   smlInitHandle(SQuery* pQuery);
 void    smlDestroyHandle(void* pHandle);
 int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta,
-                    char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen);
+                    char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf,
+                    int16_t msgBufLen);
 int32_t smlBuildOutput(void* handle, SHashObj* pVgHash);
 int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c
index c040d389cd..8a677d7a5e 100644
--- a/source/client/src/clientStmt.c
+++ b/source/client/src/clientStmt.c
@@ -301,11 +301,7 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
       continue;
     }

-    if (pBlocks->cloned) {
-      qFreeStmtDataBlock(pBlocks);
-    } else {
-      qDestroyStmtDataBlock(pBlocks);
-    }
+    qDestroyStmtDataBlockExt(pBlocks);

     taosHashRemove(pStmt->exec.pBlockHash, key, keyLen);
     pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c
index 2466e784c1..8a23cd32b4 100644
--- a/source/libs/parser/src/parInsertStmt.c
+++ b/source/libs/parser/src/parInsertStmt.c
@@ -476,3 +476,16 @@ void qDestroyStmtDataBlock(void* pBlock) {
   pDataBlock->cloned = false;
   insDestroyDataBlock(pDataBlock);
 }
+
+void qDestroyStmtDataBlockExt(void* pBlock) {
+  if (pBlock == NULL) {
+    return;
+  }
+
+  STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock;
+  if (pDataBlock->cloned) {
+    qFreeStmtDataBlock(pBlock);
+  } else {
+    qDestroyStmtDataBlock(pBlock);
+  }
+}

From 9ba8d4f935204480a562cd736649089d20f48e05 Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Mon, 16 Jan 2023 09:53:43 +0800
Subject: [PATCH 06/45] fix: rollback

---
 include/libs/parser/parser.h           |  1 -
 source/client/src/clientStmt.c         |  7 ++++++-
 source/libs/parser/src/parInsertStmt.c | 13 -------------
 3 files changed, 6 insertions(+), 15 deletions(-)

diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index e6240f64b5..52edd9708c 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -90,7 +90,6 @@ int32_t qCloneStmtDataBlock(void** pDst, void* pSrc);
 void    qFreeStmtDataBlock(void* pDataBlock);
 int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgId);
 void    qDestroyStmtDataBlock(void* pBlock);
-void    qDestroyStmtDataBlockExt(void* pBlock);
 STableMeta* qGetTableMetaInDataBlock(void* pDataBlock);

 int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx);
diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c
index 8a677d7a5e..3be1b58bad 100644
--- a/source/client/src/clientStmt.c
+++ b/source/client/src/clientStmt.c
@@ -301,7 +301,12 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
       continue;
     }

-    qDestroyStmtDataBlockExt(pBlocks);
+    if (STMT_TYPE_MULTI_INSERT == pStmt->sql.type) {
+      qFreeStmtDataBlock(pBlocks);
+    } else {
+      qDestroyStmtDataBlock(pBlocks);
+    }
+
     taosHashRemove(pStmt->exec.pBlockHash, key, keyLen);
     pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c
index 8a23cd32b4..2466e784c1 100644
--- a/source/libs/parser/src/parInsertStmt.c
+++ b/source/libs/parser/src/parInsertStmt.c
@@ -476,16 +476,3 @@ void qDestroyStmtDataBlock(void* pBlock) {
   pDataBlock->cloned = false;
   insDestroyDataBlock(pDataBlock);
 }
-
-void qDestroyStmtDataBlockExt(void* pBlock) {
-  if (pBlock == NULL) {
-    return;
-  }
-
-  STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock;
-  if (pDataBlock->cloned) {
-    qFreeStmtDataBlock(pBlock);
-  } else {
-    qDestroyStmtDataBlock(pBlock);
-  }
-}
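Note on patch 07: when a vnode is stopping, task contexts disappear from the hash table, and a plain lookup cannot tell "this task was dropped" from "the whole node is shutting down", so callers received a misleading error code. The patch publishes shutdown through an atomic flag that both lookup paths consult after a miss. A compilable miniature of the idea, using C11 atomics in place of TDengine's atomic_* wrappers (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>

typedef enum { LOOKUP_OK, ERR_CTX_NOT_EXIST, ERR_NODE_STOPPED } LookupCode;

static atomic_int g_nodeStopped; /* set once when the node begins shutdown */

/* Pretend context table: id 42 exists, everything else does not. */
static int findCtx(int id) { return id == 42; }

static LookupCode acquireCtx(int id) {
  if (findCtx(id)) return LOOKUP_OK;
  /* Distinguish "dropped task" from "whole node stopping". */
  return atomic_load(&g_nodeStopped) ? ERR_NODE_STOPPED : ERR_CTX_NOT_EXIST;
}

int main(void) {
  printf("%d\n", acquireCtx(7));   /* ERR_CTX_NOT_EXIST */
  atomic_store(&g_nodeStopped, 1); /* what qWorkerStopAllTasks-style shutdown does */
  printf("%d\n", acquireCtx(7));   /* ERR_NODE_STOPPED */
  return 0;
}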
From d9b22b32991c072fe81e5ffd2ea4e3e8f5c3a087 Mon Sep 17 00:00:00 2001
From: slzhou
Date: Wed, 18 Jan 2023 15:13:16 +0800
Subject: [PATCH 07/45] fix: add node stopped and get/acquire ctx return node stopped

---
 source/libs/qworker/inc/qwInt.h   |  2 ++
 source/libs/qworker/src/qwUtil.c  | 21 +++++++++++++++++----
 source/libs/qworker/src/qworker.c |  3 +++
 3 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h
index aa1ce80903..bde05d4116 100644
--- a/source/libs/qworker/inc/qwInt.h
+++ b/source/libs/qworker/inc/qwInt.h
@@ -194,6 +194,8 @@ typedef struct SQWorker {
   SMsgCb   msgCb;
   SQWStat  stat;
   int32_t *destroyed;
+
+  int8_t nodeStopped;
 } SQWorker;

 typedef struct SQWorkerMgmt {
diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c
index fdd2775daa..7ee7c50c96 100644
--- a/source/libs/qworker/src/qwUtil.c
+++ b/source/libs/qworker/src/qwUtil.c
@@ -213,9 +213,15 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
   QW_SET_QTID(id, qId, tId, eId);

   *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
+  int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped);
   if (NULL == (*ctx)) {
-    QW_TASK_DLOG_E("task ctx not exist, may be dropped");
-    QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+    if (!nodeStopped) {
+      QW_TASK_DLOG_E("task ctx not exist, may be dropped");
+      QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+    } else {
+      QW_TASK_DLOG_E("node stopped");
+      QW_ERR_RET(TSDB_CODE_VND_STOPPED);
+    }
   }

   return TSDB_CODE_SUCCESS;
@@ -226,9 +232,16 @@ int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
   QW_SET_QTID(id, qId, tId, eId);

   *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
+  int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped);
+
   if (NULL == (*ctx)) {
-    QW_TASK_DLOG_E("task ctx not exist, may be dropped");
-    QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+    if (!nodeStopped) {
+      QW_TASK_DLOG_E("task ctx not exist, may be dropped");
+      QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+    } else {
+      QW_TASK_DLOG_E("node stopped");
+      QW_ERR_RET(TSDB_CODE_VND_STOPPED);
+    }
   }

   return TSDB_CODE_SUCCESS;
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index e38361d87f..fedaa96ed9 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -1188,6 +1188,9 @@ void qWorkerStopAllTasks(void *qWorkerMgmt) {
   uint64_t qId, tId, sId;
   int32_t  eId;
   int64_t  rId = 0;
+
+  atomic_store_8(&mgmt->nodeStopped, 1);
+
   void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
   while (pIter) {
     SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;

From fae4f2c4ed96427cfefa25c6609455ed804d1e46 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Fri, 20 Jan 2023 18:08:34 +0800
Subject: [PATCH 08/45] refactor: do some internal refactoring.

---
 source/libs/executor/inc/executorimpl.h     |  3 ++-
 source/libs/executor/src/exchangeoperator.c | 30 ++++++---------------
 source/libs/executor/src/executil.c         |  4 +++
 source/libs/executor/src/projectoperator.c  | 29 +++-----------------
 source/libs/executor/src/scanoperator.c     | 16 +++++------
 source/libs/executor/src/sortoperator.c     | 28 +++++--------------
 6 files changed, 32 insertions(+), 78 deletions(-)

diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index c68f7c4697..4ae178d508 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -704,9 +704,10 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
                             SDiskbasedBuf* pBuf);

 bool    hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
+bool    hasSlimitOffsetInfo(SLimitInfo* pLimitInfo);
 void    initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
 void    resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo);
-bool    applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator);
+bool    applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);

 void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData,
                                      int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c
index 037b33dc9f..08b7d371e2 100644
--- a/source/libs/executor/src/exchangeoperator.c
+++ b/source/libs/executor/src/exchangeoperator.c
@@ -707,6 +707,8 @@ int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
 }

 int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf) {
+  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+
   if (pLimitInfo->remainGroupOffset > 0) {
     if (pLimitInfo->currentGroupId == 0) {  // it is the first group
       pLimitInfo->currentGroupId = pBlock->info.id.groupId;
@@ -750,36 +752,20 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa
   // set current group id
   pLimitInfo->currentGroupId = pBlock->info.id.groupId;

-  if (pLimitInfo->remainOffset >= pBlock->info.rows) {
-    pLimitInfo->remainOffset -= pBlock->info.rows;
-    blockDataCleanup(pBlock);
+  bool limitReached = applyLimitOffset(pLimitInfo, pBlock, pTaskInfo);
+  if (pBlock->info.rows == 0) {
     return PROJECT_RETRIEVE_CONTINUE;
-  } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) {
-    blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
-    pLimitInfo->remainOffset = 0;
-  }
-
-  // check for the limitation in each group
-  if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
-    int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
-    blockDataKeepFirstNRows(pBlock, keepRows);
-    if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
+  } else {
+    if (limitReached && (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
       setOperatorCompleted(pOperator);
-    } else {
-      // current group limitation is reached, and future blocks of this group need to be discarded.
-      if (pBlock->info.rows == 0) {
-        return PROJECT_RETRIEVE_CONTINUE;
-      }
+      return PROJECT_RETRIEVE_DONE;
     }
-
-    return PROJECT_RETRIEVE_DONE;
   }

   // todo optimize performance
   // If there are slimit/soffset value exists, multi-round result can not be packed into one group, since the
   // they may not belong to the same group the limit/offset value is not valid in this case.
-  if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || pLimitInfo->slimit.offset != -1 ||
-      pLimitInfo->slimit.limit != -1) {
+  if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || hasSlimitOffsetInfo(pLimitInfo)) {
     return PROJECT_RETRIEVE_DONE;
   } else {  // not full enough, continue to accumulate the output data in the buffer.
     return PROJECT_RETRIEVE_CONTINUE;
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 757324a773..92d52fbb0a 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -1749,6 +1749,10 @@ bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) {
           pLimitInfo->slimit.offset != -1);
 }

+bool hasSlimitOffsetInfo(SLimitInfo* pLimitInfo) {
+  return (pLimitInfo->slimit.limit != -1 || pLimitInfo->slimit.offset != -1);
+}
+
 void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo) {
   SLimit limit = {.limit = getLimit(pLimit), .offset = getOffset(pLimit)};
   SLimit slimit = {.limit = getLimit(pSLimit), .offset = getOffset(pSLimit)};
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 3e3610827b..b1dc217bf5 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -185,36 +185,15 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
                                     SOperatorInfo* pOperator) {
   // set current group id
   pLimitInfo->currentGroupId = groupId;
-
-  if (pLimitInfo->remainOffset >= pBlock->info.rows) {
-    pLimitInfo->remainOffset -= pBlock->info.rows;
-    blockDataCleanup(pBlock);
+  bool limitReached = applyLimitOffset(pLimitInfo, pBlock, pOperator->pTaskInfo);
+  if (pBlock->info.rows == 0) {
     return PROJECT_RETRIEVE_CONTINUE;
-  } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) {
-    blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
-    pLimitInfo->remainOffset = 0;
-  }
-
-  // check for the limitation in each group
-  if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
-    int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
-    blockDataKeepFirstNRows(pBlock, keepRows);
-
-    // TODO: optimize it later when partition by + limit
-    // all retrieved requirement has been fulfilled, let's finish this
-    if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) ||
-        (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
+  } else {
+    if (limitReached && (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
       setOperatorCompleted(pOperator);
-    } else {
-      // Even current group is done, there may be many vgroups remain existed, and we need to continue to retrieve data
-      // from next group. So let's continue this retrieve process
-      if (keepRows == 0) {
-        return PROJECT_RETRIEVE_CONTINUE;
-      }
     }
   }

-  pLimitInfo->numOfOutputRows += pBlock->info.rows;
   return PROJECT_RETRIEVE_DONE;
 }
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 813763fffa..2813ef3505 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -256,12 +256,11 @@ static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlo
   }
 }

-// todo handle the slimit info
-bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator) {
+bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
   SLimit*     pLimit = &pLimitInfo->limit;
   const char* id = GET_TASKID(pTaskInfo);

-  if (pLimit->offset > 0 && pLimitInfo->remainOffset > 0) {
+  if (pLimitInfo->remainOffset > 0) {
     if (pLimitInfo->remainOffset >= pBlock->info.rows) {
       pLimitInfo->remainOffset -= pBlock->info.rows;
       blockDataEmpty(pBlock);
@@ -276,12 +275,14 @@ bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo
   if (pLimit->limit != -1 && pLimit->limit <= (pLimitInfo->numOfOutputRows + pBlock->info.rows)) {
     // limit the output rows
     int32_t keep = (int32_t)(pLimit->limit - pLimitInfo->numOfOutputRows);
-
     blockDataKeepFirstNRows(pBlock, keep);
+
+    pLimitInfo->numOfOutputRows += pBlock->info.rows;
     qDebug("output limit %" PRId64 " has reached, %s", pLimit->limit, id);
     return true;
   }

+  pLimitInfo->numOfOutputRows += pBlock->info.rows;
   return false;
 }
@@ -393,13 +394,12 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
     }
   }

-  bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo, pOperator);
+  bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo);
   if (limitReached) {  // set operator flag is done
     setOperatorCompleted(pOperator);
   }

   pCost->totalRows += pBlock->info.rows;
-  pTableScanInfo->limitInfo.numOfOutputRows = pCost->totalRows;

   return TSDB_CODE_SUCCESS;
 }
@@ -2714,9 +2714,7 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
     }
   }

-  applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo, pOperator);
-  pInfo->limitInfo.numOfOutputRows += pResBlock->info.rows;
-
+  applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo);
   qDebug("%s get sorted row block, rows:%d, limit:%"PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows,
          pInfo->limitInfo.numOfOutputRows);
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 97b4fd9dc4..e91d41897d 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -222,6 +222,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
     T_LONG_JMP(pTaskInfo->env, code);
   }

+  // multi-group case not handle here
   SSDataBlock* pBlock = NULL;
   while (1) {
     pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity,
@@ -236,28 +237,13 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
       continue;
     }

-    // todo add the limit/offset info
-    if (pInfo->limitInfo.remainOffset > 0) {
-      if (pInfo->limitInfo.remainOffset >= blockDataGetNumOfRows(pBlock)) {
-        pInfo->limitInfo.remainOffset -= pBlock->info.rows;
-        continue;
-      }
-
-      blockDataTrimFirstNRows(pBlock, pInfo->limitInfo.remainOffset);
-      pInfo->limitInfo.remainOffset = 0;
+    bool limitReached = applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo);
+    if (limitReached) {
+      resetLimitInfoForNextGroup(&pInfo->limitInfo);
     }

-    if (pInfo->limitInfo.limit.limit > 0 &&
-        pInfo->limitInfo.limit.limit <= pInfo->limitInfo.numOfOutputRows + blockDataGetNumOfRows(pBlock)) {
-      int32_t remain = pInfo->limitInfo.limit.limit - pInfo->limitInfo.numOfOutputRows;
-      blockDataKeepFirstNRows(pBlock, remain);
-    }
-
-    size_t numOfRows = blockDataGetNumOfRows(pBlock);
-    pInfo->limitInfo.numOfOutputRows += numOfRows;
-    pOperator->resultInfo.totalRows += numOfRows;
-
-    if (numOfRows > 0) {
+    pOperator->resultInfo.totalRows += pBlock->info.rows;
+    if (pBlock->info.rows > 0) {
       break;
     }
   }
@@ -680,7 +666,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
       break;
     }

-    bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator);
+    bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);
     if (limitReached) {
       resetLimitInfoForNextGroup(&pInfo->limitInfo);
     }

From 7803104b7e31ab9283b29aa33ec391804b721123 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Fri, 20 Jan 2023 22:50:35 +0800
Subject: [PATCH 09/45] fix(query): do some internal refactoring, and identify a bug.

---
 source/common/src/tdatablock.c             |  1 +
 source/libs/executor/src/projectoperator.c | 11 ++++++++++-
 source/libs/executor/src/sortoperator.c    |  2 ++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 43f272d599..f41eb1adaf 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1431,6 +1431,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
   pBlock->info.rows = 0;
   pBlock->info.capacity = 0;
   pBlock->info.rowSize = 0;
+  pBlock->info.id = pDataBlock->info.id;

   size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
   for (int32_t i = 0; i < numOfCols; ++i) {
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index b1dc217bf5..d641810cee 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -90,7 +90,16 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys

   pInfo->binfo.pRes = pResBlock;
   pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
-  pInfo->mergeDataBlocks = (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) ? false : pProjPhyNode->mergeDataBlock;
+
+  if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
+    pInfo->mergeDataBlocks = false;
+  } else {
+    if (!pProjPhyNode->ignoreGroupId) {
+      pInfo->mergeDataBlocks = false;
+    } else {
+      pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;
+    }
+  }

   int32_t    numOfRows = 4096;
   size_t     keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index e91d41897d..6d3da3e111 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -237,6 +237,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
       continue;
     }

+    // there are bugs?
     bool limitReached = applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo);
     if (limitReached) {
       resetLimitInfoForNextGroup(&pInfo->limitInfo);
@@ -666,6 +667,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
       break;
     }

+    // todo fix it: we need to decide whether this block is belonged to previous group or not .
     bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);
     if (limitReached) {
       resetLimitInfoForNextGroup(&pInfo->limitInfo);
     }

From a898be4f7d1bc5fb9c528353d4c2ddf039c86b1e Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Fri, 20 Jan 2023 23:38:31 +0800
Subject: [PATCH 10/45] fix(query): set correct total rsp rows.

---
 source/libs/executor/src/exchangeoperator.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c
index 08b7d371e2..e5089ab4a9 100644
--- a/source/libs/executor/src/exchangeoperator.c
+++ b/source/libs/executor/src/exchangeoperator.c
@@ -218,10 +218,7 @@ static SSDataBlock* loadRemoteData(SOperatorInfo* pOperator) {
       if (status == PROJECT_RETRIEVE_CONTINUE) {
         continue;
       } else if (status == PROJECT_RETRIEVE_DONE) {
-        size_t rows = pBlock->info.rows;
-        pExchangeInfo->limitInfo.numOfOutputRows += rows;
-
-        if (rows == 0) {
+        if (pBlock->info.rows == 0) {
           setOperatorCompleted(pOperator);
           return NULL;
         } else {
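Note on patches 08-10: the per-operator copies of offset/limit bookkeeping are folded into one applyLimitOffset() with a fixed contract — consume the remaining offset first, trim the batch to the limit, account the emitted rows exactly once, and report when the limit is hit so the caller can complete the operator or reset for the next group. A self-contained sketch of that contract, reduced to a row counter (illustrative names, not the executor's real signatures):

#include <stdbool.h>
#include <stdio.h>

typedef struct {
  long limit;        /* -1 means no limit */
  long remainOffset; /* rows still to skip */
  long numOfOutputRows;
} LimitInfo;

/* Trim a batch of `*rows` in place; return true once the limit is reached. */
static bool applyLimitOffset(LimitInfo *info, long *rows) {
  if (info->remainOffset >= *rows) { /* whole batch is skipped */
    info->remainOffset -= *rows;
    *rows = 0;
    return false;
  }
  *rows -= info->remainOffset; /* skip only the head of the batch */
  info->remainOffset = 0;

  if (info->limit != -1 && info->numOfOutputRows + *rows >= info->limit) {
    *rows = info->limit - info->numOfOutputRows; /* keep first N rows */
    info->numOfOutputRows += *rows;
    return true; /* caller stops, or resets for the next group */
  }
  info->numOfOutputRows += *rows;
  return false;
}

int main(void) {
  LimitInfo info = {.limit = 5, .remainOffset = 3, .numOfOutputRows = 0};
  long batches[] = {2, 4, 4}; /* LIMIT 5 OFFSET 3 over 10 incoming rows */
  for (int i = 0; i < 3; i++) {
    long rows = batches[i];
    bool done = applyLimitOffset(&info, &rows);
    printf("batch %d -> emit %ld rows%s\n", i, rows, done ? " (limit reached)" : "");
  }
  return 0; /* emits 0, 3, 2 rows */
}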
From ec1592390af6c346c0d56e750f12fcdc5cd6f247 Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Sat, 28 Jan 2023 10:35:21 +0800
Subject: [PATCH 11/45] fix: limit push down error

---
 source/libs/planner/src/planSpliter.c | 32 +++++++++++++++++----------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index a7eac2c853..d85e4ca10d 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -348,7 +348,8 @@ static bool stbSplIsPartTableAgg(SAggLogicNode* pAgg) {
     return false;
   }
   if (NULL != pAgg->pGroupKeys) {
-    return stbSplHasPartTbname(pAgg->pGroupKeys) && stbSplNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0));
+    return stbSplHasPartTbname(pAgg->pGroupKeys) &&
+           stbSplNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0));
   }
   return stbSplHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)));
 }
@@ -1025,21 +1026,29 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
   return code;
 }

-static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
-  SLogicNode* pSplitNode = pInfo->pSplitNode;
+static int32_t stbSplGetSplitNodeForScan(SStableSplitInfo* pInfo, SLogicNode** pSplitNode) {
+  *pSplitNode = pInfo->pSplitNode;
   if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
       NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
-    pSplitNode = pInfo->pSplitNode->pParent;
+    *pSplitNode = pInfo->pSplitNode->pParent;
     if (NULL != pInfo->pSplitNode->pLimit) {
-      pSplitNode->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit);
-      if (NULL == pSplitNode->pLimit) {
+      (*pSplitNode)->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit);
+      if (NULL == (*pSplitNode)->pLimit) {
         return TSDB_CODE_OUT_OF_MEMORY;
       }
       ((SLimitNode*)pInfo->pSplitNode->pLimit)->limit += ((SLimitNode*)pInfo->pSplitNode->pLimit)->offset;
       ((SLimitNode*)pInfo->pSplitNode->pLimit)->offset = 0;
     }
   }
-  int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE);
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+  SLogicNode* pSplitNode = NULL;
+  int32_t     code = stbSplGetSplitNodeForScan(pInfo, &pSplitNode);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE);
+  }
   if (TSDB_CODE_SUCCESS == code) {
     code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
                                      (SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
@@ -1049,12 +1058,11 @@ static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSp
 }

 static int32_t stbSplSplitScanNodeWithPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
-  SLogicNode* pSplitNode = pInfo->pSplitNode;
-  if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
-      NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
-    pSplitNode = pInfo->pSplitNode->pParent;
+  SLogicNode* pSplitNode = NULL;
+  int32_t     code = stbSplGetSplitNodeForScan(pInfo, &pSplitNode);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pSplitNode, NULL, pSplitNode, true);
   }
-  int32_t code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pSplitNode, NULL, pSplitNode, true);
   if (TSDB_CODE_SUCCESS == code) {
     code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
                                      (SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT));

From beb3de8530ddd38e29efdc094badaf46158e360b Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Sat, 28 Jan 2023 10:53:43 +0800
Subject: [PATCH 12/45] fix: limit push down error

---
 source/libs/planner/src/planSpliter.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index d85e4ca10d..4c8b996a75 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -560,6 +560,8 @@ static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla
     if (NULL == pMerge->node.pLimit) {
       code = TSDB_CODE_OUT_OF_MEMORY;
     }
+    ((SLimitNode*)pSplitNode->pLimit)->limit += ((SLimitNode*)pSplitNode->pLimit)->offset;
+    ((SLimitNode*)pSplitNode->pLimit)->offset = 0;
   }
   if (TSDB_CODE_SUCCESS == code) {
     if (NULL == pSubplan) {

From 8258c68b6dd76e7c14b50ceb3f4fde4183ecb79f Mon Sep 17 00:00:00 2001
From: Xiaoyu Wang
Date: Sat, 28 Jan 2023 11:11:37 +0800
Subject: [PATCH 13/45] fix: subquery output ignores group id

---
 source/libs/planner/src/planLogicCreater.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 084d99cae5..bd1823a770 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -1016,7 +1016,7 @@ static int32_t createProjectLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSel

   TSWAP(pProject->node.pLimit, pSelect->pLimit);
   TSWAP(pProject->node.pSlimit, pSelect->pSlimit);
-  pProject->ignoreGroupId = (NULL == pSelect->pPartitionByList);
+  pProject->ignoreGroupId = pSelect->isSubquery ? true : (NULL == pSelect->pPartitionByList);
   pProject->node.groupAction =
       (!pSelect->isSubquery && pCxt->pPlanCxt->streamQuery) ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
   pProject->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;

From 1741e98a4e8fd5353aa541ca9a06def8546f5349 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Mon, 30 Jan 2023 10:13:54 +0800
Subject: [PATCH 14/45] fix(query): reset the del file index when beginning last file check.

---
 source/dnode/vnode/src/tsdb/tsdbRead.c | 44 ++++++++++++++++----------
 1 file changed, 28 insertions(+), 16 deletions(-)

diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 85282a2340..a77eb8298e 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -215,6 +215,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader);
 static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
 static bool    hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
 static void    initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
+static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);

 static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }

@@ -2526,6 +2527,14 @@ _end:

 void setComposedBlockFlag(STsdbReader* pReader, bool composed) { pReader->status.composedDataBlock = composed; }

+int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order) {
+  if (pDelSkyline == NULL) {
+    return 0;
+  }
+
+  return ASCENDING_TRAVERSE(order) ? 0 : taosArrayGetSize(pDelSkyline) - 1;
+}
+
 int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
                                STbData* piMemTbData) {
   if (pBlockScanInfo->delSkyline != NULL) {
@@ -2543,7 +2552,6 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
     if (pIdx != NULL) {
       code = tsdbReadDelData(pReader->pDelFReader, pIdx, pDelData);
     }
-
     if (code != TSDB_CODE_SUCCESS) {
       goto _err;
     }
@@ -2572,11 +2580,13 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
     }

     taosArrayDestroy(pDelData);
-    pBlockScanInfo->iter.index =
-        ASCENDING_TRAVERSE(pReader->order) ? 0 : taosArrayGetSize(pBlockScanInfo->delSkyline) - 1;
-    pBlockScanInfo->iiter.index = pBlockScanInfo->iter.index;
-    pBlockScanInfo->fileDelIndex = pBlockScanInfo->iter.index;
-    pBlockScanInfo->lastBlockDelIndex = pBlockScanInfo->iter.index;
+    int32_t index = getInitialDelIndex(pBlockScanInfo->delSkyline, pReader->order);
+
+    pBlockScanInfo->iter.index = index;
+    pBlockScanInfo->iiter.index = index;
+    pBlockScanInfo->fileDelIndex = index;
+    pBlockScanInfo->lastBlockDelIndex = index;
+
     return code;

   _err:
@@ -2676,13 +2686,17 @@ static int32_t uidComparFunc(const void* p1, const void* p2) {
   }
 }

-static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) {
   int32_t index = 0;
   int32_t total = taosHashGetSize(pStatus->pTableMap);

   void* p = taosHashIterate(pStatus->pTableMap, NULL);
   while (p != NULL) {
     STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
+
+    // reset the last del file index
+    pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order);
+
     pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid;
     p = taosHashIterate(pStatus->pTableMap, p);
   }
@@ -2690,7 +2704,9 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea
   taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc);
 }

-static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) {
+  SReaderStatus* pStatus = &pReader->status;
+
   int32_t total = taosHashGetSize(pStatus->pTableMap);
   if (total == 0) {
     return TSDB_CODE_SUCCESS;
@@ -2703,7 +2719,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt
       return TSDB_CODE_OUT_OF_MEMORY;
     }

-    extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+    extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);
     uint64_t uid = pOrderCheckInfo->tableUidList[0];
     pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
   } else {
@@ -2720,7 +2736,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt
       }

       pOrderCheckInfo->tableUidList = p;
-      extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+      extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);

       uid = pOrderCheckInfo->tableUidList[0];
       pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
@@ -2740,11 +2756,7 @@ static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus
   uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex];
   pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
-  if (pStatus->pTableIter == NULL) {
-    return false;
-  }
-
-  return true;
+  return (pStatus->pTableIter != NULL);
 }

 static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
@@ -2752,7 +2764,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
   SLastBlockReader*   pLastBlockReader = pStatus->fileIter.pLastBlockReader;
   SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo;
-  int32_t             code = initOrderCheckInfo(pOrderedCheckInfo, pStatus);
+  int32_t             code = initOrderCheckInfo(pOrderedCheckInfo, pReader);
   if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) {
     return code;
   }
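Note on patch 14: a delete-range skyline is walked in key order, so its starting index depends on the traversal direction — index 0 when ascending, the last element when descending — and it has to be re-derived for every new scan pass, which is why the computation is hoisted into getInitialDelIndex() and re-applied when the ordered table list is rebuilt. A tiny sketch of the rule (illustrative types, not the tsdb structures):

#include <stdio.h>

#define ASC  1
#define DESC 2

/* First index to visit in `arr` of length n for the given traversal order;
 * an empty or missing skyline always starts at 0. */
static int getInitialIndex(const int *arr, int n, int order) {
  if (arr == NULL || n == 0) return 0;
  return (order == ASC) ? 0 : n - 1;
}

int main(void) {
  int skyline[] = {100, 200, 300}; /* pretend delete-range keys */
  printf("asc start:  %d\n", getInitialIndex(skyline, 3, ASC));  /* 0 */
  printf("desc start: %d\n", getInitialIndex(skyline, 3, DESC)); /* 2 */
  return 0;
}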
From 1ce1f8143cd638ec9cab541aba160c2e2ddfaa73 Mon Sep 17 00:00:00 2001
From: 54liuyao <54liuyao@163.com>
Date: Mon, 30 Jan 2023 10:45:12 +0800
Subject: [PATCH 15/45] fix: process data with incorrect timestamp

---
 source/libs/executor/src/timewindowoperator.c | 14 ++-
 tests/script/tsim/stream/basic1.sim           | 53 +++++++++++
 tests/script/tsim/stream/triggerInterval0.sim | 88 +++++++++++++++----
 3 files changed, 135 insertions(+), 20 deletions(-)

diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index d78e9c4edf..d320ef6e9e 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -2477,7 +2477,19 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
       pInfo->delKey = key;
     }
     int32_t prevEndPos = (forwardRows - 1) * step + startPos;
-    ASSERT(pSDataBlock->info.window.skey > 0 && pSDataBlock->info.window.ekey > 0);
+    if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) {
+      qError("table uid %" PRIu64 " data block timestamp range may not be calculated! minKey %" PRId64
+             ",maxKey %" PRId64,
+             pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey);
+      blockDataUpdateTsWindow(pSDataBlock, 0);
+
+      // timestamp of the data is incorrect
+      if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) {
+        qError("table uid %" PRIu64 " data block timestamp is out of range! minKey %" PRId64 ",maxKey %" PRId64,
+               pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey);
+      }
+    }
+
     if (IS_FINAL_OP(pInfo)) {
       startPos = getNextQualifiedFinalWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos);
     } else {
diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim
index 7bf10df637..c61c7667f8 100644
--- a/tests/script/tsim/stream/basic1.sim
+++ b/tests/script/tsim/stream/basic1.sim
@@ -834,4 +834,57 @@ endi

 print ====== test _wstart end

+print insert into ts1 values(-1648791211000,1,2,3)
+
+sql create database test7 vgroups 1;
+sql use test7;
+sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
+sql create table ts1 using st tags(1,1,1);
+sql create stream streams7 trigger at_once into streamt7 as select _wstart, count(*) from ts1 interval(10s) ;
+
+sql insert into ts1 values(1648791211000,1,2,3);
+sql_error insert into ts1 values(-1648791211000,1,2,3);
+
+loop18:
+
+sleep 200
+sql select * from streamt7;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+  return -1
+endi
+
+if $rows != 1 then
+  print =====rows=$rows
+  goto loop18
+endi
+
+if $data01 != 1 then
+  print =====data01=$data01
+  goto loop18
+endi
+
+sql_error insert into ts1 values(-1648791211001,1,2,3) (1648791211001,1,2,3);
+
+sql select _wstart, count(*) from ts1 interval(10s) ;
+
+print $data00 $data01
+print $data10 $data11
+
+loop19:
+
+sleep 200
+sql select * from streamt7;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+  return -1
+endi
+
+if $rows != 1 then
+  print =====rows=$rows
+  goto loop19
+endi
+
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim
index 7353f026bb..b522dcf035 100644
--- a/tests/script/tsim/stream/triggerInterval0.sim
+++ b/tests/script/tsim/stream/triggerInterval0.sim
@@ -29,69 +29,119 @@ sql insert into t1 values(1648791223001,2,2,3,1.1);
 sql insert into t1 values(1648791223002,2,2,3,1.1);
 sql insert into t1 values(1648791223003,2,2,3,1.1);
 sql insert into t1 values(1648791223001,2,2,3,1.1);
+
+print step 0
+
+$loop_count = 0
+
+loop0:
 sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+  return -1
+endi
+
 sql select * from streamt;
+
 if $rows != 1 then
   print ======$rows
-  return -1
+  goto loop0
 endi
 if $data01 != 1 then
   print ======$data01
-  return -1
+  goto loop0
 endi

 sql insert into t1 values(1648791233001,2,2,3,1.1);
+
+print step 1
+
+$loop_count = 0
+
+loop1:
 sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+  return -1
+endi
+
 sql select * from streamt;
 if $rows != 2 then
   print ======$rows
-  return -1
+  goto loop1
 endi
 if $data01 != 1 then
   print ======$data01
-  return -1
+  goto loop1
 endi
 if $data11 != 3 then
   print ======$data11
-  return -1
+  goto loop1
 endi

 sql insert into t1 values(1648791223004,2,2,3,1.1);
 sql insert into t1 values(1648791223004,2,2,3,1.1);
 sql insert into t1 values(1648791223005,2,2,3,1.1);
+
+print step 2
+
+$loop_count = 0
+
+loop2:
 sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+  return -1
+endi
+
 sql select * from streamt;
 if $rows != 2 then
   print ======$rows
-  return -1
+  goto loop2
 endi
+
 if $data01 != 1 then
   print ======$data01
-  return -1
+  goto loop2
 endi
 if $data11 != 5 then
   print ======$data11
-  return -1
+  goto loop2
 endi

 sql insert into t1 values(1648791233002,3,2,3,2.1);
 sql insert into t1 values(1648791213002,4,2,3,3.1)
 sql insert into t1 values(1648791213002,4,2,3,4.1);
+
+print step 3
+
+$loop_count = 0
+
+loop3:
 sleep 300
-sql select * from streamt;
-if $rows != 2 then
-  print ======$rows
-  return -1
-endi
-if $data01 != 2 then
-  print ======$data01
-  return -1
-endi
-if $data11 != 5 then
-  print ======$data11
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
   return -1
 endi

+sql select * from streamt;
+if $rows != 2 then
+  print ======$rows
+  goto loop3
+endi
+if $data01 != 2 then
+  print ======$data01
+  goto loop3
+endi
+if $data11 != 5 then
+  print ======$data11
+  goto loop3
+endi
+
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
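Note on patch 15's test changes: stream results materialize asynchronously, so a single fixed sleep makes the sim scripts flaky; each assertion is therefore wrapped in a bounded retry loop that re-queries until the expectation holds or the retry budget runs out. The same pattern in plain C, as a rough sketch (the readiness check is a stand-in for re-running the query):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for "query the stream result table and check the expectation". */
static bool resultReady(int attempt) { return attempt >= 3; }

/* Poll until the expectation holds or the retry budget runs out,
 * mirroring the sim scripts' loopN/goto structure. */
static bool waitForResult(int maxRetries, useconds_t delayUs) {
  for (int i = 0; i < maxRetries; i++) {
    if (resultReady(i)) return true;
    usleep(delayUs);
  }
  return false;
}

int main(void) {
  printf(waitForResult(10, 200 * 1000) ? "ok\n" : "timed out\n");
  return 0;
}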
From ac9cdf6c58eaf5f4e4303a52fe855485a7f321eb Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Tue, 31 Jan 2023 00:46:58 +0800
Subject: [PATCH 16/45] fix: install script refine sentence for main (#19703)

* fix: packaging/tools/install.sh for main

* fix: refine sentence

* fix: dpkg grep ii to detect deb package
---
 packaging/tools/install.sh | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 4be179b04d..dfdbaa6fdd 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -210,8 +210,8 @@ function install_bin() {
   [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || :
   [ -x ${install_main_dir}/bin/${udfdName} ] && ${csudo}ln -s ${install_main_dir}/bin/${udfdName} ${bin_link_dir}/${udfdName} || :
   [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || :
-  [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
-  [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
+  [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
+  [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
   [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
   [ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${xname} ${bin_link_dir}/${xname} || :
   [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
@@ -746,7 +746,7 @@ function is_version_compatible() {
 deb_erase() {
   confirm=""
   while [ "" == "${confirm}" ]; do
-    echo -e -n "${RED}Exist tdengine deb detected, do you want to remove it? [yes|no] ${NC}:"
+    echo -e -n "${RED}Existing TDengine deb is detected, do you want to remove it? [yes|no] ${NC}:"
     read confirm
     if [ "yes" == "$confirm" ]; then
       ${csudo}dpkg --remove tdengine ||:
@@ -760,7 +760,7 @@ deb_erase() {
 rpm_erase() {
   confirm=""
   while [ "" == "${confirm}" ]; do
-    echo -e -n "${RED}Exist tdengine rpm detected, do you want to remove it? [yes|no] ${NC}:"
+    echo -e -n "${RED}Existing TDengine rpm is detected, do you want to remove it? [yes|no] ${NC}:"
    read confirm
     if [ "yes" == "$confirm" ]; then
       ${csudo}rpm -e tdengine ||:
@@ -787,7 +787,7 @@ function updateProduct() {
   if echo $osinfo | grep -qwi "centos"; then
     rpm -q tdengine 2>&1 > /dev/null && rpm_erase tdengine ||:
   elif echo $osinfo | grep -qwi "ubuntu"; then
-    dpkg -l tdengine 2>&1 > /dev/null && deb_erase tdengine ||:
+    dpkg -l tdengine 2>&1 | grep ii > /dev/null && deb_erase tdengine ||:
   fi

   tar -zxf ${tarName}

From 889019b253c7236e9d922689330e3ff04761e36f Mon Sep 17 00:00:00 2001
From: sunpeng
Date: Tue, 31 Jan 2023 09:37:50 +0800
Subject: [PATCH 17/45] build: add python demo to ci (#19699)

* build: add python demo to ci

* build: fix python demo
---
 docs/examples/go/go.mod                       |   6 -
 docs/examples/python/conn_native_pandas.py    |   7 +-
 docs/examples/python/conn_rest_pandas.py      |   6 +-
 docs/examples/python/connect_rest_examples.py |  23 ++--
 .../connection_usage_native_reference.py      |   8 +-
 docs/examples/python/fast_write_example.py    | 119 ++++++++++++------
 docs/examples/python/kafka_example.py         |  80 +++++++++---
 docs/examples/python/mockdatasource.py        |  19 ++-
 docs/examples/python/sql_writer.py            |  24 +++-
 docs/examples/python/tmq_example.py           |  95 +++++++-------
 tests/docs-examples-test/python.sh            |  43 ++++++-
 11 files changed, 298 insertions(+), 132 deletions(-)
 delete mode 100644 docs/examples/go/go.mod

diff --git a/docs/examples/go/go.mod b/docs/examples/go/go.mod
deleted file mode 100644
index 2bc1a74cb6..0000000000
--- a/docs/examples/go/go.mod
+++ /dev/null
@@ -1,6 +0,0 @@
-module goexample
-
-go 1.17
-
-require github.com/taosdata/driver-go/v3 3.0
-
diff --git a/docs/examples/python/conn_native_pandas.py b/docs/examples/python/conn_native_pandas.py
index 56942ef570..f3bab15efb 100644
--- a/docs/examples/python/conn_native_pandas.py
+++ b/docs/examples/python/conn_native_pandas.py
@@ -1,8 +1,11 @@
 import pandas
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, text

 engine = create_engine("taos://root:taosdata@localhost:6030/power")
-df = pandas.read_sql("SELECT * FROM meters", engine)
+conn = engine.connect()
+df = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
+conn.close()
+

 # print index
 print(df.index)
diff --git a/docs/examples/python/conn_rest_pandas.py b/docs/examples/python/conn_rest_pandas.py
index 0164080cd5..1b207d6ff1 100644
--- a/docs/examples/python/conn_rest_pandas.py
+++ b/docs/examples/python/conn_rest_pandas.py
@@ -1,8 +1,10 @@
 import pandas
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, text

 engine = create_engine("taosrest://root:taosdata@localhost:6041")
-df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine)
+conn = engine.connect()
+df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
+conn.close()

 # print index
 print(df.index)
diff --git a/docs/examples/python/connect_rest_examples.py b/docs/examples/python/connect_rest_examples.py
index 900ec1022e..0f8625ae53 100644
--- a/docs/examples/python/connect_rest_examples.py
+++ b/docs/examples/python/connect_rest_examples.py
@@ -1,24 +1,25 @@
 # ANCHOR: connect
 from taosrest import connect, TaosRestConnection, TaosRestCursor

-conn: TaosRestConnection = connect(url="http://localhost:6041",
-                                   user="root",
-                                   password="taosdata",
-                                   timeout=30)
+conn = connect(url="http://localhost:6041",
+               user="root",
+               password="taosdata",
+               timeout=30)

 # ANCHOR_END: connect
 # ANCHOR: basic
 # create STable
-cursor: TaosRestCursor = conn.cursor()
+cursor = conn.cursor()
 cursor.execute("DROP DATABASE IF EXISTS power")
 cursor.execute("CREATE DATABASE power")
-cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
+cursor.execute(
+    "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")

 # insert data
-cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-    power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-    power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-    power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
+cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+    power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+    power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+    power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
 print("inserted row count:", cursor.rowcount)

 # query data
@@ -28,7 +29,7 @@ print("queried row count:", cursor.rowcount)
 # get column names from cursor
 column_names = [meta[0] for meta in cursor.description]
 # get rows
-data: list[tuple] = cursor.fetchall()
+data = cursor.fetchall()
 print(column_names)
 for row in data:
     print(row)
diff --git a/docs/examples/python/connection_usage_native_reference.py b/docs/examples/python/connection_usage_native_reference.py
index 4803511e42..0a23c5f95b 100644
--- a/docs/examples/python/connection_usage_native_reference.py
+++ b/docs/examples/python/connection_usage_native_reference.py
@@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test")
 # change database. same as execute "USE db"
 conn.select_db("test")
 conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
-affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m 24.4)")
+affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
 print("affected_row", affected_row)
 # output:
 # affected_row 3
@@ -16,10 +16,10 @@ print("affected_row", affected_row)
 # ANCHOR: query
 # Execute a sql and get its result set. It's useful for SELECT statement
-result: taos.TaosResult = conn.query("SELECT * from weather")
+result = conn.query("SELECT * from weather")

 # Get fields from result
-fields: taos.field.TaosFields = result.fields
+fields = result.fields
 for field in fields:
     print(field)
 # {name: ts, type: 9, bytes: 8}
@@ -42,4 +42,4 @@ print(data)

 # ANCHOR_END: query

-conn.close()
+conn.close()
\ No newline at end of file
diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py
index c9d606388f..626e3310b1 100644
--- a/docs/examples/python/fast_write_example.py
+++ b/docs/examples/python/fast_write_example.py
@@ -1,15 +1,14 @@
 # install dependencies:
 # recommend python >= 3.8
-# pip3 install faster-fifo
 #

 import logging
 import math
+import multiprocessing
 import sys
 import time
 import os
-from multiprocessing import Process
-from faster_fifo import Queue
+from multiprocessing import Process, Queue
 from mockdatasource import MockDataSource
 from queue import Empty
 from typing import List
@@ -22,8 +21,7 @@ TABLE_COUNT = 1000
 QUEUE_SIZE = 1000000
 MAX_BATCH_SIZE = 3000

-read_processes = []
-write_processes = []
+_DONE_MESSAGE = '__DONE__'


 def get_connection():
@@ -44,41 +42,64 @@ def get_connection():

 # ANCHOR: read

-def run_read_task(task_id: int, task_queues: List[Queue]):
+def run_read_task(task_id: int, task_queues: List[Queue], infinity):
     table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
-    data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+    data_source = MockDataSource(f"tb{task_id}", table_count_per_task, infinity)
     try:
         for batch in data_source:
+            if isinstance(batch, tuple):
+                batch = [batch]
             for table_id, rows in batch:
                 # hash data to different queue
                 i = table_id % len(task_queues)
                 # block putting forever when the queue is full
-                task_queues[i].put_many(rows, block=True, timeout=-1)
+                for row in rows:
+                    task_queues[i].put(row)
+        if not infinity:
+            for queue in task_queues:
+                queue.put(_DONE_MESSAGE)
     except KeyboardInterrupt:
         pass
+    finally:
+        logging.info('read task over')

 # ANCHOR_END: read

 # ANCHOR: write
-def run_write_task(task_id: int, queue: Queue):
+def run_write_task(task_id: int, queue: Queue, done_queue: Queue):
     from sql_writer import SQLWriter
     log = logging.getLogger(f"WriteTask-{task_id}")
     writer = SQLWriter(get_connection)
     lines = None
     try:
         while True:
-            try:
-                # get as many as possible
-                lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+            over = False
+            lines = []
+            for _ in range(MAX_BATCH_SIZE):
+                try:
+                    line = queue.get_nowait()
+                    if line == _DONE_MESSAGE:
+                        over = True
+                        break
+                    if line:
+                        lines.append(line)
+                except Empty:
+                    time.sleep(0.1)
+            if len(lines) > 0:
                 writer.process_lines(lines)
-            except Empty:
-                time.sleep(0.01)
+            if over:
+                done_queue.put(_DONE_MESSAGE)
+                break
     except KeyboardInterrupt:
         pass
     except BaseException as e:
         log.debug(f"lines={lines}")
         raise e
+    finally:
+        writer.close()
+        log.debug('write task over')

 # ANCHOR_END: write
@@ -103,47 +124,64 @@ def set_global_config():

 # ANCHOR: monitor
-def run_monitor_process():
+def run_monitor_process(done_queue: Queue):
     log = logging.getLogger("DataBaseMonitor")
-    conn = get_connection()
-    conn.execute("DROP DATABASE IF EXISTS test")
-    conn.execute("CREATE DATABASE test")
-    conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
-                 "TAGS (location BINARY(64), groupId INT)")
+    conn = None
+    try:
+        conn = get_connection()

-    def get_count():
-        res = conn.query("SELECT count(*) FROM test.meters")
-        rows = res.fetch_all()
-        return rows[0][0] if rows else 0
+        def get_count():
+            res = conn.query("SELECT count(*) FROM test.meters")
+            rows = res.fetch_all()
+            return rows[0][0] if rows else 0

-    last_count = 0
-    while True:
-        time.sleep(10)
-        count = get_count()
-        log.info(f"count={count} speed={(count - last_count) / 10}")
-        last_count = count
+        last_count = 0
+        while True:
+            try:
+                done = done_queue.get_nowait()
+                if done == _DONE_MESSAGE:
+                    break
+            except Empty:
+                pass
+            time.sleep(10)
+            count = get_count()
+            log.info(f"count={count} speed={(count - last_count) / 10}")
+            last_count = count
+    finally:
+        conn.close()

 # ANCHOR_END: monitor

 # ANCHOR: main
-def main():
+def main(infinity):
     set_global_config()
     logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
                  f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")

-    monitor_process = Process(target=run_monitor_process)
+    conn = get_connection()
+    conn.execute("DROP DATABASE IF EXISTS test")
+    conn.execute("CREATE DATABASE IF NOT EXISTS test")
+    conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+                 "TAGS (location BINARY(64), groupId INT)")
+    conn.close()
+
+    done_queue = Queue()
+    monitor_process = Process(target=run_monitor_process, args=(done_queue,))
     monitor_process.start()
-    time.sleep(3)  # waiting for database ready.
+    logging.debug(f"monitor task started with pid {monitor_process.pid}")

     task_queues: List[Queue] = []
+    write_processes = []
+    read_processes = []
+
     # create task queues
     for i in range(WRITE_TASK_COUNT):
-        queue = Queue(max_size_bytes=QUEUE_SIZE)
+        queue = Queue()
         task_queues.append(queue)

     # create write processes
     for i in range(WRITE_TASK_COUNT):
-        p = Process(target=run_write_task, args=(i, task_queues[i]))
+        p = Process(target=run_write_task, args=(i, task_queues[i], done_queue))
         p.start()
         logging.debug(f"WriteTask-{i} started with pid {p.pid}")
         write_processes.append(p)
@@ -151,13 +189,19 @@ def main():
     # create read processes
     for i in range(READ_TASK_COUNT):
         queues = assign_queues(i, task_queues)
-        p = Process(target=run_read_task, args=(i, queues))
+        p = Process(target=run_read_task, args=(i, queues, infinity))
         p.start()
         logging.debug(f"ReadTask-{i} started with pid {p.pid}")
         read_processes.append(p)

     try:
         monitor_process.join()
+        for p in read_processes:
+            p.join()
+        for p in write_processes:
+            p.join()
+        time.sleep(1)
+        return
     except KeyboardInterrupt:
         monitor_process.terminate()
         [p.terminate() for p in read_processes]
@@ -176,5 +220,6 @@ def assign_queues(read_task_id, task_queues):

 if __name__ == '__main__':
-    main()
+    multiprocessing.set_start_method('spawn')
+    main(False)
 # ANCHOR_END: main
diff --git a/docs/examples/python/kafka_example.py b/docs/examples/python/kafka_example.py
index 735059eec0..a89287d372 100644
--- a/docs/examples/python/kafka_example.py
+++ b/docs/examples/python/kafka_example.py
@@ -26,7 +26,8 @@ class Consumer(object):
         'bath_consume': True,
         'batch_size': 1000,
         'async_model': True,
-        'workers': 10
+        'workers': 10,
+        'testing': False
     }

     LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',
@@ -46,11 +47,12 @@ class Consumer(object):
     def __init__(self, **configs):
         self.config: dict = self.DEFAULT_CONFIGS
         self.config.update(configs)
-        self.consumer = KafkaConsumer(
-            self.config.get('kafka_topic'),  # topic
-            bootstrap_servers=self.config.get('kafka_brokers'),
-            group_id=self.config.get('kafka_group_id'),
-        )
+        if not self.config.get('testing'):
+            self.consumer = KafkaConsumer(
+                self.config.get('kafka_topic'),  # topic
+                bootstrap_servers=self.config.get('kafka_brokers'),
+                group_id=self.config.get('kafka_group_id'),
+            )
         self.taos = taos.connect(
             host=self.config.get('taos_host'),
             user=self.config.get('taos_user'),
@@ -60,7 +62,7 @@ class Consumer(object):
         )
         if self.config.get('async_model'):
             self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers'))
-            self.tasks: list[Future] = []
+            self.tasks = []
         # tags and table mapping
         # key: {location}_{groupId} value:
         self.tag_table_mapping = {}
         i = 0
@@ -104,8 +106,8 @@ class Consumer(object):
             for task in self.tasks:
                 while not task.done():
                     pass
-            if self.pool is not None:
-                self.pool.shutdown()
+        if self.pool is not None:
+            self.pool.shutdown()

         # clean data
         if self.config.get('clean_after_testing'):
@@ -115,14 +117,14 @@ class Consumer(object):
         if self.taos is not None:
             self.taos.close()

-    def _run(self, f: Callable[[ConsumerRecord], bool]):
+    def _run(self, f):
         for message in self.consumer:
             if self.config.get('async_model'):
                 self.pool.submit(f(message))
             else:
                 f(message)

-    def _run_batch(self, f: Callable[[list[list[ConsumerRecord]]], None]):
+    def _run_batch(self, f):
         while True:
             messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size'))
             if messages:
@@ -140,7 +142,7 @@ class Consumer(object):
             logging.info('## insert sql
%s', sql) return self.taos.execute(sql=sql) == 1 - def _to_taos_batch(self, messages: list[list[ConsumerRecord]]): + def _to_taos_batch(self, messages): sql = self._build_sql_batch(messages=messages) if len(sql) == 0: # decode error, skip return @@ -162,7 +164,7 @@ class Consumer(object): table_name = self._get_table_name(location=location, group_id=group_id) return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase) - def _build_sql_batch(self, messages: list[list[ConsumerRecord]]) -> str: + def _build_sql_batch(self, messages) -> str: sql_list = [] for partition_messages in messages: for message in partition_messages: @@ -186,7 +188,55 @@ def _get_location_and_group(key: str) -> (str, int): return fields[0], fields[1] +def test_to_taos(consumer: Consumer): + msg = { + 'location': 'California.SanFrancisco', + 'groupId': 1, + 'ts': '2022-12-06 15:13:38.643', + 'current': 3.41, + 'voltage': 105, + 'phase': 0.02027, + } + record = ConsumerRecord(checksum=None, headers=None, offset=1, key=None, value=json.dumps(msg), partition=1, + topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None) + assert consumer._to_taos(message=record) + + +def test_to_taos_batch(consumer: Consumer): + records = [ + [ + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'location': 'California.SanFrancisco', + 'groupId': 1, + 'ts': '2022-12-06 15:13:38.643', + 'current': 3.41, + 'voltage': 105, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'location': 'California.LosAngles', + 'groupId': 2, + 'ts': '2022-12-06 15:13:39.643', + 'current': 3.41, + 'voltage': 102, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ] + ] + + consumer._to_taos_batch(messages=records) + + if __name__ == '__main__': - consumer = Consumer(async_model=True) + consumer = Consumer(async_model=True, testing=True) + # init env consumer.init_env() - consumer.consume() \ No newline at end of file + # consumer.consume() + # test build sql + # test build sql batch + test_to_taos(consumer) + test_to_taos_batch(consumer) + \ No newline at end of file diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py index 1c516a800e..15a7d2ff8c 100644 --- a/docs/examples/python/mockdatasource.py +++ b/docs/examples/python/mockdatasource.py @@ -10,13 +10,14 @@ class MockDataSource: "9.4,118,0.141,California.SanFrancisco,4" ] - def __init__(self, tb_name_prefix, table_count): + def __init__(self, tb_name_prefix, table_count, infinity=True): self.table_name_prefix = tb_name_prefix + "_" self.table_count = table_count self.max_rows = 10000000 self.current_ts = round(time.time() * 1000) - self.max_rows * 100 # [(tableId, tableName, values),] self.data = self._init_data() + self.infinity = infinity def _init_data(self): lines = self.samples * (self.table_count // 5 + 1) @@ -28,14 +29,19 @@ class MockDataSource: def __iter__(self): self.row = 0 - return self + if not self.infinity: + return iter(self._iter_data()) + else: + return self def __next__(self): """ next 1000 rows for each table. 
return: {tableId:[row,...]} """ - # generate 1000 timestamps + return self._iter_data() + + def _iter_data(self): ts = [] for _ in range(1000): self.current_ts += 100 @@ -47,3 +53,10 @@ class MockDataSource: rows = [table_name + ',' + t + ',' + values for t in ts] result.append((table_id, rows)) return result + + +if __name__ == '__main__': + datasource = MockDataSource('t', 10, False) + for data in datasource: + print(data) + \ No newline at end of file diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py index 758167376b..db51bb7174 100644 --- a/docs/examples/python/sql_writer.py +++ b/docs/examples/python/sql_writer.py @@ -10,6 +10,7 @@ class SQLWriter: self._tb_tags = {} self._conn = get_connection_func() self._max_sql_length = self.get_max_sql_length() + self._conn.execute("create database if not exists test") self._conn.execute("USE test") def get_max_sql_length(self): @@ -20,7 +21,7 @@ class SQLWriter: return int(r[1]) return 1024 * 1024 - def process_lines(self, lines: str): + def process_lines(self, lines: [str]): """ :param lines: [[tbName,ts,current,voltage,phase,location,groupId]] """ @@ -60,6 +61,7 @@ class SQLWriter: buf.append(q) sql_len += len(q) sql += " ".join(buf) + self.create_tables() self.execute_sql(sql) self._tb_values.clear() @@ -88,3 +90,23 @@ class SQLWriter: except BaseException as e: self.log.error("Execute SQL: %s", sql) raise e + + def close(self): + if self._conn: + self._conn.close() + + +if __name__ == '__main__': + def get_connection_func(): + conn = taos.connect() + return conn + + + writer = SQLWriter(get_connection_func=get_connection_func) + writer.execute_sql( + "create stable if not exists meters (ts timestamp, current float, voltage int, phase float) " + "tags (location binary(64), groupId int)") + writer.execute_sql( + "INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) " + "VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)") + \ No newline at end of file diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py index a4625ca11a..32778e9f25 100644 --- a/docs/examples/python/tmq_example.py +++ b/docs/examples/python/tmq_example.py @@ -1,58 +1,55 @@ +from taos.tmq import Consumer import taos -from taos.tmq import * - -conn = taos.connect() - -print("init") -conn.execute("drop topic if exists topic_ctb_column") -conn.execute("drop database if exists py_tmq") -conn.execute("create database if not exists py_tmq vgroups 2") -conn.select_db("py_tmq") -conn.execute( - "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)" -) -conn.execute("create table if not exists tb1 using stb1 tags(1)") -conn.execute("create table if not exists tb2 using stb1 tags(2)") -conn.execute("create table if not exists tb3 using stb1 tags(3)") - -print("create topic") -conn.execute( - "create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1" -) - -print("build consumer") -conf = TaosTmqConf() -conf.set("group.id", "tg2") -conf.set("td.connect.user", "root") -conf.set("td.connect.pass", "taosdata") -conf.set("enable.auto.commit", "true") -def tmq_commit_cb_print(tmq, resp, offset, param=None): - print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") +def init_tmq_env(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) + conn.execute("create database if not exists {}".format(db)) + conn.select_db(db) + conn.execute( + "create 
stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") + conn.execute("create table if not exists tb1 using stb1 tags(1, 't1')") + conn.execute("create table if not exists tb2 using stb1 tags(2, 't2')") + conn.execute("create table if not exists tb3 using stb1 tags(3, 't3')") + conn.execute("create topic if not exists {} as select ts, c1, c2, c3 from stb1".format(topic)) + conn.execute("insert into tb1 values (now, 1, 1.0, 'tmq test')") + conn.execute("insert into tb2 values (now, 2, 2.0, 'tmq test')") + conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')") -conf.set_auto_commit_cb(tmq_commit_cb_print, None) -tmq = conf.new_consumer() +def cleanup(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) -print("build topic list") -topic_list = TaosTmqList() -topic_list.append("topic_ctb_column") +if __name__ == '__main__': + init_tmq_env("tmq_test", "tmq_test_topic") # init env + consumer = Consumer( + { + "group.id": "tg2", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "enable.auto.commit": "true", + } + ) + consumer.subscribe(["tmq_test_topic"]) -print("basic consume loop") -tmq.subscribe(topic_list) + try: + while True: + res = consumer.poll(1) + if not res: + break + err = res.error() + if err is not None: + raise err + val = res.value() -sub_list = tmq.subscription() - -print("subscribed topics: ", sub_list) - -while 1: - res = tmq.poll(1000) - if res: - topic = res.get_topic_name() - vg = res.get_vgroup_id() - db = res.get_db_name() - print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}") - for row in res: - print(row) + for block in val: + print(block.fetchall()) + finally: + consumer.unsubscribe() + consumer.close() + cleanup("tmq_test", "tmq_test_topic") \ No newline at end of file diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh index 140d05395b..ccb391b752 100644 --- a/tests/docs-examples-test/python.sh +++ b/tests/docs-examples-test/python.sh @@ -23,7 +23,7 @@ python3 bind_param_example.py # 4 taos -s "drop database power" -python3 multi_bind_example.py +python3 multi_bind_example.py # 5 python3 query_example.py @@ -44,4 +44,43 @@ taos -s "drop database test" python3 json_protocol_example.py # 10 -# python3 subscribe_demo.py +pip install SQLAlchemy +pip install pandas +taosBenchmark -y -d power -t 10 -n 10 +python3 conn_native_pandas.py +python3 conn_rest_pandas.py +taos -s "drop database if exists power" + +# 11 +taos -s "create database if not exists test" +python3 connect_native_reference.py + +# 12 +python3 connect_rest_examples.py + +# 13 +python3 handle_exception.py + +# 14 +taosBenchmark -y -d power -t 2 -n 10 +python3 rest_client_example.py +taos -s "drop database if exists power" + +# 15 +python3 result_set_examples.py + +# 16 +python3 tmq_example.py + +# 17 +python3 sql_writer.py + +# 18 +python3 mockdatasource.py + +# 19 +python3 fast_write_example.py + +# 20 +pip3 install kafka-python +python3 kafka_example.py From cabf3e86839de09bbfa45a623319b7d43f57d0e2 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 31 Jan 2023 11:22:06 +0800 Subject: [PATCH 18/45] fix: invalid read --- source/dnode/vnode/src/tsdb/tsdbSnapshot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 1bfd630331..98c9c0fdda 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ 
b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -1643,8 +1643,8 @@ _exit: if (code) { tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); } else { - tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, pId->suid, - pId->uid); + tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, + pWriter->tbid.suid, pWriter->tbid.uid); } return code; } From a84e990c4e1f99cd2f68c7913c2c56197cba2f7f Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 31 Jan 2023 11:29:46 +0800 Subject: [PATCH 19/45] fix: not try to propose commit on vmCloseVnode --- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 99ba9b9b3b..951544c766 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -79,8 +79,6 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { char path[TSDB_FILENAME_LEN] = {0}; - vnodeProposeCommitOnNeed(pVnode->pImpl); - taosThreadRwlockWrlock(&pMgmt->lock); taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); taosThreadRwlockUnlock(&pMgmt->lock); From b56db2026b7c7a30cc94196edf5b1b887ae52996 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Tue, 31 Jan 2023 15:14:48 +0800 Subject: [PATCH 20/45] fix(taosAdapter): set option `TSDB_OPTION_USE_ADAPTER = true` (#19711) --- cmake/taosadapter_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index 13b247770e..d156057459 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 213f8b3 + GIT_TAG 3e08996 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From f6df973d4d2e3ae42da47fc82f9ead04b353fff2 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 1 Feb 2023 10:15:42 +0800 Subject: [PATCH 21/45] fix: no cache auto create table error --- source/libs/parser/src/parInsertSql.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 98c6aed829..1e1821842f 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -1760,6 +1760,9 @@ static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMeta if (TSDB_CODE_SUCCESS == code && !isStb && TSDB_SUPER_TABLE == pStmt->pTableMeta->tableType) { code = buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported"); } + if (TSDB_CODE_SUCCESS == code && isStb) { + code = storeTableMeta(pCxt, pStmt); + } if (TSDB_CODE_SUCCESS == code) { code = getTableVgroupFromMetaData(pMetaData->pTableHash, pStmt, isStb); } From c4531512303fb4129b22cde6552bfe193cce80c5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 1 Feb 2023 15:56:58 +0800 Subject: [PATCH 22/45] test(query): add regress test case. 
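
In outline, the scenario this case drives (a condensed SQL sketch of the sim
script below; the database name comes from the script's flush statement, while
the timestamps and the single warm-up row shown here are illustrative, since
the script inserts about 200 rows per child table before the first flush):

    create database reg_db0 vgroups 1;
    use reg_db0;
    create stable st1 (ts timestamp, c int) tags(a int);
    create table t1 using st1 tags(1);
    create table t2 using st1 tags(2);
    insert into t1 values('2018-09-17 08:59:59', 1);
    flush database reg_db0;
    insert into t1 values('2018-09-17 09:00:26', 26);
    insert into t2 values('2018-09-17 09:00:25', 25);
    insert into t2 values('2018-09-17 09:00:30', 30);
    flush database reg_db0;
    delete from st1 where ts<='2018-9-17 09:00:26';
    select * from st1;

The delete spans rows persisted by both flushes, and the trailing select
checks that a super-table scan over the deleted range still completes
correctly.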
--- tests/script/tsim/parser/regressiontest.sim | 34 +++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim index 1b127155cb..3ce2b47b44 100644 --- a/tests/script/tsim/parser/regressiontest.sim +++ b/tests/script/tsim/parser/regressiontest.sim @@ -63,4 +63,38 @@ if $rows != 8198 then return -1 endi +print ===========================> TD-22077 && TD-21877 +sql drop database if exists $db -x step1 +sql create database $db vgroups 1; + +sql use $db +sql create stable st1 (ts timestamp, c int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); + +$i = 0 +$ts = 1674977959000 +$rowNum = 200 + +$x = 0 +while $x < $rowNum +$xs = $x * $delta +$ts = $ts0 + $xs +sql insert into t1 values ( $ts , $x ) +sql insert into t2 values ( $ts + 1000a, $x ) +$x = $x + 1 +$ts = $ts + 1000 +endw + +sql flush database $db + +sql insert into t1 values('2018-09-17 09:00:26', 26); +sql insert into t2 values('2018-09-17 09:00:25', 25); + +sql insert into t2 values('2018-09-17 09:00:30', 30); +sql flush database reg_db0; + +sql delete from st1 where ts<='2018-9-17 09:00:26'; +sql select * from st1; + system sh/exec.sh -n dnode1 -s stop -x SIGINT From a3a2af4b3fb35f9c62ad43a86b388a43814bbcf0 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 1 Feb 2023 15:59:29 +0800 Subject: [PATCH 23/45] fix: taosbenchmark schemaless refine for main (#19714) * fix: taos-tools 143b9e4 for taosbenchmark schemaless refine for main * fix: update taos-tools 723f696 * test: fix ../tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py * test: check nchar temporarily as diff behavior between main and 3.0 branch * fix: update taos-tools 181bcac --- cmake/taostools_CMakeLists.txt.in | 2 +- .../taosbenchmark/sml_json_alltypes.py | 62 ++++++++++++------- 2 files changed, 42 insertions(+), 22 deletions(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 13a81f88ea..926d0c63e7 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 0cd564a + GIT_TAG 181bcac SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py index 2c6d09b0f5..1b65e38d72 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py @@ -19,32 +19,38 @@ from util.dnodes import * class TDTestCase: def caseDescription(self): - ''' + """ [TD-11510] taosBenchmark test cases - ''' - return + """ def init(self, conn, logSql, replicaVar=1): - self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) + self.replicaVar = int(replicaVar) tdSql.init(conn.cursor(), logSql) def getPath(self, tool="taosBenchmark"): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] + elif "src" in selfPath: + projPath = selfPath[: selfPath.find("src")] + elif "/tools/" in selfPath: + projPath = selfPath[: selfPath.find("/tools/")] + elif "/tests/" in selfPath: + projPath = selfPath[: selfPath.find("/tests/")] 
else: - projPath = selfPath[:selfPath.find("tests")] + tdLog.info("cannot found %s in path: %s, use system's" % (tool, selfPath)) + projPath = "/usr/local/taos/bin/" paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files): + for root, dummy, files in os.walk(projPath): + if (tool) in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): + if "packaging" not in rootRealPath: paths.append(os.path.join(root, tool)) break - if (len(paths) == 0): + if len(paths) == 0: tdLog.exit("taosBenchmark not found!") return else: @@ -52,31 +58,45 @@ class TDTestCase: return paths[0] def run(self): + tdSql.query("select client_version()") + client_ver = "".join(tdSql.queryResult[0]) + major_ver = client_ver.split(".")[0] + binPath = self.getPath() - cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" %binPath + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe db.stb1") tdSql.checkData(1, 1, "BOOL") tdSql.query("describe db.stb2") - tdSql.checkData(1, 1, "TINYINT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb3") - tdSql.checkData(1, 1, "SMALLINT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb4") - tdSql.checkData(1, 1, "INT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb5") - tdSql.checkData(1, 1, "BIGINT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb6") - tdSql.checkData(1, 1, "FLOAT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb7") tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb8") - tdSql.checkData(1, 1, "VARCHAR") - tdSql.checkData(1, 2, 16) + if major_ver == "3": + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 16) + else: + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 8) + tdSql.query("describe db.stb9") - tdSql.checkData(1, 1, "NCHAR") - tdSql.checkData(1, 2, 16) + if major_ver == "3": + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 16) + else: + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 8) + tdSql.query("select count(*) from db.stb1") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from db.stb2") From 869ab0d394e08e19839a22300bbd55e059166462 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 1 Feb 2023 17:27:56 +0800 Subject: [PATCH 24/45] fix: snode timer --- source/dnode/snode/src/snode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 860db20fa8..1d2f4da26b 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -93,6 +93,8 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) { pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle); ASSERT(pTask->exec.executor); + streamSetupTrigger(pTask); + return 0; } From df30b51a7517d63d89c9730d8e52aed920c6de49 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 1 Feb 2023 17:59:39 +0800 Subject: [PATCH 25/45] fix(stream): use tdb page replacement with stream state --- source/libs/stream/src/streamState.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 6670bf463e..2460da25f4 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -192,7 +192,7 @@ void streamStateClose(SStreamState* pState) { } int32_t 
streamStateBegin(SStreamState* pState) { - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { tdbAbort(pState->pTdbState->db, pState->pTdbState->txn); return -1; @@ -208,7 +208,7 @@ int32_t streamStateCommit(SStreamState* pState) { return -1; } - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } @@ -220,7 +220,7 @@ int32_t streamStateAbort(SStreamState* pState) { return -1; } - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } From 8e4c6ccbb3c49a92296dc6d424d26af947c467a7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 1 Feb 2023 18:15:35 +0800 Subject: [PATCH 26/45] fix(query): move the reset to other place. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 19 +++++++++++++++---- source/util/src/talgo.c | 8 ++++---- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index a77eb8298e..0bb9ebc40e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2693,10 +2693,6 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea void* p = taosHashIterate(pStatus->pTableMap, NULL); while (p != NULL) { STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; - - // reset the last del file index - pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order); - pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid; p = taosHashIterate(pStatus->pTableMap, p); } @@ -2704,6 +2700,18 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc); } +// reset the last del file index +static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t order) { + void* p = taosHashIterate(pStatus->pTableMap, NULL); + while (p != NULL) { + STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; + + // reset the last del file index + pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order); + p = taosHashIterate(pStatus->pTableMap, p); + } +} + static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; @@ -3052,6 +3060,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have data files, let's start check the last block file if exists if (pBlockIter->numOfBlocks == 0) { + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } } @@ -3083,6 +3092,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // data blocks in current file are exhausted, let's try the next file now tBlockDataReset(&pReader->status.fileBlockData); resetDataBlockIterator(pBlockIter, pReader->order); + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } else { code = initForFirstBlockInFile(pReader, pBlockIter); @@ -3094,6 +3104,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) 
{ // this file does not have blocks, let's start check the last block file if (pBlockIter->numOfBlocks == 0) { + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } } diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c index a06aac6afe..e373850b3c 100644 --- a/source/util/src/talgo.c +++ b/source/util/src/talgo.c @@ -28,14 +28,14 @@ static void median(void *src, int64_t size, int64_t s, int64_t e, const void *pa void *buf) { int32_t mid = ((int32_t)(e - s) >> 1u) + (int32_t)s; - if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) == 1) { + if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) > 0) { doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf); } - if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) == 1) { + if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) > 0) { doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf); doswap(elePtrAt(src, size, mid), elePtrAt(src, size, e), size, buf); - } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) == 1) { + } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) > 0) { doswap(elePtrAt(src, size, s), elePtrAt(src, size, e), size, buf); } @@ -47,7 +47,7 @@ static void tInsertSort(void *src, int64_t size, int32_t s, int32_t e, const voi void *buf) { for (int32_t i = s + 1; i <= e; ++i) { for (int32_t j = i; j > s; --j) { - if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) == -1) { + if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) < 0) { doswap(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), size, buf); } else { break; From 043fc8d98046afbfea2ae8cb84899391f88b83de Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Wed, 1 Feb 2023 19:11:51 +0800 Subject: [PATCH 27/45] enh: improve logging msgs for sync snapshot repl --- source/libs/sync/src/syncUtil.c | 133 +++++++++++++++----------------- 1 file changed, 63 insertions(+), 70 deletions(-) diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index b246d9a79d..6a50572cba 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -141,20 +141,15 @@ static void syncLogReplMgrStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bu } static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { - int32_t len = 1; - + int32_t len = 0; + len += snprintf(buf + len, bufLen - len, "%s", "{"); for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) { SPeerState* pState = syncNodeGetPeerState(pSyncNode, &(pSyncNode->replicasId[i])); if (pState == NULL) break; - - if (i < pSyncNode->replicaNum - 1) { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 ", ", i, pState->lastSendIndex, - pState->lastSendTime); - } else { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 "}", i, pState->lastSendIndex, - pState->lastSendTime); - } + len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 "%s", i, pState->lastSendIndex, + pState->lastSendTime, (i < pSyncNode->replicaNum - 1) ? ", " : ""); } + len += snprintf(buf + len, bufLen - len, "%s", "}"); } void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) 
{ @@ -245,7 +240,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla char cfgStr[1024] = ""; syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr)); - char peerStr[1024] = "{"; + char peerStr[1024] = ""; syncPeerState2Str(pNode, peerStr, sizeof(peerStr)); char eventLog[512]; // {0}; @@ -255,20 +250,21 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla va_end(argpointer); taosPrintLog(flags, level, dflag, - "vgId:%d, %s, sync:%s, {%p s-param:%" PRId64 " e-param:%" PRId64 " laindex:%" PRId64 " laterm:%" PRIu64 - " lcindex:%" PRId64 - " seq:%d ack:%d finish:%d replica-index:%d dnode:%d}" - ", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64 - ", snap-tm:%" PRIu64 ", sby:%d, stgy:%d, bch:%d, r-num:%d, lcfg:%" PRId64 - ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s", + "vgId:%d, %s, sync:%s, snap-sender:{%p start:%" PRId64 " end:%" PRId64 " last-index:%" PRId64 + " last-term:%" PRIu64 " last-cfg:%" PRId64 + ", seq:%d ack:%d finish:%d, as:%d dnode:%d}" + ", term:%" PRIu64 ", commit-index:%" PRId64 ", firstver:%" PRId64 ", lastver:%" PRId64 + ", min-match:%" PRId64 ", snap:{last-index:%" PRId64 ", term:%" PRIu64 + "}, standby:%d, batch-sz:%d, replicas:%d, last-cfg:%" PRId64 + ", chging:%d, restore:%d, quorum:%d, lc-timer:{elect:%" PRId64 ", hb:%" PRId64 "}, peer:%s, cfg:%s", pNode->vgId, eventLog, syncStr(pNode->state), pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->replicaIndex, DID(&pNode->replicasId[pSender->replicaIndex]), pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, - pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum, - pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), - pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); + pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, + pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, + pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); } void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver, @@ -291,7 +287,7 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df char cfgStr[1024] = ""; syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr)); - char peerStr[1024] = "{"; + char peerStr[1024] = ""; syncPeerState2Str(pNode, peerStr, sizeof(peerStr)); char eventLog[512]; // {0}; @@ -300,22 +296,22 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df int32_t writeLen = vsnprintf(eventLog, sizeof(eventLog), format, argpointer); va_end(argpointer); - taosPrintLog(flags, level, dflag, - "vgId:%d, %s, sync:%s," - " {%p start:%d ack:%d term:%" PRIu64 " start-time:%" PRId64 " from dnode:%d s-param:%" PRId64 - " e-param:%" PRId64 " laindex:%" PRId64 " laterm:%" PRIu64 " lcindex:%" PRId64 - "}" - ", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64 - ", snap-tm:%" PRIu64 ", sby:%d, stgy:%d, bch:%d, r-num:%d, lcfg:%" PRId64 - 
", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s", - pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, - pReceiver->term, pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, - pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, - pReceiver->snapshot.lastConfigIndex, pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, - logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, - pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum, - pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), - pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); + taosPrintLog( + flags, level, dflag, + "vgId:%d, %s, sync:%s," + " snap-receiver:{%p started:%d acked:%d term:%" PRIu64 " start-time:%" PRId64 " from-dnode:%d, start:%" PRId64 + " end:%" PRId64 " last-index:%" PRId64 " last-term:%" PRIu64 " last-cfg:%" PRId64 + "}" + ", term:%" PRIu64 ", commit-index:%" PRId64 ", firstver:%" PRId64 ", lastver:%" PRId64 ", min-match:%" PRId64 + ", snap:{last-index:%" PRId64 ", last-term:%" PRIu64 "}, standby:%d, batch-sz:%d, replicas:%d, last-cfg:%" PRId64 + ", chging:%d, restore:%d, quorum:%d, lc-timers:{elect:%" PRId64 ", hb:%" PRId64 "}, peer:%s, cfg:%s", + pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, + pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, + pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex, + pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, + snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, + pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, + syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); } void syncLogRecvTimer(SSyncNode* pSyncNode, const SyncTimeout* pMsg, const char* s) { @@ -351,13 +347,13 @@ void syncLogSendHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, bool int64_t execTime) { if (printX) { sNTrace(pSyncNode, - "send sync-heartbeat to dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 - "}, x", + "send sync-heartbeat to dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, x", DID(&pMsg->destId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp); } else { sNTrace(pSyncNode, - "send sync-heartbeat to dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 - "}, timer-elapsed:%" PRId64 ", next-exec:%" PRId64, + "send sync-heartbeat to dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, timer-elapsed:%" PRId64 ", next-exec:%" PRId64, DID(&pMsg->destId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, timerElapsed, execTime); } @@ -368,14 +364,14 @@ void syncLogRecvHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, int64 pSyncNode->hbSlowNum++; sNInfo(pSyncNode, - "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 + "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 
", commit-index:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); } sNTrace(pSyncNode, - "recv sync-heartbeat from dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 - "}, %s, net elapsed:%" PRId64, + "recv sync-heartbeat from dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); } @@ -400,67 +396,64 @@ void syncLogRecvHeartbeatReply(SSyncNode* pSyncNode, const SyncHeartbeatReply* p void syncLogSendSyncSnapshotSend(SSyncNode* pSyncNode, const SyncSnapshotSend* pMsg, const char* s) { sNDebug(pSyncNode, - "send sync-snapshot-send to dnode:%d, %s, seq:%d, term:%" PRId64 ", begin:%" PRId64 ", end:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64, + "send sync-snapshot-send to dnode:%d, %s, seq:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64, DID(&pMsg->destId), s, pMsg->seq, pMsg->term, pMsg->beginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime); } void syncLogRecvSyncSnapshotSend(SSyncNode* pSyncNode, const SyncSnapshotSend* pMsg, const char* s) { sNDebug(pSyncNode, - "recv sync-snapshot-send from dnode:%d, %s, seq:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64 ", len:%u", + "recv sync-snapshot-send from dnode:%d, %s, seq:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64 ", data-len:%u", DID(&pMsg->srcId), s, pMsg->seq, pMsg->term, pMsg->beginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime, pMsg->dataLen); } void syncLogSendSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMsg, const char* s) { sNDebug(pSyncNode, - "send sync-snapshot-rsp to dnode:%d, %s, ack:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64, + "send sync-snapshot-rsp to dnode:%d, %s, acked:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64, DID(&pMsg->destId), s, pMsg->ack, pMsg->term, pMsg->snapBeginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime); } void syncLogRecvSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMsg, const char* s) { sNDebug(pSyncNode, - "recv sync-snapshot-rsp from dnode:%d, %s, ack:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64, + "recv sync-snapshot-rsp from dnode:%d, %s, ack:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64, DID(&pMsg->srcId), s, pMsg->ack, pMsg->term, pMsg->snapBeginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime); } void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) { sNTrace(pSyncNode, - "recv sync-append-entries from dnode:%d {term:%" PRId64 ", pre-index:%" PRId64 ", pre-term:%" PRId64 - ", cmt:%" PRId64 ", pterm:%" PRId64 ", datalen:%d}, %s", - DID(&pMsg->srcId), pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->privateTerm, - pMsg->dataLen, s); + "recv sync-append-entries from dnode:%d {term:%" PRId64 ", prev-log:{index:%" PRId64 ", term:%" PRId64 + "}, commit-index:%" PRId64 ", datalen:%d}, %s", + DID(&pMsg->srcId), 
pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->dataLen, s); } void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) { sNTrace(pSyncNode, - "send sync-append-entries to dnode:%d, {term:%" PRId64 ", pre-index:%" PRId64 ", pre-term:%" PRId64 - ", lsend-index:%" PRId64 ", cmt:%" PRId64 ", datalen:%d}, %s", + "send sync-append-entries to dnode:%d, {term:%" PRId64 ", prev-log:{index:%" PRId64 ", term:%" PRId64 + "}, index:%" PRId64 ", commit-index:%" PRId64 ", datalen:%d}, %s", DID(&pMsg->destId), pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, (pMsg->prevLogIndex + 1), pMsg->commitIndex, pMsg->dataLen, s); } -void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, const char* s) { - if (voteGranted == -1) { - sNInfo(pSyncNode, - "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 "}, %s", - DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s); - } else { - sNInfo(pSyncNode, - "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 - "}, granted:%d", - DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, voteGranted); - } +void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, + const char* errmsg) { + char statusMsg[64]; + snprintf(statusMsg, sizeof(statusMsg), "granted:%d", voteGranted); + sNInfo(pSyncNode, + "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s", + DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, + (voteGranted != -1) ? statusMsg : errmsg); } void syncLogSendRequestVote(SSyncNode* pNode, const SyncRequestVote* pMsg, const char* s) { - sNInfo(pNode, "send sync-request-vote to dnode:%d {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 "}, %s", + sNInfo(pNode, + "send sync-request-vote to dnode:%d {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s", DID(&pMsg->destId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s); } From 3665ce26e7ca2ab3f6c38747f3ef0c407418ec43 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 1 Feb 2023 21:32:34 +0800 Subject: [PATCH 28/45] fix: update taos-tools a0234fe for main (#19737) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 926d0c63e7..1053caf4ef 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 181bcac + GIT_TAG a0234fe SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 9fe1512c553e0c9f33d858bc85ae26fa48a614e2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 1 Feb 2023 22:41:39 +0800 Subject: [PATCH 29/45] refactor: add some logs. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0bb9ebc40e..db52d7604f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -1113,9 +1113,9 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn int32_t unDumpedRows = asc ? 
pBlock->nRow - pDumpInfo->rowIndex : pDumpInfo->rowIndex + 1; tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 - ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", + ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", uid:%"PRIu64" elapsed time:%.2f ms, %s", pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, dumpedRows, - unDumpedRows, pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr); + unDumpedRows, pBlock->minVer, pBlock->maxVer, pBlockInfo->uid, elapsedTime, pReader->idStr); return TSDB_CODE_SUCCESS; } @@ -2187,17 +2187,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea if (code == TSDB_CODE_SUCCESS) { pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL); - tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + tsdbDebug("%p uid:%" PRIu64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 " %s", pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr); } else { - tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid, + tsdbError("%p uid:%" PRIu64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid, tstrerror(code), pReader->idStr); return code; } } } else { - tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); } STbData* di = NULL; @@ -2208,17 +2208,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea if (code == TSDB_CODE_SUCCESS) { pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL); - tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + tsdbDebug("%p uid:%" PRIu64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 " %s", pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr); } else { - tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid, + tsdbError("%p uid:%" PRIu64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid, tstrerror(code), pReader->idStr); return code; } } } else { - tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); } initDelSkylineIterator(pBlockScanInfo, pReader, d, di); @@ -2837,6 +2837,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader; + ASSERT(pBlockInfo != NULL); + if (pBlockInfo != NULL) { pScanInfo = *(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); @@ -2857,7 +2859,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { initLastBlockReader(pLastBlockReader, pScanInfo, pReader); TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader); - if (pBlockInfo == NULL) { // build data block from last data file + /*if 
(pBlockInfo == NULL) { // build data block from last data file SBlockData* pBData = &pReader->status.fileBlockData; tBlockDataReset(pBData); @@ -2889,7 +2891,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows, el, pReader->idStr); } - } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) { + } else*/ if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) { code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid); if (code != TSDB_CODE_SUCCESS) { return code; From 041d0d817a63b514f22637f4478d5eace116f15c Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 2 Feb 2023 09:11:17 +0800 Subject: [PATCH 30/45] fix: tq open --- source/dnode/vnode/src/tq/tq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index b195cfafb0..cca241a1cf 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -93,21 +93,21 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { taosHashSetFreeFp(pTq->pCheckInfo, (FDelete)tDeleteSTqCheckInfo); if (tqMetaOpen(pTq) < 0) { - ASSERT(0); + return NULL; } pTq->pOffsetStore = tqOffsetOpen(pTq); if (pTq->pOffsetStore == NULL) { - ASSERT(0); + return NULL; } pTq->pStreamMeta = streamMetaOpen(path, pTq, (FTaskExpand*)tqExpandTask, pTq->pVnode->config.vgId); if (pTq->pStreamMeta == NULL) { - ASSERT(0); + return NULL; } if (streamLoadTasks(pTq->pStreamMeta) < 0) { - ASSERT(0); + return NULL; } return pTq; From 1e17f1d482d65daba29c3f5d017d604383578913 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 2 Feb 2023 09:42:06 +0800 Subject: [PATCH 31/45] fix: mem leak --- source/client/src/clientTmq.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 90c405c204..22739108e2 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1891,9 +1891,6 @@ int32_t tmq_consumer_close(tmq_t* tmq) { } tmq_list_destroy(lst); - - /*return rsp;*/ - return 0; } taosRemoveRef(tmqMgmt.rsetId, tmq->refId); return 0; From 69d6fb5bfe504f2a661530762395ec2f22f45a9b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 2 Feb 2023 10:20:34 +0800 Subject: [PATCH 32/45] fix(meta): use metaAbort instead of tdbAbort with metaClose --- source/dnode/vnode/src/meta/metaCommit.c | 5 ++++- source/dnode/vnode/src/meta/metaOpen.c | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCommit.c b/source/dnode/vnode/src/meta/metaCommit.c index f61930b84c..f597c100d0 100644 --- a/source/dnode/vnode/src/meta/metaCommit.c +++ b/source/dnode/vnode/src/meta/metaCommit.c @@ -56,4 +56,7 @@ int metaPrepareAsyncCommit(SMeta *pMeta) { } // abort the meta txn -int metaAbort(SMeta *pMeta) { return tdbAbort(pMeta->pEnv, pMeta->txn); } +int metaAbort(SMeta *pMeta) { + if (!pMeta->txn) return 0; + return tdbAbort(pMeta->pEnv, pMeta->txn); +} diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 867b481bcc..35677d6f07 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -203,7 +203,7 @@ _err: int metaClose(SMeta *pMeta) { if (pMeta) { - if (pMeta->pEnv) tdbAbort(pMeta->pEnv, pMeta->txn); + if (pMeta->pEnv) metaAbort(pMeta); if (pMeta->pCache) metaCacheClose(pMeta); 
if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); From 7f571b1ebdb122d8ce9b0defc29e79076cf47c7f Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 2 Feb 2023 10:59:47 +0800 Subject: [PATCH 33/45] fix: a time point can be fill --- source/libs/parser/src/parTranslater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9991c2c6ae..183d1b0c6e 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2978,7 +2978,7 @@ static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* intervalRange = pInterval->datum.i; } - if ((timeRange == 0) || (timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) { + if ((timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE); } From e234dda2de599b0f39ab9f272e46131e0727cb61 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 2 Feb 2023 11:44:08 +0800 Subject: [PATCH 34/45] fix: interp support filter and scalar calc --- source/libs/executor/src/timesliceoperator.c | 12 ++++++++ tests/script/tsim/parser/interp.sim | 29 ++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 94c98f41c9..6561e810bb 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -433,6 +433,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { break; } + if (pSliceInfo->scalarSup.pExprInfo != NULL) { + SExprSupp* pExprSup = &pSliceInfo->scalarSup; + projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); + } + int32_t code = initKeeperInfo(pSliceInfo, pBlock); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); @@ -531,6 +536,8 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); } + doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL); + // restore the value setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED); if (pResBlock->info.rows == 0) { @@ -566,6 +573,11 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode } } + code = filterInitFromNode((SNode*)pInterpPhyNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + pInfo->tsCol = extractColumnFromColumnNode((SColumnNode*)pInterpPhyNode->pTimeSeries); pInfo->fillType = convertFillType(pInterpPhyNode->fillMode); initResultSizeInfo(&pOperator->resultInfo, 4096); diff --git a/tests/script/tsim/parser/interp.sim b/tests/script/tsim/parser/interp.sim index 1b7878178c..e6512a22d7 100644 --- a/tests/script/tsim/parser/interp.sim +++ b/tests/script/tsim/parser/interp.sim @@ -72,4 +72,33 @@ sql_error select interp(*) from nt5931 where ts=now sql_error select interp(*) from st5931 where ts=now sql_error select interp(*) from ct5931 where ts=now +sql create stable sta (ts timestamp, f1 double, f2 binary(200)) tags(t1 int); +sql create table tba1 using sta tags(1); +sql insert into tba1 values ('2022-04-26 15:15:01', -3.0, "a"); +sql insert into tba1 values ('2022-04-26 15:15:05', 3.0, "b"); +sql select a from (select interp(f1) as a from tba1 where ts >= '2022-04-26 15:15:01' and ts <= '2022-04-26 15:15:05' 
range('2022-04-26 15:15:01','2022-04-26 15:15:05') every(1s) fill(linear)) where a > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 1.500000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi + +sql select a from (select interp(f1+1) as a from tba1 where ts >= '2022-04-26 15:15:01' and ts <= '2022-04-26 15:15:05' range('2022-04-26 15:15:01','2022-04-26 15:15:05') every(1s) fill(linear)) where a > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data10 != 2.500000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT From b9ee0f983f6f7b849d14bc8db3d023ae77fe343d Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 2 Feb 2023 15:32:02 +0800 Subject: [PATCH 35/45] ci:add ci docker file to TD-rep --- tests/ci/Dockerfile | 48 ++++++++++++++++++++++++++++++ tests/ci/build_image.sh | 4 +++ tests/ci/daily_build_image.sh | 56 +++++++++++++++++++++++++++++++++++ 3 files changed, 108 insertions(+) create mode 100644 tests/ci/Dockerfile create mode 100755 tests/ci/build_image.sh create mode 100755 tests/ci/daily_build_image.sh diff --git a/tests/ci/Dockerfile b/tests/ci/Dockerfile new file mode 100644 index 0000000000..594bcc902d --- /dev/null +++ b/tests/ci/Dockerfile @@ -0,0 +1,48 @@ +FROM python:3.8 +RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple +RUN pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro +RUN apt-get update +RUN apt-get install -y psmisc sudo tree libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config build-essential valgrind \ + vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 +RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' +RUN apt install -y r-base +ADD go1.17.6.linux-amd64.tar.gz /usr/local/ +ADD jdk-8u144-linux-x64.tar.gz /usr/local/ +ADD apache-maven-3.8.4-bin.tar.gz /usr/local/ +RUN apt-get install wget -y \ + && wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \ + && dpkg -i packages-microsoft-prod.deb \ + && rm packages-microsoft-prod.deb \ + && apt-get update && apt-get install -y dotnet-sdk-5.0 && apt-get install -y dotnet-sdk-6.0 +ADD node-v12.20.0-linux-x64.tar.gz /usr/local/ +RUN sh -c "rm -f /etc/localtime;ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime;echo \"Asia/Shanghai\" >/etc/timezone" +COPY id_rsa /root/.ssh/id_rsa +COPY .m2 /root/.m2 +COPY .nuget /root/.nuget +COPY .dotnet /root/.dotnet +COPY .cargo /root/.cargo +COPY go /root/go +ADD cmake-3.21.5-linux-x86_64.tar.gz /usr/local/ +RUN echo " export RUSTUP_DIST_SERVER=\"https://rsproxy.cn\" " >> /root/.bashrc +RUN echo " export RUSTUP_UPDATE_ROOT=\"https://rsproxy.cn/rustup\" " >> /root/.bashrc +RUN curl https://sh.rustup.rs -o /tmp/rustup-init.sh +RUN sh /tmp/rustup-init.sh -y +ENV PATH /usr/local/go/bin:/usr/local/node-v12.20.0-linux-x64/bin:/usr/local/apache-maven-3.8.4/bin:/usr/local/jdk1.8.0_144/bin:/usr/local/cmake-3.21.5-linux-x86_64/bin:/root/.cargo/bin:$PATH +ENV JAVA_HOME /usr/local/jdk1.8.0_144 +RUN go env -w GOPROXY=https://goproxy.cn +RUN echo "StrictHostKeyChecking no" >>/etc/ssh/ssh_config +RUN npm config -g set unsafe-perm +RUN npm config -g set 
registry https://registry.npm.taobao.org +COPY .npm /root/.npm +RUN R CMD javareconf JAVA_HOME=${JAVA_HOME} JAVA=${JAVA_HOME}/bin/java JAVAC=${JAVA_HOME}/bin/javac JAVAH=${JAVA_HOME}/bin/javah JAR=${JAVA_HOME}/bin/jar +RUN echo "install.packages(\"RJDBC\", repos=\"http://cran.us.r-project.org\")"|R --no-save +COPY .gitconfig /root/.gitconfig +RUN mkdir -p /run/sshd +COPY id_rsa.pub /root/.ssh/id_rsa.pub +COPY id_rsa.pub /root/.ssh/authorized_keys +RUN pip3 uninstall -y taostest +COPY repository/TDinternal /home/TDinternal +COPY repository/taos-connector-python /home/taos-connector-python +RUN sh -c "cd /home/taos-connector-python; pip3 install ." +COPY setup.sh /home/setup.sh \ No newline at end of file diff --git a/tests/ci/build_image.sh b/tests/ci/build_image.sh new file mode 100755 index 0000000000..1864df35db --- /dev/null +++ b/tests/ci/build_image.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +docker build --no-cache -t taos_test:v1.0 . + diff --git a/tests/ci/daily_build_image.sh b/tests/ci/daily_build_image.sh new file mode 100755 index 0000000000..47f0c971d9 --- /dev/null +++ b/tests/ci/daily_build_image.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +set -x + +script_dir=`dirname $0` +cd $script_dir +script_dir=`pwd` +cd $script_dir/repository/taos-connector-python +git pull + +cd $script_dir/repository/TDinternal +git clean -fxd +git pull + +cd $script_dir/repository/TDinternal/community +git clean -fxd +git pull +git submodule update --init --recursive + +cd $script_dir +./build_image.sh || exit 1 +docker image prune -f +ips="\ +192.168.1.47 \ +192.168.1.48 \ +192.168.1.49 \ +192.168.1.52 \ +192.168.0.215 \ +192.168.0.217 \ +192.168.0.219 \ +" + +image=taos_image.tar + +docker save taos_test:v1.0 -o $image + +for ip in $ips; do + echo "scp $image root@$ip:/home/ &" + scp $image root@$ip:/home/ & +done +wait + +for ip in $ips; do + echo "ssh root@$ip docker load -i /home/$image &" + ssh root@$ip docker load -i /home/$image & +done +wait + +for ip in $ips; do + echo "ssh root@$ip rm -f /home/$image &" + ssh root@$ip rm -f /home/$image & +done +wait + +rm -rf taos_image.tar + From 57a0c8abd20ef2291388221c5d6876dd2469e5a1 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 2 Feb 2023 15:52:12 +0800 Subject: [PATCH 36/45] ci:add ci docker file to TD-rep --- tests/ci/daily_build_image.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ci/daily_build_image.sh b/tests/ci/daily_build_image.sh index 47f0c971d9..01148a3aae 100755 --- a/tests/ci/daily_build_image.sh +++ b/tests/ci/daily_build_image.sh @@ -14,10 +14,14 @@ git pull cd $script_dir/repository/TDinternal/community git clean -fxd -git pull +git checkout main +git pull origin main git submodule update --init --recursive cd $script_dir +cp $script_dir/repository/TDinternal/community/tests/ci/build_image.sh . +cp $script_dir/repository/TDinternal/community/tests/ci/daily_build_image.sh . 
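# the two cp lines above refresh the CI entry scripts from the freshly pulled
# community repo: the new build_image.sh is used by the run below, while the
# updated copy of this script itself only takes effect on the next run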
+ ./build_image.sh || exit 1 docker image prune -f ips="\ From a32c8520399e1b5f8694f61d03e77b5e0562d298 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 2 Feb 2023 16:13:17 +0800 Subject: [PATCH 37/45] fix:add ttl drop response for mnode --- source/dnode/mgmt/mgmt_mnode/src/mmHandle.c | 1 + source/dnode/mnode/impl/src/mndStb.c | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 16fe6c1b91..cf4eaaf7d1 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -167,6 +167,7 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_SUBSCRIBE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index d504a94700..471c53b2f5 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -41,6 +41,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq); static int32_t mndProcessCreateStbReq(SRpcMsg *pReq); static int32_t mndProcessAlterStbReq(SRpcMsg *pReq); static int32_t mndProcessDropStbReq(SRpcMsg *pReq); +static int32_t mndProcessDropTtltbReq(SRpcMsg *pReq); static int32_t mndProcessTableMetaReq(SRpcMsg *pReq); static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextStb(SMnode *pMnode, void *pIter); @@ -64,6 +65,7 @@ int32_t mndInitStb(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_ALTER_STB, mndProcessAlterStbReq); mndSetMsgHandle(pMnode, TDMT_MND_DROP_STB, mndProcessDropStbReq); mndSetMsgHandle(pMnode, TDMT_VND_CREATE_STB_RSP, mndTransProcessRsp); + mndSetMsgHandle(pMnode, TDMT_VND_DROP_TTL_TABLE_RSP, mndProcessDropTtltbReq); mndSetMsgHandle(pMnode, TDMT_VND_ALTER_STB_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_VND_DROP_STB_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_MND_TABLE_META, mndProcessTableMetaReq); @@ -2176,6 +2178,10 @@ static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName, return 0; } +static int32_t mndProcessDropTtltbReq(SRpcMsg *pRsp) { + return 0; +} + static int32_t mndProcessDropStbReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; From aab31f655c14a06df56be761a397fb51b566b240 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 16:57:05 +0800 Subject: [PATCH 38/45] fix(query): fix bug in multi-group limit/offset of the merge sort . 
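
When group-sorted output is enabled, the merge operator shares a single
limit/offset state across all groups, and the removed hasGroupId flag only
recorded that some group had started. Once one fetch drained the tail of a
group and the head of the next, the offset already consumed by the previous
group leaked into the new one. The fix makes doGetSortedBlockData report the
group boundary to its caller, which resets the limit state there. A condensed
sketch of the two cooperating pieces (identifiers are the ones in the diff
below; the surrounding control flow is abridged):

    /* producer side: a prefetched tuple carrying a different group id
       marks the start of the next group for the caller */
    uint64_t gid = tsortGetGroupId(pTupleHandle);
    if (gid != pInfo->groupId) {
      *newgroup = true;
      pInfo->groupId = gid;
    }

    /* consumer side: restart the offset/limit bookkeeping before applying
       it to the first block of the new group */
    if (newgroup) {
      resetLimitInfoForNextGroup(&pInfo->limitInfo);
    }
    bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);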
--- source/libs/executor/src/sortoperator.c | 32 +++++++++++++------------ 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 6d3da3e111..98ef6b8a36 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -544,7 +544,6 @@ typedef struct SMultiwayMergeOperatorInfo { SSDataBlock* pIntermediateBlock; // to hold the intermediate result int64_t startTs; // sort start time bool groupSort; - bool hasGroupId; uint64_t groupId; STupleHandle* prefetchedTuple; } SMultiwayMergeOperatorInfo; @@ -591,7 +590,9 @@ int32_t openMultiwayMergeOperator(SOperatorInfo* pOperator) { return TSDB_CODE_SUCCESS; } -static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity, SSDataBlock* p) { +static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity, + SSDataBlock* p, bool* newgroup) { + *newgroup = false; while (1) { STupleHandle* pTupleHandle = NULL; @@ -600,8 +601,12 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pTupleHandle = tsortNextTuple(pHandle); } else { pTupleHandle = pInfo->prefetchedTuple; - pInfo->groupId = tsortGetGroupId(pTupleHandle); pInfo->prefetchedTuple = NULL; + uint64_t gid = tsortGetGroupId(pTupleHandle); + if (gid != pInfo->groupId) { + *newgroup = true; + pInfo->groupId = gid; + } } } else { pTupleHandle = tsortNextTuple(pHandle); @@ -614,12 +619,10 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* if (pInfo->groupSort) { uint64_t tupleGroupId = tsortGetGroupId(pTupleHandle); - if (!pInfo->hasGroupId) { + if (pInfo->groupId == 0 || pInfo->groupId == tupleGroupId) { + appendOneRowToDataBlock(p, pTupleHandle); + p->info.id.groupId = tupleGroupId; pInfo->groupId = tupleGroupId; - pInfo->hasGroupId = true; - appendOneRowToDataBlock(p, pTupleHandle); - } else if (pInfo->groupId == tupleGroupId) { - appendOneRowToDataBlock(p, pTupleHandle); } else { pInfo->prefetchedTuple = pTupleHandle; break; @@ -632,11 +635,6 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* break; } } - - if (pInfo->groupSort) { - pInfo->hasGroupId = false; - } - } SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, SArray* pColMatchInfo, @@ -660,14 +658,18 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData } SSDataBlock* p = pInfo->pIntermediateBlock; + bool newgroup = false; while (1) { - doGetSortedBlockData(pInfo, pHandle, capacity, p); + doGetSortedBlockData(pInfo, pHandle, capacity, p, &newgroup); if (p->info.rows == 0) { break; } - // todo fix it: we need to decide whether this block is belonged to previous group or not . 
+ if (newgroup) { + resetLimitInfoForNextGroup(&pInfo->limitInfo); + } + bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo); if (limitReached) { resetLimitInfoForNextGroup(&pInfo->limitInfo); From 6cdfa6d60f3fd46989ff084b2793d229dd0254ec Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 2 Feb 2023 17:51:04 +0800 Subject: [PATCH 39/45] fix(tdb): return success if txn is commited --- source/libs/tdb/src/db/tdbPager.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 62d82edeb1..63e88b0a12 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -457,6 +457,11 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn) { SPgno journalSize = 0; int ret; + if (pTxn->jfd == 0) { + // txn is commited + return 0; + } + // sync the journal file ret = tdbOsFSync(pTxn->jfd); if (ret < 0) { From 746a6bc243fd110ddd81e1c108244d224e33740b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 2 Feb 2023 18:24:25 +0800 Subject: [PATCH 40/45] fix: taosbenchmark handle mem better patch2 for main (#19743) * fix: update taos-tools a0234fe for main * fix: update taos-tools c4a567b --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 1053caf4ef..4da2d80fbb 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG a0234fe + GIT_TAG c4a567b SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From b6b0b3439e8c519a2210842fdc52089bb5f135be Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 2 Feb 2023 19:34:18 +0800 Subject: [PATCH 41/45] enh: rename syncNodeOnSnapshotPre to syncNodeOnSnapshotPrep --- source/libs/sync/inc/syncSnapshot.h | 2 +- source/libs/sync/src/syncSnapshot.c | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index 5277e7818f..063b4f51f5 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -24,7 +24,7 @@ extern "C" { #define SYNC_SNAPSHOT_SEQ_INVALID -2 #define SYNC_SNAPSHOT_SEQ_FORCE_CLOSE -3 -#define SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT -1 +#define SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT -1 #define SYNC_SNAPSHOT_SEQ_BEGIN 0 #define SYNC_SNAPSHOT_SEQ_END 0x7FFFFFFF diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 880c76e4dd..e61bcc9ffc 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -112,7 +112,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; pMsg->lastConfig = pSender->lastConfig; pMsg->startTime = pSender->startTime; - pMsg->seq = SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT; + pMsg->seq = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT; // event log syncLogSendSyncSnapshotSend(pSender->pSyncNode, pMsg, "snapshot sender start"); @@ -379,7 +379,7 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p } pReceiver->start = true; - pReceiver->ack = SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT; + pReceiver->ack = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT; pReceiver->term = pReceiver->pSyncNode->raftStore.currentTerm; pReceiver->fromId = pPreMsg->srcId; pReceiver->startTime = 
pPreMsg->startTime; @@ -527,7 +527,7 @@ SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths) { return snapStart; } -static int32_t syncNodeOnSnapshotPre(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { +static int32_t syncNodeOnSnapshotPrep(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { SSyncSnapshotReceiver *pReceiver = pSyncNode->pNewNodeReceiver; int64_t timeNow = taosGetTimestampMs(); int32_t code = 0; @@ -565,7 +565,7 @@ _START_RECEIVER: } else { // waiting for clock match while (timeNow < pMsg->startTime) { - sRInfo(pReceiver, "snapshot receiver pre waitting for true time, now:%" PRId64 ", stime:%" PRId64, timeNow, + sRInfo(pReceiver, "snapshot receiver pre waitting for true time, now:%" PRId64 ", startTime:%" PRId64, timeNow, pMsg->startTime); taosMsleep(10); timeNow = taosGetTimestampMs(); @@ -765,7 +765,7 @@ static int32_t syncNodeOnSnapshotEnd(SSyncNode *pSyncNode, SyncSnapshotSend *pMs // receiver on message // -// condition 1, recv SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT +// condition 1, recv SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT // if receiver already start // if sender.start-time > receiver.start-time, restart receiver(reply snapshot start) // if sender.start-time = receiver.start-time, maybe duplicate msg @@ -809,9 +809,9 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { int32_t code = 0; if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) { if (pMsg->term == pSyncNode->raftStore.currentTerm) { - if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT) { + if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq pre-snapshot"); - code = syncNodeOnSnapshotPre(pSyncNode, pMsg); + code = syncNodeOnSnapshotPrep(pSyncNode, pMsg); } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_BEGIN) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq begin"); code = syncNodeOnSnapshotBegin(pSyncNode, pMsg); @@ -848,7 +848,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { return code; } -static int32_t syncNodeOnSnapshotPreRsp(SSyncNode *pSyncNode, SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { +static int32_t syncNodeOnSnapshotPrepRsp(SSyncNode *pSyncNode, SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { SSnapshot snapshot = {0}; pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot); @@ -945,8 +945,8 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { if (pMsg->startTime != pSender->startTime) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "snapshot sender and receiver time not match"); - sSError(pSender, "sender:%" PRId64 " receiver:%" PRId64 " time not match, code:0x%x", pMsg->startTime, - pSender->startTime, pMsg->code); + sSError(pSender, "sender:%" PRId64 " receiver:%" PRId64 " time not match, error:%s 0x%x", pMsg->startTime, + pSender->startTime, tstrerror(pMsg->code), pMsg->code); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; goto _ERROR; } @@ -961,15 +961,15 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { if (pMsg->code != 0) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "receive error code"); - sSError(pSender, "snapshot sender receive error code:0x%x and stop sender", pMsg->code); + sSError(pSender, "snapshot sender receive error:%s 0x%x and stop sender", tstrerror(pMsg->code), pMsg->code); terrno = pMsg->code; goto _ERROR; } // prepare , send begin msg - if (pMsg->ack == SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT) { + if (pMsg->ack == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "process seq 
pre-snapshot"); - return syncNodeOnSnapshotPreRsp(pSyncNode, pSender, pMsg); + return syncNodeOnSnapshotPrepRsp(pSyncNode, pSender, pMsg); } if (pSender->pReader == NULL || pSender->finish) { From 40b745932ce8b17f1381ae12231c5391b16b1b78 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 2 Feb 2023 19:39:08 +0800 Subject: [PATCH 42/45] fix: scalar expr memory leak issue --- source/libs/executor/src/timesliceoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 6561e810bb..bca87079ec 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -634,6 +634,7 @@ void destroyTimeSliceOperatorInfo(void* param) { taosMemoryFree(pKey->end.val); } taosArrayDestroy(pInfo->pLinearInfo); + cleanupExprSupp(&pInfo->scalarSup); taosMemoryFree(pInfo->pFillColInfo); taosMemoryFreeClear(param); From 561ee87e9dd9a5ef2f1a6e9e3edba00b624b01d7 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 3 Feb 2023 09:22:00 +0800 Subject: [PATCH 43/45] enh: change the level of sync probe logging msg to trace --- source/libs/sync/src/syncPipeline.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 6cc517fda0..bb3bb0d6a4 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -830,7 +830,7 @@ int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy pMgr->endIndex = index + 1; SSyncLogBuffer* pBuf = pNode->pLogBuf; - sInfo("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". mgr (rs:%d): [%" PRId64 + sTrace("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". 
mgr (rs:%d): [%" PRId64
         " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
         pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex,
         pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);

From d83a94bb195bcbd6e0c68ebcbb65cb511b7672f6 Mon Sep 17 00:00:00 2001
From: Liu Jicong
Date: Fri, 3 Feb 2023 09:48:50 +0800
Subject: [PATCH 44/45] fix: check dup rebalance

---
 source/dnode/mnode/impl/src/mndConsumer.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index 3bbf4a4279..1aa2fa997b 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -836,10 +836,13 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
     char *addedTopic = strdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0));

     // not exist in current topic
+    bool existing = false;
 #if 1
     for (int32_t i = 0; i < taosArrayGetSize(pOldConsumer->currentTopics); i++) {
       char *topic = taosArrayGetP(pOldConsumer->currentTopics, i);
-      ASSERT(strcmp(topic, addedTopic) != 0);
+      if (strcmp(topic, addedTopic) == 0) {
+        existing = true;
+      }
     }
 #endif

@@ -854,8 +857,10 @@
     }

     // add to current topic
-    taosArrayPush(pOldConsumer->currentTopics, &addedTopic);
-    taosArraySort(pOldConsumer->currentTopics, taosArrayCompareString);
+    if (!existing) {
+      taosArrayPush(pOldConsumer->currentTopics, &addedTopic);
+      taosArraySort(pOldConsumer->currentTopics, taosArrayCompareString);
+    }

     // set status
     if (taosArrayGetSize(pOldConsumer->rebNewTopics) == 0 && taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0) {

From 49631f6e9647617ef13c230d7e3f31d9b7c27a18 Mon Sep 17 00:00:00 2001
From: Hongze Cheng
Date: Fri, 3 Feb 2023 14:34:55 +0800
Subject: [PATCH 45/45] fix: snapshot coredump caused by typo

---
 source/dnode/vnode/src/tsdb/tsdbSnapshot.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index 98c9c0fdda..e8181f922f 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -1205,7 +1205,7 @@ static int32_t tsdbSnapWriteTableRow(STsdbSnapWriter* pWriter, TSDBROW* pRow) {
       TSDB_CHECK_CODE(code, lino, _exit);
     }

-    tMapDataPutItem(&pWriter->pDIter->dIter.mDataBlk, &dataBlk, tPutDataBlk);
+    tMapDataPutItem(&pWriter->mDataBlk, &dataBlk, tPutDataBlk);
     pWriter->pDIter->dIter.iDataBlk++;
   } else {
     code = tsdbReadDataBlockEx(pWriter->pDataFReader, &dataBlk, &pWriter->pDIter->dIter.bData);
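
The one-line change above is the whole fix: tsdbSnapWriteTableRow copied a
data block descriptor out of the snapshot reader's iterator and then appended
it back into that same iterator-owned map (pWriter->pDIter->dIter.mDataBlk)
instead of the writer's output map. A condensed sketch of the corrected flow,
using only identifiers visible in the hunk (control flow abridged; the crash
mechanism, appending into the map the iterator is still walking, is inferred
rather than stated by the commit):

    // carry one existing block descriptor over to the snapshot being written
    tMapDataPutItem(&pWriter->mDataBlk, &dataBlk, tPutDataBlk);  // writer-owned map
    pWriter->pDIter->dIter.iDataBlk++;  // advance only the reader-side cursor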