From 4dca6df296a3e3882f5cbd7a1e04a867377dc900 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 9 Jan 2023 11:50:34 +0800 Subject: [PATCH 01/57] enh: improve the error message for cases that the percentile function does not support --- source/libs/parser/src/parTranslater.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 05d49bb027..69a39b956f 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1550,11 +1550,14 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p // select percentile() without from clause is also valid if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) || (TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType && - TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType))) || - NULL != pSelect->pPartitionByList) { + TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, "%s is only supported in single table query", pFunc->functionName); } + if (NULL != pSelect->pPartitionByList) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, + "%s function is not supported in fill query", pFunc->functionName); + } return TSDB_CODE_SUCCESS; } From 7466b5042d2249f70d4429605e4c068e81c5fac5 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 12 Jan 2023 13:50:31 +0800 Subject: [PATCH 02/57] fix: stmt memory leak --- source/libs/parser/src/parInsertStmt.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 4ed72e6c14..f5c362edf9 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -47,7 +47,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen) { STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock; SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen}; - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SParsedDataColInfo* tags = (SParsedDataColInfo*)boundTags; if (NULL == tags) { return TSDB_CODE_APP_ERROR; } @@ -137,7 +137,8 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch } SVCreateTbReq tbReq = {0}; - insBuildCreateTbReq(&tbReq, tName, pTag, suid, sTableName, tagName, pDataBlock->pTableMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + insBuildCreateTbReq(&tbReq, tName, pTag, suid, sTableName, tagName, pDataBlock->pTableMeta->tableInfo.numOfTags, + TSDB_DEFAULT_TABLE_TTL); code = insBuildCreateTbMsg(pDataBlock, &tbReq); tdDestroySVCreateTbReq(&tbReq); @@ -460,9 +461,7 @@ void qFreeStmtDataBlock(void* pDataBlock) { return; } - taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pTableMeta); - taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pData); - taosMemoryFreeClear(pDataBlock); + insDestroyDataBlock((STableDataBlocks*)pDataBlock); } void qDestroyStmtDataBlock(void* pBlock) { From 0e061fb53cb44117f5dcb5342b45f540625c7b58 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 12 Jan 2023 13:57:47 +0800 Subject: [PATCH 03/57] fix: stmt memory leak --- source/libs/parser/src/parTranslater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c 
b/source/libs/parser/src/parTranslater.c index 69a39b956f..9991c2c6ae 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1556,7 +1556,7 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p } if (NULL != pSelect->pPartitionByList) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s function is not supported in fill query", pFunc->functionName); + "%s function is not supported in partition query", pFunc->functionName); } return TSDB_CODE_SUCCESS; } From 4359562e9cde38df77ce722abc8c16794ea29913 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 12 Jan 2023 15:01:19 +0800 Subject: [PATCH 04/57] fix: stmt memory leak --- source/client/src/clientStmt.c | 15 ++++++++------- source/libs/parser/src/parInsertStmt.c | 4 +++- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 82ea9e0d8f..c040d389cd 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -152,9 +152,10 @@ int32_t stmtRestoreQueryFields(STscStmt* pStmt) { return TSDB_CODE_SUCCESS; } -int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName, bool autoCreateTbl) { +int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName, + bool autoCreateTbl) { STscStmt* pStmt = (STscStmt*)stmt; - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; tNameExtractFullName(tbName, tbFName); memcpy(&pStmt->bInfo.sname, tbName, sizeof(*tbName)); @@ -300,7 +301,7 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) { continue; } - if (STMT_TYPE_MULTI_INSERT == pStmt->sql.type) { + if (pBlocks->cloned) { qFreeStmtDataBlock(pBlocks); } else { qDestroyStmtDataBlock(pBlocks); @@ -776,9 +777,9 @@ int stmtAddBatch(TAOS_STMT* stmt) { int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) { tscDebug("stmt start to update tbUid, blockNum: %d", pRsp->nBlocks); - int32_t code = 0; - int32_t finalCode = 0; - size_t keyLen = 0; + int32_t code = 0; + int32_t finalCode = 0; + size_t keyLen = 0; STableDataBlocks** pIter = taosHashIterate(pStmt->exec.pBlockHash, NULL); while (pIter) { STableDataBlocks* pBlock = *pIter; @@ -848,7 +849,7 @@ int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) { pMeta->uid = pTableMeta->uid; pStmt->bInfo.tbUid = pTableMeta->uid; - taosMemoryFree(pTableMeta); + taosMemoryFree(pTableMeta); } pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter); diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index f5c362edf9..2466e784c1 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -461,7 +461,9 @@ void qFreeStmtDataBlock(void* pDataBlock) { return; } - insDestroyDataBlock((STableDataBlocks*)pDataBlock); + taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pTableMeta); + taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pData); + taosMemoryFreeClear(pDataBlock); } void qDestroyStmtDataBlock(void* pBlock) { From 4c8a2caa1238afeb57e49806efac6d82c6bf56e8 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 12 Jan 2023 17:24:04 +0800 Subject: [PATCH 05/57] fix: stmt memory leak --- include/libs/parser/parser.h | 4 +++- source/client/src/clientStmt.c | 6 +----- source/libs/parser/src/parInsertStmt.c | 13 +++++++++++++ 3 files changed, 17 insertions(+), 6 deletions(-) 
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 9be79a539f..e6240f64b5 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -90,6 +90,7 @@ int32_t qCloneStmtDataBlock(void** pDst, void* pSrc); void qFreeStmtDataBlock(void* pDataBlock); int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgId); void qDestroyStmtDataBlock(void* pBlock); +void qDestroyStmtDataBlockExt(void* pBlock); STableMeta* qGetTableMetaInDataBlock(void* pDataBlock); int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx); @@ -108,7 +109,8 @@ int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* void* smlInitHandle(SQuery* pQuery); void smlDestroyHandle(void* pHandle); int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta, - char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen); + char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, + int16_t msgBufLen); int32_t smlBuildOutput(void* handle, SHashObj* pVgHash); int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray); diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index c040d389cd..8a677d7a5e 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -301,11 +301,7 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) { continue; } - if (pBlocks->cloned) { - qFreeStmtDataBlock(pBlocks); - } else { - qDestroyStmtDataBlock(pBlocks); - } + qDestroyStmtDataBlockExt(pBlocks); taosHashRemove(pStmt->exec.pBlockHash, key, keyLen); pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter); diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 2466e784c1..8a23cd32b4 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -476,3 +476,16 @@ void qDestroyStmtDataBlock(void* pBlock) { pDataBlock->cloned = false; insDestroyDataBlock(pDataBlock); } + +void qDestroyStmtDataBlockExt(void* pBlock) { + if (pBlock == NULL) { + return; + } + + STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock; + if (pDataBlock->cloned) { + qFreeStmtDataBlock(pBlock); + } else { + qDestroyStmtDataBlock(pBlock); + } +} From 9ba8d4f935204480a562cd736649089d20f48e05 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 16 Jan 2023 09:53:43 +0800 Subject: [PATCH 06/57] fix: rollback --- include/libs/parser/parser.h | 1 - source/client/src/clientStmt.c | 7 ++++++- source/libs/parser/src/parInsertStmt.c | 13 ------------- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index e6240f64b5..52edd9708c 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -90,7 +90,6 @@ int32_t qCloneStmtDataBlock(void** pDst, void* pSrc); void qFreeStmtDataBlock(void* pDataBlock); int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgId); void qDestroyStmtDataBlock(void* pBlock); -void qDestroyStmtDataBlockExt(void* pBlock); STableMeta* qGetTableMetaInDataBlock(void* pDataBlock); int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx); diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 8a677d7a5e..3be1b58bad 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c 
@@ -301,7 +301,12 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) { continue; } - qDestroyStmtDataBlockExt(pBlocks); + if (STMT_TYPE_MULTI_INSERT == pStmt->sql.type) { + qFreeStmtDataBlock(pBlocks); + } else { + qDestroyStmtDataBlock(pBlocks); + } + taosHashRemove(pStmt->exec.pBlockHash, key, keyLen); pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter); diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index 8a23cd32b4..2466e784c1 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -476,16 +476,3 @@ void qDestroyStmtDataBlock(void* pBlock) { pDataBlock->cloned = false; insDestroyDataBlock(pDataBlock); } - -void qDestroyStmtDataBlockExt(void* pBlock) { - if (pBlock == NULL) { - return; - } - - STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock; - if (pDataBlock->cloned) { - qFreeStmtDataBlock(pBlock); - } else { - qDestroyStmtDataBlock(pBlock); - } -} From d9b22b32991c072fe81e5ffd2ea4e3e8f5c3a087 Mon Sep 17 00:00:00 2001 From: slzhou Date: Wed, 18 Jan 2023 15:13:16 +0800 Subject: [PATCH 07/57] fix: add node stopped and get/acquire ctx return node stopped --- source/libs/qworker/inc/qwInt.h | 2 ++ source/libs/qworker/src/qwUtil.c | 21 +++++++++++++++++---- source/libs/qworker/src/qworker.c | 3 +++ 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index aa1ce80903..bde05d4116 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -194,6 +194,8 @@ typedef struct SQWorker { SMsgCb msgCb; SQWStat stat; int32_t *destroyed; + + int8_t nodeStopped; } SQWorker; typedef struct SQWorkerMgmt { diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index fdd2775daa..7ee7c50c96 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -213,9 +213,15 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { QW_SET_QTID(id, qId, tId, eId); *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id)); + int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped); if (NULL == (*ctx)) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + if (!nodeStopped) { + QW_TASK_DLOG_E("task ctx not exist, may be dropped"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } else { + QW_TASK_DLOG_E("node stopped"); + QW_ERR_RET(TSDB_CODE_VND_STOPPED); + } } return TSDB_CODE_SUCCESS; @@ -226,9 +232,16 @@ int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { QW_SET_QTID(id, qId, tId, eId); *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); + int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped); + if (NULL == (*ctx)) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + if (!nodeStopped) { + QW_TASK_DLOG_E("task ctx not exist, may be dropped"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } else { + QW_TASK_DLOG_E("node stopped"); + QW_ERR_RET(TSDB_CODE_VND_STOPPED); + } } return TSDB_CODE_SUCCESS; diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index e38361d87f..fedaa96ed9 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -1188,6 +1188,9 @@ void qWorkerStopAllTasks(void *qWorkerMgmt) { uint64_t qId, tId, sId; int32_t eId; int64_t rId = 0; + + atomic_store_8(&mgmt->nodeStopped, 1); + void *pIter = taosHashIterate(mgmt->ctxHash, NULL); while (pIter) 
{ SQWTaskCtx *ctx = (SQWTaskCtx *)pIter; From fae4f2c4ed96427cfefa25c6609455ed804d1e46 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 20 Jan 2023 18:08:34 +0800 Subject: [PATCH 08/57] refactor: do some internal refactor. --- source/libs/executor/inc/executorimpl.h | 3 ++- source/libs/executor/src/exchangeoperator.c | 30 ++++++--------------- source/libs/executor/src/executil.c | 4 +++ source/libs/executor/src/projectoperator.c | 29 +++----------------- source/libs/executor/src/scanoperator.c | 16 +++++------ source/libs/executor/src/sortoperator.c | 28 +++++-------------- 6 files changed, 32 insertions(+), 78 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index c68f7c4697..4ae178d508 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -704,9 +704,10 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG SDiskbasedBuf* pBuf); bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo); +bool hasSlimitOffsetInfo(SLimitInfo* pLimitInfo); void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo); void resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo); -bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator); +bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo); void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput); diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 037b33dc9f..08b7d371e2 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -707,6 +707,8 @@ int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) { } int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + if (pLimitInfo->remainGroupOffset > 0) { if (pLimitInfo->currentGroupId == 0) { // it is the first group pLimitInfo->currentGroupId = pBlock->info.id.groupId; @@ -750,36 +752,20 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa // set current group id pLimitInfo->currentGroupId = pBlock->info.id.groupId; - if (pLimitInfo->remainOffset >= pBlock->info.rows) { - pLimitInfo->remainOffset -= pBlock->info.rows; - blockDataCleanup(pBlock); + bool limitReached = applyLimitOffset(pLimitInfo, pBlock, pTaskInfo); + if (pBlock->info.rows == 0) { return PROJECT_RETRIEVE_CONTINUE; - } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) { - blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset); - pLimitInfo->remainOffset = 0; - } - - // check for the limitation in each group - if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) { - int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows); - blockDataKeepFirstNRows(pBlock, keepRows); - if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) { + } else { + if (limitReached && (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { setOperatorCompleted(pOperator); - } else { - // current group limitation is reached, and future 
blocks of this group need to be discarded. - if (pBlock->info.rows == 0) { - return PROJECT_RETRIEVE_CONTINUE; - } + return PROJECT_RETRIEVE_DONE; } - - return PROJECT_RETRIEVE_DONE; } // todo optimize performance // If there are slimit/soffset value exists, multi-round result can not be packed into one group, since the // they may not belong to the same group the limit/offset value is not valid in this case. - if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || pLimitInfo->slimit.offset != -1 || - pLimitInfo->slimit.limit != -1) { + if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || hasSlimitOffsetInfo(pLimitInfo)) { return PROJECT_RETRIEVE_DONE; } else { // not full enough, continue to accumulate the output data in the buffer. return PROJECT_RETRIEVE_CONTINUE; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 757324a773..92d52fbb0a 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1749,6 +1749,10 @@ bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) { pLimitInfo->slimit.offset != -1); } +bool hasSlimitOffsetInfo(SLimitInfo* pLimitInfo) { + return (pLimitInfo->slimit.limit != -1 || pLimitInfo->slimit.offset != -1); +} + void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo) { SLimit limit = {.limit = getLimit(pLimit), .offset = getOffset(pLimit)}; SLimit slimit = {.limit = getLimit(pSLimit), .offset = getOffset(pSLimit)}; diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 3e3610827b..b1dc217bf5 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -185,36 +185,15 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS SOperatorInfo* pOperator) { // set current group id pLimitInfo->currentGroupId = groupId; - - if (pLimitInfo->remainOffset >= pBlock->info.rows) { - pLimitInfo->remainOffset -= pBlock->info.rows; - blockDataCleanup(pBlock); + bool limitReached = applyLimitOffset(pLimitInfo, pBlock, pOperator->pTaskInfo); + if (pBlock->info.rows == 0) { return PROJECT_RETRIEVE_CONTINUE; - } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) { - blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset); - pLimitInfo->remainOffset = 0; - } - - // check for the limitation in each group - if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) { - int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows); - blockDataKeepFirstNRows(pBlock, keepRows); - - // TODO: optimize it later when partition by + limit - // all retrieved requirement has been fulfilled, let's finish this - if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) || - (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { + } else { + if (limitReached && (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { setOperatorCompleted(pOperator); - } else { - // Even current group is done, there may be many vgroups remain existed, and we need to continue to retrieve data - // from next group. 
So let's continue this retrieve process - if (keepRows == 0) { - return PROJECT_RETRIEVE_CONTINUE; - } } } - pLimitInfo->numOfOutputRows += pBlock->info.rows; return PROJECT_RETRIEVE_DONE; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 813763fffa..2813ef3505 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -256,12 +256,11 @@ static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlo } } -// todo handle the slimit info -bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator) { +bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { SLimit* pLimit = &pLimitInfo->limit; const char* id = GET_TASKID(pTaskInfo); - if (pLimit->offset > 0 && pLimitInfo->remainOffset > 0) { + if (pLimitInfo->remainOffset > 0) { if (pLimitInfo->remainOffset >= pBlock->info.rows) { pLimitInfo->remainOffset -= pBlock->info.rows; blockDataEmpty(pBlock); @@ -276,12 +275,14 @@ bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo if (pLimit->limit != -1 && pLimit->limit <= (pLimitInfo->numOfOutputRows + pBlock->info.rows)) { // limit the output rows int32_t keep = (int32_t)(pLimit->limit - pLimitInfo->numOfOutputRows); - blockDataKeepFirstNRows(pBlock, keep); + + pLimitInfo->numOfOutputRows += pBlock->info.rows; qDebug("output limit %" PRId64 " has reached, %s", pLimit->limit, id); return true; } + pLimitInfo->numOfOutputRows += pBlock->info.rows; return false; } @@ -393,13 +394,12 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca } } - bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo, pOperator); + bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo); if (limitReached) { // set operator flag is done setOperatorCompleted(pOperator); } pCost->totalRows += pBlock->info.rows; - pTableScanInfo->limitInfo.numOfOutputRows = pCost->totalRows; return TSDB_CODE_SUCCESS; } @@ -2714,9 +2714,7 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* } } - applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo, pOperator); - pInfo->limitInfo.numOfOutputRows += pResBlock->info.rows; - + applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo); qDebug("%s get sorted row block, rows:%d, limit:%"PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows, pInfo->limitInfo.numOfOutputRows); diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 97b4fd9dc4..e91d41897d 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -222,6 +222,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { T_LONG_JMP(pTaskInfo->env, code); } + // multi-group case not handle here SSDataBlock* pBlock = NULL; while (1) { pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, @@ -236,28 +237,13 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { continue; } - // todo add the limit/offset info - if (pInfo->limitInfo.remainOffset > 0) { - if (pInfo->limitInfo.remainOffset >= blockDataGetNumOfRows(pBlock)) { - pInfo->limitInfo.remainOffset -= pBlock->info.rows; - continue; - } - - blockDataTrimFirstNRows(pBlock, pInfo->limitInfo.remainOffset); - pInfo->limitInfo.remainOffset = 0; + bool limitReached = applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo); + 
if (limitReached) { + resetLimitInfoForNextGroup(&pInfo->limitInfo); } - if (pInfo->limitInfo.limit.limit > 0 && - pInfo->limitInfo.limit.limit <= pInfo->limitInfo.numOfOutputRows + blockDataGetNumOfRows(pBlock)) { - int32_t remain = pInfo->limitInfo.limit.limit - pInfo->limitInfo.numOfOutputRows; - blockDataKeepFirstNRows(pBlock, remain); - } - - size_t numOfRows = blockDataGetNumOfRows(pBlock); - pInfo->limitInfo.numOfOutputRows += numOfRows; - pOperator->resultInfo.totalRows += numOfRows; - - if (numOfRows > 0) { + pOperator->resultInfo.totalRows += pBlock->info.rows; + if (pBlock->info.rows > 0) { break; } } @@ -680,7 +666,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData break; } - bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator); + bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo); if (limitReached) { resetLimitInfoForNextGroup(&pInfo->limitInfo); } From 7803104b7e31ab9283b29aa33ec391804b721123 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 20 Jan 2023 22:50:35 +0800 Subject: [PATCH 09/57] fix(query): do some internal refactor, and identify a bug. --- source/common/src/tdatablock.c | 1 + source/libs/executor/src/projectoperator.c | 11 ++++++++++- source/libs/executor/src/sortoperator.c | 2 ++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 43f272d599..f41eb1adaf 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1431,6 +1431,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) { pBlock->info.rows = 0; pBlock->info.capacity = 0; pBlock->info.rowSize = 0; + pBlock->info.id = pDataBlock->info.id; size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index b1dc217bf5..d641810cee 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -90,7 +90,16 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys pInfo->binfo.pRes = pResBlock; pInfo->pFinalRes = createOneDataBlock(pResBlock, false); - pInfo->mergeDataBlocks = (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) ? false : pProjPhyNode->mergeDataBlock; + + if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) { + pInfo->mergeDataBlocks = false; + } else { + if (!pProjPhyNode->ignoreGroupId) { + pInfo->mergeDataBlocks = false; + } else { + pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock; + } + } int32_t numOfRows = 4096; size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index e91d41897d..6d3da3e111 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -237,6 +237,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { continue; } + // there are bugs? bool limitReached = applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo); if (limitReached) { resetLimitInfoForNextGroup(&pInfo->limitInfo); @@ -666,6 +667,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData break; } + // todo fix it: we need to decide whether this block is belonged to previous group or not . 
bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo); if (limitReached) { resetLimitInfoForNextGroup(&pInfo->limitInfo); From a898be4f7d1bc5fb9c528353d4c2ddf039c86b1e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 20 Jan 2023 23:38:31 +0800 Subject: [PATCH 10/57] fix(query): set correct total rsp rows. --- source/libs/executor/src/exchangeoperator.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 08b7d371e2..e5089ab4a9 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -218,10 +218,7 @@ static SSDataBlock* loadRemoteData(SOperatorInfo* pOperator) { if (status == PROJECT_RETRIEVE_CONTINUE) { continue; } else if (status == PROJECT_RETRIEVE_DONE) { - size_t rows = pBlock->info.rows; - pExchangeInfo->limitInfo.numOfOutputRows += rows; - - if (rows == 0) { + if (pBlock->info.rows == 0) { setOperatorCompleted(pOperator); return NULL; } else { From ec1592390af6c346c0d56e750f12fcdc5cd6f247 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Sat, 28 Jan 2023 10:35:21 +0800 Subject: [PATCH 11/57] fix: limit push down error --- source/libs/planner/src/planSpliter.c | 32 +++++++++++++++++---------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index a7eac2c853..d85e4ca10d 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -348,7 +348,8 @@ static bool stbSplIsPartTableAgg(SAggLogicNode* pAgg) { return false; } if (NULL != pAgg->pGroupKeys) { - return stbSplHasPartTbname(pAgg->pGroupKeys) && stbSplNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)); + return stbSplHasPartTbname(pAgg->pGroupKeys) && + stbSplNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)); } return stbSplHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))); } @@ -1025,21 +1026,29 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) return code; } -static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) { - SLogicNode* pSplitNode = pInfo->pSplitNode; +static int32_t stbSplGetSplitNodeForScan(SStableSplitInfo* pInfo, SLogicNode** pSplitNode) { + *pSplitNode = pInfo->pSplitNode; if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) && NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) { - pSplitNode = pInfo->pSplitNode->pParent; + *pSplitNode = pInfo->pSplitNode->pParent; if (NULL != pInfo->pSplitNode->pLimit) { - pSplitNode->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit); - if (NULL == pSplitNode->pLimit) { + (*pSplitNode)->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit); + if (NULL == (*pSplitNode)->pLimit) { return TSDB_CODE_OUT_OF_MEMORY; } ((SLimitNode*)pInfo->pSplitNode->pLimit)->limit += ((SLimitNode*)pInfo->pSplitNode->pLimit)->offset; ((SLimitNode*)pInfo->pSplitNode->pLimit)->offset = 0; } } - int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE); + return TSDB_CODE_SUCCESS; +} + +static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + SLogicNode* pSplitNode = NULL; + int32_t code = stbSplGetSplitNodeForScan(pInfo, &pSplitNode); + if (TSDB_CODE_SUCCESS == code) 
{ + code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE); + } if (TSDB_CODE_SUCCESS == code) { code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, (SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT)); @@ -1049,12 +1058,11 @@ static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSp } static int32_t stbSplSplitScanNodeWithPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) { - SLogicNode* pSplitNode = pInfo->pSplitNode; - if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) && - NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) { - pSplitNode = pInfo->pSplitNode->pParent; + SLogicNode* pSplitNode = NULL; + int32_t code = stbSplGetSplitNodeForScan(pInfo, &pSplitNode); + if (TSDB_CODE_SUCCESS == code) { + code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pSplitNode, NULL, pSplitNode, true); } - int32_t code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pSplitNode, NULL, pSplitNode, true); if (TSDB_CODE_SUCCESS == code) { code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, (SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT)); From beb3de8530ddd38e29efdc094badaf46158e360b Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Sat, 28 Jan 2023 10:53:43 +0800 Subject: [PATCH 12/57] fix: limit push down error --- source/libs/planner/src/planSpliter.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index d85e4ca10d..4c8b996a75 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -560,6 +560,8 @@ static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla if (NULL == pMerge->node.pLimit) { code = TSDB_CODE_OUT_OF_MEMORY; } + ((SLimitNode*)pSplitNode->pLimit)->limit += ((SLimitNode*)pSplitNode->pLimit)->offset; + ((SLimitNode*)pSplitNode->pLimit)->offset = 0; } if (TSDB_CODE_SUCCESS == code) { if (NULL == pSubplan) { From 8258c68b6dd76e7c14b50ceb3f4fde4183ecb79f Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Sat, 28 Jan 2023 11:11:37 +0800 Subject: [PATCH 13/57] fix: subquery output ignores group id --- source/libs/planner/src/planLogicCreater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 084d99cae5..bd1823a770 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1016,7 +1016,7 @@ static int32_t createProjectLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSel TSWAP(pProject->node.pLimit, pSelect->pLimit); TSWAP(pProject->node.pSlimit, pSelect->pSlimit); - pProject->ignoreGroupId = (NULL == pSelect->pPartitionByList); + pProject->ignoreGroupId = pSelect->isSubquery ? true : (NULL == pSelect->pPartitionByList); pProject->node.groupAction = (!pSelect->isSubquery && pCxt->pPlanCxt->streamQuery) ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR; pProject->node.requireDataOrder = DATA_ORDER_LEVEL_NONE; From 1741e98a4e8fd5353aa541ca9a06def8546f5349 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 30 Jan 2023 10:13:54 +0800 Subject: [PATCH 14/57] fix(query): reset the del file index when beginning last file check. 
--- source/dnode/vnode/src/tsdb/tsdbRead.c | 44 ++++++++++++++++---------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 85282a2340..a77eb8298e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -215,6 +215,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader); static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader); static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo); static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter); +static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order); static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); } @@ -2526,6 +2527,14 @@ _end: void setComposedBlockFlag(STsdbReader* pReader, bool composed) { pReader->status.composedDataBlock = composed; } +int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order) { + if (pDelSkyline == NULL) { + return 0; + } + + return ASCENDING_TRAVERSE(order) ? 0 : taosArrayGetSize(pDelSkyline) - 1; +} + int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData, STbData* piMemTbData) { if (pBlockScanInfo->delSkyline != NULL) { @@ -2543,7 +2552,6 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* if (pIdx != NULL) { code = tsdbReadDelData(pReader->pDelFReader, pIdx, pDelData); } - if (code != TSDB_CODE_SUCCESS) { goto _err; } @@ -2572,11 +2580,13 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* } taosArrayDestroy(pDelData); - pBlockScanInfo->iter.index = - ASCENDING_TRAVERSE(pReader->order) ? 
0 : taosArrayGetSize(pBlockScanInfo->delSkyline) - 1; - pBlockScanInfo->iiter.index = pBlockScanInfo->iter.index; - pBlockScanInfo->fileDelIndex = pBlockScanInfo->iter.index; - pBlockScanInfo->lastBlockDelIndex = pBlockScanInfo->iter.index; + int32_t index = getInitialDelIndex(pBlockScanInfo->delSkyline, pReader->order); + + pBlockScanInfo->iter.index = index; + pBlockScanInfo->iiter.index = index; + pBlockScanInfo->fileDelIndex = index; + pBlockScanInfo->lastBlockDelIndex = index; + return code; _err: @@ -2676,13 +2686,17 @@ static int32_t uidComparFunc(const void* p1, const void* p2) { } } -static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) { +static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) { int32_t index = 0; int32_t total = taosHashGetSize(pStatus->pTableMap); void* p = taosHashIterate(pStatus->pTableMap, NULL); while (p != NULL) { STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; + + // reset the last del file index + pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order); + pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid; p = taosHashIterate(pStatus->pTableMap, p); } @@ -2690,7 +2704,9 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc); } -static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) { +static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) { + SReaderStatus* pStatus = &pReader->status; + int32_t total = taosHashGetSize(pStatus->pTableMap); if (total == 0) { return TSDB_CODE_SUCCESS; @@ -2703,7 +2719,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt return TSDB_CODE_OUT_OF_MEMORY; } - extractOrderedTableUidList(pOrderCheckInfo, pStatus); + extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); uint64_t uid = pOrderCheckInfo->tableUidList[0]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); } else { @@ -2720,7 +2736,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt } pOrderCheckInfo->tableUidList = p; - extractOrderedTableUidList(pOrderCheckInfo, pStatus); + extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); uid = pOrderCheckInfo->tableUidList[0]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); @@ -2740,11 +2756,7 @@ static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex]; pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - if (pStatus->pTableIter == NULL) { - return false; - } - - return true; + return (pStatus->pTableIter != NULL); } static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { @@ -2752,7 +2764,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader; SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo; - int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pStatus); + int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pReader); if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) { return code; } From 1ce1f8143cd638ec9cab541aba160c2e2ddfaa73 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> 
Date: Mon, 30 Jan 2023 10:45:12 +0800 Subject: [PATCH 15/57] fix: process data with incorrect timestamp --- source/libs/executor/src/timewindowoperator.c | 14 ++- tests/script/tsim/stream/basic1.sim | 53 +++++++++++ tests/script/tsim/stream/triggerInterval0.sim | 88 +++++++++++++++---- 3 files changed, 135 insertions(+), 20 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index d78e9c4edf..d320ef6e9e 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -2477,7 +2477,19 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p pInfo->delKey = key; } int32_t prevEndPos = (forwardRows - 1) * step + startPos; - ASSERT(pSDataBlock->info.window.skey > 0 && pSDataBlock->info.window.ekey > 0); + if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) { + qError("table uid %" PRIu64 " data block timestamp range may not be calculated! minKey %" PRId64 + ",maxKey %" PRId64, + pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey); + blockDataUpdateTsWindow(pSDataBlock, 0); + + // timestamp of the data is incorrect + if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) { + qError("table uid %" PRIu64 " data block timestamp is out of range! minKey %" PRId64 ",maxKey %" PRId64, + pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey); + } + } + if (IS_FINAL_OP(pInfo)) { startPos = getNextQualifiedFinalWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos); } else { diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim index 7bf10df637..c61c7667f8 100644 --- a/tests/script/tsim/stream/basic1.sim +++ b/tests/script/tsim/stream/basic1.sim @@ -834,4 +834,57 @@ endi print ====== test _wstart end +print insert into ts1 values(-1648791211000,1,2,3) + +sql create database test7 vgroups 1; +sql use test7; +sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int); +sql create table ts1 using st tags(1,1,1); +sql create stream streams7 trigger at_once into streamt7 as select _wstart, count(*) from ts1 interval(10s) ; + +sql insert into ts1 values(1648791211000,1,2,3); +sql_error insert into ts1 values(-1648791211000,1,2,3); + +loop18: + +sleep 200 +sql select * from streamt7; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 1 then + print =====rows=$rows + goto loop18 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop18 +endi + +sql_error insert into ts1 values(-1648791211001,1,2,3) (1648791211001,1,2,3); + +sql select _wstart, count(*) from ts1 interval(10s) ; + +print $data00 $data01 +print $data10 $data11 + +loop19: + +sleep 200 +sql select * from streamt7; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 1 then + print =====rows=$rows + goto loop19 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim index 7353f026bb..b522dcf035 100644 --- a/tests/script/tsim/stream/triggerInterval0.sim +++ b/tests/script/tsim/stream/triggerInterval0.sim @@ -29,69 +29,119 @@ sql insert into t1 values(1648791223001,2,2,3,1.1); sql insert into t1 values(1648791223002,2,2,3,1.1); sql insert into t1 values(1648791223003,2,2,3,1.1); sql insert into t1 
values(1648791223001,2,2,3,1.1); + +print step 0 + +$loop_count = 0 + +loop0: sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + sql select * from streamt; + if $rows != 1 then print ======$rows - return -1 + goto loop0 endi if $data01 != 1 then print ======$data01 - return -1 + goto loop0 endi sql insert into t1 values(1648791233001,2,2,3,1.1); + +print step 1 + +$loop_count = 0 + +loop1: sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + sql select * from streamt; if $rows != 2 then print ======$rows - return -1 + goto loop1 endi if $data01 != 1 then print ======$data01 - return -1 + goto loop1 endi if $data11 != 3 then print ======$data11 - return -1 + goto loop1 endi sql insert into t1 values(1648791223004,2,2,3,1.1); sql insert into t1 values(1648791223004,2,2,3,1.1); sql insert into t1 values(1648791223005,2,2,3,1.1); + +print step 2 + +$loop_count = 0 + +loop2: sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + sql select * from streamt; if $rows != 2 then print ======$rows - return -1 + goto loop2 endi + if $data01 != 1 then print ======$data01 - return -1 + goto loop2 endi if $data11 != 5 then print ======$data11 - return -1 + goto loop2 endi sql insert into t1 values(1648791233002,3,2,3,2.1); sql insert into t1 values(1648791213002,4,2,3,3.1) sql insert into t1 values(1648791213002,4,2,3,4.1); + +print step 3 + +$loop_count = 0 + +loop3: sleep 300 -sql select * from streamt; -if $rows != 2 then - print ======$rows - return -1 -endi -if $data01 != 2 then - print ======$data01 - return -1 -endi -if $data11 != 5 then - print ======$data11 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then return -1 endi +sql select * from streamt; +if $rows != 2 then + print ======$rows + goto loop3 +endi +if $data01 != 2 then + print ======$data01 + goto loop3 +endi +if $data11 != 5 then + print ======$data11 + goto loop3 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From ac9cdf6c58eaf5f4e4303a52fe855485a7f321eb Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 31 Jan 2023 00:46:58 +0800 Subject: [PATCH 16/57] fix: install script refine sentence for main (#19703) * fix: packaging/tools/install.sh for main * fix: refine sentence * fix: dpkg grep ii to detect deb package --- packaging/tools/install.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 4be179b04d..dfdbaa6fdd 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -210,8 +210,8 @@ function install_bin() { [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || : [ -x ${install_main_dir}/bin/${udfdName} ] && ${csudo}ln -s ${install_main_dir}/bin/${udfdName} ${bin_link_dir}/${udfdName} || : [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || : - [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || : - [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : + [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || : + [ -x ${install_main_dir}/bin/${benchmarkName} 
] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : [ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${xname} ${bin_link_dir}/${xname} || : [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : @@ -746,7 +746,7 @@ function is_version_compatible() { deb_erase() { confirm="" while [ "" == "${confirm}" ]; do - echo -e -n "${RED}Exist tdengine deb detected, do you want to remove it? [yes|no] ${NC}:" + echo -e -n "${RED}Existing TDengine deb is detected, do you want to remove it? [yes|no] ${NC}:" read confirm if [ "yes" == "$confirm" ]; then ${csudo}dpkg --remove tdengine ||: @@ -760,7 +760,7 @@ deb_erase() { rpm_erase() { confirm="" while [ "" == "${confirm}" ]; do - echo -e -n "${RED}Exist tdengine rpm detected, do you want to remove it? [yes|no] ${NC}:" + echo -e -n "${RED}Existing TDengine rpm is detected, do you want to remove it? [yes|no] ${NC}:" read confirm if [ "yes" == "$confirm" ]; then ${csudo}rpm -e tdengine ||: @@ -787,7 +787,7 @@ function updateProduct() { if echo $osinfo | grep -qwi "centos"; then rpm -q tdengine 2>&1 > /dev/null && rpm_erase tdengine ||: elif echo $osinfo | grep -qwi "ubuntu"; then - dpkg -l tdengine 2>&1 > /dev/null && deb_erase tdengine ||: + dpkg -l tdengine 2>&1 | grep ii > /dev/null && deb_erase tdengine ||: fi tar -zxf ${tarName} From 889019b253c7236e9d922689330e3ff04761e36f Mon Sep 17 00:00:00 2001 From: sunpeng Date: Tue, 31 Jan 2023 09:37:50 +0800 Subject: [PATCH 17/57] build: add python demo to ci (#19699) * build: add python demo to ci * build: fix python demo --- docs/examples/go/go.mod | 6 - docs/examples/python/conn_native_pandas.py | 7 +- docs/examples/python/conn_rest_pandas.py | 6 +- docs/examples/python/connect_rest_examples.py | 23 ++-- .../connection_usage_native_reference.py | 8 +- docs/examples/python/fast_write_example.py | 119 ++++++++++++------ docs/examples/python/kafka_example.py | 80 +++++++++--- docs/examples/python/mockdatasource.py | 19 ++- docs/examples/python/sql_writer.py | 24 +++- docs/examples/python/tmq_example.py | 95 +++++++------- tests/docs-examples-test/python.sh | 43 ++++++- 11 files changed, 298 insertions(+), 132 deletions(-) delete mode 100644 docs/examples/go/go.mod diff --git a/docs/examples/go/go.mod b/docs/examples/go/go.mod deleted file mode 100644 index 2bc1a74cb6..0000000000 --- a/docs/examples/go/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -module goexample - -go 1.17 - -require github.com/taosdata/driver-go/v3 3.0 - diff --git a/docs/examples/python/conn_native_pandas.py b/docs/examples/python/conn_native_pandas.py index 56942ef570..f3bab15efb 100644 --- a/docs/examples/python/conn_native_pandas.py +++ b/docs/examples/python/conn_native_pandas.py @@ -1,8 +1,11 @@ import pandas -from sqlalchemy import create_engine +from sqlalchemy import create_engine, text engine = create_engine("taos://root:taosdata@localhost:6030/power") -df = pandas.read_sql("SELECT * FROM meters", engine) +conn = engine.connect() +df = pandas.read_sql(text("SELECT * FROM power.meters"), conn) +conn.close() + # print index print(df.index) diff --git a/docs/examples/python/conn_rest_pandas.py b/docs/examples/python/conn_rest_pandas.py index 0164080cd5..1b207d6ff1 100644 --- a/docs/examples/python/conn_rest_pandas.py +++ b/docs/examples/python/conn_rest_pandas.py 
@@ -1,8 +1,10 @@ import pandas -from sqlalchemy import create_engine +from sqlalchemy import create_engine, text engine = create_engine("taosrest://root:taosdata@localhost:6041") -df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine) +conn = engine.connect() +df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn) +conn.close() # print index print(df.index) diff --git a/docs/examples/python/connect_rest_examples.py b/docs/examples/python/connect_rest_examples.py index 900ec1022e..0f8625ae53 100644 --- a/docs/examples/python/connect_rest_examples.py +++ b/docs/examples/python/connect_rest_examples.py @@ -1,24 +1,25 @@ # ANCHOR: connect from taosrest import connect, TaosRestConnection, TaosRestCursor -conn: TaosRestConnection = connect(url="http://localhost:6041", - user="root", - password="taosdata", - timeout=30) +conn = connect(url="http://localhost:6041", + user="root", + password="taosdata", + timeout=30) # ANCHOR_END: connect # ANCHOR: basic # create STable -cursor: TaosRestCursor = conn.cursor() +cursor = conn.cursor() cursor.execute("DROP DATABASE IF EXISTS power") cursor.execute("CREATE DATABASE power") -cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +cursor.execute( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") # insert data -cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") +cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") print("inserted row count:", cursor.rowcount) # query data @@ -28,7 +29,7 @@ print("queried row count:", cursor.rowcount) # get column names from cursor column_names = [meta[0] for meta in cursor.description] # get rows -data: list[tuple] = cursor.fetchall() +data = cursor.fetchall() print(column_names) for row in data: print(row) diff --git a/docs/examples/python/connection_usage_native_reference.py b/docs/examples/python/connection_usage_native_reference.py index 4803511e42..0a23c5f95b 100644 --- a/docs/examples/python/connection_usage_native_reference.py +++ 
b/docs/examples/python/connection_usage_native_reference.py @@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test") # change database. same as execute "USE db" conn.select_db("test") conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)") -affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m 24.4)") +affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)") print("affected_row", affected_row) # output: # affected_row 3 @@ -16,10 +16,10 @@ print("affected_row", affected_row) # ANCHOR: query # Execute a sql and get its result set. It's useful for SELECT statement -result: taos.TaosResult = conn.query("SELECT * from weather") +result = conn.query("SELECT * from weather") # Get fields from result -fields: taos.field.TaosFields = result.fields +fields = result.fields for field in fields: print(field) # {name: ts, type: 9, bytes: 8} @@ -42,4 +42,4 @@ print(data) # ANCHOR_END: query -conn.close() +conn.close() \ No newline at end of file diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py index c9d606388f..626e3310b1 100644 --- a/docs/examples/python/fast_write_example.py +++ b/docs/examples/python/fast_write_example.py @@ -1,15 +1,14 @@ # install dependencies: # recommend python >= 3.8 -# pip3 install faster-fifo # import logging import math +import multiprocessing import sys import time import os -from multiprocessing import Process -from faster_fifo import Queue +from multiprocessing import Process, Queue from mockdatasource import MockDataSource from queue import Empty from typing import List @@ -22,8 +21,7 @@ TABLE_COUNT = 1000 QUEUE_SIZE = 1000000 MAX_BATCH_SIZE = 3000 -read_processes = [] -write_processes = [] +_DONE_MESSAGE = '__DONE__' def get_connection(): @@ -44,41 +42,64 @@ def get_connection(): # ANCHOR: read -def run_read_task(task_id: int, task_queues: List[Queue]): +def run_read_task(task_id: int, task_queues: List[Queue], infinity): table_count_per_task = TABLE_COUNT // READ_TASK_COUNT - data_source = MockDataSource(f"tb{task_id}", table_count_per_task) + data_source = MockDataSource(f"tb{task_id}", table_count_per_task, infinity) try: for batch in data_source: + if isinstance(batch, tuple): + batch = [batch] for table_id, rows in batch: # hash data to different queue i = table_id % len(task_queues) # block putting forever when the queue is full - task_queues[i].put_many(rows, block=True, timeout=-1) + for row in rows: + task_queues[i].put(row) + if not infinity: + for queue in task_queues: + queue.put(_DONE_MESSAGE) except KeyboardInterrupt: pass + finally: + logging.info('read task over') # ANCHOR_END: read + # ANCHOR: write -def run_write_task(task_id: int, queue: Queue): +def run_write_task(task_id: int, queue: Queue, done_queue: Queue): from sql_writer import SQLWriter log = logging.getLogger(f"WriteTask-{task_id}") writer = SQLWriter(get_connection) lines = None try: while True: - try: - # get as many as possible - lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE) + over = False + lines = [] + for _ in range(MAX_BATCH_SIZE): + try: + line = queue.get_nowait() + if line == _DONE_MESSAGE: + over = True + break + if line: + lines.append(line) + except Empty: + time.sleep(0.1) + if len(lines) > 0: writer.process_lines(lines) - except Empty: - time.sleep(0.01) + if over: + done_queue.put(_DONE_MESSAGE) + break except KeyboardInterrupt: pass except BaseException 
as e: log.debug(f"lines={lines}") raise e + finally: + writer.close() + log.debug('write task over') # ANCHOR_END: write @@ -103,47 +124,64 @@ def set_global_config(): # ANCHOR: monitor -def run_monitor_process(): +def run_monitor_process(done_queue: Queue): log = logging.getLogger("DataBaseMonitor") - conn = get_connection() - conn.execute("DROP DATABASE IF EXISTS test") - conn.execute("CREATE DATABASE test") - conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " - "TAGS (location BINARY(64), groupId INT)") + conn = None + try: + conn = get_connection() - def get_count(): - res = conn.query("SELECT count(*) FROM test.meters") - rows = res.fetch_all() - return rows[0][0] if rows else 0 + def get_count(): + res = conn.query("SELECT count(*) FROM test.meters") + rows = res.fetch_all() + return rows[0][0] if rows else 0 - last_count = 0 - while True: - time.sleep(10) - count = get_count() - log.info(f"count={count} speed={(count - last_count) / 10}") - last_count = count + last_count = 0 + while True: + try: + done = done_queue.get_nowait() + if done == _DONE_MESSAGE: + break + except Empty: + pass + time.sleep(10) + count = get_count() + log.info(f"count={count} speed={(count - last_count) / 10}") + last_count = count + finally: + conn.close() # ANCHOR_END: monitor # ANCHOR: main -def main(): +def main(infinity): set_global_config() logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, " f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}") - monitor_process = Process(target=run_monitor_process) + conn = get_connection() + conn.execute("DROP DATABASE IF EXISTS test") + conn.execute("CREATE DATABASE IF NOT EXISTS test") + conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " + "TAGS (location BINARY(64), groupId INT)") + conn.close() + + done_queue = Queue() + monitor_process = Process(target=run_monitor_process, args=(done_queue,)) monitor_process.start() - time.sleep(3) # waiting for database ready. 
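+    # the database and stable are now created in main() before any subprocess
+    # starts, so there is no need to sleep here waiting for the database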
+ logging.debug(f"monitor task started with pid {monitor_process.pid}") task_queues: List[Queue] = [] + write_processes = [] + read_processes = [] + # create task queues for i in range(WRITE_TASK_COUNT): - queue = Queue(max_size_bytes=QUEUE_SIZE) + queue = Queue() task_queues.append(queue) # create write processes for i in range(WRITE_TASK_COUNT): - p = Process(target=run_write_task, args=(i, task_queues[i])) + p = Process(target=run_write_task, args=(i, task_queues[i], done_queue)) p.start() logging.debug(f"WriteTask-{i} started with pid {p.pid}") write_processes.append(p) @@ -151,13 +189,19 @@ def main(): # create read processes for i in range(READ_TASK_COUNT): queues = assign_queues(i, task_queues) - p = Process(target=run_read_task, args=(i, queues)) + p = Process(target=run_read_task, args=(i, queues, infinity)) p.start() logging.debug(f"ReadTask-{i} started with pid {p.pid}") read_processes.append(p) try: monitor_process.join() + for p in read_processes: + p.join() + for p in write_processes: + p.join() + time.sleep(1) + return except KeyboardInterrupt: monitor_process.terminate() [p.terminate() for p in read_processes] @@ -176,5 +220,6 @@ def assign_queues(read_task_id, task_queues): if __name__ == '__main__': - main() + multiprocessing.set_start_method('spawn') + main(False) # ANCHOR_END: main diff --git a/docs/examples/python/kafka_example.py b/docs/examples/python/kafka_example.py index 735059eec0..a89287d372 100644 --- a/docs/examples/python/kafka_example.py +++ b/docs/examples/python/kafka_example.py @@ -26,7 +26,8 @@ class Consumer(object): 'bath_consume': True, 'batch_size': 1000, 'async_model': True, - 'workers': 10 + 'workers': 10, + 'testing': False } LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose', @@ -46,11 +47,12 @@ class Consumer(object): def __init__(self, **configs): self.config: dict = self.DEFAULT_CONFIGS self.config.update(configs) - self.consumer = KafkaConsumer( - self.config.get('kafka_topic'), # topic - bootstrap_servers=self.config.get('kafka_brokers'), - group_id=self.config.get('kafka_group_id'), - ) + if not self.config.get('testing'): + self.consumer = KafkaConsumer( + self.config.get('kafka_topic'), # topic + bootstrap_servers=self.config.get('kafka_brokers'), + group_id=self.config.get('kafka_group_id'), + ) self.taos = taos.connect( host=self.config.get('taos_host'), user=self.config.get('taos_user'), @@ -60,7 +62,7 @@ class Consumer(object): ) if self.config.get('async_model'): self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers')) - self.tasks: list[Future] = [] + self.tasks = [] # tags and table mapping # key: {location}_{groupId} value: self.tag_table_mapping = {} i = 0 @@ -104,8 +106,8 @@ class Consumer(object): for task in self.tasks: while not task.done(): pass - if self.pool is not None: - self.pool.shutdown() + if self.pool is not None: + self.pool.shutdown() # clean data if self.config.get('clean_after_testing'): @@ -115,14 +117,14 @@ class Consumer(object): if self.taos is not None: self.taos.close() - def _run(self, f: Callable[[ConsumerRecord], bool]): + def _run(self, f): for message in self.consumer: if self.config.get('async_model'): self.pool.submit(f(message)) else: f(message) - def _run_batch(self, f: Callable[[list[list[ConsumerRecord]]], None]): + def _run_batch(self, f): while True: messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size')) if messages: @@ -140,7 +142,7 @@ class Consumer(object): logging.info('## insert sql 
%s', sql) return self.taos.execute(sql=sql) == 1 - def _to_taos_batch(self, messages: list[list[ConsumerRecord]]): + def _to_taos_batch(self, messages): sql = self._build_sql_batch(messages=messages) if len(sql) == 0: # decode error, skip return @@ -162,7 +164,7 @@ class Consumer(object): table_name = self._get_table_name(location=location, group_id=group_id) return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase) - def _build_sql_batch(self, messages: list[list[ConsumerRecord]]) -> str: + def _build_sql_batch(self, messages) -> str: sql_list = [] for partition_messages in messages: for message in partition_messages: @@ -186,7 +188,55 @@ def _get_location_and_group(key: str) -> (str, int): return fields[0], fields[1] +def test_to_taos(consumer: Consumer): + msg = { + 'location': 'California.SanFrancisco', + 'groupId': 1, + 'ts': '2022-12-06 15:13:38.643', + 'current': 3.41, + 'voltage': 105, + 'phase': 0.02027, + } + record = ConsumerRecord(checksum=None, headers=None, offset=1, key=None, value=json.dumps(msg), partition=1, + topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None) + assert consumer._to_taos(message=record) + + +def test_to_taos_batch(consumer: Consumer): + records = [ + [ + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'location': 'California.SanFrancisco', + 'groupId': 1, + 'ts': '2022-12-06 15:13:38.643', + 'current': 3.41, + 'voltage': 105, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'location': 'California.LosAngles', + 'groupId': 2, + 'ts': '2022-12-06 15:13:39.643', + 'current': 3.41, + 'voltage': 102, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ] + ] + + consumer._to_taos_batch(messages=records) + + if __name__ == '__main__': - consumer = Consumer(async_model=True) + consumer = Consumer(async_model=True, testing=True) + # init env consumer.init_env() - consumer.consume() \ No newline at end of file + # consumer.consume() + # test build sql + # test build sql batch + test_to_taos(consumer) + test_to_taos_batch(consumer) + \ No newline at end of file diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py index 1c516a800e..15a7d2ff8c 100644 --- a/docs/examples/python/mockdatasource.py +++ b/docs/examples/python/mockdatasource.py @@ -10,13 +10,14 @@ class MockDataSource: "9.4,118,0.141,California.SanFrancisco,4" ] - def __init__(self, tb_name_prefix, table_count): + def __init__(self, tb_name_prefix, table_count, infinity=True): self.table_name_prefix = tb_name_prefix + "_" self.table_count = table_count self.max_rows = 10000000 self.current_ts = round(time.time() * 1000) - self.max_rows * 100 # [(tableId, tableName, values),] self.data = self._init_data() + self.infinity = infinity def _init_data(self): lines = self.samples * (self.table_count // 5 + 1) @@ -28,14 +29,19 @@ class MockDataSource: def __iter__(self): self.row = 0 - return self + if not self.infinity: + return iter(self._iter_data()) + else: + return self def __next__(self): """ next 1000 rows for each table. 
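+        (each call advances the shared timestamp sequence by 1000 steps of 100 ms)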
return: {tableId:[row,...]} """ - # generate 1000 timestamps + return self._iter_data() + + def _iter_data(self): ts = [] for _ in range(1000): self.current_ts += 100 @@ -47,3 +53,10 @@ class MockDataSource: rows = [table_name + ',' + t + ',' + values for t in ts] result.append((table_id, rows)) return result + + +if __name__ == '__main__': + datasource = MockDataSource('t', 10, False) + for data in datasource: + print(data) + \ No newline at end of file diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py index 758167376b..db51bb7174 100644 --- a/docs/examples/python/sql_writer.py +++ b/docs/examples/python/sql_writer.py @@ -10,6 +10,7 @@ class SQLWriter: self._tb_tags = {} self._conn = get_connection_func() self._max_sql_length = self.get_max_sql_length() + self._conn.execute("create database if not exists test") self._conn.execute("USE test") def get_max_sql_length(self): @@ -20,7 +21,7 @@ class SQLWriter: return int(r[1]) return 1024 * 1024 - def process_lines(self, lines: str): + def process_lines(self, lines: [str]): """ :param lines: [[tbName,ts,current,voltage,phase,location,groupId]] """ @@ -60,6 +61,7 @@ class SQLWriter: buf.append(q) sql_len += len(q) sql += " ".join(buf) + self.create_tables() self.execute_sql(sql) self._tb_values.clear() @@ -88,3 +90,23 @@ class SQLWriter: except BaseException as e: self.log.error("Execute SQL: %s", sql) raise e + + def close(self): + if self._conn: + self._conn.close() + + +if __name__ == '__main__': + def get_connection_func(): + conn = taos.connect() + return conn + + + writer = SQLWriter(get_connection_func=get_connection_func) + writer.execute_sql( + "create stable if not exists meters (ts timestamp, current float, voltage int, phase float) " + "tags (location binary(64), groupId int)") + writer.execute_sql( + "INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) " + "VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)") + \ No newline at end of file diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py index a4625ca11a..32778e9f25 100644 --- a/docs/examples/python/tmq_example.py +++ b/docs/examples/python/tmq_example.py @@ -1,58 +1,55 @@ +from taos.tmq import Consumer import taos -from taos.tmq import * - -conn = taos.connect() - -print("init") -conn.execute("drop topic if exists topic_ctb_column") -conn.execute("drop database if exists py_tmq") -conn.execute("create database if not exists py_tmq vgroups 2") -conn.select_db("py_tmq") -conn.execute( - "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)" -) -conn.execute("create table if not exists tb1 using stb1 tags(1)") -conn.execute("create table if not exists tb2 using stb1 tags(2)") -conn.execute("create table if not exists tb3 using stb1 tags(3)") - -print("create topic") -conn.execute( - "create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1" -) - -print("build consumer") -conf = TaosTmqConf() -conf.set("group.id", "tg2") -conf.set("td.connect.user", "root") -conf.set("td.connect.pass", "taosdata") -conf.set("enable.auto.commit", "true") -def tmq_commit_cb_print(tmq, resp, offset, param=None): - print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") +def init_tmq_env(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) + conn.execute("create database if not exists {}".format(db)) + conn.select_db(db) + conn.execute( + "create 
stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") + conn.execute("create table if not exists tb1 using stb1 tags(1, 't1')") + conn.execute("create table if not exists tb2 using stb1 tags(2, 't2')") + conn.execute("create table if not exists tb3 using stb1 tags(3, 't3')") + conn.execute("create topic if not exists {} as select ts, c1, c2, c3 from stb1".format(topic)) + conn.execute("insert into tb1 values (now, 1, 1.0, 'tmq test')") + conn.execute("insert into tb2 values (now, 2, 2.0, 'tmq test')") + conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')") -conf.set_auto_commit_cb(tmq_commit_cb_print, None) -tmq = conf.new_consumer() +def cleanup(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) -print("build topic list") -topic_list = TaosTmqList() -topic_list.append("topic_ctb_column") +if __name__ == '__main__': + init_tmq_env("tmq_test", "tmq_test_topic") # init env + consumer = Consumer( + { + "group.id": "tg2", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "enable.auto.commit": "true", + } + ) + consumer.subscribe(["tmq_test_topic"]) -print("basic consume loop") -tmq.subscribe(topic_list) + try: + while True: + res = consumer.poll(1) + if not res: + break + err = res.error() + if err is not None: + raise err + val = res.value() -sub_list = tmq.subscription() - -print("subscribed topics: ", sub_list) - -while 1: - res = tmq.poll(1000) - if res: - topic = res.get_topic_name() - vg = res.get_vgroup_id() - db = res.get_db_name() - print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}") - for row in res: - print(row) + for block in val: + print(block.fetchall()) + finally: + consumer.unsubscribe() + consumer.close() + cleanup("tmq_test", "tmq_test_topic") \ No newline at end of file diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh index 140d05395b..ccb391b752 100644 --- a/tests/docs-examples-test/python.sh +++ b/tests/docs-examples-test/python.sh @@ -23,7 +23,7 @@ python3 bind_param_example.py # 4 taos -s "drop database power" -python3 multi_bind_example.py +python3 multi_bind_example.py # 5 python3 query_example.py @@ -44,4 +44,43 @@ taos -s "drop database test" python3 json_protocol_example.py # 10 -# python3 subscribe_demo.py +pip install SQLAlchemy +pip install pandas +taosBenchmark -y -d power -t 10 -n 10 +python3 conn_native_pandas.py +python3 conn_rest_pandas.py +taos -s "drop database if exists power" + +# 11 +taos -s "create database if not exists test" +python3 connect_native_reference.py + +# 12 +python3 connect_rest_examples.py + +# 13 +python3 handle_exception.py + +# 14 +taosBenchmark -y -d power -t 2 -n 10 +python3 rest_client_example.py +taos -s "drop database if exists power" + +# 15 +python3 result_set_examples.py + +# 16 +python3 tmq_example.py + +# 17 +python3 sql_writer.py + +# 18 +python3 mockdatasource.py + +# 19 +python3 fast_write_example.py + +# 20 +pip3 install kafka-python +python3 kafka_example.py From cabf3e86839de09bbfa45a623319b7d43f57d0e2 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 31 Jan 2023 11:22:06 +0800 Subject: [PATCH 18/57] fix: invalid read --- source/dnode/vnode/src/tsdb/tsdbSnapshot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 1bfd630331..98c9c0fdda 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ 
b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -1643,8 +1643,8 @@ _exit: if (code) { tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); } else { - tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, pId->suid, - pId->uid); + tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, + pWriter->tbid.suid, pWriter->tbid.uid); } return code; } From a84e990c4e1f99cd2f68c7913c2c56197cba2f7f Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 31 Jan 2023 11:29:46 +0800 Subject: [PATCH 19/57] fix: do not try to propose a commit on vmCloseVnode --- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 99ba9b9b3b..951544c766 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -79,8 +79,6 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { char path[TSDB_FILENAME_LEN] = {0}; - vnodeProposeCommitOnNeed(pVnode->pImpl); - taosThreadRwlockWrlock(&pMgmt->lock); taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); taosThreadRwlockUnlock(&pMgmt->lock); From b56db2026b7c7a30cc94196edf5b1b887ae52996 Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Tue, 31 Jan 2023 15:14:48 +0800 Subject: [PATCH 20/57] fix(taosAdapter): set option `TSDB_OPTION_USE_ADAPTER = true` (#19711) --- cmake/taosadapter_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index 13b247770e..d156057459 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 213f8b3 + GIT_TAG 3e08996 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From f6df973d4d2e3ae42da47fc82f9ead04b353fff2 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 1 Feb 2023 10:15:42 +0800 Subject: [PATCH 21/57] fix: auto create table error when the table meta is not cached --- source/libs/parser/src/parInsertSql.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 98c6aed829..1e1821842f 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -1760,6 +1760,9 @@ static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMeta if (TSDB_CODE_SUCCESS == code && !isStb && TSDB_SUPER_TABLE == pStmt->pTableMeta->tableType) { code = buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported"); } + if (TSDB_CODE_SUCCESS == code && isStb) { + code = storeTableMeta(pCxt, pStmt); + } if (TSDB_CODE_SUCCESS == code) { code = getTableVgroupFromMetaData(pMetaData->pTableHash, pStmt, isStb); } From c4531512303fb4129b22cde6552bfe193cce80c5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 1 Feb 2023 15:56:58 +0800 Subject: [PATCH 22/57] test(query): add regression test case.
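Covers TD-22077 and TD-21877: a batch of rows is written to two child tables
of a single-vgroup database and flushed to disk, a few out-of-order rows are
then inserted, and finally a range delete and a full scan are issued through
the super table st1. A minimal SQL sketch of the scenario (assuming $db
resolves to reg_db0, as the later flush statement suggests):

    create stable st1 (ts timestamp, c int) tags(a int);
    create table t1 using st1 tags(1);
    -- ... bulk insert 200 rows per child table, then:
    flush database reg_db0;
    insert into t1 values ('2018-09-17 09:00:26', 26);  -- out-of-order row
    delete from st1 where ts <= '2018-9-17 09:00:26';
    select * from st1;  -- should succeed and exclude the deleted rows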
--- tests/script/tsim/parser/regressiontest.sim | 34 +++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim index 1b127155cb..3ce2b47b44 100644 --- a/tests/script/tsim/parser/regressiontest.sim +++ b/tests/script/tsim/parser/regressiontest.sim @@ -63,4 +63,38 @@ if $rows != 8198 then return -1 endi +print ===========================> TD-22077 && TD-21877 +sql drop database if exists $db -x step1 +sql create database $db vgroups 1; + +sql use $db +sql create stable st1 (ts timestamp, c int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); + +$i = 0 +$ts = 1674977959000 +$rowNum = 200 + +$x = 0 +while $x < $rowNum +$xs = $x * $delta +$ts = $ts0 + $xs +sql insert into t1 values ( $ts , $x ) +sql insert into t2 values ( $ts + 1000a, $x ) +$x = $x + 1 +$ts = $ts + 1000 +endw + +sql flush database $db + +sql insert into t1 values('2018-09-17 09:00:26', 26); +sql insert into t2 values('2018-09-17 09:00:25', 25); + +sql insert into t2 values('2018-09-17 09:00:30', 30); +sql flush database reg_db0; + +sql delete from st1 where ts<='2018-9-17 09:00:26'; +sql select * from st1; + system sh/exec.sh -n dnode1 -s stop -x SIGINT From a3a2af4b3fb35f9c62ad43a86b388a43814bbcf0 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 1 Feb 2023 15:59:29 +0800 Subject: [PATCH 23/57] fix: taosbenchmark schemaless refine for main (#19714) * fix: taos-tools 143b9e4 for taosbenchmark schemaless refine for main * fix: update taos-tools 723f696 * test: fix ../tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py * test: check nchar temporarily as diff behavior between main and 3.0 branch * fix: update taos-tools 181bcac --- cmake/taostools_CMakeLists.txt.in | 2 +- .../taosbenchmark/sml_json_alltypes.py | 62 ++++++++++++------- 2 files changed, 42 insertions(+), 22 deletions(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 13a81f88ea..926d0c63e7 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 0cd564a + GIT_TAG 181bcac SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py index 2c6d09b0f5..1b65e38d72 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py @@ -19,32 +19,38 @@ from util.dnodes import * class TDTestCase: def caseDescription(self): - ''' + """ [TD-11510] taosBenchmark test cases - ''' - return + """ def init(self, conn, logSql, replicaVar=1): - self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) + self.replicaVar = int(replicaVar) tdSql.init(conn.cursor(), logSql) def getPath(self, tool="taosBenchmark"): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] + elif "src" in selfPath: + projPath = selfPath[: selfPath.find("src")] + elif "/tools/" in selfPath: + projPath = selfPath[: selfPath.find("/tools/")] + elif "/tests/" in selfPath: + projPath = selfPath[: selfPath.find("/tests/")] 
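+        # none of the known source-tree layouts matched; fall back to the system install path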
else: - projPath = selfPath[:selfPath.find("tests")] + tdLog.info("cannot found %s in path: %s, use system's" % (tool, selfPath)) + projPath = "/usr/local/taos/bin/" paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files): + for root, dummy, files in os.walk(projPath): + if (tool) in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): + if "packaging" not in rootRealPath: paths.append(os.path.join(root, tool)) break - if (len(paths) == 0): + if len(paths) == 0: tdLog.exit("taosBenchmark not found!") return else: @@ -52,31 +58,45 @@ class TDTestCase: return paths[0] def run(self): + tdSql.query("select client_version()") + client_ver = "".join(tdSql.queryResult[0]) + major_ver = client_ver.split(".")[0] + binPath = self.getPath() - cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" %binPath + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe db.stb1") tdSql.checkData(1, 1, "BOOL") tdSql.query("describe db.stb2") - tdSql.checkData(1, 1, "TINYINT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb3") - tdSql.checkData(1, 1, "SMALLINT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb4") - tdSql.checkData(1, 1, "INT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb5") - tdSql.checkData(1, 1, "BIGINT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb6") - tdSql.checkData(1, 1, "FLOAT") + tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb7") tdSql.checkData(1, 1, "DOUBLE") tdSql.query("describe db.stb8") - tdSql.checkData(1, 1, "VARCHAR") - tdSql.checkData(1, 2, 16) + if major_ver == "3": + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 16) + else: + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 8) + tdSql.query("describe db.stb9") - tdSql.checkData(1, 1, "NCHAR") - tdSql.checkData(1, 2, 16) + if major_ver == "3": + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 16) + else: + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 8) + tdSql.query("select count(*) from db.stb1") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from db.stb2") From 869ab0d394e08e19839a22300bbd55e059166462 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 1 Feb 2023 17:27:56 +0800 Subject: [PATCH 24/57] fix: snode timer --- source/dnode/snode/src/snode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 860db20fa8..1d2f4da26b 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -93,6 +93,8 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) { pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle); ASSERT(pTask->exec.executor); + streamSetupTrigger(pTask); + return 0; } From df30b51a7517d63d89c9730d8e52aed920c6de49 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 1 Feb 2023 17:59:39 +0800 Subject: [PATCH 25/57] fix(stream): use tdb page replacement with stream state --- source/libs/stream/src/streamState.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 6670bf463e..2460da25f4 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -192,7 +192,7 @@ void streamStateClose(SStreamState* pState) { } int32_t 
streamStateBegin(SStreamState* pState) { - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { tdbAbort(pState->pTdbState->db, pState->pTdbState->txn); return -1; @@ -208,7 +208,7 @@ int32_t streamStateCommit(SStreamState* pState) { return -1; } - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } @@ -220,7 +220,7 @@ int32_t streamStateAbort(SStreamState* pState) { return -1; } - if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } From 8e4c6ccbb3c49a92296dc6d424d26af947c467a7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 1 Feb 2023 18:15:35 +0800 Subject: [PATCH 26/57] fix(query): move the reset to other place. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 19 +++++++++++++++---- source/util/src/talgo.c | 8 ++++---- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index a77eb8298e..0bb9ebc40e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2693,10 +2693,6 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea void* p = taosHashIterate(pStatus->pTableMap, NULL); while (p != NULL) { STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; - - // reset the last del file index - pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order); - pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid; p = taosHashIterate(pStatus->pTableMap, p); } @@ -2704,6 +2700,18 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc); } +// reset the last del file index +static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t order) { + void* p = taosHashIterate(pStatus->pTableMap, NULL); + while (p != NULL) { + STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; + + // reset the last del file index + pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order); + p = taosHashIterate(pStatus->pTableMap, p); + } +} + static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; @@ -3052,6 +3060,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have data files, let's start check the last block file if exists if (pBlockIter->numOfBlocks == 0) { + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } } @@ -3083,6 +3092,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // data blocks in current file are exhausted, let's try the next file now tBlockDataReset(&pReader->status.fileBlockData); resetDataBlockIterator(pBlockIter, pReader->order); + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } else { code = initForFirstBlockInFile(pReader, pBlockIter); @@ -3094,6 +3104,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) 
{ // this file does not have blocks, let's start check the last block file if (pBlockIter->numOfBlocks == 0) { + resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order); goto _begin; } } diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c index a06aac6afe..e373850b3c 100644 --- a/source/util/src/talgo.c +++ b/source/util/src/talgo.c @@ -28,14 +28,14 @@ static void median(void *src, int64_t size, int64_t s, int64_t e, const void *pa void *buf) { int32_t mid = ((int32_t)(e - s) >> 1u) + (int32_t)s; - if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) == 1) { + if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) > 0) { doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf); } - if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) == 1) { + if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) > 0) { doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf); doswap(elePtrAt(src, size, mid), elePtrAt(src, size, e), size, buf); - } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) == 1) { + } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) > 0) { doswap(elePtrAt(src, size, s), elePtrAt(src, size, e), size, buf); } @@ -47,7 +47,7 @@ static void tInsertSort(void *src, int64_t size, int32_t s, int32_t e, const voi void *buf) { for (int32_t i = s + 1; i <= e; ++i) { for (int32_t j = i; j > s; --j) { - if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) == -1) { + if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) < 0) { doswap(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), size, buf); } else { break; From 043fc8d98046afbfea2ae8cb84899391f88b83de Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Wed, 1 Feb 2023 19:11:51 +0800 Subject: [PATCH 27/57] enh: improve logging msgs for sync snapshot repl --- source/libs/sync/src/syncUtil.c | 133 +++++++++++++++----------------- 1 file changed, 63 insertions(+), 70 deletions(-) diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index b246d9a79d..6a50572cba 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -141,20 +141,15 @@ static void syncLogReplMgrStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bu } static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { - int32_t len = 1; - + int32_t len = 0; + len += snprintf(buf + len, bufLen - len, "%s", "{"); for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) { SPeerState* pState = syncNodeGetPeerState(pSyncNode, &(pSyncNode->replicasId[i])); if (pState == NULL) break; - - if (i < pSyncNode->replicaNum - 1) { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 ", ", i, pState->lastSendIndex, - pState->lastSendTime); - } else { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 "}", i, pState->lastSendIndex, - pState->lastSendTime); - } + len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 "%s", i, pState->lastSendIndex, + pState->lastSendTime, (i < pSyncNode->replicaNum - 1) ? ", " : ""); } + len += snprintf(buf + len, bufLen - len, "%s", "}"); } void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) 
{ @@ -245,7 +240,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla char cfgStr[1024] = ""; syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr)); - char peerStr[1024] = "{"; + char peerStr[1024] = ""; syncPeerState2Str(pNode, peerStr, sizeof(peerStr)); char eventLog[512]; // {0}; @@ -255,20 +250,21 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla va_end(argpointer); taosPrintLog(flags, level, dflag, - "vgId:%d, %s, sync:%s, {%p s-param:%" PRId64 " e-param:%" PRId64 " laindex:%" PRId64 " laterm:%" PRIu64 - " lcindex:%" PRId64 - " seq:%d ack:%d finish:%d replica-index:%d dnode:%d}" - ", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64 - ", snap-tm:%" PRIu64 ", sby:%d, stgy:%d, bch:%d, r-num:%d, lcfg:%" PRId64 - ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s", + "vgId:%d, %s, sync:%s, snap-sender:{%p start:%" PRId64 " end:%" PRId64 " last-index:%" PRId64 + " last-term:%" PRIu64 " last-cfg:%" PRId64 + ", seq:%d ack:%d finish:%d, as:%d dnode:%d}" + ", term:%" PRIu64 ", commit-index:%" PRId64 ", firstver:%" PRId64 ", lastver:%" PRId64 + ", min-match:%" PRId64 ", snap:{last-index:%" PRId64 ", term:%" PRIu64 + "}, standby:%d, batch-sz:%d, replicas:%d, last-cfg:%" PRId64 + ", chging:%d, restore:%d, quorum:%d, lc-timer:{elect:%" PRId64 ", hb:%" PRId64 "}, peer:%s, cfg:%s", pNode->vgId, eventLog, syncStr(pNode->state), pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->replicaIndex, DID(&pNode->replicasId[pSender->replicaIndex]), pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, - pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum, - pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), - pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); + pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, + pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, + pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); } void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver, @@ -291,7 +287,7 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df char cfgStr[1024] = ""; syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr)); - char peerStr[1024] = "{"; + char peerStr[1024] = ""; syncPeerState2Str(pNode, peerStr, sizeof(peerStr)); char eventLog[512]; // {0}; @@ -300,22 +296,22 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df int32_t writeLen = vsnprintf(eventLog, sizeof(eventLog), format, argpointer); va_end(argpointer); - taosPrintLog(flags, level, dflag, - "vgId:%d, %s, sync:%s," - " {%p start:%d ack:%d term:%" PRIu64 " start-time:%" PRId64 " from dnode:%d s-param:%" PRId64 - " e-param:%" PRId64 " laindex:%" PRId64 " laterm:%" PRIu64 " lcindex:%" PRId64 - "}" - ", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64 - ", snap-tm:%" PRIu64 ", sby:%d, stgy:%d, bch:%d, r-num:%d, lcfg:%" PRId64 - 
", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s", - pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, - pReceiver->term, pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, - pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, - pReceiver->snapshot.lastConfigIndex, pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, - logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, - pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum, - pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), - pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); + taosPrintLog( + flags, level, dflag, + "vgId:%d, %s, sync:%s," + " snap-receiver:{%p started:%d acked:%d term:%" PRIu64 " start-time:%" PRId64 " from-dnode:%d, start:%" PRId64 + " end:%" PRId64 " last-index:%" PRId64 " last-term:%" PRIu64 " last-cfg:%" PRId64 + "}" + ", term:%" PRIu64 ", commit-index:%" PRId64 ", firstver:%" PRId64 ", lastver:%" PRId64 ", min-match:%" PRId64 + ", snap:{last-index:%" PRId64 ", last-term:%" PRIu64 "}, standby:%d, batch-sz:%d, replicas:%d, last-cfg:%" PRId64 + ", chging:%d, restore:%d, quorum:%d, lc-timers:{elect:%" PRId64 ", hb:%" PRId64 "}, peer:%s, cfg:%s", + pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, + pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, + pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex, + pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, + snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, + pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, + syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); } void syncLogRecvTimer(SSyncNode* pSyncNode, const SyncTimeout* pMsg, const char* s) { @@ -351,13 +347,13 @@ void syncLogSendHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, bool int64_t execTime) { if (printX) { sNTrace(pSyncNode, - "send sync-heartbeat to dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 - "}, x", + "send sync-heartbeat to dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, x", DID(&pMsg->destId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp); } else { sNTrace(pSyncNode, - "send sync-heartbeat to dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 - "}, timer-elapsed:%" PRId64 ", next-exec:%" PRId64, + "send sync-heartbeat to dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, timer-elapsed:%" PRId64 ", next-exec:%" PRId64, DID(&pMsg->destId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, timerElapsed, execTime); } @@ -368,14 +364,14 @@ void syncLogRecvHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, int64 pSyncNode->hbSlowNum++; sNInfo(pSyncNode, - "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 + "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 
", commit-index:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); } sNTrace(pSyncNode, - "recv sync-heartbeat from dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64 - "}, %s, net elapsed:%" PRId64, + "recv sync-heartbeat from dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); } @@ -400,67 +396,64 @@ void syncLogRecvHeartbeatReply(SSyncNode* pSyncNode, const SyncHeartbeatReply* p void syncLogSendSyncSnapshotSend(SSyncNode* pSyncNode, const SyncSnapshotSend* pMsg, const char* s) { sNDebug(pSyncNode, - "send sync-snapshot-send to dnode:%d, %s, seq:%d, term:%" PRId64 ", begin:%" PRId64 ", end:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64, + "send sync-snapshot-send to dnode:%d, %s, seq:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64, DID(&pMsg->destId), s, pMsg->seq, pMsg->term, pMsg->beginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime); } void syncLogRecvSyncSnapshotSend(SSyncNode* pSyncNode, const SyncSnapshotSend* pMsg, const char* s) { sNDebug(pSyncNode, - "recv sync-snapshot-send from dnode:%d, %s, seq:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64 ", len:%u", + "recv sync-snapshot-send from dnode:%d, %s, seq:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64 ", data-len:%u", DID(&pMsg->srcId), s, pMsg->seq, pMsg->term, pMsg->beginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime, pMsg->dataLen); } void syncLogSendSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMsg, const char* s) { sNDebug(pSyncNode, - "send sync-snapshot-rsp to dnode:%d, %s, ack:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64, + "send sync-snapshot-rsp to dnode:%d, %s, acked:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64, DID(&pMsg->destId), s, pMsg->ack, pMsg->term, pMsg->snapBeginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime); } void syncLogRecvSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMsg, const char* s) { sNDebug(pSyncNode, - "recv sync-snapshot-rsp from dnode:%d, %s, ack:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64 - ", lterm:%" PRId64 ", stime:%" PRId64, + "recv sync-snapshot-rsp from dnode:%d, %s, ack:%d, term:%" PRId64 ", begin-index:%" PRId64 + ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64, DID(&pMsg->srcId), s, pMsg->ack, pMsg->term, pMsg->snapBeginIndex, pMsg->lastIndex, pMsg->lastTerm, pMsg->startTime); } void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) { sNTrace(pSyncNode, - "recv sync-append-entries from dnode:%d {term:%" PRId64 ", pre-index:%" PRId64 ", pre-term:%" PRId64 - ", cmt:%" PRId64 ", pterm:%" PRId64 ", datalen:%d}, %s", - DID(&pMsg->srcId), pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->privateTerm, - pMsg->dataLen, s); + "recv sync-append-entries from dnode:%d {term:%" PRId64 ", prev-log:{index:%" PRId64 ", term:%" PRId64 + "}, commit-index:%" PRId64 ", datalen:%d}, %s", + DID(&pMsg->srcId), 
pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->dataLen, s); } void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) { sNTrace(pSyncNode, - "send sync-append-entries to dnode:%d, {term:%" PRId64 ", pre-index:%" PRId64 ", pre-term:%" PRId64 - ", lsend-index:%" PRId64 ", cmt:%" PRId64 ", datalen:%d}, %s", + "send sync-append-entries to dnode:%d, {term:%" PRId64 ", prev-log:{index:%" PRId64 ", term:%" PRId64 + "}, index:%" PRId64 ", commit-index:%" PRId64 ", datalen:%d}, %s", DID(&pMsg->destId), pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, (pMsg->prevLogIndex + 1), pMsg->commitIndex, pMsg->dataLen, s); } -void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, const char* s) { - if (voteGranted == -1) { - sNInfo(pSyncNode, - "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 "}, %s", - DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s); - } else { - sNInfo(pSyncNode, - "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 - "}, granted:%d", - DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, voteGranted); - } +void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, + const char* errmsg) { + char statusMsg[64]; + snprintf(statusMsg, sizeof(statusMsg), "granted:%d", voteGranted); + sNInfo(pSyncNode, + "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s", + DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, + (voteGranted != -1) ? statusMsg : errmsg); } void syncLogSendRequestVote(SSyncNode* pNode, const SyncRequestVote* pMsg, const char* s) { - sNInfo(pNode, "send sync-request-vote to dnode:%d {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 "}, %s", + sNInfo(pNode, + "send sync-request-vote to dnode:%d {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s", DID(&pMsg->destId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s); } From 3665ce26e7ca2ab3f6c38747f3ef0c407418ec43 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 1 Feb 2023 21:32:34 +0800 Subject: [PATCH 28/57] fix: update taos-tools a0234fe for main (#19737) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 926d0c63e7..1053caf4ef 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 181bcac + GIT_TAG a0234fe SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 9fe1512c553e0c9f33d858bc85ae26fa48a614e2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 1 Feb 2023 22:41:39 +0800 Subject: [PATCH 29/57] refactor: add some logs. --- source/dnode/vnode/src/tsdb/tsdbRead.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0bb9ebc40e..db52d7604f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -1113,9 +1113,9 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn int32_t unDumpedRows = asc ? 
pBlock->nRow - pDumpInfo->rowIndex : pDumpInfo->rowIndex + 1; tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 - ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", + ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", uid:%"PRIu64" elapsed time:%.2f ms, %s", pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, dumpedRows, - unDumpedRows, pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr); + unDumpedRows, pBlock->minVer, pBlock->maxVer, pBlockInfo->uid, elapsedTime, pReader->idStr); return TSDB_CODE_SUCCESS; } @@ -2187,17 +2187,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea if (code == TSDB_CODE_SUCCESS) { pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL); - tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + tsdbDebug("%p uid:%" PRIu64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 " %s", pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr); } else { - tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid, + tsdbError("%p uid:%" PRIu64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid, tstrerror(code), pReader->idStr); return code; } } } else { - tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); } STbData* di = NULL; @@ -2208,17 +2208,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea if (code == TSDB_CODE_SUCCESS) { pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL); - tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 + tsdbDebug("%p uid:%" PRIu64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 " %s", pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr); } else { - tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid, + tsdbError("%p uid:%" PRIu64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid, tstrerror(code), pReader->idStr); return code; } } } else { - tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr); } initDelSkylineIterator(pBlockScanInfo, pReader, d, di); @@ -2837,6 +2837,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader; + ASSERT(pBlockInfo != NULL); + if (pBlockInfo != NULL) { pScanInfo = *(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); @@ -2857,7 +2859,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { initLastBlockReader(pLastBlockReader, pScanInfo, pReader); TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader); - if (pBlockInfo == NULL) { // build data block from last data file + /*if 
(pBlockInfo == NULL) { // build data block from last data file SBlockData* pBData = &pReader->status.fileBlockData; tBlockDataReset(pBData); @@ -2889,7 +2891,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows, el, pReader->idStr); } - } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) { + } else*/ if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) { code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid); if (code != TSDB_CODE_SUCCESS) { return code; From 041d0d817a63b514f22637f4478d5eace116f15c Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 2 Feb 2023 09:11:17 +0800 Subject: [PATCH 30/57] fix: tq open --- source/dnode/vnode/src/tq/tq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index b195cfafb0..cca241a1cf 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -93,21 +93,21 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { taosHashSetFreeFp(pTq->pCheckInfo, (FDelete)tDeleteSTqCheckInfo); if (tqMetaOpen(pTq) < 0) { - ASSERT(0); + return NULL; } pTq->pOffsetStore = tqOffsetOpen(pTq); if (pTq->pOffsetStore == NULL) { - ASSERT(0); + return NULL; } pTq->pStreamMeta = streamMetaOpen(path, pTq, (FTaskExpand*)tqExpandTask, pTq->pVnode->config.vgId); if (pTq->pStreamMeta == NULL) { - ASSERT(0); + return NULL; } if (streamLoadTasks(pTq->pStreamMeta) < 0) { - ASSERT(0); + return NULL; } return pTq; From 1e17f1d482d65daba29c3f5d017d604383578913 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 2 Feb 2023 09:42:06 +0800 Subject: [PATCH 31/57] fix: mem leak --- source/client/src/clientTmq.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 90c405c204..22739108e2 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1891,9 +1891,6 @@ int32_t tmq_consumer_close(tmq_t* tmq) { } tmq_list_destroy(lst); - - /*return rsp;*/ - return 0; } taosRemoveRef(tmqMgmt.rsetId, tmq->refId); return 0; From 69d6fb5bfe504f2a661530762395ec2f22f45a9b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 2 Feb 2023 10:20:34 +0800 Subject: [PATCH 32/57] fix(meta): use metaAbort instead of tdbAbort with metaClose --- source/dnode/vnode/src/meta/metaCommit.c | 5 ++++- source/dnode/vnode/src/meta/metaOpen.c | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaCommit.c b/source/dnode/vnode/src/meta/metaCommit.c index f61930b84c..f597c100d0 100644 --- a/source/dnode/vnode/src/meta/metaCommit.c +++ b/source/dnode/vnode/src/meta/metaCommit.c @@ -56,4 +56,7 @@ int metaPrepareAsyncCommit(SMeta *pMeta) { } // abort the meta txn -int metaAbort(SMeta *pMeta) { return tdbAbort(pMeta->pEnv, pMeta->txn); } +int metaAbort(SMeta *pMeta) { + if (!pMeta->txn) return 0; + return tdbAbort(pMeta->pEnv, pMeta->txn); +} diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 867b481bcc..35677d6f07 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -203,7 +203,7 @@ _err: int metaClose(SMeta *pMeta) { if (pMeta) { - if (pMeta->pEnv) tdbAbort(pMeta->pEnv, pMeta->txn); + if (pMeta->pEnv) metaAbort(pMeta); if (pMeta->pCache) metaCacheClose(pMeta); 
if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); From 7f571b1ebdb122d8ce9b0defc29e79076cf47c7f Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Thu, 2 Feb 2023 10:59:47 +0800 Subject: [PATCH 33/57] fix: a single time point can be filled --- source/libs/parser/src/parTranslater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9991c2c6ae..183d1b0c6e 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2978,7 +2978,7 @@ static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* intervalRange = pInterval->datum.i; } - if ((timeRange == 0) || (timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) { + if ((timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE); } From e234dda2de599b0f39ab9f272e46131e0727cb61 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 2 Feb 2023 11:44:08 +0800 Subject: [PATCH 34/57] fix: interp supports filter and scalar calculation --- source/libs/executor/src/timesliceoperator.c | 12 ++++++++ tests/script/tsim/parser/interp.sim | 29 ++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 94c98f41c9..6561e810bb 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -433,6 +433,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { break; } + if (pSliceInfo->scalarSup.pExprInfo != NULL) { + SExprSupp* pExprSup = &pSliceInfo->scalarSup; + projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); + } + int32_t code = initKeeperInfo(pSliceInfo, pBlock); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); @@ -531,6 +536,8 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); } + doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL); + // restore the value setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED); if (pResBlock->info.rows == 0) { @@ -566,6 +573,11 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode } } + code = filterInitFromNode((SNode*)pInterpPhyNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + pInfo->tsCol = extractColumnFromColumnNode((SColumnNode*)pInterpPhyNode->pTimeSeries); pInfo->fillType = convertFillType(pInterpPhyNode->fillMode); initResultSizeInfo(&pOperator->resultInfo, 4096); diff --git a/tests/script/tsim/parser/interp.sim b/tests/script/tsim/parser/interp.sim index 1b7878178c..e6512a22d7 100644 --- a/tests/script/tsim/parser/interp.sim +++ b/tests/script/tsim/parser/interp.sim @@ -72,4 +72,33 @@ sql_error select interp(*) from nt5931 where ts=now sql_error select interp(*) from st5931 where ts=now sql_error select interp(*) from ct5931 where ts=now +sql create stable sta (ts timestamp, f1 double, f2 binary(200)) tags(t1 int); +sql create table tba1 using sta tags(1); +sql insert into tba1 values ('2022-04-26 15:15:01', -3.0, "a"); +sql insert into tba1 values ('2022-04-26 15:15:05', 3.0, "b"); +sql select a from (select interp(f1) as a from tba1 where ts >= '2022-04-26 15:15:01' and ts <= '2022-04-26 15:15:05'
range('2022-04-26 15:15:01','2022-04-26 15:15:05') every(1s) fill(linear)) where a > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 1.500000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi + +sql select a from (select interp(f1+1) as a from tba1 where ts >= '2022-04-26 15:15:01' and ts <= '2022-04-26 15:15:05' range('2022-04-26 15:15:01','2022-04-26 15:15:05') every(1s) fill(linear)) where a > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data10 != 2.500000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT From b9ee0f983f6f7b849d14bc8db3d023ae77fe343d Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 2 Feb 2023 15:32:02 +0800 Subject: [PATCH 35/57] ci:add ci docker file to TD-rep --- tests/ci/Dockerfile | 48 ++++++++++++++++++++++++++++++ tests/ci/build_image.sh | 4 +++ tests/ci/daily_build_image.sh | 56 +++++++++++++++++++++++++++++++++++ 3 files changed, 108 insertions(+) create mode 100644 tests/ci/Dockerfile create mode 100755 tests/ci/build_image.sh create mode 100755 tests/ci/daily_build_image.sh diff --git a/tests/ci/Dockerfile b/tests/ci/Dockerfile new file mode 100644 index 0000000000..594bcc902d --- /dev/null +++ b/tests/ci/Dockerfile @@ -0,0 +1,48 @@ +FROM python:3.8 +RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple +RUN pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro +RUN apt-get update +RUN apt-get install -y psmisc sudo tree libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config build-essential valgrind \ + vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 +RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' +RUN apt install -y r-base +ADD go1.17.6.linux-amd64.tar.gz /usr/local/ +ADD jdk-8u144-linux-x64.tar.gz /usr/local/ +ADD apache-maven-3.8.4-bin.tar.gz /usr/local/ +RUN apt-get install wget -y \ + && wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \ + && dpkg -i packages-microsoft-prod.deb \ + && rm packages-microsoft-prod.deb \ + && apt-get update && apt-get install -y dotnet-sdk-5.0 && apt-get install -y dotnet-sdk-6.0 +ADD node-v12.20.0-linux-x64.tar.gz /usr/local/ +RUN sh -c "rm -f /etc/localtime;ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime;echo \"Asia/Shanghai\" >/etc/timezone" +COPY id_rsa /root/.ssh/id_rsa +COPY .m2 /root/.m2 +COPY .nuget /root/.nuget +COPY .dotnet /root/.dotnet +COPY .cargo /root/.cargo +COPY go /root/go +ADD cmake-3.21.5-linux-x86_64.tar.gz /usr/local/ +RUN echo " export RUSTUP_DIST_SERVER=\"https://rsproxy.cn\" " >> /root/.bashrc +RUN echo " export RUSTUP_UPDATE_ROOT=\"https://rsproxy.cn/rustup\" " >> /root/.bashrc +RUN curl https://sh.rustup.rs -o /tmp/rustup-init.sh +RUN sh /tmp/rustup-init.sh -y +ENV PATH /usr/local/go/bin:/usr/local/node-v12.20.0-linux-x64/bin:/usr/local/apache-maven-3.8.4/bin:/usr/local/jdk1.8.0_144/bin:/usr/local/cmake-3.21.5-linux-x86_64/bin:/root/.cargo/bin:$PATH +ENV JAVA_HOME /usr/local/jdk1.8.0_144 +RUN go env -w GOPROXY=https://goproxy.cn +RUN echo "StrictHostKeyChecking no" >>/etc/ssh/ssh_config +RUN npm config -g set unsafe-perm +RUN npm config -g set 
registry https://registry.npm.taobao.org +COPY .npm /root/.npm +RUN R CMD javareconf JAVA_HOME=${JAVA_HOME} JAVA=${JAVA_HOME}/bin/java JAVAC=${JAVA_HOME}/bin/javac JAVAH=${JAVA_HOME}/bin/javah JAR=${JAVA_HOME}/bin/jar +RUN echo "install.packages(\"RJDBC\", repos=\"http://cran.us.r-project.org\")"|R --no-save +COPY .gitconfig /root/.gitconfig +RUN mkdir -p /run/sshd +COPY id_rsa.pub /root/.ssh/id_rsa.pub +COPY id_rsa.pub /root/.ssh/authorized_keys +RUN pip3 uninstall -y taostest +COPY repository/TDinternal /home/TDinternal +COPY repository/taos-connector-python /home/taos-connector-python +RUN sh -c "cd /home/taos-connector-python; pip3 install ." +COPY setup.sh /home/setup.sh \ No newline at end of file diff --git a/tests/ci/build_image.sh b/tests/ci/build_image.sh new file mode 100755 index 0000000000..1864df35db --- /dev/null +++ b/tests/ci/build_image.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +docker build --no-cache -t taos_test:v1.0 . + diff --git a/tests/ci/daily_build_image.sh b/tests/ci/daily_build_image.sh new file mode 100755 index 0000000000..47f0c971d9 --- /dev/null +++ b/tests/ci/daily_build_image.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +set -x + +script_dir=`dirname $0` +cd $script_dir +script_dir=`pwd` +cd $script_dir/repository/taos-connector-python +git pull + +cd $script_dir/repository/TDinternal +git clean -fxd +git pull + +cd $script_dir/repository/TDinternal/community +git clean -fxd +git pull +git submodule update --init --recursive + +cd $script_dir +./build_image.sh || exit 1 +docker image prune -f +ips="\ +192.168.1.47 \ +192.168.1.48 \ +192.168.1.49 \ +192.168.1.52 \ +192.168.0.215 \ +192.168.0.217 \ +192.168.0.219 \ +" + +image=taos_image.tar + +docker save taos_test:v1.0 -o $image + +for ip in $ips; do + echo "scp $image root@$ip:/home/ &" + scp $image root@$ip:/home/ & +done +wait + +for ip in $ips; do + echo "ssh root@$ip docker load -i /home/$image &" + ssh root@$ip docker load -i /home/$image & +done +wait + +for ip in $ips; do + echo "ssh root@$ip rm -f /home/$image &" + ssh root@$ip rm -f /home/$image & +done +wait + +rm -rf taos_image.tar + From 57a0c8abd20ef2291388221c5d6876dd2469e5a1 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 2 Feb 2023 15:52:12 +0800 Subject: [PATCH 36/57] ci:add ci docker file to TD-rep --- tests/ci/daily_build_image.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ci/daily_build_image.sh b/tests/ci/daily_build_image.sh index 47f0c971d9..01148a3aae 100755 --- a/tests/ci/daily_build_image.sh +++ b/tests/ci/daily_build_image.sh @@ -14,10 +14,14 @@ git pull cd $script_dir/repository/TDinternal/community git clean -fxd -git pull +git checkout main +git pull origin main git submodule update --init --recursive cd $script_dir +cp $script_dir/repository/TDinternal/community/tests/ci/build_image.sh . +cp $script_dir/repository/TDinternal/community/tests/ci/daily_build_image.sh . 
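# note: the cp commands above copy the CI scripts out of the freshly pulled repo, so the next run picks up any updated versions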
+ ./build_image.sh || exit 1 docker image prune -f ips="\ 192.168.1.47 \ 192.168.1.48 \ 192.168.1.49 \ 192.168.1.52 \ 192.168.0.215 \ 192.168.0.217 \ 192.168.0.219 \ " From a32c8520399e1b5f8694f61d03e77b5e0562d298 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 2 Feb 2023 16:13:17 +0800 Subject: [PATCH 37/57] fix: add ttl drop response for mnode --- source/dnode/mgmt/mgmt_mnode/src/mmHandle.c | 1 + source/dnode/mnode/impl/src/mndStb.c | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 16fe6c1b91..cf4eaaf7d1 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -167,6 +167,7 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_SUBSCRIBE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index d504a94700..471c53b2f5 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -41,6 +41,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq); static int32_t mndProcessCreateStbReq(SRpcMsg *pReq); static int32_t mndProcessAlterStbReq(SRpcMsg *pReq); static int32_t mndProcessDropStbReq(SRpcMsg *pReq); +static int32_t mndProcessDropTtltbReq(SRpcMsg *pReq); static int32_t mndProcessTableMetaReq(SRpcMsg *pReq); static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextStb(SMnode *pMnode, void *pIter); @@ -64,6 +65,7 @@ int32_t mndInitStb(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_ALTER_STB, mndProcessAlterStbReq); mndSetMsgHandle(pMnode, TDMT_MND_DROP_STB, mndProcessDropStbReq); mndSetMsgHandle(pMnode, TDMT_VND_CREATE_STB_RSP, mndTransProcessRsp); + mndSetMsgHandle(pMnode, TDMT_VND_DROP_TTL_TABLE_RSP, mndProcessDropTtltbReq); mndSetMsgHandle(pMnode, TDMT_VND_ALTER_STB_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_VND_DROP_STB_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_MND_TABLE_META, mndProcessTableMetaReq); @@ -2176,6 +2178,10 @@ static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName, return 0; } +static int32_t mndProcessDropTtltbReq(SRpcMsg *pReq) { + return 0; +} + static int32_t mndProcessDropStbReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; From aab31f655c14a06df56be761a397fb51b566b240 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 2 Feb 2023 16:57:05 +0800 Subject: [PATCH 38/57] fix(query): fix bug in multi-group limit/offset of the merge sort
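The limit/offset bookkeeping is per output group, so it has to be reset whenever the sorted stream crosses a group boundary. A minimal standalone sketch of that pattern (illustrative only; the struct, data, and helper below are hypothetical stand-ins, not the actual TDengine operator code):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct {
      int64_t limit;     /* rows allowed per group */
      int64_t remaining; /* rows still allowed in the current group */
    } SLimitInfoSketch;

    static void resetLimitForNextGroup(SLimitInfoSketch* pLimit) { pLimit->remaining = pLimit->limit; }

    int main(void) {
      /* a pre-sorted stream of (groupId, value) pairs, as the merge would produce */
      uint64_t groupIds[] = {1, 1, 1, 2, 2, 3};
      int64_t  values[] = {10, 11, 12, 20, 21, 30};
      SLimitInfoSketch limit = {.limit = 2, .remaining = 2};

      uint64_t curGroupId = 0; /* 0 means no group seen yet */
      for (size_t i = 0; i < sizeof(groupIds) / sizeof(groupIds[0]); ++i) {
        bool newgroup = (curGroupId != 0 && groupIds[i] != curGroupId);
        curGroupId = groupIds[i];
        if (newgroup) {
          /* the key point of the fix: reset the per-group quota at every group boundary */
          resetLimitForNextGroup(&limit);
        }
        if (limit.remaining <= 0) continue; /* this group already hit its limit */
        limit.remaining--;
        printf("group %" PRIu64 " -> %" PRId64 "\n", curGroupId, values[i]);
      }
      return 0;
    }

With a limit of 2 per group, the sketch emits two rows for each of groups 1 and 2 and one row for group 3; the real change threads the same newgroup signal out of doGetSortedBlockData().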
--- source/libs/executor/src/sortoperator.c | 32 +++++++++++++------------ 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 6d3da3e111..98ef6b8a36 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -544,7 +544,6 @@ typedef struct SMultiwayMergeOperatorInfo { SSDataBlock* pIntermediateBlock; // to hold the intermediate result int64_t startTs; // sort start time bool groupSort; - bool hasGroupId; uint64_t groupId; STupleHandle* prefetchedTuple; } SMultiwayMergeOperatorInfo; @@ -591,7 +590,9 @@ int32_t openMultiwayMergeOperator(SOperatorInfo* pOperator) { return TSDB_CODE_SUCCESS; } -static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity, SSDataBlock* p) { +static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity, + SSDataBlock* p, bool* newgroup) { + *newgroup = false; while (1) { STupleHandle* pTupleHandle = NULL; @@ -600,8 +601,12 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pTupleHandle = tsortNextTuple(pHandle); } else { pTupleHandle = pInfo->prefetchedTuple; - pInfo->groupId = tsortGetGroupId(pTupleHandle); pInfo->prefetchedTuple = NULL; + uint64_t gid = tsortGetGroupId(pTupleHandle); + if (gid != pInfo->groupId) { + *newgroup = true; + pInfo->groupId = gid; + } } } else { pTupleHandle = tsortNextTuple(pHandle); @@ -614,12 +619,10 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* if (pInfo->groupSort) { uint64_t tupleGroupId = tsortGetGroupId(pTupleHandle); - if (!pInfo->hasGroupId) { + if (pInfo->groupId == 0 || pInfo->groupId == tupleGroupId) { + appendOneRowToDataBlock(p, pTupleHandle); + p->info.id.groupId = tupleGroupId; pInfo->groupId = tupleGroupId; - pInfo->hasGroupId = true; - appendOneRowToDataBlock(p, pTupleHandle); - } else if (pInfo->groupId == tupleGroupId) { - appendOneRowToDataBlock(p, pTupleHandle); } else { pInfo->prefetchedTuple = pTupleHandle; break; @@ -632,11 +635,6 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* break; } } - - if (pInfo->groupSort) { - pInfo->hasGroupId = false; - } - } SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, SArray* pColMatchInfo, @@ -660,14 +658,18 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData } SSDataBlock* p = pInfo->pIntermediateBlock; + bool newgroup = false; while (1) { - doGetSortedBlockData(pInfo, pHandle, capacity, p); + doGetSortedBlockData(pInfo, pHandle, capacity, p, &newgroup); if (p->info.rows == 0) { break; } - // todo fix it: we need to decide whether this block is belonged to previous group or not . 
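// note: newgroup is raised by doGetSortedBlockData() above when the prefetched tuple carries a different group id; that is what triggers the per-group limit reset below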
+ if (newgroup) { + resetLimitInfoForNextGroup(&pInfo->limitInfo); + } + bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo); if (limitReached) { resetLimitInfoForNextGroup(&pInfo->limitInfo); } From 6cdfa6d60f3fd46989ff084b2793d229dd0254ec Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 2 Feb 2023 17:51:04 +0800 Subject: [PATCH 39/57] fix(tdb): return success if txn is committed --- source/libs/tdb/src/db/tdbPager.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 62d82edeb1..63e88b0a12 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -457,6 +457,11 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn) { SPgno journalSize = 0; int ret; + if (pTxn->jfd == 0) { + // txn is committed + return 0; + } + // sync the journal file ret = tdbOsFSync(pTxn->jfd); if (ret < 0) { From 746a6bc243fd110ddd81e1c108244d224e33740b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 2 Feb 2023 18:24:25 +0800 Subject: [PATCH 40/57] fix: taosbenchmark handle mem better patch2 for main (#19743) * fix: update taos-tools a0234fe for main * fix: update taos-tools c4a567b --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 1053caf4ef..4da2d80fbb 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG a0234fe + GIT_TAG c4a567b SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From b6b0b3439e8c519a2210842fdc52089bb5f135be Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Thu, 2 Feb 2023 19:34:18 +0800 Subject: [PATCH 41/57] enh: rename syncNodeOnSnapshotPre to syncNodeOnSnapshotPrep --- source/libs/sync/inc/syncSnapshot.h | 2 +- source/libs/sync/src/syncSnapshot.c | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index 5277e7818f..063b4f51f5 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -24,7 +24,7 @@ extern "C" { #define SYNC_SNAPSHOT_SEQ_INVALID -2 #define SYNC_SNAPSHOT_SEQ_FORCE_CLOSE -3 -#define SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT -1 +#define SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT -1 #define SYNC_SNAPSHOT_SEQ_BEGIN 0 #define SYNC_SNAPSHOT_SEQ_END 0x7FFFFFFF diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 880c76e4dd..e61bcc9ffc 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -112,7 +112,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; pMsg->lastConfig = pSender->lastConfig; pMsg->startTime = pSender->startTime; - pMsg->seq = SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT; + pMsg->seq = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT; // event log syncLogSendSyncSnapshotSend(pSender->pSyncNode, pMsg, "snapshot sender start"); @@ -379,7 +379,7 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p } pReceiver->start = true; - pReceiver->ack = SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT; + pReceiver->ack = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT; pReceiver->term = pReceiver->pSyncNode->raftStore.currentTerm; pReceiver->fromId = pPreMsg->srcId; pReceiver->startTime =
pPreMsg->startTime; @@ -527,7 +527,7 @@ SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths) { return snapStart; } -static int32_t syncNodeOnSnapshotPre(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { +static int32_t syncNodeOnSnapshotPrep(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { SSyncSnapshotReceiver *pReceiver = pSyncNode->pNewNodeReceiver; int64_t timeNow = taosGetTimestampMs(); int32_t code = 0; @@ -565,7 +565,7 @@ _START_RECEIVER: } else { // waiting for clock match while (timeNow < pMsg->startTime) { - sRInfo(pReceiver, "snapshot receiver pre waitting for true time, now:%" PRId64 ", stime:%" PRId64, timeNow, + sRInfo(pReceiver, "snapshot receiver pre waiting for true time, now:%" PRId64 ", startTime:%" PRId64, timeNow, pMsg->startTime); taosMsleep(10); timeNow = taosGetTimestampMs(); @@ -765,7 +765,7 @@ static int32_t syncNodeOnSnapshotEnd(SSyncNode *pSyncNode, SyncSnapshotSend *pMs // receiver on message // -// condition 1, recv SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT +// condition 1, recv SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT // if receiver already start // if sender.start-time > receiver.start-time, restart receiver(reply snapshot start) // if sender.start-time = receiver.start-time, maybe duplicate msg @@ -809,9 +809,9 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { int32_t code = 0; if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) { if (pMsg->term == pSyncNode->raftStore.currentTerm) { - if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT) { + if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq pre-snapshot"); - code = syncNodeOnSnapshotPre(pSyncNode, pMsg); + code = syncNodeOnSnapshotPrep(pSyncNode, pMsg); } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_BEGIN) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq begin"); code = syncNodeOnSnapshotBegin(pSyncNode, pMsg); @@ -848,7 +848,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { return code; } -static int32_t syncNodeOnSnapshotPreRsp(SSyncNode *pSyncNode, SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { +static int32_t syncNodeOnSnapshotPrepRsp(SSyncNode *pSyncNode, SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { SSnapshot snapshot = {0}; pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot); @@ -945,8 +945,8 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { if (pMsg->startTime != pSender->startTime) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "snapshot sender and receiver time not match"); - sSError(pSender, "sender:%" PRId64 " receiver:%" PRId64 " time not match, code:0x%x", pMsg->startTime, - pSender->startTime, pMsg->code); + sSError(pSender, "sender:%" PRId64 " receiver:%" PRId64 " time not match, error:%s 0x%x", pMsg->startTime, + pSender->startTime, tstrerror(pMsg->code), pMsg->code); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; goto _ERROR; } @@ -961,15 +961,15 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { if (pMsg->code != 0) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "receive error code"); - sSError(pSender, "snapshot sender receive error code:0x%x and stop sender", pMsg->code); + sSError(pSender, "snapshot sender receive error:%s 0x%x and stop sender", tstrerror(pMsg->code), pMsg->code); terrno = pMsg->code; goto _ERROR; } // prepare , send begin msg - if (pMsg->ack == SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT) { + if (pMsg->ack == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "process seq
pre-snapshot"); - return syncNodeOnSnapshotPreRsp(pSyncNode, pSender, pMsg); + return syncNodeOnSnapshotPrepRsp(pSyncNode, pSender, pMsg); } if (pSender->pReader == NULL || pSender->finish) { From 40b745932ce8b17f1381ae12231c5391b16b1b78 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 2 Feb 2023 19:39:08 +0800 Subject: [PATCH 42/57] fix: scalar expr memory leak issue --- source/libs/executor/src/timesliceoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 6561e810bb..bca87079ec 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -634,6 +634,7 @@ void destroyTimeSliceOperatorInfo(void* param) { taosMemoryFree(pKey->end.val); } taosArrayDestroy(pInfo->pLinearInfo); + cleanupExprSupp(&pInfo->scalarSup); taosMemoryFree(pInfo->pFillColInfo); taosMemoryFreeClear(param); From 6519ef904593ab7c3a1111919832f402b4d3a0f5 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Thu, 2 Feb 2023 15:39:30 +0800 Subject: [PATCH 43/57] ci:add stream ci --- tests/script/tsim/stream/udTableAndTag2.sim | 149 ++++++++++++++++++++ 1 file changed, 149 insertions(+) diff --git a/tests/script/tsim/stream/udTableAndTag2.sim b/tests/script/tsim/stream/udTableAndTag2.sim index 5dd2e3ae2b..bacc301ad0 100644 --- a/tests/script/tsim/stream/udTableAndTag2.sim +++ b/tests/script/tsim/stream/udTableAndTag2.sim @@ -361,6 +361,155 @@ if $data02 != NULL then goto loop8 endi + +print ===== step6 +print ===== table name + +sql create database result5 vgroups 1; + +sql create database test5 vgroups 1; +sql use test5; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams51 trigger at_once into result5.streamt51 SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams52 trigger at_once into result5.streamt52 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams53 trigger at_once into result5.streamt53 TAGS(dd varchar(100)) SUBTABLE(concat("aaa-", "1") ) as select _wstart, count(*) c1 from st interval(10s); + +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,2,2,3); + +$loop_count = 0 +loop9: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print select table_name from information_schema.ins_tables where db_name="result5" order by 1; + +sql select table_name from information_schema.ins_tables where db_name="result5" order by 1; + +if $rows != 3 then + print =====rows=$rows + print $data00 + print $data10 + print $data20 + print $data40 + goto loop9 +endi + +if $data00 != aaa then + print =====data00=$data00 + goto loop9 +endi + +if $data10 != aaa-1 then + print =====data00=$data00 + goto loop9 +endi + +$loop_count = 0 +loop10: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print sql select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt52" order by 1; + +sql select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt52" order by 1; + +if $rows != 1 then + print =====rows=$rows + print $data00 + print $data10 + goto loop10 +endi + +if $data00 != cc then + print =====data00=$data00 + goto loop10 +endi + +print sql 
select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt53" order by 1; + +sql select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt53" order by 1; + +if $rows != 1 then + print =====rows=$rows + print $data00 + print $data10 + goto loop10 +endi + +if $data00 != dd then + print =====data00=$data00 + goto loop10 +endi + + + + + +$loop_count = 0 +loop11: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result5.streamt51; + +if $rows != 1 then + print =====rows=$rows + print $data00 $data10 + goto loop11 +endi + +if $data01 != 2 then + print =====data01=$data01 + goto loop11 +endi + +sql select * from result5.streamt52; + +if $rows != 1 then + print =====rows=$rows + print $data00 $data10 + goto loop11 +endi + +if $data01 != 2 then + print =====data01=$data01 + goto loop11 +endi + +sql select * from result5.streamt53; + +if $rows != 1 then + print =====rows=$rows + print $data00 $data10 + goto loop11 +endi + +if $data01 != 2 then + print =====data01=$data01 + goto loop11 +endi + print ======over system sh/stop_dnodes.sh From 561ee87e9dd9a5ef2f1a6e9e3edba00b624b01d7 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Fri, 3 Feb 2023 09:22:00 +0800 Subject: [PATCH 44/57] enh: change the level of sync probe logging msg to trace --- source/libs/sync/src/syncPipeline.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 6cc517fda0..bb3bb0d6a4 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -830,7 +830,7 @@ int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy pMgr->endIndex = index + 1; SSyncLogBuffer* pBuf = pNode->pLogBuf; - sInfo("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". mgr (rs:%d): [%" PRId64 + sTrace("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". 
mgr (rs:%d): [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); From d83a94bb195bcbd6e0c68ebcbb65cb511b7672f6 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Fri, 3 Feb 2023 09:48:50 +0800 Subject: [PATCH 45/57] fix: check dup rebalance --- source/dnode/mnode/impl/src/mndConsumer.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 3bbf4a4279..1aa2fa997b 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -836,10 +836,13 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, char *addedTopic = strdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0)); // not exist in current topic + bool existing = false; #if 1 for (int32_t i = 0; i < taosArrayGetSize(pOldConsumer->currentTopics); i++) { char *topic = taosArrayGetP(pOldConsumer->currentTopics, i); - ASSERT(strcmp(topic, addedTopic) != 0); + if (strcmp(topic, addedTopic) == 0) { + existing = true; + } } #endif @@ -854,8 +857,10 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, } // add to current topic - taosArrayPush(pOldConsumer->currentTopics, &addedTopic); - taosArraySort(pOldConsumer->currentTopics, taosArrayCompareString); + if (!existing) { + taosArrayPush(pOldConsumer->currentTopics, &addedTopic); + taosArraySort(pOldConsumer->currentTopics, taosArrayCompareString); + } // set status if (taosArrayGetSize(pOldConsumer->rebNewTopics) == 0 && taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0) { From 49631f6e9647617ef13c230d7e3f31d9b7c27a18 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 3 Feb 2023 14:34:55 +0800 Subject: [PATCH 46/57] fix: snapshot coredump caused by type --- source/dnode/vnode/src/tsdb/tsdbSnapshot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 98c9c0fdda..e8181f922f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -1205,7 +1205,7 @@ static int32_t tsdbSnapWriteTableRow(STsdbSnapWriter* pWriter, TSDBROW* pRow) { TSDB_CHECK_CODE(code, lino, _exit); } - tMapDataPutItem(&pWriter->pDIter->dIter.mDataBlk, &dataBlk, tPutDataBlk); + tMapDataPutItem(&pWriter->mDataBlk, &dataBlk, tPutDataBlk); pWriter->pDIter->dIter.iDataBlk++; } else { code = tsdbReadDataBlockEx(pWriter->pDataFReader, &dataBlk, &pWriter->pDIter->dIter.bData); From 9ca44cd5cd2e798a72f36b94e03ec9f0d42f4383 Mon Sep 17 00:00:00 2001 From: cadem Date: Fri, 3 Feb 2023 16:08:16 +0800 Subject: [PATCH 47/57] docs:fix docs table format --- docs/en/14-reference/12-config/index.md | 12 ++---------- docs/zh/14-reference/12-config/index.md | 12 ++---------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index 9e56a0b0bf..9d0c7de033 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -162,11 +162,7 @@ The parameters described in this document by the effect that they have on the sy | Meaning | Execution policy for query statements | | Unit | None | | Default | 1 | -| Value Range | 1: Run queries 
on vnodes and not on qnodes - -2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes. - -3: Only run scan operators on vnodes; run all other operators on qnodes. | +| Value Range | 1: Run queries on vnodes and not on qnodes
2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.
3: Only run scan operators on vnodes; run all other operators on qnodes. | ### querySmaOptimize @@ -176,11 +172,7 @@ The parameters described in this document by the effect that they have on the sy | Meaning | SMA index optimization policy | | Unit | None | | Default Value | 0 | -| Notes | - -0: Disable SMA indexing and perform all queries on non-indexed data. - -1: Enable SMA indexing and perform queries from suitable statements on precomputation results.| +| Notes |0: Disable SMA indexing and perform all queries on non-indexed data.
1: Enable SMA indexing and perform queries from suitable statements on precomputation results.| ### countAlwaysReturnValue diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index 503f692764..6cef0059b4 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -162,11 +162,7 @@ taos --dump-config | 含义 | 查询语句的执行策略 | | 单位 | 无 | | 缺省值 | 1 | -| 补充说明 | 1: 只使用 vnode,不使用 qnode | - -2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行 - -3: vnode 只运行扫描算子,其余算子均在 qnode 执行 | +| 补充说明 | 1: 只使用 vnode,不使用 qnode
2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行
3: vnode 只运行扫描算子,其余算子均在 qnode 执行 | ### querySmaOptimize @@ -176,11 +172,7 @@ taos --dump-config | 含义 | sma index 的优化策略 | | 单位 | 无 | | 缺省值 | 0 | -| 补充说明 | - -0: 表示不使用 sma index,永远从原始数据进行查询 - -1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询 | +| 补充说明 |0: 表示不使用 sma index,永远从原始数据进行查询
1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询 | ### maxNumOfDistinctRes From ff31b94c874f01163970868552a6c10815d1451d Mon Sep 17 00:00:00 2001 From: cadem Date: Fri, 3 Feb 2023 16:18:56 +0800 Subject: [PATCH 48/57] docs:fix documents confusing wording --- docs/zh/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index 503f692764..560b9b86ca 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -389,7 +389,7 @@ charset 的有效值是 UTF-8。 | 属性 | 说明 | | -------- | -------------------------------------------- | | 适用范围 | 服务端和客户端均适用 | -| 含义 | 当日志文件夹的磁盘大小小于该值时,停止写日志 | +| 含义 | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志 | | 单位 | GB | | 缺省值 | 1.0 | From c21ab3b345b92ceb2852648e86f7663342500c85 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Fri, 3 Feb 2023 16:54:45 +0800 Subject: [PATCH 49/57] Update index.md --- docs/en/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index 9d0c7de033..48864636b0 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -162,7 +162,7 @@ The parameters described in this document by the effect that they have on the sy | Meaning | Execution policy for query statements | | Unit | None | | Default | 1 | -| Value Range | 1: Run queries on vnodes and not on qnodes
2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.
3: Only run scan operators on vnodes; run all other operators on qnodes. | +| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. | ### querySmaOptimize From 595e87cddf23cf31449f9b5ec7debd7e2d6ff5a3 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Fri, 3 Feb 2023 16:55:59 +0800 Subject: [PATCH 50/57] Update index.md --- docs/zh/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index e751959421..c7a26d8051 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -162,7 +162,7 @@ taos --dump-config | 含义 | 查询语句的执行策略 | | 单位 | 无 | | 缺省值 | 1 | -| 补充说明 | 1: 只使用 vnode,不使用 qnode
2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行
3: vnode 只运行扫描算子,其余算子均在 qnode 执行 | +| 补充说明 | 1: 只使用 vnode,不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 | ### querySmaOptimize From 8292bb838eef8e146bc8f9ef1c33a33908c96568 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Fri, 3 Feb 2023 17:13:51 +0800 Subject: [PATCH 51/57] Update index.md --- docs/zh/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index c7a26d8051..28b409de30 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -172,7 +172,7 @@ taos --dump-config | 含义 | sma index 的优化策略 | | 单位 | 无 | | 缺省值 | 0 | -| 补充说明 |0: 表示不使用 sma index,永远从原始数据进行查询
1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询 | +| 补充说明 |0: 表示不使用 sma index,永远从原始数据进行查询; 1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询 | ### maxNumOfDistinctRes From 889b7ea872bcc1afd98be71353b52fd3f16f2fbe Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Fri, 3 Feb 2023 17:14:18 +0800 Subject: [PATCH 52/57] Update index.md --- docs/en/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index 48864636b0..e6fd6dbe6f 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -172,7 +172,7 @@ The parameters described in this document by the effect that they have on the sy | Meaning | SMA index optimization policy | | Unit | None | | Default Value | 0 | -| Notes |0: Disable SMA indexing and perform all queries on non-indexed data.
1: Enable SMA indexing and perform queries from suitable statements on precomputation results.| +| Notes |0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results.| ### countAlwaysReturnValue From 5108e454cd96c434099b2f1fc027f96b78097162 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Fri, 3 Feb 2023 17:44:47 +0800 Subject: [PATCH 53/57] Update 02-database.md Use the same database name "db_name" in all the examples; "SHOW CREATE DATABASE db_name" should be run with \G, otherwise the output is not complete. --- docs/zh/12-taos-sql/02-database.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md index fc35da8636..b0a84af6dc 100644 --- a/docs/zh/12-taos-sql/02-database.md +++ b/docs/zh/12-taos-sql/02-database.md @@ -142,7 +142,7 @@ SHOW DATABASES; ### 显示一个数据库的创建语句 ``` -SHOW CREATE DATABASE db_name; +SHOW CREATE DATABASE db_name \G; ``` 常用于数据库迁移。对一个已经存在的数据库,返回其创建语句;在另一个集群中执行该语句,就能得到一个设置完全相同的 Database。 @@ -150,7 +150,7 @@ SHOW CREATE DATABASE db_name; ### 查看数据库参数 ```sql -SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='DBNAME' \G; +SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='db_name' \G; ``` 会列出指定数据库的配置参数,并且每行只显示一个参数。 @@ -177,4 +177,4 @@ REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3 BALANCE VGROUP ``` -自动调整集群所有vgroup中的vnode分布,相当于在vnode级别对集群进行数据的负载均衡操作。 \ No newline at end of file +自动调整集群所有vgroup中的vnode分布,相当于在vnode级别对集群进行数据的负载均衡操作。 From 4a87a58c655e13f07776778148913a7c64dd7c35 Mon Sep 17 00:00:00 2001 From: xiaolei li <85657333+xleili@users.noreply.github.com> Date: Sat, 4 Feb 2023 02:10:47 +0800 Subject: [PATCH 54/57] docs(node):fix node-rest example failed (#19797) --- docs/examples/node/restexample/connect.js | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/examples/node/restexample/connect.js b/docs/examples/node/restexample/connect.js index bb027d4fe8..f36472b4c0 100644 --- a/docs/examples/node/restexample/connect.js +++ b/docs/examples/node/restexample/connect.js @@ -3,18 +3,14 @@ const { options, connect } = require("@tdengine/rest"); async function test() { options.path = "/rest/sql"; options.host = "localhost"; + options.port = 6041; let conn = connect(options); let cursor = conn.cursor(); try { let res = await cursor.query("SELECT server_version()"); - res.toString(); + console.log("res.getResult()",res.getResult()); } catch (err) { console.log(err); } } test(); - -// output: -// server_version() | -// =================== -// 3.0.0.0 | From d0dc6dc5ef668d0f10116ea83d0c9bd27f803fd0 Mon Sep 17 00:00:00 2001 From: xiaolei li <85657333+xleili@users.noreply.github.com> Date: Sat, 4 Feb 2023 13:27:10 +0800 Subject: [PATCH 55/57] docs(driver):update c# examples' .NET to 6.0 (#19795) --- docs/examples/csharp/wsConnect/wsConnect.csproj | 2 +- docs/examples/csharp/wsInsert/wsInsert.csproj | 2 +- docs/examples/csharp/wsQuery/wsQuery.csproj | 2 +- docs/examples/csharp/wsStmt/wsStmt.csproj | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj index 6d78be6e7a..c7988b6e9c 100644 --- a/docs/examples/csharp/wsConnect/wsConnect.csproj +++ b/docs/examples/csharp/wsConnect/wsConnect.csproj @@ -2,7 +2,7 @@ Exe - net5.0 + net6.0 diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj 
b/docs/examples/csharp/wsInsert/wsInsert.csproj index 95bfbdea3d..5aa419b2c8 100644 --- a/docs/examples/csharp/wsInsert/wsInsert.csproj +++ b/docs/examples/csharp/wsInsert/wsInsert.csproj @@ -2,7 +2,7 @@ Exe - net5.0 + net6.0 enable diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj index e5c2cf767c..bcc7c19a59 100644 --- a/docs/examples/csharp/wsQuery/wsQuery.csproj +++ b/docs/examples/csharp/wsQuery/wsQuery.csproj @@ -2,7 +2,7 @@ Exe - net5.0 + net6.0 enable diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj index e5c2cf767c..bcc7c19a59 100644 --- a/docs/examples/csharp/wsStmt/wsStmt.csproj +++ b/docs/examples/csharp/wsStmt/wsStmt.csproj @@ -2,7 +2,7 @@ Exe - net5.0 + net6.0 enable From 44cb5ae7d525563f0dcc2404771200787622d2f9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 4 Feb 2023 22:36:19 +0800 Subject: [PATCH 56/57] fix: taosbenchmark exist stb (#19801) * fix: taosbenchmark reuse exist stb * fix: update taos-tools dd8d99f * fix: update taos-tools 3eb18bc * fix: update taos-tools bff4e31 * fix: update taos-tools ad47fef * fix: update taos-tools f177a39 * fix: update taos-tools 19e725f * fix: update taos-tools 6a2d9fc --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 4da2d80fbb..37dd448b87 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG c4a567b + GIT_TAG 6a2d9fc SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 7a110083162c809f45bb21033e12194e0fb89f05 Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Mon, 6 Feb 2023 00:07:16 +0800 Subject: [PATCH 57/57] doc: add descriptions to all docs --- docs/en/01-index.md | 1 + docs/en/02-intro/index.md | 1 + docs/en/04-concept/index.md | 1 + docs/en/05-get-started/01-docker.md | 3 ++- docs/en/05-get-started/03-package.md | 3 ++- docs/en/05-get-started/index.md | 2 +- docs/en/07-develop/01-connect/index.md | 4 ++-- docs/en/07-develop/02-model/index.mdx | 1 + docs/en/07-develop/03-insert-data/01-sql-writing.mdx | 1 + docs/en/07-develop/03-insert-data/20-kafka-writting.mdx | 1 + docs/en/07-develop/03-insert-data/30-influxdb-line.mdx | 3 ++- docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx | 3 ++- docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx | 3 ++- docs/en/07-develop/03-insert-data/60-high-volume.md | 3 ++- docs/en/07-develop/03-insert-data/index.md | 1 + docs/en/07-develop/04-query-data/index.mdx | 2 +- docs/en/07-develop/06-stream.md | 4 ++-- docs/en/07-develop/08-cache.md | 4 ++-- docs/en/07-develop/09-udf.md | 4 ++-- docs/en/07-develop/index.md | 1 + docs/en/10-deployment/01-deploy.md | 3 ++- docs/en/10-deployment/03-k8s.md | 3 ++- docs/en/10-deployment/05-helm.md | 3 ++- docs/en/10-deployment/index.md | 1 + docs/en/12-taos-sql/01-data-type.md | 4 ++-- docs/en/12-taos-sql/02-database.md | 4 ++-- docs/en/12-taos-sql/03-table.md | 1 + docs/en/12-taos-sql/04-stable.md | 3 ++- docs/en/12-taos-sql/05-insert.md | 3 ++- docs/en/12-taos-sql/06-select.md | 3 ++- docs/en/12-taos-sql/08-delete-data.mdx | 4 ++-- docs/en/12-taos-sql/10-function.md | 3 ++- docs/en/12-taos-sql/12-distinguished.md | 3 ++- docs/en/12-taos-sql/13-tmq.md | 3 ++- 
docs/en/12-taos-sql/14-stream.md | 3 ++- docs/en/12-taos-sql/16-operators.md | 3 ++- docs/en/12-taos-sql/17-json.md | 3 ++- docs/en/12-taos-sql/18-escape.md | 1 + docs/en/12-taos-sql/19-limit.md | 3 ++- docs/en/12-taos-sql/20-keywords.md | 3 ++- docs/en/12-taos-sql/21-node.md | 3 ++- docs/en/12-taos-sql/22-meta.md | 3 ++- docs/en/12-taos-sql/23-perf.md | 3 ++- docs/en/12-taos-sql/24-show.md | 3 ++- docs/en/12-taos-sql/25-grant.md | 4 ++-- docs/en/12-taos-sql/26-udf.md | 3 ++- docs/en/12-taos-sql/27-index.md | 5 +++-- docs/en/12-taos-sql/28-recovery.md | 3 ++- docs/en/12-taos-sql/29-changes.md | 4 ++-- docs/en/12-taos-sql/index.md | 2 +- docs/en/13-operation/01-pkg-install.md | 2 +- docs/en/13-operation/02-planning.mdx | 3 ++- docs/en/13-operation/03-tolerance.md | 1 + docs/en/13-operation/07-import.md | 1 + docs/en/13-operation/08-export.md | 1 + docs/en/13-operation/10-monitor.md | 1 + docs/en/13-operation/17-diagnose.md | 1 + docs/en/13-operation/index.md | 1 + docs/en/14-reference/02-rest-api/02-rest-api.mdx | 1 + docs/en/14-reference/03-connector/03-cpp.mdx | 3 ++- docs/en/14-reference/03-connector/04-java.mdx | 6 +++--- docs/en/14-reference/03-connector/05-go.mdx | 5 +++-- docs/en/14-reference/03-connector/06-rust.mdx | 5 +++-- docs/en/14-reference/03-connector/07-python.mdx | 4 ++-- docs/en/14-reference/03-connector/08-node.mdx | 5 +++-- docs/en/14-reference/03-connector/09-csharp.mdx | 5 +++-- docs/en/14-reference/03-connector/10-php.mdx | 3 ++- docs/en/14-reference/03-connector/index.mdx | 1 + docs/en/14-reference/04-taosadapter.md | 6 +++--- docs/en/14-reference/05-taosbenchmark.md | 2 +- docs/en/14-reference/06-taosdump.md | 2 +- docs/en/14-reference/07-tdinsight/index.md | 1 + docs/en/14-reference/08-taos-shell.md | 2 +- docs/en/14-reference/09-support-platform/index.md | 2 +- docs/en/14-reference/11-docker/index.md | 2 +- docs/en/14-reference/12-config/index.md | 2 +- docs/en/14-reference/12-directory.md | 2 +- docs/en/14-reference/13-schemaless/13-schemaless.md | 2 +- docs/en/14-reference/14-taosKeeper.md | 2 +- docs/en/14-reference/index.md | 1 + docs/en/20-third-party/01-grafana.mdx | 3 ++- docs/en/20-third-party/02-prometheus.md | 3 ++- docs/en/20-third-party/03-telegraf.md | 3 ++- docs/en/20-third-party/05-collectd.md | 3 ++- docs/en/20-third-party/06-statsd.md | 3 ++- docs/en/20-third-party/07-icinga2.md | 3 ++- docs/en/20-third-party/08-tcollector.md | 3 ++- docs/en/20-third-party/09-emq-broker.md | 3 ++- docs/en/20-third-party/10-hive-mq-broker.md | 3 ++- docs/en/20-third-party/11-kafka.md | 3 ++- docs/en/20-third-party/12-google-data-studio.md | 3 ++- docs/en/20-third-party/13-Jupyter.md | 3 ++- docs/en/20-third-party/index.md | 1 + docs/en/21-tdinternal/01-arch.md | 3 ++- docs/en/21-tdinternal/03-high-availability.md | 3 ++- docs/en/21-tdinternal/04-load-balance.md | 3 ++- docs/en/21-tdinternal/index.md | 1 + docs/en/25-application/01-telegraf.md | 3 ++- docs/en/25-application/02-collectd.md | 3 ++- docs/en/25-application/03-immigrate.md | 3 ++- docs/en/25-application/index.md | 1 + docs/en/27-train-faq/01-faq.md | 1 + docs/en/27-train-faq/index.md | 1 + docs/en/28-releases/01-tdengine.md | 4 ++-- docs/en/28-releases/02-tools.md | 4 ++-- docs/en/28-releases/index.md | 1 + 106 files changed, 179 insertions(+), 100 deletions(-) diff --git a/docs/en/01-index.md b/docs/en/01-index.md index 13552ea9dc..296cd96898 100644 --- a/docs/en/01-index.md +++ b/docs/en/01-index.md @@ -1,6 +1,7 @@ --- title: TDengine Documentation sidebar_label: Documentation Home +description: This 
website contains the user manuals for TDengine, an open-source, cloud-native time-series database optimized for IoT, Connected Cars, and Industrial IoT. slug: / --- diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md index a60bfab2cc..95dd4324f1 100644 --- a/docs/en/02-intro/index.md +++ b/docs/en/02-intro/index.md @@ -1,5 +1,6 @@ --- title: Introduction +description: This document introduces the major features, competitive advantages, typical use cases, and benchmarks of TDengine. toc_max_heading_level: 2 --- diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md index 0b1b226c17..771a061c31 100644 --- a/docs/en/04-concept/index.md +++ b/docs/en/04-concept/index.md @@ -1,5 +1,6 @@ --- title: Concepts +description: This document describes the basic concepts of TDengine, including the supertable. --- In order to explain the basic concepts and provide some sample code, the TDengine documentation smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase; 2. There are multiple smart meters; 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table: diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index ac273daba4..42e6861674 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -1,6 +1,7 @@ --- -sidebar_label: Docker title: Quick Install on Docker +sidebar_label: Docker +description: This document describes how to install TDengine in a Docker container and perform queries and inserts. --- This document describes how to install TDengine in a Docker container and perform queries and inserts. diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md index 54d2e046c2..a0c1d93983 100644 --- a/docs/en/05-get-started/03-package.md +++ b/docs/en/05-get-started/03-package.md @@ -1,6 +1,7 @@ --- -sidebar_label: Package title: Quick Install from Package +sidebar_label: Package +description: This document describes how to install TDengine on Linux, Windows, and macOS and perform queries and inserts. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md index 12cfa22c69..66573a89cd 100644 --- a/docs/en/05-get-started/index.md +++ b/docs/en/05-get-started/index.md @@ -1,6 +1,6 @@ --- title: Get Started -description: This article describes how to install TDengine and test its performance. +description: This document describes how to install TDengine on various platforms. --- import GitHubSVG from './github.svg' diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md index 45bbaa2751..913c24f189 100644 --- a/docs/en/07-develop/01-connect/index.md +++ b/docs/en/07-develop/01-connect/index.md @@ -1,7 +1,7 @@ --- -sidebar_label: Connect title: Connect to TDengine -description: "How to establish connections to TDengine and how to install and use TDengine connectors." +sidebar_label: Connect +description: This document describes how to establish connections to TDengine and how to install and use TDengine connectors. 
--- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/02-model/index.mdx b/docs/en/07-develop/02-model/index.mdx index 19a239805f..db5a259cfe 100644 --- a/docs/en/07-develop/02-model/index.mdx +++ b/docs/en/07-develop/02-model/index.mdx @@ -1,5 +1,6 @@ --- title: Data Model +description: This document describes the data model of TDengine. --- The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx index f2168645ff..32ba53a0cb 100644 --- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx @@ -1,5 +1,6 @@ --- title: Insert Using SQL +description: This document describes how to insert data into TDengine using SQL. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx b/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx index ffb969a8a6..89ca10b669 100644 --- a/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx +++ b/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx @@ -1,5 +1,6 @@ --- title: Write from Kafka +description: This document describes how to insert data into TDengine using Kafka. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx index fc5644850c..c559883d26 100644 --- a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx +++ b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx @@ -1,6 +1,7 @@ --- -sidebar_label: InfluxDB Line Protocol title: InfluxDB Line Protocol +sidebar_label: InfluxDB Line Protocol +description: This document describes how to insert data into TDengine using the InfluxDB Line Protocol. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx b/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx index 5d3f25dca9..30bc3b87bf 100644 --- a/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx +++ b/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx @@ -1,6 +1,7 @@ --- -sidebar_label: OpenTSDB Line Protocol title: OpenTSDB Line Protocol +sidebar_label: OpenTSDB Line Protocol +description: This document describes how to insert data into TDengine using the OpenTSDB Line Protocol. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx index 214580c179..e9db130241 100644 --- a/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx +++ b/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx @@ -1,6 +1,7 @@ --- -sidebar_label: OpenTSDB JSON Protocol title: OpenTSDB JSON Protocol +sidebar_label: OpenTSDB JSON Protocol +description: This document describes how to insert data into TDengine using the OpenTSDB JSON protocol. 
--- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/03-insert-data/60-high-volume.md b/docs/en/07-develop/03-insert-data/60-high-volume.md index 272a138813..d5afa8ef6e 100644 --- a/docs/en/07-develop/03-insert-data/60-high-volume.md +++ b/docs/en/07-develop/03-insert-data/60-high-volume.md @@ -1,6 +1,7 @@ --- -sidebar_label: High Performance Writing title: High Performance Writing +sidebar_label: High Performance Writing +description: This document describes how to achieve high performance when writing data into TDengine. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/03-insert-data/index.md b/docs/en/07-develop/03-insert-data/index.md index 1a71e719a5..15f8f4ee9e 100644 --- a/docs/en/07-develop/03-insert-data/index.md +++ b/docs/en/07-develop/03-insert-data/index.md @@ -1,5 +1,6 @@ --- title: Insert Data +description: This document describes how to insert data into TDengine. --- TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted. diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx index 38dc98d1ff..7e167bb4f3 100644 --- a/docs/en/07-develop/04-query-data/index.mdx +++ b/docs/en/07-develop/04-query-data/index.mdx @@ -1,6 +1,6 @@ --- title: Query Data -description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors." +description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using connectors. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/07-develop/06-stream.md b/docs/en/07-develop/06-stream.md index 36f903ee9a..125173e60b 100644 --- a/docs/en/07-develop/06-stream.md +++ b/docs/en/07-develop/06-stream.md @@ -1,7 +1,7 @@ --- -sidebar_label: Stream Processing -description: "The TDengine stream processing engine combines data inserts, preprocessing, analytics, real-time computation, and alerting into a single component." title: Stream Processing +sidebar_label: Stream Processing +description: This document describes the stream processing component of TDengine. --- Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. In a traditional time-series solution, this generally requires the deployment of stream processing systems such as Kafka or Flink. However, the complexity of such systems increases the cost of development and maintenance. diff --git a/docs/en/07-develop/08-cache.md b/docs/en/07-develop/08-cache.md index 82a4787016..6a6ca3e594 100644 --- a/docs/en/07-develop/08-cache.md +++ b/docs/en/07-develop/08-cache.md @@ -1,7 +1,7 @@ --- -sidebar_label: Caching title: Caching -description: "This document describes the caching component of TDengine." +sidebar_label: Caching +description: This document describes the caching component of TDengine. 
--- TDengine uses various kinds of caching techniques to efficiently write and query data. This document describes the caching component of TDengine. diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 699b3ebe5f..553a7b932b 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -1,7 +1,7 @@ --- -sidebar_label: UDF title: User-Defined Functions (UDF) -description: "You can define your own scalar and aggregate functions to expand the query capabilities of TDengine." +sidebar_label: UDF +description: This document describes how to create user-defined functions (UDF), your own scalar and aggregate functions that can expand the query capabilities of TDengine. --- The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input. diff --git a/docs/en/07-develop/index.md b/docs/en/07-develop/index.md index 34649d32a2..8f80b82b97 100644 --- a/docs/en/07-develop/index.md +++ b/docs/en/07-develop/index.md @@ -1,5 +1,6 @@ --- title: Developer Guide +description: This document describes how to use the various components of TDengine from a developer's perspective. --- Before creating an application to process time-series data with TDengine, consider the following: diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md index 5dfcd3108d..da00e21a7e 100644 --- a/docs/en/10-deployment/01-deploy.md +++ b/docs/en/10-deployment/01-deploy.md @@ -1,6 +1,7 @@ --- -sidebar_label: Manual Deployment title: Manual Deployment and Management +sidebar_label: Manual Deployment +description: This document describes how to deploy TDengine on a server. --- ## Prerequisites diff --git a/docs/en/10-deployment/03-k8s.md b/docs/en/10-deployment/03-k8s.md index b0aa677713..49e61caafc 100644 --- a/docs/en/10-deployment/03-k8s.md +++ b/docs/en/10-deployment/03-k8s.md @@ -1,6 +1,7 @@ --- -sidebar_label: Kubernetes title: Deploying a TDengine Cluster in Kubernetes +sidebar_label: Kubernetes +description: This document describes how to deploy TDengine on Kubernetes. --- TDengine is a cloud-native time-series database that can be deployed on Kubernetes. This document gives a step-by-step description of how you can use YAML files to create a TDengine cluster and introduces common operations for TDengine in a Kubernetes environment. diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md index 90baa5f445..aa61717669 100644 --- a/docs/en/10-deployment/05-helm.md +++ b/docs/en/10-deployment/05-helm.md @@ -1,6 +1,7 @@ --- -sidebar_label: Helm title: Use Helm to deploy TDengine +sidebar_label: Helm +description: This document describes how to deploy TDengine on Kubernetes by using Helm. --- Helm is a package manager for Kubernetes that can provide more capabilities in deploying on Kubernetes. diff --git a/docs/en/10-deployment/index.md b/docs/en/10-deployment/index.md index 7054a33e4a..865fbc2da5 100644 --- a/docs/en/10-deployment/index.md +++ b/docs/en/10-deployment/index.md @@ -1,5 +1,6 @@ --- title: Deployment +description: This document describes how to deploy a TDengine cluster on a server, on Kubernetes, and by using Helm. --- TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. 
If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source. diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md index 60046629a4..641fd3cbb7 100644 --- a/docs/en/12-taos-sql/01-data-type.md +++ b/docs/en/12-taos-sql/01-data-type.md @@ -1,7 +1,7 @@ --- -sidebar_label: Data Types title: Data Types -description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.' +sidebar_label: Data Types +description: This document describes the data types that TDengine supports. --- ## Timestamp diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index 059f124ea5..280d72697c 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -1,7 +1,7 @@ --- -sidebar_label: Database title: Database -description: "create and drop database, show or change database parameters" +sidebar_label: Database +description: This document describes how to create and perform operations on databases. --- ## Create a Database diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md index 6d12b8c730..f61d1f5147 100644 --- a/docs/en/12-taos-sql/03-table.md +++ b/docs/en/12-taos-sql/03-table.md @@ -1,5 +1,6 @@ --- title: Table +description: This document describes how to create and perform operations on standard tables and subtables. --- ## Create Table diff --git a/docs/en/12-taos-sql/04-stable.md b/docs/en/12-taos-sql/04-stable.md index 8a7c713f8c..5b316d0d24 100644 --- a/docs/en/12-taos-sql/04-stable.md +++ b/docs/en/12-taos-sql/04-stable.md @@ -1,6 +1,7 @@ --- -sidebar_label: Supertable title: Supertable +sidebar_label: Supertable +description: This document describes how to create and perform operations on supertables. --- ## Create a Supertable diff --git a/docs/en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md index ca3154c25e..c22357abfa 100644 --- a/docs/en/12-taos-sql/05-insert.md +++ b/docs/en/12-taos-sql/05-insert.md @@ -1,6 +1,7 @@ --- -sidebar_label: Insert title: Insert +sidebar_label: Insert +description: This document describes how to insert data into TDengine. --- ## Syntax diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md index ee06a7be2d..183ab58726 100644 --- a/docs/en/12-taos-sql/06-select.md +++ b/docs/en/12-taos-sql/06-select.md @@ -1,6 +1,7 @@ --- -sidebar_label: Select title: Select +sidebar_label: Select +description: This document describes how to query data in TDengine. --- ## Syntax diff --git a/docs/en/12-taos-sql/08-delete-data.mdx b/docs/en/12-taos-sql/08-delete-data.mdx index 999c467ad0..f91a89a7eb 100644 --- a/docs/en/12-taos-sql/08-delete-data.mdx +++ b/docs/en/12-taos-sql/08-delete-data.mdx @@ -1,7 +1,7 @@ --- -sidebar_label: Delete Data -description: "Delete data from table or Stable" title: Delete Data +sidebar_label: Delete Data +description: This document describes how to delete data from TDengine. --- TDengine provides the functionality of deleting data from a table or STable according to a specified time range; it can be used to clean up abnormal data generated due to device failure.
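The hunks in this patch all normalize the Docusaurus front matter the same way: `title` first, `sidebar_label` next where the page has one, and an unquoted, one-sentence `description` beginning with "This document describes" last. As a reference, a minimal sketch of the resulting front matter for a hypothetical page (the page name and field values below are illustrative only, not taken from any file in this patch) looks like this:

```yaml
---
title: Example Feature             # full page title, always the first field
sidebar_label: Example Feature     # short label shown in the sidebar navigation
description: This document describes the Example Feature component of TDengine.
---
```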
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index 802eb259bf..b2d44f1a1d 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -1,6 +1,7 @@ --- -sidebar_label: Functions title: Functions +sidebar_label: Functions +description: This document describes the standard SQL functions available in TDengine. toc_max_heading_level: 4 --- diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index 0763e85a53..536fd8ffc3 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -1,6 +1,7 @@ --- -sidebar_label: Time-Series Extensions title: Time-Series Extensions +sidebar_label: Time-Series Extensions +description: This document describes the extended functions specific to time-series data processing available in TDengine. --- As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL. diff --git a/docs/en/12-taos-sql/13-tmq.md b/docs/en/12-taos-sql/13-tmq.md index befab4f4f0..1a805c76fb 100644 --- a/docs/en/12-taos-sql/13-tmq.md +++ b/docs/en/12-taos-sql/13-tmq.md @@ -1,6 +1,7 @@ --- -sidebar_label: Data Subscription title: Data Subscription +sidebar_label: Data Subscription +description: This document describes the SQL statements related to the data subscription component of TDengine. --- The information in this document is related to the TDengine data subscription feature. diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md index e70e962668..b8f6c3a163 100644 --- a/docs/en/12-taos-sql/14-stream.md +++ b/docs/en/12-taos-sql/14-stream.md @@ -1,6 +1,7 @@ --- -sidebar_label: Stream Processing title: Stream Processing +sidebar_label: Stream Processing +description: This document describes the SQL statements related to the stream processing component of TDengine. --- Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. Stream processing components like Kafka, Flink, and Spark are often deployed alongside a time-series database to handle these operations, increasing system complexity and maintenance costs. diff --git a/docs/en/12-taos-sql/16-operators.md b/docs/en/12-taos-sql/16-operators.md index c426e28793..32ad4e7075 100644 --- a/docs/en/12-taos-sql/16-operators.md +++ b/docs/en/12-taos-sql/16-operators.md @@ -1,6 +1,7 @@ --- -sidebar_label: Operators title: Operators +sidebar_label: Operators +description: This document describes the SQL operators available in TDengine. --- ## Arithmetic Operators diff --git a/docs/en/12-taos-sql/17-json.md b/docs/en/12-taos-sql/17-json.md index 77f7743033..b2494e0cc1 100644 --- a/docs/en/12-taos-sql/17-json.md +++ b/docs/en/12-taos-sql/17-json.md @@ -1,6 +1,7 @@ --- -sidebar_label: JSON Type title: JSON Type +sidebar_label: JSON Type +description: This document describes the JSON data type in TDengine. --- diff --git a/docs/en/12-taos-sql/18-escape.md b/docs/en/12-taos-sql/18-escape.md index a2ae40de98..85e4610e44 100644 --- a/docs/en/12-taos-sql/18-escape.md +++ b/docs/en/12-taos-sql/18-escape.md @@ -1,5 +1,6 @@ --- title: Escape Characters +description: This document describes the usage of escape characters in TDengine. 
--- ## Escape Characters diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md index f00ec90f57..654fae7560 100644 --- a/docs/en/12-taos-sql/19-limit.md +++ b/docs/en/12-taos-sql/19-limit.md @@ -1,6 +1,7 @@ --- -sidebar_label: Name and Size Limits title: Name and Size Limits +sidebar_label: Name and Size Limits +description: This document describes the name and size limits in TDengine. --- ## Naming Rules diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md index 23f85947e3..a2191c87ee 100644 --- a/docs/en/12-taos-sql/20-keywords.md +++ b/docs/en/12-taos-sql/20-keywords.md @@ -1,6 +1,7 @@ --- -sidebar_label: Reserved Keywords title: Reserved Keywords +sidebar_label: Reserved Keywords +description: This document describes the reserved keywords in TDengine that cannot be used in object names. --- ## Keyword List diff --git a/docs/en/12-taos-sql/21-node.md b/docs/en/12-taos-sql/21-node.md index a0d49ab208..8a5069e66f 100644 --- a/docs/en/12-taos-sql/21-node.md +++ b/docs/en/12-taos-sql/21-node.md @@ -1,6 +1,7 @@ --- -sidebar_label: Cluster title: Cluster +sidebar_label: Cluster +description: This document describes the SQL statements related to cluster management in TDengine. --- The physical entities that form TDengine clusters are known as data nodes (dnodes). Each dnode is a process running on the operating system of the physical machine. Dnodes can contain virtual nodes (vnodes), which store time-series data. Virtual nodes are formed into vgroups, which have 1 or 3 vnodes depending on the replica setting. If you want to enable replication on your cluster, it must contain at least three nodes. Dnodes can also contain management nodes (mnodes). Each cluster has up to three mnodes. Finally, dnodes can contain query nodes (qnodes), which compute time-series data, thus separating compute from storage. A single dnode can contain a vnode, qnode, and mnode. diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md index 1cd759742a..d2bc72f047 100644 --- a/docs/en/12-taos-sql/22-meta.md +++ b/docs/en/12-taos-sql/22-meta.md @@ -1,6 +1,7 @@ --- -sidebar_label: Metadata title: Information_Schema Database +sidebar_label: Metadata +description: This document describes how to use the INFORMATION_SCHEMA database in TDengine. --- TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata, system information, and status information. This information includes database names, table names, and currently running SQL statements. All information related to TDengine maintenance is stored in this database. It contains several read-only tables. These tables are more accurately described as views, and they do not correspond to specific files. You can query these tables but cannot write data to them. The INFORMATION_SCHEMA database is intended to provide a unified method for SHOW commands to access data. However, using SELECT ... FROM INFORMATION_SCHEMA.tablename offers several advantages over SHOW commands: diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md index 29cf3af6ab..fc369ec663 100644 --- a/docs/en/12-taos-sql/23-perf.md +++ b/docs/en/12-taos-sql/23-perf.md @@ -1,6 +1,7 @@ --- -sidebar_label: Statistics title: Performance_Schema Database +sidebar_label: Statistics +description: This document describes how to use the PERFORMANCE_SCHEMA database in TDengine. 
--- TDengine includes a built-in database named `PERFORMANCE_SCHEMA` to provide access to database performance statistics. This document introduces the tables of PERFORMANCE_SCHEMA and their structure. diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index f70d86570e..dc1db956a0 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -1,6 +1,7 @@ --- -sidebar_label: SHOW Statement title: SHOW Statement for Metadata +sidebar_label: SHOW Statement +description: This document describes how to use the SHOW statement in TDengine. --- The `SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`. diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md index f895567c62..8b4c439352 100644 --- a/docs/en/12-taos-sql/25-grant.md +++ b/docs/en/12-taos-sql/25-grant.md @@ -1,7 +1,7 @@ --- -sidebar_label: Access Control title: User and Access Control -description: Manage user and user's permission +sidebar_label: Access Control +description: This document describes how to manage users and permissions in TDengine. --- This document describes how to manage permissions in TDengine. diff --git a/docs/en/12-taos-sql/26-udf.md b/docs/en/12-taos-sql/26-udf.md index 977f3bcc08..249fcd3b54 100644 --- a/docs/en/12-taos-sql/26-udf.md +++ b/docs/en/12-taos-sql/26-udf.md @@ -1,6 +1,7 @@ --- -sidebar_label: User-Defined Functions title: User-Defined Functions (UDF) +sidebar_label: User-Defined Functions +description: This document describes the SQL statements related to user-defined functions (UDF) in TDengine. --- You can create user-defined functions and import them into TDengine. diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-index.md index 7d09bc43ab..7586e4af76 100644 --- a/docs/en/12-taos-sql/27-index.md +++ b/docs/en/12-taos-sql/27-index.md @@ -1,6 +1,7 @@ --- -sidebar_label: Index -title: Using Indices +title: Indexing +sidebar_label: Indexing +description: This document describes the SQL statements related to indexing in TDengine. --- TDengine supports SMA and FULLTEXT indexing. diff --git a/docs/en/12-taos-sql/28-recovery.md b/docs/en/12-taos-sql/28-recovery.md index e869ffc45f..b4da25ea0c 100644 --- a/docs/en/12-taos-sql/28-recovery.md +++ b/docs/en/12-taos-sql/28-recovery.md @@ -1,6 +1,7 @@ --- -sidebar_label: Error Recovery title: Error Recovery +sidebar_label: Error Recovery +description: This document describes the SQL statements related to error recovery in TDengine. --- In a complex environment, connections and query tasks may encounter errors or fail to return in a reasonable time. If this occurs, you can terminate the connection or task. diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md index f288cd7545..341791d675 100644 --- a/docs/en/12-taos-sql/29-changes.md +++ b/docs/en/12-taos-sql/29-changes.md @@ -1,7 +1,7 @@ --- -sidebar_label: Changes in TDengine 3.0 title: Changes in TDengine 3.0 -description: "This document explains how TDengine SQL has changed in version 3.0." +sidebar_label: Changes in TDengine 3.0 +description: This document describes how TDengine SQL has changed in version 3.0 compared with previous versions.
--- ## Basic SQL Elements diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md index a5ffc9dc8d..276f84f21b 100644 --- a/docs/en/12-taos-sql/index.md +++ b/docs/en/12-taos-sql/index.md @@ -1,6 +1,6 @@ --- title: TDengine SQL -description: 'The syntax supported by TDengine SQL ' +description: This document describes the syntax and functions supported by TDengine SQL. --- This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL, this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes). diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md index d7713b943f..6e6c4aaebf 100644 --- a/docs/en/13-operation/01-pkg-install.md +++ b/docs/en/13-operation/01-pkg-install.md @@ -1,6 +1,6 @@ --- title: Install and Uninstall -description: Install, Uninstall, Start, Stop and Upgrade +description: This document describes how to install, upgrade, and uninstall TDengine. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/13-operation/02-planning.mdx b/docs/en/13-operation/02-planning.mdx index 2dffa7bb87..37ef6aae26 100644 --- a/docs/en/13-operation/02-planning.mdx +++ b/docs/en/13-operation/02-planning.mdx @@ -1,6 +1,7 @@ --- -sidebar_label: Resource Planning title: Resource Planning +sidebar_label: Resource Planning +description: This document describes how to plan compute and storage resources for your TDengine cluster. --- It is important to plan computing and storage resources when using TDengine to build an IoT, time-series, or big data platform. This chapter describes how to plan the required CPU, memory, and disk resources. diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md index 21a5a90282..4f33748e68 100644 --- a/docs/en/13-operation/03-tolerance.md +++ b/docs/en/13-operation/03-tolerance.md @@ -1,5 +1,6 @@ --- title: Fault Tolerance and Disaster Recovery +description: This document describes how TDengine provides fault tolerance and disaster recovery. --- ## Fault Tolerance diff --git a/docs/en/13-operation/07-import.md b/docs/en/13-operation/07-import.md index 8362cec1ab..e95824e927 100644 --- a/docs/en/13-operation/07-import.md +++ b/docs/en/13-operation/07-import.md @@ -1,5 +1,6 @@ --- title: Data Import +description: This document describes how to import data into TDengine. --- TDengine provides multiple ways of importing data: import with a script, import from a data file, and import using `taosdump`. diff --git a/docs/en/13-operation/08-export.md b/docs/en/13-operation/08-export.md index 5780de42fa..bffda36e23 100644 --- a/docs/en/13-operation/08-export.md +++ b/docs/en/13-operation/08-export.md @@ -1,5 +1,6 @@ --- title: Data Export +description: This document describes how to export data from TDengine.
--- There are two ways of exporting data from a TDengine cluster: diff --git a/docs/en/13-operation/10-monitor.md b/docs/en/13-operation/10-monitor.md index 74a5564a2a..346b874059 100644 --- a/docs/en/13-operation/10-monitor.md +++ b/docs/en/13-operation/10-monitor.md @@ -1,5 +1,6 @@ --- title: TDengine Monitoring +description: This document describes how to monitor your TDengine cluster. --- After TDengine is started, it automatically writes monitoring data, including CPU, memory, and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries, into a designated database at a predefined interval through taosKeeper. Additionally, some important system operations, like logging on, creating users, and dropping databases, as well as alerts and warnings generated in TDengine, are written into the `log` database too. A system operator can view the data in the `log` database from the TDengine CLI or from a web console. diff --git a/docs/en/13-operation/17-diagnose.md b/docs/en/13-operation/17-diagnose.md index fa202a23ea..9d42b3ebbc 100644 --- a/docs/en/13-operation/17-diagnose.md +++ b/docs/en/13-operation/17-diagnose.md @@ -1,5 +1,6 @@ --- title: Problem Diagnostics +description: This document describes how to diagnose issues with your TDengine cluster. --- ## Network Connection Diagnostics diff --git a/docs/en/13-operation/index.md b/docs/en/13-operation/index.md index c64749c40e..8b386dc19a 100644 --- a/docs/en/13-operation/index.md +++ b/docs/en/13-operation/index.md @@ -1,5 +1,6 @@ --- title: Administration +description: This document describes how to perform management operations on your TDengine cluster from an administrator's perspective. --- This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization. diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx index 09e40b956f..b138d69bfc 100644 --- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx @@ -1,5 +1,6 @@ --- title: REST API +description: This document describes the TDengine REST API. --- To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database. diff --git a/docs/en/14-reference/03-connector/03-cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx index 906d56ab15..3bd7b7f4c6 100644 --- a/docs/en/14-reference/03-connector/03-cpp.mdx +++ b/docs/en/14-reference/03-connector/03-cpp.mdx @@ -1,6 +1,7 @@ --- -sidebar_label: C/C++ title: C/C++ Connector +sidebar_label: C/C++ +description: This document describes the TDengine C/C++ connector. --- C/C++ developers can use TDengine's client driver and the C/C++ connector to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector, you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located.
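The connector pages in the hunks that follow also carry a `toc_max_heading_level` field, which this patch moves to the end of the front matter block, after the three common fields. Reconstructed from the Java connector hunk below, the resulting block looks like this:

```yaml
---
title: TDengine Java Connector
sidebar_label: Java
description: This document describes the TDengine Java Connector.
toc_max_heading_level: 4    # page-specific option, kept after the common fields
---
```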
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index c37738b3f8..61ce166069 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -1,8 +1,8 @@ --- -toc_max_heading_level: 4 -sidebar_label: Java title: TDengine Java Connector -description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors. +sidebar_label: Java +description: This document describes the TDengine Java Connector. +toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx index 60407c0735..da2f54708f 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -1,7 +1,8 @@ --- -toc_max_heading_level: 4 -sidebar_label: Go title: TDengine Go Connector +sidebar_label: Go +description: This document describes the TDengine Go connector. +toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index 4e2a7848dc..6e84859610 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -1,7 +1,8 @@ --- -toc_max_heading_level: 4 -sidebar_label: Rust title: TDengine Rust Connector +sidebar_label: Rust +description: This document describes the TDengine Rust connector. +toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index d593c3f133..146da268a8 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -1,7 +1,7 @@ --- -sidebar_label: Python title: TDengine Python Connector -description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas." +sidebar_label: Python +description: This document describes taospy, the TDengine Python connector. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/14-reference/03-connector/08-node.mdx b/docs/en/14-reference/03-connector/08-node.mdx index a36cf0efc9..1ac8f69d06 100644 --- a/docs/en/14-reference/03-connector/08-node.mdx +++ b/docs/en/14-reference/03-connector/08-node.mdx @@ -1,7 +1,8 @@ --- -toc_max_heading_level: 4 -sidebar_label: Node.js title: TDengine Node.js Connector +sidebar_label: Node.js +description: This document describes the TDengine Node.js connector. +toc_max_heading_level: 4 --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx index 756e948bd2..e984967fb7 100644 --- a/docs/en/14-reference/03-connector/09-csharp.mdx +++ b/docs/en/14-reference/03-connector/09-csharp.mdx @@ -1,7 +1,8 @@ --- -toc_max_heading_level: 4 -sidebar_label: C# title: C# Connector +sidebar_label: C# +description: This document describes the TDengine C# connector. 
+toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; diff --git a/docs/en/14-reference/03-connector/10-php.mdx b/docs/en/14-reference/03-connector/10-php.mdx index 87f8616f9e..fd00d11239 100644 --- a/docs/en/14-reference/03-connector/10-php.mdx +++ b/docs/en/14-reference/03-connector/10-php.mdx @@ -1,6 +1,7 @@ --- -sidebar_label: PHP title: PHP Connector +sidebar_label: PHP +description: This document describes the TDengine PHP connector. --- `php-tdengine` is the TDengine PHP connector provided by the TDengine community. In particular, it supports Swoole coroutine. diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/14-reference/03-connector/index.mdx index da3aae8309..2df73b8592 100644 --- a/docs/en/14-reference/03-connector/index.mdx +++ b/docs/en/14-reference/03-connector/index.mdx @@ -1,5 +1,6 @@ --- title: Connector +description: This document describes the connectors that TDengine provides to interface with various programming languages. --- TDengine provides a rich set of APIs (application development interfaces). To help users develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md index 9eb6cb9213..c1ec97b647 100644 --- a/docs/en/14-reference/04-taosadapter.md +++ b/docs/en/14-reference/04-taosadapter.md @@ -1,7 +1,7 @@ --- -title: "taosAdapter" -description: "taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine." -sidebar_label: "taosAdapter" +title: taosAdapter +sidebar_label: taosAdapter +description: This document describes how to use taosAdapter, a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. --- import Prometheus from "./_prometheus.mdx" diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md index 4017b12be9..9828d71ece 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -1,8 +1,8 @@ --- title: taosBenchmark sidebar_label: taosBenchmark +description: This document describes how to use taosBenchmark, a tool for testing the performance of TDengine. toc_max_heading_level: 4 -description: "taosBenchmark (once called taosdemo ) is a tool for testing the performance of TDengine." --- # Introduction diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md index 9c63b4dc03..dcfc068a3d 100644 --- a/docs/en/14-reference/06-taosdump.md +++ b/docs/en/14-reference/06-taosdump.md @@ -1,6 +1,6 @@ --- title: taosdump -description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster."
+description: This document describes how to use taosdump, a tool for backing up and restoring the data in a TDengine cluster. --- ## Introduction diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md index d03c16a8bc..1c58dd6a76 100644 --- a/docs/en/14-reference/07-tdinsight/index.md +++ b/docs/en/14-reference/07-tdinsight/index.md @@ -1,6 +1,7 @@ --- title: TDinsight - Grafana-based Zero-Dependency Monitoring Solution for TDengine sidebar_label: TDinsight +description: This document describes TDinsight, a monitoring solution for TDengine. --- TDinsight is a solution for monitoring TDengine using the built-in native monitoring database and [Grafana]. diff --git a/docs/en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md index 68e2f08765..7833ac861f 100644 --- a/docs/en/14-reference/08-taos-shell.md +++ b/docs/en/14-reference/08-taos-shell.md @@ -1,7 +1,7 @@ --- title: TDengine Command Line Interface (CLI) sidebar_label: Command Line Interface -description: Instructions and tips for using the TDengine CLI +description: This document describes how to use the TDengine CLI. --- The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances. diff --git a/docs/en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md index 061294f016..7dfa8ac93a 100644 --- a/docs/en/14-reference/09-support-platform/index.md +++ b/docs/en/14-reference/09-support-platform/index.md @@ -1,6 +1,6 @@ --- title: List of supported platforms -description: "List of platforms supported by TDengine server, client, and connector" +description: This document describes the supported platforms for the TDengine server, client, and connectors. --- ## List of supported platforms for TDengine server diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md index 89987c456f..b9278c6961 100644 --- a/docs/en/14-reference/11-docker/index.md +++ b/docs/en/14-reference/11-docker/index.md @@ -1,6 +1,6 @@ --- title: Deploying TDengine with Docker -description: "This chapter focuses on starting the TDengine service in a container and accessing it." +description: This chapter describes how to start and access TDengine in a Docker container. --- This chapter describes how to start the TDengine service in a container and access it. Users can control the behavior of the service in the container by using environment variables on the docker run command line or in the docker-compose file. diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index e6fd6dbe6f..afa9f5a8ae 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -1,6 +1,6 @@ --- title: Configuration Parameters -description: "Configuration parameters for client and server in TDengine" +description: This document describes the configuration parameters for the TDengine server and client. --- ## Configuration File on Server Side diff --git a/docs/en/14-reference/12-directory.md b/docs/en/14-reference/12-directory.md index 19b036418f..651892c8b2 100644 --- a/docs/en/14-reference/12-directory.md +++ b/docs/en/14-reference/12-directory.md @@ -1,6 +1,6 @@ --- title: File directory structure -description: "TDengine installation directory description" +description: This document describes the structure of the TDengine directory after installation.
--- After TDengine is installed, the following directories or files will be created in the system by default. diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md index a97a54af02..caedd76df8 100644 --- a/docs/en/14-reference/13-schemaless/13-schemaless.md +++ b/docs/en/14-reference/13-schemaless/13-schemaless.md @@ -1,6 +1,6 @@ --- title: Schemaless Writing -description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.' +description: This document describes how to use the schemaless write component of TDengine. --- In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md index 665bc75380..0b4a1fbd60 100644 --- a/docs/en/14-reference/14-taosKeeper.md +++ b/docs/en/14-reference/14-taosKeeper.md @@ -1,7 +1,7 @@ --- sidebar_label: taosKeeper title: taosKeeper -description: exports TDengine monitoring metrics. +description: This document describes how to use taosKeeper, a tool for exporting TDengine monitoring metrics. --- ## Introduction diff --git a/docs/en/14-reference/index.md b/docs/en/14-reference/index.md index f3a64913d0..bc8ec69965 100644 --- a/docs/en/14-reference/index.md +++ b/docs/en/14-reference/index.md @@ -1,5 +1,6 @@ --- title: Reference +description: This document describes TDengine connectors and utilities. --- This section describes the TDengine connectors and utilities. diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx index ca32ce8afc..b33e1c1199 100644 --- a/docs/en/20-third-party/01-grafana.mdx +++ b/docs/en/20-third-party/01-grafana.mdx @@ -1,6 +1,7 @@ --- -sidebar_label: Grafana title: Grafana +sidebar_label: Grafana +description: This document describes how to integrate TDengine with Grafana. --- import Tabs from "@theme/Tabs"; diff --git a/docs/en/20-third-party/02-prometheus.md b/docs/en/20-third-party/02-prometheus.md index ef9b9cb637..bfdd3d015e 100644 --- a/docs/en/20-third-party/02-prometheus.md +++ b/docs/en/20-third-party/02-prometheus.md @@ -1,6 +1,7 @@ --- -sidebar_label: Prometheus title: Prometheus writing and reading +sidebar_label: Prometheus +description: This document describes how to integrate TDengine with Prometheus. --- import Prometheus from "../14-reference/_prometheus.mdx" diff --git a/docs/en/20-third-party/03-telegraf.md b/docs/en/20-third-party/03-telegraf.md index 8f3cab0e57..7e99b84eab 100644 --- a/docs/en/20-third-party/03-telegraf.md +++ b/docs/en/20-third-party/03-telegraf.md @@ -1,6 +1,7 @@ --- -sidebar_label: Telegraf title: Telegraf writing +sidebar_label: Telegraf +description: This document describes how to integrate TDengine with Telegraf. 
--- import Telegraf from "../14-reference/_telegraf.mdx" diff --git a/docs/en/20-third-party/05-collectd.md b/docs/en/20-third-party/05-collectd.md index 5b52e3b7bc..d8c8e7f81d 100644 --- a/docs/en/20-third-party/05-collectd.md +++ b/docs/en/20-third-party/05-collectd.md @@ -1,6 +1,7 @@ --- -sidebar_label: collectd title: collectd writing +sidebar_label: collectd +description: This document describes how to integrate TDengine with collectd. --- import CollectD from "../14-reference/_collectd.mdx" diff --git a/docs/en/20-third-party/06-statsd.md b/docs/en/20-third-party/06-statsd.md index b861a48ecd..ea428e9cdb 100644 --- a/docs/en/20-third-party/06-statsd.md +++ b/docs/en/20-third-party/06-statsd.md @@ -1,6 +1,7 @@ --- -sidebar_label: StatsD title: StatsD Writing +sidebar_label: StatsD +description: This document describes how to integrate TDengine with StatsD. --- import StatsD from "../14-reference/_statsd.mdx" diff --git a/docs/en/20-third-party/07-icinga2.md b/docs/en/20-third-party/07-icinga2.md index 167b6a4303..540aae8689 100644 --- a/docs/en/20-third-party/07-icinga2.md +++ b/docs/en/20-third-party/07-icinga2.md @@ -1,6 +1,7 @@ --- -sidebar_label: icinga2 title: icinga2 writing +sidebar_label: icinga2 +description: This document describes how to integrate TDengine with icinga2. --- import Icinga2 from "../14-reference/_icinga2.mdx" diff --git a/docs/en/20-third-party/08-tcollector.md b/docs/en/20-third-party/08-tcollector.md index b604a2d712..f1c0ecd44d 100644 --- a/docs/en/20-third-party/08-tcollector.md +++ b/docs/en/20-third-party/08-tcollector.md @@ -1,6 +1,7 @@ --- -sidebar_label: TCollector title: TCollector writing +sidebar_label: TCollector +description: This document describes how to integrate TDengine with TCollector. --- import TCollector from "../14-reference/_tcollector.mdx" diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md index 2ead1bbaf4..10ce0174ed 100644 --- a/docs/en/20-third-party/09-emq-broker.md +++ b/docs/en/20-third-party/09-emq-broker.md @@ -1,6 +1,7 @@ --- -sidebar_label: EMQX Broker title: EMQX Broker writing +sidebar_label: EMQX Broker +description: This document describes how to integrate TDengine with the EMQX broker. --- MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is open-source MQTT broker software. You can write MQTT data directly to TDengine without any code. You only need to set up "rules" in EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. diff --git a/docs/en/20-third-party/10-hive-mq-broker.md b/docs/en/20-third-party/10-hive-mq-broker.md index 828a62ac5b..6c5165596e 100644 --- a/docs/en/20-third-party/10-hive-mq-broker.md +++ b/docs/en/20-third-party/10-hive-mq-broker.md @@ -1,6 +1,7 @@ --- -sidebar_label: HiveMQ Broker title: HiveMQ Broker Writing +sidebar_label: HiveMQ Broker +description: This document describes how to integrate TDengine with the HiveMQ broker. --- [HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is aimed mainly at enterprise machine-to-machine (M2M) communication and internal transport, and addresses scalability, ease of management, and security requirements.
HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via the TDengine extension for HiveMQ. For more information, see [HiveMQ TDengine Extension](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md). diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md index 3e8f7c295d..0998862b2d 100644 --- a/docs/en/20-third-party/11-kafka.md +++ b/docs/en/20-third-party/11-kafka.md @@ -1,6 +1,7 @@ --- -sidebar_label: Kafka title: TDengine Kafka Connector Tutorial +sidebar_label: Kafka +description: This document describes how to integrate TDengine with Kafka. --- TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDengine Sink Connector. Users only need to provide a simple configuration file to synchronize the data of the specified topic in Kafka (batch or real-time) to TDengine or synchronize the data (batch or real-time) of the specified database in TDengine to Kafka. diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md index fc94f98056..ea6431fa5a 100644 --- a/docs/en/20-third-party/12-google-data-studio.md +++ b/docs/en/20-third-party/12-google-data-studio.md @@ -1,6 +1,7 @@ --- -sidebar_label: Google Data Studio title: Use Google Data Studio to access TDengine +sidebar_label: Google Data Studio +description: This document describes how to integrate TDengine with Google Data Studio. --- Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis. diff --git a/docs/en/20-third-party/13-Jupyter.md b/docs/en/20-third-party/13-Jupyter.md index fbd7e530f0..1ac9df1da4 100644 --- a/docs/en/20-third-party/13-Jupyter.md +++ b/docs/en/20-third-party/13-Jupyter.md @@ -1,6 +1,7 @@ --- -sidebar_label: JupyterLab title: Connect JupyterLab to TDengine +sidebar_label: JupyterLab +description: This document describes how to integrate TDengine with JupyterLab. --- JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this document, we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab. diff --git a/docs/en/20-third-party/index.md b/docs/en/20-third-party/index.md index 87bd9e0751..6fc8043eef 100644 --- a/docs/en/20-third-party/index.md +++ b/docs/en/20-third-party/index.md @@ -1,5 +1,6 @@ --- title: Third Party Tools +description: This document describes how to integrate TDengine with various third-party tools. --- Since TDengine supports standard SQL commands, common database connector standards (e.g., JDBC), ORM, and other popular time-series database writing protocols (e.g., InfluxDB Line Protocol, OpenTSDB JSON, OpenTSDB Telnet, etc.), it is very easy to integrate TDengine with other third-party tools. You only need to provide a simple configuration, and the integration can be done without a line of code.
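The third-party tool pages above all converge on the same description template, "This document describes how to integrate TDengine with X." Reconstructed from the Kafka hunk above, the front matter of one such page after this patch reads:

```yaml
---
title: TDengine Kafka Connector Tutorial
sidebar_label: Kafka
description: This document describes how to integrate TDengine with Kafka.
---
```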
diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md index 697ecb98a1..cef05dcc56 100644 --- a/docs/en/21-tdinternal/01-arch.md +++ b/docs/en/21-tdinternal/01-arch.md @@ -1,6 +1,7 @@ --- -sidebar_label: Architecture title: Architecture +sidebar_label: Architecture +description: This document describes the architecture of TDengine. --- ## Cluster and Primary Logic Unit diff --git a/docs/en/21-tdinternal/03-high-availability.md b/docs/en/21-tdinternal/03-high-availability.md index e2e1c6521e..a0f6ca4ffe 100644 --- a/docs/en/21-tdinternal/03-high-availability.md +++ b/docs/en/21-tdinternal/03-high-availability.md @@ -1,6 +1,7 @@ --- -sidebar_label: High Availability title: High Availability +sidebar_label: High Availability +description: This document describes how TDengine implements high availability. --- ## High Availability of Vnode diff --git a/docs/en/21-tdinternal/04-load-balance.md b/docs/en/21-tdinternal/04-load-balance.md index 7648398059..73da1c1dd6 100644 --- a/docs/en/21-tdinternal/04-load-balance.md +++ b/docs/en/21-tdinternal/04-load-balance.md @@ -1,6 +1,7 @@ --- -sidebar_label: Load Balance title: Load Balance +sidebar_label: Load Balance +description: This document describes how TDengine implements load balancing. --- Load balancing in TDengine is mainly about distributing the processing of time-series data. TDengine employs a built-in hash algorithm to distribute all the tables, sub-tables, and their data of a database across all the vgroups that belong to the database. Each table or sub-table can only be handled by a single vgroup, while each vgroup can process multiple tables or sub-tables. diff --git a/docs/en/21-tdinternal/index.md b/docs/en/21-tdinternal/index.md index 999d6f89ff..4f08467543 100644 --- a/docs/en/21-tdinternal/index.md +++ b/docs/en/21-tdinternal/index.md @@ -1,5 +1,6 @@ --- title: TDengine Inside +description: This document describes the internals of TDengine from an architectural perspective. --- ```mdx-code-block diff --git a/docs/en/25-application/01-telegraf.md b/docs/en/25-application/01-telegraf.md index 65fb08ee67..e043aebcb6 100644 --- a/docs/en/25-application/01-telegraf.md +++ b/docs/en/25-application/01-telegraf.md @@ -1,6 +1,7 @@ --- -sidebar_label: TDengine + Telegraf + Grafana title: Quickly Build IT DevOps Visualization System with TDengine + Telegraf + Grafana +sidebar_label: TDengine + Telegraf + Grafana +description: This document describes how to create an IT visualization system by integrating TDengine with Telegraf and Grafana. --- ## Background diff --git a/docs/en/25-application/02-collectd.md b/docs/en/25-application/02-collectd.md index 97412b2309..6ac7253fc4 100644 --- a/docs/en/25-application/02-collectd.md +++ b/docs/en/25-application/02-collectd.md @@ -1,6 +1,7 @@ --- -sidebar_label: TDengine + collectd/StatsD + Grafana title: Quickly build an IT DevOps visualization system using TDengine + collectd/StatsD + Grafana +sidebar_label: TDengine + collectd/StatsD + Grafana +description: This document describes how to build an IT visualization system by integrating TDengine with Grafana and collectd or StatsD.
--- ## Background diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md index 1aabaa43e7..30d069e4e2 100644 --- a/docs/en/25-application/03-immigrate.md +++ b/docs/en/25-application/03-immigrate.md @@ -1,6 +1,7 @@ --- -sidebar_label: OpenTSDB Migration to TDengine title: Best Practices for Migrating OpenTSDB Applications to TDengine +sidebar_label: OpenTSDB Migration to TDengine +description: This document describes the best practices for migrating an OpenTSDB application to TDengine. --- As a distributed, scalable time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization have developed rapidly, enterprise-level services are becoming more and more diverse, and the architecture is becoming more complex. diff --git a/docs/en/25-application/index.md b/docs/en/25-application/index.md index 5383a00c67..178fef47b6 100644 --- a/docs/en/25-application/index.md +++ b/docs/en/25-application/index.md @@ -1,5 +1,6 @@ --- title: Application Practice +description: This document describes some examples of building systems around TDengine. --- ```mdx-code-block diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index 7650e97365..9e7f4f8e0d 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -1,5 +1,6 @@ --- title: Frequently Asked Questions +description: This document describes the frequently asked questions about TDengine. --- ## Submit an Issue diff --git a/docs/en/27-train-faq/index.md b/docs/en/27-train-faq/index.md index 2cb87aab00..cc55a41559 100644 --- a/docs/en/27-train-faq/index.md +++ b/docs/en/27-train-faq/index.md @@ -1,5 +1,6 @@ --- title: FAQ & Others +description: This document describes common issues related to TDengine. --- ```mdx-code-block diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 83ea3eb5e6..a6a28e04c4 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -1,7 +1,7 @@ --- -sidebar_label: TDengine title: TDengine Release History and Download Links -description: TDengine release history, Release Notes and download links. +sidebar_label: TDengine +description: This document provides download links for all released versions of TDengine 3.0. --- TDengine 3.x installation packages can be downloaded at the following links: diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md index 97fed654f2..3c1dc32f8d 100644 --- a/docs/en/28-releases/02-tools.md +++ b/docs/en/28-releases/02-tools.md @@ -1,7 +1,7 @@ --- -sidebar_label: taosTools title: taosTools Release History and Download Links -description: taosTools release history, Release Notes, download links. +sidebar_label: taosTools +description: This document provides download links for all released versions of taosTools compatible with TDengine 3.0. --- taosTools installation packages can be downloaded at the following links: diff --git a/docs/en/28-releases/index.md b/docs/en/28-releases/index.md index c01c99cdce..d1f93c3b9b 100644 --- a/docs/en/28-releases/index.md +++ b/docs/en/28-releases/index.md @@ -1,5 +1,6 @@ --- title: Releases +description: This document describes TDengine products that have been released. --- ```mdx-code-block