From 52bff98f344c15c6cb374a624cb5194ae561a8a1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 2 Sep 2022 12:16:03 +0800 Subject: [PATCH 01/49] feat: update taos-tools f169c0f for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 74c2dbca30..3c7067a9f8 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 212c34d + GIT_TAG f169c0f SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From c1cdf9fec2b0ce65ed3307ab604e936ae7f9e36e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 2 Sep 2022 16:51:49 +0800 Subject: [PATCH 02/49] feat: update taos-tools a4d9b92 for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 3c7067a9f8..b905e30489 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG f169c0f + GIT_TAG a4d9b92 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 2b94231e693e2a89b08e4fdbd2fce5a2d00ae130 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 7 Sep 2022 10:19:15 +0800 Subject: [PATCH 03/49] feat: update taostools 0fa3a66 for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index b905e30489..74cdd2a6ad 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG a4d9b92 + GIT_TAG 0fa3a66 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 28a6865ddaf8ef67792b2d2b00c1a1aa23bb81fd Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 7 Sep 2022 11:21:32 +0800 Subject: [PATCH 04/49] feat: update taos-tools 023a01e for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 74cdd2a6ad..0cdbb5d60a 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 0fa3a66 + GIT_TAG 023a01e SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 52cc999ed180a3189b45ea06f378857c7a959f34 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 7 Sep 2022 11:32:26 +0800 Subject: [PATCH 05/49] feat: update taos-tools 7d5c1c0 for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 0cdbb5d60a..adefacc6af 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 023a01e + GIT_TAG 7d5c1c0 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR 
"" #BUILD_IN_SOURCE TRUE From 00a974cfa0b4fed6d3abaa1114141dc16c1c122b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 11 Sep 2022 14:06:04 +0800 Subject: [PATCH 06/49] feat: update taos-tools 2.2.0 (2dba49c) for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index adefacc6af..c273e9889f 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 7d5c1c0 + GIT_TAG 2dba49c SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 4fd0bc409d2177a77148a502546e590179496bcb Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 13 Sep 2022 09:09:21 +0800 Subject: [PATCH 07/49] fix: fix client/server memory leak issues --- source/client/src/clientImpl.c | 1 + source/client/src/clientMain.c | 2 ++ source/client/src/clientRawBlockWrite.c | 6 ++++++ source/common/src/tmsg.c | 2 ++ source/libs/catalog/src/ctgUtil.c | 11 ++++++++++- source/libs/parser/src/parTranslater.c | 25 ++++++------------------- 6 files changed, 27 insertions(+), 20 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 5ebc2729f8..1c18812509 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -854,6 +854,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { pRequest->metric.resultReady = taosGetTimestampUs(); if (pResult) { + destroyQueryExecRes(&pRequest->body.resInfo.execRes); memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult)); } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 3086078080..73636e7372 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -870,11 +870,13 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { if (code != TSDB_CODE_SUCCESS) { pRequest->code = code; + taosMemoryFreeClear(pResultInfo->pData); pRequest->body.fetchFp(pRequest->body.param, pRequest, 0); return; } if (pRequest->code != TSDB_CODE_SUCCESS) { + taosMemoryFreeClear(pResultInfo->pData); pRequest->body.fetchFp(pRequest->body.param, pRequest, 0); return; } diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index c135965f07..6e9711f57b 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -701,6 +701,12 @@ typedef struct SVgroupCreateTableBatch { static void destroyCreateTbReqBatch(void* data) { SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data; + size_t size = taosArrayGetSize(pTbBatch->req.pArray); + for (int32_t i = 0; i < size; ++i) { + SVCreateTbReq* pTableReq = taosArrayGet(pTbBatch->req.pArray, i); + tdDestroySVCreateTbReq(pTableReq); + } + taosArrayDestroy(pTbBatch->req.pArray); } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index ea25094d10..f96462945f 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5436,6 +5436,8 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) { for (int32_t i = 0; i < pRsp->nBlocks; ++i) { SSubmitBlkRsp *sRsp = pRsp->pBlocks + i; taosMemoryFree(sRsp->tblFName); + tFreeSTableMetaRsp(sRsp->pMeta); + taosMemoryFree(sRsp->pMeta); } taosMemoryFree(pRsp->pBlocks); diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 
296100ce6d..97b174de1c 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -99,7 +99,16 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) { } void ctgFreeQNode(SCtgQNode *node) { - //TODO + if (NULL == node) { + return; + } + + if (node->op) { + taosMemoryFree(node->op->data); + taosMemoryFree(node->op); + } + + taosMemoryFree(node); } void ctgFreeSTableIndex(void *info) { diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index ad9a467dee..7d49e3d7c5 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -5922,12 +5922,6 @@ typedef struct SVgroupCreateTableBatch { char dbName[TSDB_DB_NAME_LEN]; } SVgroupCreateTableBatch; -static void destroyCreateTbReq(SVCreateTbReq* pReq) { - taosMemoryFreeClear(pReq->name); - taosMemoryFreeClear(pReq->comment); - taosMemoryFreeClear(pReq->ntb.schemaRow.pSchema); -} - static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* pStmt, const SVgroupInfo* pVgroupInfo, SVgroupCreateTableBatch* pBatch) { char dbFName[TSDB_DB_FNAME_LEN] = {0}; @@ -5942,7 +5936,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* if (pStmt->pOptions->commentNull == false) { req.comment = strdup(pStmt->pOptions->comment); if (NULL == req.comment) { - destroyCreateTbReq(&req); + tdDestroySVCreateTbReq(&req); return TSDB_CODE_OUT_OF_MEMORY; } req.commentLen = strlen(pStmt->pOptions->comment); @@ -5953,7 +5947,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* req.ntb.schemaRow.version = 1; req.ntb.schemaRow.pSchema = taosMemoryCalloc(req.ntb.schemaRow.nCols, sizeof(SSchema)); if (NULL == req.name || NULL == req.ntb.schemaRow.pSchema) { - destroyCreateTbReq(&req); + tdDestroySVCreateTbReq(&req); return TSDB_CODE_OUT_OF_MEMORY; } if (pStmt->ignoreExists) { @@ -5969,7 +5963,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* strcpy(pBatch->dbName, pStmt->dbName); pBatch->req.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq)); if (NULL == pBatch->req.pArray) { - destroyCreateTbReq(&req); + tdDestroySVCreateTbReq(&req); return TSDB_CODE_OUT_OF_MEMORY; } taosArrayPush(pBatch->req.pArray, &req); @@ -6014,16 +6008,7 @@ static void destroyCreateTbReqBatch(void* data) { size_t size = taosArrayGetSize(pTbBatch->req.pArray); for (int32_t i = 0; i < size; ++i) { SVCreateTbReq* pTableReq = taosArrayGet(pTbBatch->req.pArray, i); - taosMemoryFreeClear(pTableReq->name); - taosMemoryFreeClear(pTableReq->comment); - - if (pTableReq->type == TSDB_NORMAL_TABLE) { - taosMemoryFreeClear(pTableReq->ntb.schemaRow.pSchema); - } else if (pTableReq->type == TSDB_CHILD_TABLE) { - taosMemoryFreeClear(pTableReq->ctb.pTag); - taosMemoryFreeClear(pTableReq->ctb.name); - taosArrayDestroy(pTableReq->ctb.tagName); - } + tdDestroySVCreateTbReq(pTableReq); } taosArrayDestroy(pTbBatch->req.pArray); @@ -6384,6 +6369,8 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla if (TSDB_CODE_SUCCESS == code) { addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, pTag, pSuperTableMeta->uid, pStmt->useTableName, &info, tagName, pSuperTableMeta->tableInfo.numOfTags); + } else { + taosMemoryFree(pTag); } taosArrayDestroy(tagName); From ff9d673489b1c67042f47fe47656a5905d4ab8bb Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 13 Sep 2022 10:48:39 +0800 Subject: [PATCH 08/49] enh: add binary serialization method to node structure --- 
include/libs/planner/planner.h | 6 +- source/libs/nodes/src/nodesMsgFuncs.c | 274 ++++++++++++++++---------- source/libs/planner/src/planner.c | 15 ++ source/libs/qworker/src/qworker.c | 4 +- source/libs/scheduler/src/schTask.c | 2 +- 5 files changed, 191 insertions(+), 110 deletions(-) diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h index 05caa7a7bb..e03ac3811a 100644 --- a/include/libs/planner/planner.h +++ b/include/libs/planner/planner.h @@ -52,10 +52,14 @@ int32_t qSetSubplanExecutionNode(SSubplan* pSubplan, int32_t groupId, SDownstrea void qClearSubplanExecutionNode(SSubplan* pSubplan); -// Convert to subplan to string for the scheduler to send to the executor +// Convert to subplan to display string for the scheduler to send to the executor int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen); int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan); +// Convert to subplan to msg for the scheduler to send to the executor +int32_t qSubPlanToMsg(const SSubplan* pSubplan, char** pStr, int32_t* pLen); +int32_t qMsgToSubplan(const char* pStr, int32_t len, SSubplan** pSubplan); + char* qQueryPlanToString(const SQueryPlan* pPlan); SQueryPlan* qStringToQueryPlan(const char* pStr); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 5fe31ed78e..b0be001e56 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -1082,6 +1082,170 @@ static int32_t msgToSlotDescNode(STlvDecoder* pDecoder, void* pObj) { return code; } +enum { EP_CODE_FQDN = 1, EP_CODE_port }; + +static int32_t epToMsg(const void* pObj, STlvEncoder* pEncoder) { + const SEp* pNode = (const SEp*)pObj; + + int32_t code = tlvEncodeCStr(pEncoder, EP_CODE_FQDN, pNode->fqdn); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU16(pEncoder, EP_CODE_port, pNode->port); + } + + return code; +} + +static int32_t msgToEp(STlvDecoder* pDecoder, void* pObj) { + SEp* pNode = (SEp*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case EP_CODE_FQDN: + code = tlvDecodeCStr(pTlv, pNode->fqdn); + break; + case EP_CODE_port: + code = tlvDecodeU16(pTlv, &pNode->port); + break; + default: + break; + } + } + + return code; +} + +enum { EP_SET_CODE_IN_USE = 1, EP_SET_CODE_NUM_OF_EPS, EP_SET_CODE_EPS }; + +static int32_t epSetToMsg(const void* pObj, STlvEncoder* pEncoder) { + const SEpSet* pNode = (const SEpSet*)pObj; + + int32_t code = tlvEncodeI8(pEncoder, EP_SET_CODE_IN_USE, pNode->inUse); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI8(pEncoder, EP_SET_CODE_NUM_OF_EPS, pNode->numOfEps); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObjArray(pEncoder, EP_SET_CODE_EPS, epToMsg, pNode->eps, sizeof(SEp), pNode->numOfEps); + } + + return code; +} + +static int32_t msgToEpSet(STlvDecoder* pDecoder, void* pObj) { + SEpSet* pNode = (SEpSet*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case EP_SET_CODE_IN_USE: + code = tlvDecodeI8(pTlv, &pNode->inUse); + break; + case EP_SET_CODE_NUM_OF_EPS: + code = tlvDecodeI8(pTlv, &pNode->numOfEps); + break; + case EP_SET_CODE_EPS: + code = tlvDecodeObjArrayFromTlv(pTlv, msgToEp, pNode->eps, sizeof(SEp)); + break; + default: + break; + } + } + + return code; +} + +enum { QUERY_NODE_ADDR_CODE_NODE_ID = 1, QUERY_NODE_ADDR_CODE_EP_SET }; + +static int32_t queryNodeAddrToMsg(const void* pObj, 
STlvEncoder* pEncoder) { + const SQueryNodeAddr* pNode = (const SQueryNodeAddr*)pObj; + + int32_t code = tlvEncodeI32(pEncoder, QUERY_NODE_ADDR_CODE_NODE_ID, pNode->nodeId); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, QUERY_NODE_ADDR_CODE_EP_SET, epSetToMsg, &pNode->epSet); + } + + return code; +} + +static int32_t msgToQueryNodeAddr(STlvDecoder* pDecoder, void* pObj) { + SQueryNodeAddr* pNode = (SQueryNodeAddr*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case QUERY_NODE_ADDR_CODE_NODE_ID: + code = tlvDecodeI32(pTlv, &pNode->nodeId); + break; + case QUERY_NODE_ADDR_CODE_EP_SET: + code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet); + break; + } + } + + return code; +} + +enum { + DOWNSTREAM_SOURCE_CODE_ADDR = 1, + DOWNSTREAM_SOURCE_CODE_TASK_ID, + DOWNSTREAM_SOURCE_CODE_SCHED_ID, + DOWNSTREAM_SOURCE_CODE_EXEC_ID, + DOWNSTREAM_SOURCE_CODE_FETCH_MSG_TYPE +}; + +static int32_t downstreamSourceNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { + const SDownstreamSourceNode* pNode = (const SDownstreamSourceNode*)pObj; + + int32_t code = tlvEncodeObj(pEncoder, DOWNSTREAM_SOURCE_CODE_ADDR, queryNodeAddrToMsg, &pNode->addr); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, DOWNSTREAM_SOURCE_CODE_TASK_ID, pNode->taskId); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, DOWNSTREAM_SOURCE_CODE_SCHED_ID, pNode->schedId); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI32(pEncoder, DOWNSTREAM_SOURCE_CODE_EXEC_ID, pNode->execId); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI32(pEncoder, DOWNSTREAM_SOURCE_CODE_FETCH_MSG_TYPE, pNode->fetchMsgType); + } + + return code; +} + +static int32_t msgToDownstreamSourceNode(STlvDecoder* pDecoder, void* pObj) { + SDownstreamSourceNode* pNode = (SDownstreamSourceNode*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case DOWNSTREAM_SOURCE_CODE_ADDR: + code = tlvDecodeObjFromTlv(pTlv, msgToQueryNodeAddr, &pNode->addr); + break; + case DOWNSTREAM_SOURCE_CODE_TASK_ID: + code = tlvDecodeU64(pTlv, &pNode->taskId); + break; + case DOWNSTREAM_SOURCE_CODE_SCHED_ID: + code = tlvDecodeU64(pTlv, &pNode->schedId); + break; + case DOWNSTREAM_SOURCE_CODE_EXEC_ID: + code = tlvDecodeI32(pTlv, &pNode->execId); + break; + case DOWNSTREAM_SOURCE_CODE_FETCH_MSG_TYPE: + code = tlvDecodeI32(pTlv, &pNode->fetchMsgType); + break; + default: + break; + } + } + + return code; +} + enum { PHY_NODE_CODE_OUTPUT_DESC = 1, PHY_NODE_CODE_CONDITIONS, @@ -1401,80 +1565,6 @@ static int32_t msgToPhysiTableScanNode(STlvDecoder* pDecoder, void* pObj) { return code; } -enum { EP_CODE_FQDN = 1, EP_CODE_port }; - -static int32_t epToMsg(const void* pObj, STlvEncoder* pEncoder) { - const SEp* pNode = (const SEp*)pObj; - - int32_t code = tlvEncodeCStr(pEncoder, EP_CODE_FQDN, pNode->fqdn); - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeU16(pEncoder, EP_CODE_port, pNode->port); - } - - return code; -} - -static int32_t msgToEp(STlvDecoder* pDecoder, void* pObj) { - SEp* pNode = (SEp*)pObj; - - int32_t code = TSDB_CODE_SUCCESS; - STlv* pTlv = NULL; - tlvForEach(pDecoder, pTlv, code) { - switch (pTlv->type) { - case EP_CODE_FQDN: - code = tlvDecodeCStr(pTlv, pNode->fqdn); - break; - case EP_CODE_port: - code = tlvDecodeU16(pTlv, &pNode->port); - break; - default: - break; - } - } - - return code; -} - -enum { EP_SET_CODE_IN_USE = 1, 
EP_SET_CODE_NUM_OF_EPS, EP_SET_CODE_EPS }; - -static int32_t epSetToMsg(const void* pObj, STlvEncoder* pEncoder) { - const SEpSet* pNode = (const SEpSet*)pObj; - - int32_t code = tlvEncodeI8(pEncoder, EP_SET_CODE_IN_USE, pNode->inUse); - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeI8(pEncoder, EP_SET_CODE_NUM_OF_EPS, pNode->numOfEps); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObjArray(pEncoder, EP_SET_CODE_EPS, epToMsg, pNode->eps, sizeof(SEp), pNode->numOfEps); - } - - return code; -} - -static int32_t msgToEpSet(STlvDecoder* pDecoder, void* pObj) { - SEpSet* pNode = (SEpSet*)pObj; - - int32_t code = TSDB_CODE_SUCCESS; - STlv* pTlv = NULL; - tlvForEach(pDecoder, pTlv, code) { - switch (pTlv->type) { - case EP_SET_CODE_IN_USE: - code = tlvDecodeI8(pTlv, &pNode->inUse); - break; - case EP_SET_CODE_NUM_OF_EPS: - code = tlvDecodeI8(pTlv, &pNode->numOfEps); - break; - case EP_SET_CODE_EPS: - code = tlvDecodeObjArrayFromTlv(pTlv, msgToEp, pNode->eps, sizeof(SEp)); - break; - default: - break; - } - } - - return code; -} - enum { PHY_SYSTABLE_SCAN_CODE_SCAN = 1, PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET, @@ -2594,38 +2684,6 @@ static int32_t msgToSubplanId(STlvDecoder* pDecoder, void* pObj) { return code; } -enum { QUERY_NODE_ADDR_CODE_NODE_ID = 1, QUERY_NODE_ADDR_CODE_EP_SET }; - -static int32_t queryNodeAddrToMsg(const void* pObj, STlvEncoder* pEncoder) { - const SQueryNodeAddr* pNode = (const SQueryNodeAddr*)pObj; - - int32_t code = tlvEncodeI32(pEncoder, QUERY_NODE_ADDR_CODE_NODE_ID, pNode->nodeId); - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, QUERY_NODE_ADDR_CODE_EP_SET, epSetToMsg, &pNode->epSet); - } - - return code; -} - -static int32_t msgToQueryNodeAddr(STlvDecoder* pDecoder, void* pObj) { - SQueryNodeAddr* pNode = (SQueryNodeAddr*)pObj; - - int32_t code = TSDB_CODE_SUCCESS; - STlv* pTlv = NULL; - tlvForEach(pDecoder, pTlv, code) { - switch (pTlv->type) { - case QUERY_NODE_ADDR_CODE_NODE_ID: - code = tlvDecodeI32(pTlv, &pNode->nodeId); - break; - case QUERY_NODE_ADDR_CODE_EP_SET: - code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet); - break; - } - } - - return code; -} - enum { SUBPLAN_CODE_SUBPLAN_ID = 1, SUBPLAN_CODE_SUBPLAN_TYPE, @@ -2802,6 +2860,8 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { case QUERY_NODE_SLOT_DESC: code = slotDescNodeToMsg(pObj, pEncoder); break; + case QUERY_NODE_DOWNSTREAM_SOURCE: + return downstreamSourceNodeToMsg(pObj, pEncoder); case QUERY_NODE_LEFT_VALUE: break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: @@ -2929,6 +2989,8 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) { case QUERY_NODE_SLOT_DESC: code = msgToSlotDescNode(pDecoder, pObj); break; + case QUERY_NODE_DOWNSTREAM_SOURCE: + return msgToDownstreamSourceNode(pDecoder, pObj); case QUERY_NODE_LEFT_VALUE: break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c index baa1d1074c..35903d45b1 100644 --- a/source/libs/planner/src/planner.c +++ b/source/libs/planner/src/planner.c @@ -123,6 +123,21 @@ int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen) { int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan) { return nodesStringToNode(pStr, (SNode**)pSubplan); } +int32_t qSubPlanToMsg(const SSubplan* pSubplan, char** pStr, int32_t* pLen) { + if (SUBPLAN_TYPE_MODIFY == pSubplan->subplanType && NULL == pSubplan->pNode) { + SDataInserterNode* insert = (SDataInserterNode*)pSubplan->pDataSink; + *pLen = 
insert->size; + *pStr = insert->pData; + insert->pData = NULL; + return TSDB_CODE_SUCCESS; + } + return nodesNodeToMsg((const SNode*)pSubplan, pStr, pLen); +} + +int32_t qMsgToSubplan(const char* pStr, int32_t len, SSubplan** pSubplan) { + return nodesMsgToNode(pStr, len, (SNode**)pSubplan); +} + char* qQueryPlanToString(const SQueryPlan* pPlan) { char* pStr = NULL; int32_t len = 0; diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index f006096ce2..61c38f59db 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -559,7 +559,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { // QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg); - code = qStringToSubplan(qwMsg->msg, &plan); + code = qMsgToSubplan(qwMsg->msg, qwMsg->msgLen, &plan); if (TSDB_CODE_SUCCESS != code) { code = TSDB_CODE_INVALID_MSG; QW_TASK_ELOG("task physical plan to subplan failed, code:%x - %s", code, tstrerror(code)); @@ -968,7 +968,7 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SDeleteRes *pRes) { DataSinkHandle sinkHandle = NULL; SQWTaskCtx ctx = {0}; - code = qStringToSubplan(qwMsg->msg, &plan); + code = qMsgToSubplan(qwMsg->msg, qwMsg->msgLen, &plan); if (TSDB_CODE_SUCCESS != code) { code = TSDB_CODE_INVALID_MSG; QW_TASK_ELOG("task physical plan to subplan failed, code:%x - %s", code, tstrerror(code)); diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index c5f161b66a..969c6fc8a6 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -860,7 +860,7 @@ int32_t schLaunchTaskImpl(void *param) { SSubplan *plan = pTask->plan; if (NULL == pTask->msg) { // TODO add more detailed reason for failure - code = qSubPlanToString(plan, &pTask->msg, &pTask->msgLen); + code = qSubPlanToMsg(plan, &pTask->msg, &pTask->msgLen); if (TSDB_CODE_SUCCESS != code) { SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg, pTask->msgLen); From f32a17580cc7e6a39c4f765f43a6de9b86ed5bd4 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 13 Sep 2022 13:31:35 +0800 Subject: [PATCH 09/49] enh: add binary serialization method to node structure --- source/libs/nodes/src/nodesMsgFuncs.c | 28 +++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index b0be001e56..af32913f47 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -367,6 +367,10 @@ enum { COLUMN_CODE_TABLE_TYPE, COLUMN_CODE_COLUMN_ID, COLUMN_CODE_COLUMN_TYPE, + COLUMN_CODE_DB_NAME, + COLUMN_CODE_TABLE_NAME, + COLUMN_CODE_TABLE_ALIAS, + COLUMN_CODE_COL_NAME, COLUMN_CODE_DATABLOCK_ID, COLUMN_CODE_SLOT_ID }; @@ -387,6 +391,18 @@ static int32_t columnNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeEnum(pEncoder, COLUMN_CODE_COLUMN_TYPE, pNode->colType); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeCStr(pEncoder, COLUMN_CODE_DB_NAME, pNode->dbName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeCStr(pEncoder, COLUMN_CODE_TABLE_NAME, pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeCStr(pEncoder, COLUMN_CODE_TABLE_ALIAS, pNode->tableAlias); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeCStr(pEncoder, COLUMN_CODE_COL_NAME, pNode->colName); + } if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeI16(pEncoder, 
COLUMN_CODE_DATABLOCK_ID, pNode->dataBlockId); } @@ -419,6 +435,18 @@ static int32_t msgToColumnNode(STlvDecoder* pDecoder, void* pObj) { case COLUMN_CODE_COLUMN_TYPE: code = tlvDecodeEnum(pTlv, &pNode->colType, sizeof(pNode->colType)); break; + case COLUMN_CODE_DB_NAME: + code = tlvDecodeCStr(pTlv, pNode->dbName); + break; + case COLUMN_CODE_TABLE_NAME: + code = tlvDecodeCStr(pTlv, pNode->tableName); + break; + case COLUMN_CODE_TABLE_ALIAS: + code = tlvDecodeCStr(pTlv, pNode->tableAlias); + break; + case COLUMN_CODE_COL_NAME: + code = tlvDecodeCStr(pTlv, pNode->colName); + break; case COLUMN_CODE_DATABLOCK_ID: code = tlvDecodeI16(pTlv, &pNode->dataBlockId); break; From f40d838fe7c52a4cf86abf6389515679b044f509 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 13 Sep 2022 15:07:31 +0800 Subject: [PATCH 10/49] enh: add binary serialization method to node structure --- source/libs/nodes/src/nodesMsgFuncs.c | 13 ++++++++++--- source/libs/planner/test/planTestUtil.cpp | 6 +++++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index af32913f47..7e2aa76404 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -24,7 +24,7 @@ typedef struct STlv { int16_t type; - int16_t len; + int32_t len; char value[0]; } STlv; @@ -70,7 +70,7 @@ static void endTlvEncode(STlvEncoder* pEncoder, char** pMsg, int32_t* pLen) { // nodesWarn("encode tlv count = %d, tl size = %d", pEncoder->tlvCount, sizeof(STlv) * pEncoder->tlvCount); } -static int32_t tlvEncodeImpl(STlvEncoder* pEncoder, int16_t type, const void* pValue, int16_t len) { +static int32_t tlvEncodeImpl(STlvEncoder* pEncoder, int16_t type, const void* pValue, int32_t len) { int32_t tlvLen = sizeof(STlv) + len; if (pEncoder->offset + tlvLen > pEncoder->allocSize) { void* pNewBuf = taosMemoryRealloc(pEncoder->pBuf, pEncoder->allocSize * 2); @@ -187,7 +187,7 @@ static int32_t tlvGetNextTlv(STlvDecoder* pDecoder, STlv** pTlv) { static bool tlvDecodeEnd(STlvDecoder* pDecoder) { return pDecoder->offset == pDecoder->bufSize; } -static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int16_t len) { +static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int32_t len) { if (pTlv->len != len) { return TSDB_CODE_FAILED; } @@ -710,6 +710,7 @@ static int32_t msgToLogicConditionNode(STlvDecoder* pDecoder, void* pObj) { enum { FUNCTION_CODE_EXPR_BASE = 1, + FUNCTION_CODE_FUNCTION_NAME, FUNCTION_CODE_FUNCTION_ID, FUNCTION_CODE_FUNCTION_TYPE, FUNCTION_CODE_PARAMETERS, @@ -720,6 +721,9 @@ static int32_t functionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { const SFunctionNode* pNode = (const SFunctionNode*)pObj; int32_t code = tlvEncodeObj(pEncoder, FUNCTION_CODE_EXPR_BASE, exprNodeToMsg, pNode); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeCStr(pEncoder, FUNCTION_CODE_FUNCTION_NAME, pNode->functionName); + } if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeI32(pEncoder, FUNCTION_CODE_FUNCTION_ID, pNode->funcId); } @@ -746,6 +750,9 @@ static int32_t msgToFunctionNode(STlvDecoder* pDecoder, void* pObj) { case FUNCTION_CODE_EXPR_BASE: code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node); break; + case FUNCTION_CODE_FUNCTION_NAME: + code = tlvDecodeCStr(pTlv, pNode->functionName); + break; case FUNCTION_CODE_FUNCTION_ID: code = tlvDecodeI32(pTlv, &pNode->funcId); break; diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index b280b32a94..47fa8a68dd 100644 --- 
a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -480,8 +480,12 @@ class PlannerTestBaseImpl { DO_WITH_THROW(nodesNodeToMsg, pNode, &pNewStr, &newlen) if (newlen != len || 0 != memcmp(pStr, pNewStr, len)) { cout << "nodesNodeToMsg error!!!!!!!!!!!!!! len = " << len << ", newlen = " << newlen << endl; + taosMemoryFreeClear(pNewStr); + DO_WITH_THROW(nodesNodeToString, pRoot, false, &pNewStr, &newlen) + cout << "orac node: " << pNewStr << endl; + taosMemoryFreeClear(pNewStr); DO_WITH_THROW(nodesNodeToString, pNode, false, &pNewStr, &newlen) - cout << "nodesNodeToString " << pNewStr << endl; + cout << "new node: " << pNewStr << endl; } taosMemoryFreeClear(pNewStr); From 82fcd22107f4ae589a45cfe30fb0e8815494c3b6 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 13 Sep 2022 15:57:17 +0800 Subject: [PATCH 11/49] enh: add binary serialization method to node structure --- source/libs/nodes/src/nodesMsgFuncs.c | 13 ++++++++++++- source/libs/planner/test/planTestUtil.cpp | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 7e2aa76404..b72cb694d2 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -237,6 +237,11 @@ static int32_t tlvDecodeCStr(STlv* pTlv, char* pValue) { return TSDB_CODE_SUCCESS; } +static int32_t tlvDecodeCStrP(STlv* pTlv, char** pValue) { + *pValue = strndup(pTlv->value, pTlv->len); + return NULL == *pValue ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS; +} + static int32_t tlvDecodeDynBinary(STlv* pTlv, void** pValue) { *pValue = taosMemoryMalloc(pTlv->len); if (NULL == *pValue) { @@ -461,7 +466,7 @@ static int32_t msgToColumnNode(STlvDecoder* pDecoder, void* pObj) { return code; } -enum { VALUE_CODE_EXPR_BASE = 1, VALUE_CODE_IS_NULL, VALUE_CODE_DATUM }; +enum { VALUE_CODE_EXPR_BASE = 1, VALUE_CODE_LITERAL, VALUE_CODE_IS_NULL, VALUE_CODE_DATUM }; static int32_t datumToMsg(const void* pObj, STlvEncoder* pEncoder) { const SValueNode* pNode = (const SValueNode*)pObj; @@ -512,6 +517,9 @@ static int32_t valueNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { const SValueNode* pNode = (const SValueNode*)pObj; int32_t code = tlvEncodeObj(pEncoder, VALUE_CODE_EXPR_BASE, exprNodeToMsg, pNode); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeCStr(pEncoder, VALUE_CODE_LITERAL, pNode->literal); + } if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_NULL, pNode->isNull); } @@ -608,6 +616,9 @@ static int32_t msgToValueNode(STlvDecoder* pDecoder, void* pObj) { case VALUE_CODE_EXPR_BASE: code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node); break; + case VALUE_CODE_LITERAL: + code = tlvDecodeCStrP(pTlv, &pNode->literal); + break; case VALUE_CODE_IS_NULL: code = tlvDecodeBool(pTlv, &pNode->isNull); break; diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index 47fa8a68dd..bf19c7a222 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -487,6 +487,7 @@ class PlannerTestBaseImpl { DO_WITH_THROW(nodesNodeToString, pNode, false, &pNewStr, &newlen) cout << "new node: " << pNewStr << endl; } + nodesDestroyNode(pNode); taosMemoryFreeClear(pNewStr); string str(pStr, len); From 0d7e1eb4e03a02d1e9995285a28a9b4106ecd412 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 13 Sep 2022 16:45:40 +0800 Subject: [PATCH 12/49] fix(query): twa function output NULL if input has 
overlap timestamps TD-18224 --- source/libs/function/src/builtinsimpl.c | 54 ++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 0d7fd1a6da..35e3e07839 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5398,8 +5398,8 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { int32_t i = pInput->startRowIndex; if (pCtx->start.key != INT64_MIN) { - ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) || - (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC)); + //ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) || + // (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC)); ASSERT(last->key == INT64_MIN); for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) { @@ -5446,6 +5446,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5461,6 +5466,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5475,6 +5485,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5489,6 +5504,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5503,6 +5523,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5517,6 +5542,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5531,6 +5561,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5545,6 +5580,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5559,6 +5599,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; @@ -5573,6 +5618,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + if (pInfo->p.key == st.key) { + numOfElems = 0; + goto _twa_over; + } + INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; From 26a1bb437ce99d4358ba43f6ee18e138de7794d6 Mon Sep 17 00:00:00 2001 From: 
Xiaoyu Wang Date: Tue, 13 Sep 2022 16:53:26 +0800 Subject: [PATCH 13/49] enh: add binary serialization method to node structure --- source/libs/nodes/src/nodesMsgFuncs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index b72cb694d2..9a093fca99 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -130,6 +130,9 @@ static int32_t tlvEncodeBool(STlvEncoder* pEncoder, int16_t type, bool value) { } static int32_t tlvEncodeCStr(STlvEncoder* pEncoder, int16_t type, const char* pValue) { + if (NULL == pValue) { + return TSDB_CODE_SUCCESS; + } return tlvEncodeImpl(pEncoder, type, pValue, strlen(pValue)); } From cd69248905e3884e12565661b62bec4508c7e65d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 13 Sep 2022 17:25:31 +0800 Subject: [PATCH 14/49] feat: update taos-tools 404d8da for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index c273e9889f..13559bba27 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 2dba49c + GIT_TAG 404d8da SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From aa204ec62be3f12a81af6c6d86ceb011d7c112c9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 13 Sep 2022 17:30:47 +0800 Subject: [PATCH 15/49] test: fix location in develop-test/5-taos-tools/taosbenchmark/demo.py --- .../5-taos-tools/taosbenchmark/demo.py | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py index 99e8cd36a4..f62cb05269 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py @@ -22,9 +22,9 @@ from util.dnodes import * class TDTestCase: def caseDescription(self): - ''' + """ [TD-13823] taosBenchmark test cases - ''' + """ return def init(self, conn, logSql): @@ -34,19 +34,19 @@ class TDTestCase: def getPath(self, tool="taosBenchmark"): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] else: - projPath = selfPath[:selfPath.find("tests")] + projPath = selfPath[: selfPath.find("tests")] paths = [] for root, dirs, files in os.walk(projPath): - if ((tool) in files): + if (tool) in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): + if "packaging" not in rootRealPath: paths.append(os.path.join(root, tool)) break - if (len(paths) == 0): + if len(paths) == 0: tdLog.exit("taosBenchmark not found!") return else: @@ -55,7 +55,7 @@ class TDTestCase: def run(self): binPath = self.getPath() - cmd = "%s -n 100 -t 100 -y" %binPath + cmd = "%s -n 100 -t 100 -y" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") @@ -83,8 +83,10 @@ class TDTestCase: tdSql.query("select count(*) from test.meters where groupid >= 0") tdSql.checkData(0, 0, 10000) - tdSql.query("select count(*) from test.meters where location = 'San Francisco' or location = 'Los Angles' or location = 'San Diego' or location = 'San Jose' or \ 
- location = 'Palo Alto' or location = 'Campbell' or location = 'Mountain View' or location = 'Sunnyvale' or location = 'Santa Clara' or location = 'Cupertino' ") + tdSql.query( + "select count(*) from test.meters where location = 'California.SanFrancisco' or location = 'California.LosAngles' or location = 'California.SanDiego' or location = 'California.SanJose' or \ + location = 'California.PaloAlto' or location = 'California.Campbell' or location = 'California.MountainView' or location = 'California.Sunnyvale' or location = 'California.SantaClara' or location = 'California.Cupertino' " + ) tdSql.checkData(0, 0, 10000) def stop(self): From 632a26d0fe8612e7f72d5cc2347d68836dff2279 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 13 Sep 2022 17:39:43 +0800 Subject: [PATCH 16/49] refactor(query): do some internal refactor. --- source/libs/executor/inc/executorimpl.h | 42 +-- source/libs/executor/src/executorimpl.c | 350 ++---------------- source/libs/executor/src/timewindowoperator.c | 166 ++++----- 3 files changed, 115 insertions(+), 443 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index a9826e0018..61c6f9878e 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -584,11 +584,12 @@ typedef struct SIntervalAggOperatorInfo { typedef struct SMergeAlignedIntervalAggOperatorInfo { SIntervalAggOperatorInfo* intervalAggOperatorInfo; - bool hasGroupId; +// bool hasGroupId; uint64_t groupId; // current groupId int64_t curTs; // current ts SSDataBlock* prefetchedBlock; SNode* pCondition; + SResultRow* pResultRow; } SMergeAlignedIntervalAggOperatorInfo; typedef struct SStreamIntervalOperatorInfo { @@ -648,7 +649,6 @@ typedef struct SAggOperatorInfo { } SAggOperatorInfo; typedef struct SProjectOperatorInfo { - // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; SNode* pFilterNode; // filter info, which is push down by optimizer @@ -690,7 +690,6 @@ typedef struct SFillOperatorInfo { } SFillOperatorInfo; typedef struct SGroupbyOperatorInfo { - // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; @@ -737,7 +736,6 @@ typedef struct SWindowRowsSup { } SWindowRowsSup; typedef struct SSessionAggOperatorInfo { - // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; @@ -825,7 +823,6 @@ typedef struct SStateWindowOperatorInfo { SStateKeys stateKey; int32_t tsSlotId; // primary timestamp column slot id STimeWindowAggSupp twAggSup; - // bool reptScan; const SNode* pCondition; } SStateWindowOperatorInfo; @@ -846,24 +843,6 @@ typedef struct SStreamStateAggOperatorInfo { bool ignoreExpiredData; } SStreamStateAggOperatorInfo; -typedef struct SSortedMergeOperatorInfo { - // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode - SOptrBasicInfo binfo; - SAggSupporter aggSup; - - SArray* pSortInfo; - int32_t numOfSources; - SSortHandle* pSortHandle; - int32_t bufPageSize; - uint32_t sortBufSize; // max buffer size for in-memory sort - int32_t resultRowFactor; - bool hasGroupVal; - SDiskbasedBuf* pTupleStore; // keep the final results - int32_t numOfResPerPage; - char** groupVal; - SArray* groupInfo; -} SSortedMergeOperatorInfo; - typedef struct SSortOperatorInfo { SOptrBasicInfo binfo; uint32_t sortBufSize; // max buffer size for in-memory sort @@ -871,11 
+850,10 @@ typedef struct SSortOperatorInfo { SSortHandle* pSortHandle; SArray* pColMatchInfo; // for index map from table scan output int32_t bufPageSize; - - int64_t startTs; // sort start time - uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included. - SLimitInfo limitInfo; - SNode* pCondition; + int64_t startTs; // sort start time + uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included. + SLimitInfo limitInfo; + SNode* pCondition; } SSortOperatorInfo; typedef struct STagFilterOperatorInfo { @@ -907,7 +885,6 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_decode_fn_t decode, __optr_explain_fn_t explain); int32_t operatorDummyOpenFn(SOperatorInfo* pOperator); -void operatorDummyCloseFn(void* param, int32_t numOfCols); int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t num); void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock); @@ -942,7 +919,6 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int SSDataBlock* pBlock, const char* idStr); void cleanupAggSup(SAggSupporter* pAggSup); -void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle); void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId); @@ -1089,10 +1065,8 @@ void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEn void printDataBlock(SSDataBlock* pBlock, const char* flag); uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId); -int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, - SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, - const int32_t* rowCellOffset, SSDataBlock* pBlock, - SExecTaskInfo* pTaskInfo); +int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, + SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo); int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 205bcd58df..e5f0d4d177 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -132,8 +132,6 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, return fpSet; } -void operatorDummyCloseFn(void* param, int32_t numOfCols) {} - static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo); @@ -1269,33 +1267,12 @@ static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t nu } } -// todo extract method with copytoSSDataBlock -int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, - SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, - const int32_t* rowCellOffset, SSDataBlock* pBlock, - SExecTaskInfo* pTaskInfo) { - SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId); - SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset); - - doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset); - if (pRow->numOfRows == 0) { - releaseBufPage(pBuf, page); - return 0; - } - - while (pBlock->info.rows 
+ pRow->numOfRows > pBlock->info.capacity) { - int32_t code = blockDataEnsureCapacity(pBlock, pBlock->info.capacity * 1.25); - if (TAOS_FAILED(code)) { - releaseBufPage(pBuf, page); - qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - T_LONG_JMP(pTaskInfo->env, code); - } - } - +static void doCopyResultToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, + SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo) { for (int32_t j = 0; j < numOfExprs; ++j) { int32_t slotId = pExprInfo[j].base.resSchema.slotId; - pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset); + pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset); if (pCtx[j].fpSet.finalize) { int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); if (TAOS_FAILED(code)) { @@ -1303,7 +1280,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi T_LONG_JMP(pTaskInfo->env, code); } } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { - // do nothing, todo refactor + // do nothing } else { // expand the result into multiple rows. E.g., _wstart, top(k, 20) // the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows. @@ -1314,10 +1291,39 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi } } } +} + +int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, + SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { + SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId); + SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset); + + SqlFunctionCtx* pCtx = pSup->pCtx; + SExprInfo* pExprInfo = pSup->pExprInfo; + const int32_t* rowEntryOffset = pSup->rowEntryInfoOffset; + + doUpdateNumOfRows(pCtx, pRow, pSup->numOfExprs, rowEntryOffset); + if (pRow->numOfRows == 0) { + releaseBufPage(pBuf, page); + return 0; + } + + int32_t size = pBlock->info.capacity; + while (pBlock->info.rows + pRow->numOfRows > size) { + size = size * 1.25; + } + + int32_t code = blockDataEnsureCapacity(pBlock, size); + if (TAOS_FAILED(code)) { + releaseBufPage(pBuf, page); + qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); + T_LONG_JMP(pTaskInfo->env, code); + } + + doCopyResultToDataBlock(pExprInfo, pSup->numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); releaseBufPage(pBuf, page); pBlock->info.rows += pRow->numOfRows; - return 0; } @@ -1362,32 +1368,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS } pGroupResInfo->index += 1; - - for (int32_t j = 0; j < numOfExprs; ++j) { - int32_t slotId = pExprInfo[j].base.resSchema.slotId; - - pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset); - if (pCtx[j].fpSet.finalize) { -#ifdef BUF_PAGE_DEBUG - qDebug("\npage_finalize %d", numOfExprs); -#endif - int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); - if (TAOS_FAILED(code)) { - qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - T_LONG_JMP(pTaskInfo->env, code); - } - } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { - // do nothing, todo refactor - } else { - // expand the result into multiple rows. E.g., _wstart, top(k, 20) - // the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows. 
- SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId); - char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo); - for (int32_t k = 0; k < pRow->numOfRows; ++k) { - colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); - } - } - } + doCopyResultToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); releaseBufPage(pBuf, page); pBlock->info.rows += pRow->numOfRows; @@ -2307,21 +2288,6 @@ _error: static int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey); -static void destroySortedMergeOperatorInfo(void* param, int32_t numOfOutput) { - SSortedMergeOperatorInfo* pInfo = (SSortedMergeOperatorInfo*)param; - taosArrayDestroy(pInfo->pSortInfo); - taosArrayDestroy(pInfo->groupInfo); - - if (pInfo->pSortHandle != NULL) { - tsortDestroySortHandle(pInfo->pSortHandle); - } - - blockDataDestroy(pInfo->binfo.pRes); - cleanupAggSup(&pInfo->aggSup); - - taosMemoryFreeClear(param); -} - static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int32_t rowIndex) { size_t size = taosArrayGetSize(groupInfo); if (size == 0) { @@ -2357,41 +2323,6 @@ static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int3 return 0; } -static void doMergeResultImpl(SSortedMergeOperatorInfo* pInfo, SqlFunctionCtx* pCtx, int32_t numOfExpr, - int32_t rowIndex) { - for (int32_t j = 0; j < numOfExpr; ++j) { // TODO set row index - // pCtx[j].startRow = rowIndex; - } - - for (int32_t j = 0; j < numOfExpr; ++j) { - int32_t functionId = pCtx[j].functionId; - // pCtx[j].fpSet->addInput(&pCtx[j]); - - // if (functionId < 0) { - // SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1); - // doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE); - // } else { - // assert(!TSDB_FUNC_IS_SCALAR(functionId)); - // aAggs[functionId].mergeFunc(&pCtx[j]); - // } - } -} - -static void doFinalizeResultImpl(SqlFunctionCtx* pCtx, int32_t numOfExpr) { - for (int32_t j = 0; j < numOfExpr; ++j) { - int32_t functionId = pCtx[j].functionId; - // if (functionId == FUNC_TAG_DUMMY || functionId == FUNC_TS_DUMMY) { - // continue; - // } - - // if (functionId < 0) { - // SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1); - // doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); - // } else { - // pCtx[j].fpSet.finalize(&pCtx[j]); - } -} - static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock* pBlock, int32_t rowIndex) { int32_t size = (int32_t)taosArrayGetSize(pColumnList); @@ -2406,210 +2337,6 @@ static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock return true; } -static void doMergeImpl(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) { - SSortedMergeOperatorInfo* pInfo = pOperator->info; - - SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx; - - for (int32_t i = 0; i < pBlock->info.rows; ++i) { - if (!pInfo->hasGroupVal) { - ASSERT(i == 0); - doMergeResultImpl(pInfo, pCtx, numOfExpr, i); - pInfo->hasGroupVal = saveCurrentTuple(pInfo->groupVal, pInfo->groupInfo, pBlock, i); - } else { - if (needToMerge(pBlock, pInfo->groupInfo, pInfo->groupVal, i)) { - doMergeResultImpl(pInfo, pCtx, numOfExpr, i); - } else { - doFinalizeResultImpl(pCtx, numOfExpr); - int32_t numOfRows = getNumOfResult(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL); - // setTagValueForMultipleRows(pCtx, pOperator->exprSupp.numOfExprs, numOfRows); - - // TODO check for 
available buffer; - - // next group info data - pInfo->binfo.pRes->info.rows += numOfRows; - for (int32_t j = 0; j < numOfExpr; ++j) { - if (pCtx[j].functionId < 0) { - continue; - } - - pCtx[j].fpSet.process(&pCtx[j]); - } - - doMergeResultImpl(pInfo, pCtx, numOfExpr, i); - pInfo->hasGroupVal = saveCurrentTuple(pInfo->groupVal, pInfo->groupInfo, pBlock, i); - } - } - } -} - -static SSDataBlock* doMerge(SOperatorInfo* pOperator) { - SSortedMergeOperatorInfo* pInfo = pOperator->info; - SSortHandle* pHandle = pInfo->pSortHandle; - - SSDataBlock* pDataBlock = createOneDataBlock(pInfo->binfo.pRes, false); - blockDataEnsureCapacity(pDataBlock, pOperator->resultInfo.capacity); - - while (1) { - blockDataCleanup(pDataBlock); - while (1) { - STupleHandle* pTupleHandle = tsortNextTuple(pHandle); - if (pTupleHandle == NULL) { - break; - } - - // build datablock for merge for one group - appendOneRowToDataBlock(pDataBlock, pTupleHandle); - if (pDataBlock->info.rows >= pOperator->resultInfo.capacity) { - break; - } - } - - if (pDataBlock->info.rows == 0) { - break; - } - - setInputDataBlock(pOperator, pOperator->exprSupp.pCtx, pDataBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - // updateOutputBuf(&pInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor, - // pOperator->pRuntimeEnv, true); - doMergeImpl(pOperator, pOperator->exprSupp.numOfExprs, pDataBlock); - // flush to tuple store, and after all data have been handled, return to upstream node or sink node - } - - doFinalizeResultImpl(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs); - int32_t numOfRows = getNumOfResult(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL); - // setTagValueForMultipleRows(pCtx, pOperator->exprSupp.numOfExprs, numOfRows); - - // TODO check for available buffer; - - // next group info data - pInfo->binfo.pRes->info.rows += numOfRows; - return (pInfo->binfo.pRes->info.rows > 0) ? pInfo->binfo.pRes : NULL; -} - -SSDataBlock* getSortedMergeBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, - SArray* pColMatchInfo, SSortedMergeOperatorInfo* pInfo) { - blockDataCleanup(pDataBlock); - - SSDataBlock* p = tsortGetSortedDataBlock(pHandle); - if (p == NULL) { - return NULL; - } - - blockDataEnsureCapacity(p, capacity); - - while (1) { - STupleHandle* pTupleHandle = tsortNextTuple(pHandle); - if (pTupleHandle == NULL) { - break; - } - - appendOneRowToDataBlock(p, pTupleHandle); - if (p->info.rows >= capacity) { - break; - } - } - - if (p->info.rows > 0) { - int32_t numOfCols = taosArrayGetSize(pColMatchInfo); - for (int32_t i = 0; i < numOfCols; ++i) { - SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, i); - ASSERT(pmInfo->matchType == COL_MATCH_FROM_SLOT_ID); - - SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, pmInfo->srcSlotId); - SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->targetSlotId); - colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info); - } - - pDataBlock->info.rows = p->info.rows; - pDataBlock->info.capacity = p->info.rows; - } - - blockDataDestroy(p); - return (pDataBlock->info.rows > 0) ? 
pDataBlock : NULL; -} - -static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) { - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SSortedMergeOperatorInfo* pInfo = pOperator->info; - if (pOperator->status == OP_RES_TO_RETURN) { - return getSortedMergeBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, NULL, pInfo); - } - - int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; - pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage, - pInfo->binfo.pRes, "GET_TASKID(pTaskInfo)"); - - tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL); - - for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) { - SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource)); - ps->param = pOperator->pDownstream[i]; - tsortAddSource(pInfo->pSortHandle, ps); - } - - int32_t code = tsortOpen(pInfo->pSortHandle); - if (code != TSDB_CODE_SUCCESS) { - T_LONG_JMP(pTaskInfo->env, terrno); - } - - pOperator->status = OP_RES_TO_RETURN; - return doMerge(pOperator); -} - -static int32_t initGroupCol(SExprInfo* pExprInfo, int32_t numOfCols, SArray* pGroupInfo, - SSortedMergeOperatorInfo* pInfo) { - if (pGroupInfo == NULL || taosArrayGetSize(pGroupInfo) == 0) { - return 0; - } - - int32_t len = 0; - SArray* plist = taosArrayInit(3, sizeof(SColumn)); - pInfo->groupInfo = taosArrayInit(3, sizeof(int32_t)); - - if (plist == NULL || pInfo->groupInfo == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - size_t numOfGroupCol = taosArrayGetSize(pInfo->groupInfo); - for (int32_t i = 0; i < numOfGroupCol; ++i) { - SColumn* pCol = taosArrayGet(pGroupInfo, i); - for (int32_t j = 0; j < numOfCols; ++j) { - SExprInfo* pe = &pExprInfo[j]; - if (pe->base.resSchema.slotId == pCol->colId) { - taosArrayPush(plist, pCol); - taosArrayPush(pInfo->groupInfo, &j); - len += pCol->bytes; - break; - } - } - } - - ASSERT(taosArrayGetSize(pGroupInfo) == taosArrayGetSize(plist)); - - pInfo->groupVal = taosMemoryCalloc(1, (POINTER_BYTES * numOfGroupCol + len)); - if (pInfo->groupVal == NULL) { - taosArrayDestroy(plist); - return TSDB_CODE_OUT_OF_MEMORY; - } - - int32_t offset = 0; - char* start = (char*)(pInfo->groupVal + (POINTER_BYTES * numOfGroupCol)); - for (int32_t i = 0; i < numOfGroupCol; ++i) { - pInfo->groupVal[i] = start + offset; - SColumn* pCol = taosArrayGet(plist, i); - offset += pCol->bytes; - } - - taosArrayDestroy(plist); - - return TSDB_CODE_SUCCESS; -} - int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) { // todo add more information about exchange operation int32_t type = pOperator->operatorType; @@ -3342,13 +3069,6 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo) { pInfo->pRes = blockDataDestroy(pInfo->pRes); } -void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) { - SOptrBasicInfo* pInfo = (SOptrBasicInfo*)param; - cleanupBasicInfo(pInfo); - - taosMemoryFreeClear(param); -} - static void freeItem(void* pItem) { void** p = pItem; if (*p != NULL) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index d773f8a629..81a5d9953c 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -46,19 +46,6 @@ static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, co uint64_t groupId); static void doCloseWindow(SResultRowInfo* pResultRowInfo, const 
SIntervalAggOperatorInfo* pInfo, SResultRow* pResult); -///* -// * There are two cases to handle: -// * -// * 1. Query range is not set yet (queryRangeSet = 0). we need to set the query range info, including -// * pQueryAttr->lastKey, pQueryAttr->window.skey, and pQueryAttr->eKey. -// * 2. Query range is set and query is in progress. There may be another result with the same query ranges to be -// * merged during merge stage. In this case, we need the pTableQueryInfo->lastResRows to decide if there -// * is a previous result generated or not. -// */ -// static void setIntervalQueryRange(STableQueryInfo* pTableQueryInfo, TSKEY key, STimeWindow* pQRange) { -// // do nothing -//} - static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols) { return tsCols == NULL ? win->skey : tsCols[0]; } static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindow* win, bool masterscan, @@ -3004,9 +2991,9 @@ static void addRetriveWindow(SArray* wins, SStreamFinalIntervalOperatorInfo* pIn SPullWindowInfo pull = {.window = nextWin, .groupId = winKey->groupId}; // add pull data request savePullWindow(&pull, pInfo->pPullWins); - int32_t size = taosArrayGetSize(pInfo->pChildren); - addPullWindow(pInfo->pPullDataMap, winKey, size); - qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size); + int32_t size1 = taosArrayGetSize(pInfo->pChildren); + addPullWindow(pInfo->pPullDataMap, winKey, size1); + qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size1); } } } @@ -4884,66 +4871,64 @@ void destroyMergeAlignedIntervalOperatorInfo(void* param) { taosMemoryFreeClear(param); } -static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, - SSDataBlock* pResultBlock, TSKEY wstartTs) { - SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info; +static SResultRow* doSetSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, SAggSupporter* pSup) { + SResultRow* pResult = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize); + pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; + return pResult; +} - SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo; - SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; - SExprSupp* pSup = &pOperatorInfo->exprSupp; +static int32_t setSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, STimeWindow* win, SResultRow** pResult, + SExprSupp* pExprSup, SAggSupporter* pAggSup) { + if (*pResult == NULL) { + *pResult = doSetSingleOutputTupleBuf(pResultRowInfo, pAggSup); + if (*pResult == NULL) { + return terrno; + } + } - SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId); - SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet( - iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); - ASSERT(p1 != NULL); - - finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pSup->pCtx, pSup->pExprInfo, pSup->numOfExprs, - pSup->rowEntryInfoOffset, pResultBlock, pTaskInfo); - tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); - ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0); + // set time window for current result ,todo extract method + (*pResult)->win = (*win); + (*pResult)->numOfRows = 0; + (*pResult)->closed = false; + (*pResult)->endInterp = false; + (*pResult)->startInterp = false; + 
memset((*pResult)->pEntryInfo, 0, pAggSup->resultRowSize - sizeof(SResultRow)); + setResultRowInitCtx((*pResult), pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset); return TSDB_CODE_SUCCESS; } static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, - SSDataBlock* pBlock, int32_t scanFlag, SSDataBlock* pResultBlock) { + SSDataBlock* pBlock, SSDataBlock* pResultBlock) { SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info; SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo; SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; SExprSupp* pSup = &pOperatorInfo->exprSupp; + SInterval* pInterval = &iaInfo->interval; - int32_t startPos = 0; - int32_t numOfOutput = pSup->numOfExprs; - int64_t* tsCols = extractTsCol(pBlock, iaInfo); - uint64_t tableGroupId = pBlock->info.groupId; - SResultRow* pResult = NULL; + int32_t startPos = 0; + int64_t* tsCols = extractTsCol(pBlock, iaInfo); TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols); // there is an result exists if (miaInfo->curTs != INT64_MIN) { - ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); - if (ts != miaInfo->curTs) { - outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs); + finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo); miaInfo->curTs = ts; } } else { miaInfo->curTs = ts; - ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0); } STimeWindow win = {0}; win.skey = miaInfo->curTs; - win.ekey = - taosTimeAdd(win.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, iaInfo->interval.precision) - 1; + win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; - // TODO: remove the hash table (groupid + winkey => result row position) - int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, - pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); - if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + int32_t ret = setSingleOutputTupleBuf(pResultRowInfo, &win, &miaInfo->pResultRow, pSup, &iaInfo->aggSup); + if (ret != TSDB_CODE_SUCCESS || miaInfo->pResultRow == NULL) { + T_LONG_JMP(pTaskInfo->env, ret); } int32_t currPos = startPos; @@ -4956,21 +4941,18 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, - pBlock->info.rows, numOfOutput); + pBlock->info.rows, pSup->numOfExprs); - outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs); + finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo); miaInfo->curTs = tsCols[currPos]; currWin.skey = miaInfo->curTs; - currWin.ekey = taosTimeAdd(currWin.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, - iaInfo->interval.precision) - - 1; + currWin.ekey = taosTimeAdd(currWin.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; startPos = currPos; - ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, - numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); - if (ret != TSDB_CODE_SUCCESS || pResult 
== NULL) { - T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + ret = setSingleOutputTupleBuf(pResultRowInfo, &win, &miaInfo->pResultRow, pSup, &iaInfo->aggSup); + if (ret != TSDB_CODE_SUCCESS || miaInfo->pResultRow == NULL) { + T_LONG_JMP(pTaskInfo->env, ret); } miaInfo->curTs = currWin.skey; @@ -4978,68 +4960,66 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, - pBlock->info.rows, numOfOutput); + pBlock->info.rows, pSup->numOfExprs); } static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperator->info; - SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo; + SMergeAlignedIntervalAggOperatorInfo* pMiaInfo = pOperator->info; + SIntervalAggOperatorInfo* pIaInfo = pMiaInfo->intervalAggOperatorInfo; - SExprSupp* pSup = &pOperator->exprSupp; - SSDataBlock* pRes = iaInfo->binfo.pRes; - - SOperatorInfo* downstream = pOperator->pDownstream[0]; - int32_t scanFlag = MAIN_SCAN; + SExprSupp* pSup = &pOperator->exprSupp; + SSDataBlock* pRes = pIaInfo->binfo.pRes; + SResultRowInfo* pResultRowInfo = &pIaInfo->binfo.resultRowInfo; + SOperatorInfo* downstream = pOperator->pDownstream[0]; + int32_t scanFlag = MAIN_SCAN; while (1) { SSDataBlock* pBlock = NULL; - if (miaInfo->prefetchedBlock == NULL) { + if (pMiaInfo->prefetchedBlock == NULL) { pBlock = downstream->fpSet.getNextFn(downstream); } else { - pBlock = miaInfo->prefetchedBlock; - miaInfo->prefetchedBlock = NULL; + pBlock = pMiaInfo->prefetchedBlock; + pMiaInfo->prefetchedBlock = NULL; - miaInfo->groupId = pBlock->info.groupId; + pMiaInfo->groupId = pBlock->info.groupId; } + // no data exists, all query processing is done if (pBlock == NULL) { - // close last unfinalized time window - if (miaInfo->curTs != INT64_MIN) { - ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); - outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs); - miaInfo->curTs = INT64_MIN; + // close last unclosed time window + if (pMiaInfo->curTs != INT64_MIN) { + finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); + pMiaInfo->curTs = INT64_MIN; } doSetOperatorCompleted(pOperator); break; } - if (!miaInfo->hasGroupId) { - miaInfo->hasGroupId = true; - miaInfo->groupId = pBlock->info.groupId; - } else if (miaInfo->groupId != pBlock->info.groupId) { + if (pMiaInfo->groupId != pBlock->info.groupId && pMiaInfo->groupId != 0) { // if there are unclosed time window, close it firstly. 
- ASSERT(miaInfo->curTs != INT64_MIN); - outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs); - miaInfo->prefetchedBlock = pBlock; - miaInfo->curTs = INT64_MIN; + ASSERT(pMiaInfo->curTs != INT64_MIN); + finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); + + pMiaInfo->prefetchedBlock = pBlock; + pMiaInfo->curTs = INT64_MIN; + pMiaInfo->groupId = 0; break; } - getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag); - setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true); - doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes); + getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag); + setInputDataBlock(pOperator, pSup->pCtx, pBlock, pIaInfo->inputOrder, scanFlag, true); + doMergeAlignedIntervalAggImpl(pOperator, &pIaInfo->binfo.resultRowInfo, pBlock, pRes); - doFilter(miaInfo->pCondition, pRes, NULL); + doFilter(pMiaInfo->pCondition, pRes, NULL); if (pRes->info.rows >= pOperator->resultInfo.capacity) { break; } } - pRes->info.groupId = miaInfo->groupId; - miaInfo->hasGroupId = false; + pRes->info.groupId = pMiaInfo->groupId; } static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) { @@ -5191,8 +5171,7 @@ static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t table SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet( iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); ASSERT(p1 != NULL); - finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo, - pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo); +// finalizeResultRows(iaInfo->aggSup.pResultBuf, p1, pResultBlock, pTaskInfo); tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); return TSDB_CODE_SUCCESS; } @@ -5201,9 +5180,7 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t STimeWindow* newWin) { SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info; SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo; - SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC); - SExprSupp* pExprSup = &pOperatorInfo->exprSupp; SGroupTimeWindow groupTimeWindow = {.groupId = tableGroupId, .window = *newWin}; tdListAppend(miaInfo->groupIntervals, &groupTimeWindow); @@ -5216,9 +5193,10 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t if (prevGrpWin->groupId != tableGroupId) { continue; } + STimeWindow* prevWin = &prevGrpWin->window; if ((ascScan && newWin->skey > prevWin->ekey) || ((!ascScan) && newWin->skey < prevWin->ekey)) { - finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock); +// finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock); tdListPopNode(miaInfo->groupIntervals, listNode); } } @@ -5378,7 +5356,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { if (listNode != NULL) { SGroupTimeWindow* grpWin = (SGroupTimeWindow*)(listNode->data); - finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes); +// finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes); pRes->info.groupId = grpWin->groupId; } } From 60574763a9e7ff8f28d4e5607a7fc8c35680478c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 13 Sep 2022 17:41:31 +0800 Subject: [PATCH 17/49] 
refactor(query): do some internal refactor. --- source/libs/executor/src/timewindowoperator.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 81a5d9953c..c4525cc3b9 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4865,7 +4865,7 @@ _error: return NULL; } -void destroyMergeAlignedIntervalOperatorInfo(void* param) { +void destroyMAIOperatorInfo(void* param) { SMergeAlignedIntervalAggOperatorInfo* miaInfo = (SMergeAlignedIntervalAggOperatorInfo*)param; destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo); taosMemoryFreeClear(param); @@ -5118,7 +5118,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, pOperator->info = miaInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL, - destroyMergeAlignedIntervalOperatorInfo, NULL, NULL, NULL); + destroyMAIOperatorInfo, NULL, NULL, NULL); code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { @@ -5128,7 +5128,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, return pOperator; _error: - destroyMergeAlignedIntervalOperatorInfo(miaInfo); + destroyMAIOperatorInfo(miaInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; From b82f27ec03279a2db352a6bfad66f5a8e544f76b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 13 Sep 2022 17:48:57 +0800 Subject: [PATCH 18/49] tsc: handle schedule error --- include/util/tsched.h | 6 ++---- source/client/src/clientImpl.c | 7 ++++++- source/libs/qcom/src/queryUtil.c | 5 +---- source/util/src/tsched.c | 13 +++++++------ 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/include/util/tsched.h b/include/util/tsched.h index 347cacd191..379456afe6 100644 --- a/include/util/tsched.h +++ b/include/util/tsched.h @@ -31,7 +31,6 @@ typedef struct SSchedMsg { void *thandle; } SSchedMsg; - typedef struct { char label[TSDB_LABEL_LEN]; tsem_t emptySem; @@ -48,7 +47,6 @@ typedef struct { void *pTimer; } SSchedQueue; - /** * Create a thread-safe ring-buffer based task queue and return the instance. A thread * pool will be created to consume the messages in the queue. @@ -57,7 +55,7 @@ typedef struct { * @param label the label of the queue * @return the created queue scheduler */ -void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue* pSched); +void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue *pSched); /** * Create a thread-safe ring-buffer based task queue and return the instance. 
@@ -83,7 +81,7 @@ void taosCleanUpScheduler(void *queueScheduler); * @param queueScheduler the queue scheduler instance * @param pMsg the message for the task */ -void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg); +int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg); #ifdef __cplusplus } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 5ebc2729f8..39b4b069a0 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1399,7 +1399,12 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { arg->msg = *pMsg; arg->pEpset = tEpSet; - taosAsyncExec(doProcessMsgFromServer, arg, NULL); + if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) { + tscError("failed to sched msg to tsc, tsc ready to quit"); + rpcFreeCont(pMsg->pCont); + taosMemoryFree(arg->pEpset); + taosMemoryFree(arg); + } } TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) { diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index d848016e46..8162b922ce 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -134,8 +134,7 @@ int32_t taosAsyncExec(__async_exec_fn_t execFn, void* execParam, int32_t* code) schedMsg.thandle = execParam; schedMsg.msg = code; - taosScheduleTask(&pTaskQueue, &schedMsg); - return 0; + return taosScheduleTask(&pTaskQueue, &schedMsg); } void destroySendMsgInfo(SMsgSendInfo* pMsgBody) { @@ -472,5 +471,3 @@ int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) { return TSDB_CODE_SUCCESS; } - - diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c index 89471c4347..9cf9e2c431 100644 --- a/source/util/src/tsched.c +++ b/source/util/src/tsched.c @@ -149,18 +149,18 @@ void *taosProcessSchedQueue(void *scheduler) { return NULL; } -void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { +int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { SSchedQueue *pSched = (SSchedQueue *)queueScheduler; int32_t ret = 0; if (pSched == NULL) { uError("sched is not ready, msg:%p is dropped", pMsg); - return; + return -1; } if (atomic_load_8(&pSched->stop)) { uError("sched is already stopped, msg:%p is dropped", pMsg); - return; + return -1; } if ((ret = tsem_wait(&pSched->emptySem)) != 0) { @@ -185,6 +185,7 @@ void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno)); ASSERT(0); } + return ret; } void taosCleanUpScheduler(void *param) { @@ -192,11 +193,11 @@ void taosCleanUpScheduler(void *param) { if (pSched == NULL) return; uDebug("start to cleanup %s schedQsueue", pSched->label); - + atomic_store_8(&pSched->stop, 1); taosMsleep(200); - + for (int32_t i = 0; i < pSched->numOfThreads; ++i) { if (taosCheckPthreadValid(pSched->qthread[i])) { tsem_post(&pSched->fullSem); @@ -220,7 +221,7 @@ void taosCleanUpScheduler(void *param) { if (pSched->queue) taosMemoryFree(pSched->queue); if (pSched->qthread) taosMemoryFree(pSched->qthread); - //taosMemoryFree(pSched); + // taosMemoryFree(pSched); } // for debug purpose, dump the scheduler status every 1min. 
From 9355996eb19f29910af5a4bcee38b96824211bbb Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 13 Sep 2022 17:50:35 +0800 Subject: [PATCH 19/49] docs: change location to align with taosbenchmark --- docs/en/05-get-started/01-docker.md | 6 +++--- docs/en/05-get-started/03-package.md | 6 +++--- docs/en/14-reference/03-connector/06-rust.mdx | 6 +++--- .../src/main/java/com/taos/example/SubscribeDemo.java | 8 ++++---- .../com/taos/example/highvolume/MockDataSource.java | 4 ++-- docs/examples/python/mockdatasource.py | 10 +++++----- .../rust/nativeexample/examples/stmt_example.rs | 2 +- .../rust/nativeexample/examples/subscribe_demo.rs | 6 +++--- .../rust/restexample/examples/insert_example.rs | 8 ++++---- docs/zh/05-get-started/01-docker.md | 6 +++--- docs/zh/05-get-started/03-package.md | 6 +++--- docs/zh/08-connector/06-rust.mdx | 6 +++--- 12 files changed, 37 insertions(+), 37 deletions(-) diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index 66f7d5d594..6191492b37 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -52,7 +52,7 @@ Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) i taosBenchmark ``` -This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`. +This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`. The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds. 
@@ -74,10 +74,10 @@ Query the average, maximum, and minimum values of all 100 million rows of data: SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -Query the number of rows whose `location` tag is `San Francisco`: +Query the number of rows whose `location` tag is `California.SanFrancisco`: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; ``` Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`: diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md index 7257fccc80..b0400de673 100644 --- a/docs/en/05-get-started/03-package.md +++ b/docs/en/05-get-started/03-package.md @@ -221,7 +221,7 @@ Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) i taosBenchmark ``` -This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`. +This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`. The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds. 
@@ -243,10 +243,10 @@ Query the average, maximum, and minimum values of all 100 million rows of data: SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -Query the number of rows whose `location` tag is `San Francisco`: +Query the number of rows whose `location` tag is `California.SanFrancisco`: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; ``` Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`: diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index 0d391c6ac3..530287e2a4 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { let inserted = taos.exec_many([ // create super table "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ - TAGS (`groupid` INT, `location` BINARY(16))", + TAGS (`groupid` INT, `location` BINARY(24))", // create child table - "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", // insert into child table "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", // insert with NULL values "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", // insert and automatically create table with tags if not exists - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", // insert many records in a single sql "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", ]).await?; diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java index 179e6e6911..e9af5e9ce0 100644 --- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java @@ -38,12 +38,12 @@ public class SubscribeDemo { statement.executeUpdate("create database " + DB_NAME); statement.executeUpdate("use " + DB_NAME); statement.executeUpdate( - "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(16))"); - statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')"); + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))"); + statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')"); statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)"); statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)"); statement.executeUpdate( - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119)"); + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119)"); statement.executeUpdate( "INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)"); // create topic @@ -75,4 +75,4 @@ public class SubscribeDemo { } timer.cancel(); } -} \ No newline at end of file +} diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java 
b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java index 6fe83f002e..f0ebc53b4b 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java @@ -16,7 +16,7 @@ class MockDataSource implements Iterator { private int currentTbId = -1; // mock values - String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"}; + String[] location = {"California.LosAngeles", "California.SanDiego", "California.SanJose", "California.Campbell", "California.SanFrancisco"}; float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f}; int[] voltage = {119, 116, 111, 113, 118}; float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f}; @@ -50,4 +50,4 @@ class MockDataSource implements Iterator { return sb.toString(); } -} \ No newline at end of file +} diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py index 852860aec0..1c516a800e 100644 --- a/docs/examples/python/mockdatasource.py +++ b/docs/examples/python/mockdatasource.py @@ -3,11 +3,11 @@ import time class MockDataSource: samples = [ - "8.8,119,0.32,LosAngeles,0", - "10.7,116,0.34,SanDiego,1", - "9.9,111,0.33,Hollywood,2", - "8.9,113,0.329,Compton,3", - "9.4,118,0.141,San Francisco,4" + "8.8,119,0.32,California.LosAngeles,0", + "10.7,116,0.34,California.SanDiego,1", + "9.9,111,0.33,California.SanJose,2", + "8.9,113,0.329,California.Campbell,3", + "9.4,118,0.141,California.SanFrancisco,4" ] def __init__(self, tb_name_prefix, table_count): diff --git a/docs/examples/rust/nativeexample/examples/stmt_example.rs b/docs/examples/rust/nativeexample/examples/stmt_example.rs index 26084746f2..9cf8e8e1fc 100644 --- a/docs/examples/rust/nativeexample/examples/stmt_example.rs +++ b/docs/examples/rust/nativeexample/examples/stmt_example.rs @@ -12,7 +12,7 @@ async fn main() -> anyhow::Result<()> { // bind table name and tags stmt.set_tbname_tags( "d1001", - &[Value::VarChar("San Fransico".into()), Value::Int(2)], + &[Value::VarChar("California.SanFransico".into()), Value::Int(2)], )?; // bind values. 
let values = vec![ diff --git a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs index 7e0a347948..11d6d4e004 100644 --- a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs +++ b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs @@ -19,13 +19,13 @@ struct Record { async fn prepare(taos: Taos) -> anyhow::Result<()> { let inserted = taos.exec_many([ // create child table - "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", // insert into child table "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", // insert with NULL values "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", // insert and automatically create table with tags if not exists - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", // insert many records in a single sql "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", ]).await?; @@ -48,7 +48,7 @@ async fn main() -> anyhow::Result<()> { format!("CREATE DATABASE `{db}`"), format!("USE `{db}`"), // create super table - format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"), + format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"), // create topic for subscription format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") ]) diff --git a/docs/examples/rust/restexample/examples/insert_example.rs b/docs/examples/rust/restexample/examples/insert_example.rs index 27b2bb4788..11a84f1661 100644 --- a/docs/examples/rust/restexample/examples/insert_example.rs +++ b/docs/examples/rust/restexample/examples/insert_example.rs @@ -14,14 +14,14 @@ async fn main() -> anyhow::Result<()> { ]).await?; let inserted = taos.exec("INSERT INTO - power.d1001 USING power.meters TAGS('San Francisco', 2) + power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS('San Francisco', 3) + power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS('Los Angeles', 2) + power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS('Los Angeles', 3) + power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?; assert_eq!(inserted, 8); diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index e3345fed96..0f004581b5 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -52,7 +52,7 @@ taos> $ taosBenchmark ``` -该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 
被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。 +该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。 @@ -74,10 +74,10 @@ SELECT COUNT(*) FROM test.meters; SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -查询 location = "San Francisco" 的记录总条数: +查询 location = "California.SanFrancisco" 的记录总条数: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; ``` 查询 groupId = 10 的所有记录的平均值、最大值、最小值等: diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index cb2553a0bf..66863d1bd9 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -223,7 +223,7 @@ Query OK, 2 row(s) in set (0.003128s) $ taosBenchmark ``` -该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。 +该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。 @@ -245,10 +245,10 @@ SELECT COUNT(*) FROM test.meters; SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -查询 location = "San Francisco" 的记录总条数: +查询 location = "California.SanFrancisco" 的记录总条数: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "Calaifornia.SanFrancisco"; ``` 查询 groupId = 10 的所有记录的平均值、最大值、最小值等: diff --git a/docs/zh/08-connector/06-rust.mdx b/docs/zh/08-connector/06-rust.mdx index 26f53c82d6..b838e5c5a2 100644 --- a/docs/zh/08-connector/06-rust.mdx +++ b/docs/zh/08-connector/06-rust.mdx @@ -155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { let inserted = taos.exec_many([ // create super table "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ - TAGS (`groupid` INT, `location` BINARY(16))", + TAGS (`groupid` INT, `location` BINARY(24))", // create child table - "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", // insert into child table "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", // insert with NULL values "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", // 
insert and automatically create table with tags if not exists - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", // insert many records in a single sql "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", ]).await?; From 6b2bf42f6824df1599303768160cfc403c04b7c1 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 13 Sep 2022 16:45:40 +0800 Subject: [PATCH 20/49] fix(query): twa function output NULL if input has overlap timestamps TD-18224 --- include/util/taoserror.h | 1 + source/libs/function/src/builtinsimpl.c | 30 +++++++++---------------- source/util/src/terror.c | 1 + 3 files changed, 12 insertions(+), 20 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index d16a599811..0058b03b30 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -577,6 +577,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802) #define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803) #define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) +#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x012B) //udf #define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 35e3e07839..7d8c231c15 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5447,8 +5447,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5467,8 +5466,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5486,8 +5484,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5505,8 +5502,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5524,8 +5520,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5543,8 +5538,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5562,8 +5556,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5581,8 +5574,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5600,8 +5592,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - 
numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); @@ -5619,8 +5610,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pInfo->p.key == st.key) { - numOfElems = 0; - goto _twa_over; + return TSDB_CODE_FUNC_DUP_TIMESTAMP; } INIT_INTP_POINT(st, tsList[i], val[i]); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 044cdc86b4..ab0771d218 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -579,6 +579,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_NUM, "Invalid function par TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_TYPE, "Invalid function para type") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_VALUE, "Invalid function para value") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION, "Not buildin function") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_DUP_TIMESTAMP, "Duplicate timestamps not allowed in function") //udf TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping") From ad45c0a84806decee413b68770a62810f9a8715a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 13 Sep 2022 18:22:58 +0800 Subject: [PATCH 21/49] fix(query): set the group id. --- source/libs/executor/src/executil.c | 10 +++++-- source/libs/executor/src/timewindowoperator.c | 28 ++++++++++++------- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 3965b7e5b2..410da92013 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -800,9 +800,15 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, taosMemoryFreeClear(pColInfoData); } - for (int i = 0; i < taosArrayGetSize(res); i++) { + size_t numOfTables = taosArrayGetSize(res); + for (int i = 0; i < numOfTables; i++) { STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0}; - taosArrayPush(pListInfo->pTableList, &info); + void* p = taosArrayPush(pListInfo->pTableList, &info); + if (p == NULL) { + taosArrayDestroy(res); + return TSDB_CODE_OUT_OF_MEMORY; + } + qDebug("tagfilter get uid:%ld", info.uid); } diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index c4525cc3b9..7239927a6c 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4872,8 +4872,8 @@ void destroyMAIOperatorInfo(void* param) { } static SResultRow* doSetSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, SAggSupporter* pSup) { - SResultRow* pResult = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize); - pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; + SResultRow* pResult = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize); + pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; return pResult; } @@ -4998,15 +4998,23 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { break; } - if (pMiaInfo->groupId != pBlock->info.groupId && pMiaInfo->groupId != 0) { - // if there are unclosed time window, close it firstly. 
- ASSERT(pMiaInfo->curTs != INT64_MIN); - finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); + if (pMiaInfo->groupId == 0) { + if (pMiaInfo->groupId != pBlock->info.groupId) { + pMiaInfo->groupId = pBlock->info.groupId; + } + } else { + if (pMiaInfo->groupId != pBlock->info.groupId) { + // if there are unclosed time window, close it firstly. + ASSERT(pMiaInfo->curTs != INT64_MIN); + finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); - pMiaInfo->prefetchedBlock = pBlock; - pMiaInfo->curTs = INT64_MIN; - pMiaInfo->groupId = 0; - break; + pMiaInfo->prefetchedBlock = pBlock; + pMiaInfo->curTs = INT64_MIN; + pMiaInfo->groupId = 0; + break; + } else { + // continue + } } getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag); From 5463159a3d5a8f56a4f3f08f9864a79c24013c0d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 13 Sep 2022 18:24:29 +0800 Subject: [PATCH 22/49] fix twa function input dup timestamps --- source/libs/function/src/builtinsimpl.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 7d8c231c15..5862a06aac 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5446,11 +5446,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5465,11 +5465,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5483,11 +5483,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5501,11 +5501,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5519,11 +5519,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5537,11 +5537,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5555,11 +5555,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5573,11 +5573,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + 
INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5591,11 +5591,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } @@ -5609,11 +5609,11 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } numOfElems++; + INIT_INTP_POINT(st, tsList[i], val[i]); if (pInfo->p.key == st.key) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - INIT_INTP_POINT(st, tsList[i], val[i]); pInfo->dOutput += twa_get_area(pInfo->p, st); pInfo->p = st; } From 9ee003c971aa2c064c1da800aa1e366ffd262eab Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 13 Sep 2022 18:32:17 +0800 Subject: [PATCH 23/49] fix test case --- tests/system-test/2-query/twa.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index 62940477cf..4c163da485 100644 --- a/tests/system-test/2-query/twa.py +++ b/tests/system-test/2-query/twa.py @@ -124,7 +124,7 @@ class TDTestCase: tdSql.checkData(0,1,4.500000000) # mixup with other functions - tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ") + tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.ct1 ") tdSql.checkData(0,0,1.000000000) tdSql.checkData(0,1,11111.000000000) tdSql.checkData(0,2,1) From 990416d20dd84b2a236320755bf3e358790c723a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 13 Sep 2022 18:32:22 +0800 Subject: [PATCH 24/49] enh(tsc): handle schedule error --- source/libs/transport/src/transCli.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 4a0008b5ff..07e671ad05 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -374,10 +374,12 @@ void cliHandleResp(SCliConn* conn) { if (pCtx == NULL && CONN_NO_PERSIST_BY_APP(conn)) { tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn); + transFreeCont(transMsg.pCont); return; } if (CONN_RELEASE_BY_SERVER(conn) && transMsg.info.ahandle == NULL) { tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn); + transFreeCont(transMsg.pCont); return; } From 3dec6668a9fed326e4885eba8039add57eec3229 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 13 Sep 2022 18:33:45 +0800 Subject: [PATCH 25/49] fix(query): return correct error code. 
--- source/libs/executor/src/executorimpl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index e5f0d4d177..202d8eadde 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3534,7 +3534,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode; int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanPhyNode, pTagCond, pTagIndexCond, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { - pTaskInfo->code = terrno; + pTaskInfo->code = code; qError("failed to getTableList, code: %s", tstrerror(code)); return NULL; } From d585c20aa3472baf82cbdd68c22281b866073afe Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 13 Sep 2022 18:58:56 +0800 Subject: [PATCH 26/49] enh: count(tbname) optimize --- source/libs/parser/src/parAstCreater.c | 2 +- source/libs/parser/src/parTranslater.c | 33 +++++++++++++++++++ source/libs/planner/test/planOptimizeTest.cpp | 2 ++ 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 655bb68206..511ae25810 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1303,7 +1303,7 @@ SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pD EOperatorType tableCondType) { CHECK_PARSER_STATUS(pCxt); if (needDbShowStmt(type) && NULL == pDbName) { - snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified"); + snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "database not specified"); pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR; return NULL; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 2af4032fd8..f1a59ebf51 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1283,6 +1283,36 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount) return code; } +static bool isCountTbname(SFunctionNode* pFunc) { + if (FUNCTION_TYPE_COUNT != pFunc->funcType || 1 != LIST_LENGTH(pFunc->pParameterList)) { + return false; + } + SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); + return (QUERY_NODE_FUNCTION == nodeType(pPara) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPara)->funcType); +} + +// count(tbname) is rewritten as count(ts) for scannning optimization +static int32_t rewriteCountTbname(STranslateContext* pCxt, SFunctionNode* pCount) { + SFunctionNode* pTbname = (SFunctionNode*)nodesListGetNode(pCount->pParameterList, 0); + const char* pTableAlias = NULL; + if (LIST_LENGTH(pTbname->pParameterList) > 0) { + pTableAlias = ((SValueNode*)nodesListGetNode(pTbname->pParameterList, 0))->literal; + } + STableNode* pTable = NULL; + int32_t code = findTable(pCxt, pTableAlias, &pTable); + if (TSDB_CODE_SUCCESS == code) { + SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pCol) { + code = TSDB_CODE_OUT_OF_MEMORY; + } else { + setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol); + NODES_DESTORY_LIST(pCount->pParameterList); + code = nodesListMakeAppend(&pCount->pParameterList, (SNode*)pCol); + } + } + return code; +} + static bool hasInvalidFuncNesting(SNodeList* pParameterList) { bool hasInvalidFunc = false; nodesWalkExprs(pParameterList, haveVectorFunction, 
&hasInvalidFunc); @@ -1318,6 +1348,9 @@ static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { if (isCountStar(pFunc)) { return rewriteCountStar(pCxt, pFunc); } + if (isCountTbname(pFunc)) { + return rewriteCountTbname(pCxt, pFunc); + } return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp index 6c5b760564..c2a0aee847 100644 --- a/source/libs/planner/test/planOptimizeTest.cpp +++ b/source/libs/planner/test/planOptimizeTest.cpp @@ -35,6 +35,8 @@ TEST_F(PlanOptimizeTest, scanPath) { run("SELECT LAST(c1) FROM t1 WHERE ts BETWEEN '2022-7-29 11:10:10' AND '2022-7-30 11:10:10' INTERVAL(10S) " "FILL(LINEAR)"); + + run("SELECT COUNT(TBNAME) FROM t1"); } TEST_F(PlanOptimizeTest, pushDownCondition) { From b914e37874b1d2ea5e693bb482fcc420e936a4ea Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 13 Sep 2022 19:05:35 +0800 Subject: [PATCH 27/49] enh(tsc): handle schedule error --- source/libs/transport/src/transCli.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 07e671ad05..18efabd44f 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -395,7 +395,7 @@ void cliHandleResp(SCliConn* conn) { } if (CONN_NO_PERSIST_BY_APP(conn)) { - addConnToPool(pThrd->pool, conn); + return addConnToPool(pThrd->pool, conn); } uv_read_start((uv_stream_t*)conn->stream, cliAllocRecvBufferCb, cliRecvCb); From 0277a96b5fd863f610223e421bbea0f03b7fea32 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 13 Sep 2022 19:30:28 +0800 Subject: [PATCH 28/49] fix: tag length of 5-taos-tools/taosbenchmark/demo.py --- tests/develop-test/5-taos-tools/taosbenchmark/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py index f62cb05269..a44ad4c1d0 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py @@ -77,7 +77,7 @@ class TDTestCase: tdSql.checkData(4, 3, "TAG") tdSql.checkData(5, 0, "location") tdSql.checkData(5, 1, "VARCHAR") - tdSql.checkData(5, 2, 16) + tdSql.checkData(5, 2, 24) tdSql.checkData(5, 3, "TAG") tdSql.query("select count(*) from test.meters where groupid >= 0") From 2b8b1ebc866abd65d11621b5058b3c659c7dd809 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 13 Sep 2022 20:45:28 +0800 Subject: [PATCH 29/49] fix: fix mem leak --- source/libs/transport/src/transCli.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 18efabd44f..dcfa93431b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -374,12 +374,12 @@ void cliHandleResp(SCliConn* conn) { if (pCtx == NULL && CONN_NO_PERSIST_BY_APP(conn)) { tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn); - transFreeCont(transMsg.pCont); + transFreeMsg(transMsg.pCont); return; } if (CONN_RELEASE_BY_SERVER(conn) && transMsg.info.ahandle == NULL) { tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn); - transFreeCont(transMsg.pCont); + transFreeMsg(transMsg.pCont); return; } From 7cc095419f68b7e872ac82d1537f0104c903925e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 13 Sep 2022 16:56:52 +0000 Subject: [PATCH 30/49] 
feat: update taos-tools 3588b3d for 3.0 --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 13559bba27..0f7a450920 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 404d8da + GIT_TAG 3588b3d SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From 7f03ffcd7596655f8b7f349012cd380e41e545a7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 14 Sep 2022 09:57:37 +0800 Subject: [PATCH 31/49] fix(query): set correct group id. --- source/libs/executor/src/timewindowoperator.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 7239927a6c..6568c85edb 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4992,6 +4992,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { if (pMiaInfo->curTs != INT64_MIN) { finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); pMiaInfo->curTs = INT64_MIN; + pRes->info.groupId = pMiaInfo->groupId; } doSetOperatorCompleted(pOperator); @@ -5007,8 +5008,9 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { // if there are unclosed time window, close it firstly. ASSERT(pMiaInfo->curTs != INT64_MIN); finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); - pMiaInfo->prefetchedBlock = pBlock; + + pRes->info.groupId = pMiaInfo->groupId; pMiaInfo->curTs = INT64_MIN; pMiaInfo->groupId = 0; break; @@ -5026,8 +5028,6 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { break; } } - - pRes->info.groupId = pMiaInfo->groupId; } static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) { From fbcef61d069adbee89c844d3c7281bbee9d2ef1a Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Wed, 14 Sep 2022 10:34:13 +0800 Subject: [PATCH 32/49] enh: add binary serialization method to node structure --- source/common/src/tmsg.c | 10 +++--- source/libs/nodes/src/nodesMsgFuncs.c | 47 ++++++++++++++++++++++++--- 2 files changed, 46 insertions(+), 11 deletions(-) diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index ea25094d10..45861984b0 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -4718,9 +4718,8 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) { if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1; if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1; if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1; - if (tEncodeU32(&encoder, pReq->phyLen) < 0) return -1; if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1; - if (tEncodeCStr(&encoder, pReq->msg) < 0) return -1; + if (tEncodeBinary(&encoder, pReq->msg, pReq->phyLen) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -4750,13 +4749,12 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) { if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1; if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1; if (tDecodeU32(&decoder, &pReq->sqlLen) < 0) return -1; - if (tDecodeU32(&decoder, &pReq->phyLen) < 0) return -1; pReq->sql = taosMemoryCalloc(1, pReq->sqlLen + 1); if (NULL == pReq->sql) 
return -1; - pReq->msg = taosMemoryCalloc(1, pReq->phyLen + 1); - if (NULL == pReq->msg) return -1; if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1; - if (tDecodeCStrTo(&decoder, pReq->msg) < 0) return -1; + uint64_t msgLen = 0; + if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, &msgLen) < 0) return -1; + pReq->phyLen = msgLen; tEndDecode(&decoder); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 9a093fca99..efe820fee2 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -254,6 +254,11 @@ static int32_t tlvDecodeDynBinary(STlv* pTlv, void** pValue) { return TSDB_CODE_SUCCESS; } +static int32_t tlvDecodeBinary(STlv* pTlv, void* pValue) { + memcpy(pValue, pTlv->value, pTlv->len); + return TSDB_CODE_SUCCESS; +} + static int32_t tlvDecodeObjFromTlv(STlv* pTlv, FToObject func, void* pObj) { STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value}; return func(&decoder, pObj); @@ -469,7 +474,15 @@ static int32_t msgToColumnNode(STlvDecoder* pDecoder, void* pObj) { return code; } -enum { VALUE_CODE_EXPR_BASE = 1, VALUE_CODE_LITERAL, VALUE_CODE_IS_NULL, VALUE_CODE_DATUM }; +enum { + VALUE_CODE_EXPR_BASE = 1, + VALUE_CODE_LITERAL, + VALUE_CODE_IS_DURATION, + VALUE_CODE_TRANSLATE, + VALUE_CODE_NOT_RESERVED, + VALUE_CODE_IS_NULL, + VALUE_CODE_DATUM +}; static int32_t datumToMsg(const void* pObj, STlvEncoder* pEncoder) { const SValueNode* pNode = (const SValueNode*)pObj; @@ -524,9 +537,18 @@ static int32_t valueNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = tlvEncodeCStr(pEncoder, VALUE_CODE_LITERAL, pNode->literal); } if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_NULL, pNode->isNull); + code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_DURATION, pNode->isDuration); } if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, VALUE_CODE_TRANSLATE, pNode->translate); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, VALUE_CODE_NOT_RESERVED, pNode->notReserved); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_NULL, pNode->isNull); + } + if (TSDB_CODE_SUCCESS == code && !pNode->isNull) { code = datumToMsg(pNode, pEncoder); } @@ -590,12 +612,18 @@ static int32_t msgToDatum(STlv* pTlv, void* pObj) { break; case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: - case TSDB_DATA_TYPE_VARBINARY: - code = tlvDecodeDynBinary(pTlv, (void**)&pNode->datum.p); + case TSDB_DATA_TYPE_VARBINARY: { + pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + VARSTR_HEADER_SIZE + 1); + if (NULL == pNode->datum.p) { + code = TSDB_CODE_OUT_OF_MEMORY; + break; + } + code = tlvDecodeBinary(pTlv, pNode->datum.p); if (TSDB_CODE_SUCCESS == code) { - varDataSetLen(pNode->datum.p, pNode->node.resType.bytes - VARSTR_HEADER_SIZE); + varDataSetLen(pNode->datum.p, pTlv->len - VARSTR_HEADER_SIZE); } break; + } case TSDB_DATA_TYPE_JSON: code = tlvDecodeDynBinary(pTlv, (void**)&pNode->datum.p); break; @@ -622,6 +650,15 @@ static int32_t msgToValueNode(STlvDecoder* pDecoder, void* pObj) { case VALUE_CODE_LITERAL: code = tlvDecodeCStrP(pTlv, &pNode->literal); break; + case VALUE_CODE_IS_DURATION: + code = tlvDecodeBool(pTlv, &pNode->isDuration); + break; + case VALUE_CODE_TRANSLATE: + code = tlvDecodeBool(pTlv, &pNode->translate); + break; + case VALUE_CODE_NOT_RESERVED: + code = tlvDecodeBool(pTlv, &pNode->notReserved); + break; case VALUE_CODE_IS_NULL: code = tlvDecodeBool(pTlv, &pNode->isNull); 
break; From 0bb1dc9d1ffc2d7be5b6c84b17fbb5d57a274f1d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 14 Sep 2022 11:00:44 +0800 Subject: [PATCH 33/49] fix(query): reset the output buffer when results have been produced. --- source/libs/executor/inc/executil.h | 1 + source/libs/executor/src/executil.c | 11 +++++++++++ source/libs/executor/src/executorimpl.c | 5 +++-- source/libs/executor/src/timewindowoperator.c | 10 +++------- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 0722c2b306..3f40c18a28 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -88,6 +88,7 @@ struct SqlFunctionCtx; size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput); void initResultRowInfo(SResultRowInfo* pResultRowInfo); void closeResultRow(SResultRow* pResultRow); +void resetResultRow(SResultRow* pResultRow, size_t entrySize); struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 410da92013..cfc5fec25f 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -33,6 +33,17 @@ void initResultRowInfo(SResultRowInfo* pResultRowInfo) { void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; } +void resetResultRow(SResultRow* pResultRow, size_t entrySize) { + pResultRow->numOfRows = 0; + pResultRow->closed = false; + pResultRow->endInterp = false; + pResultRow->startInterp = false; + + if (entrySize > 0) { + memset(pResultRow->pEntryInfo, 0, entrySize); + } +} + // TODO refactor: use macro SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset) { assert(index >= 0 && offset != NULL); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 202d8eadde..cd1177794e 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1293,8 +1293,9 @@ static void doCopyResultToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SR } } -int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, - SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { +// todo refactor. 
SResultRow has direct pointer in miainfo +int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup, + SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId); SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 6568c85edb..c24e04eab1 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4886,14 +4886,8 @@ static int32_t setSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, STimeWind } } - // set time window for current result ,todo extract method + // set time window for current result (*pResult)->win = (*win); - (*pResult)->numOfRows = 0; - (*pResult)->closed = false; - (*pResult)->endInterp = false; - (*pResult)->startInterp = false; - memset((*pResult)->pEntryInfo, 0, pAggSup->resultRowSize - sizeof(SResultRow)); - setResultRowInitCtx((*pResult), pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset); return TSDB_CODE_SUCCESS; } @@ -4916,6 +4910,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR if (miaInfo->curTs != INT64_MIN) { if (ts != miaInfo->curTs) { finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo); + resetResultRow(miaInfo->pResultRow, iaInfo->aggSup.resultRowSize - sizeof(SResultRow)); miaInfo->curTs = ts; } } else { @@ -4944,6 +4939,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR pBlock->info.rows, pSup->numOfExprs); finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo); + resetResultRow(miaInfo->pResultRow, iaInfo->aggSup.resultRowSize - sizeof(SResultRow)); miaInfo->curTs = tsCols[currPos]; currWin.skey = miaInfo->curTs; From e076b8c7c24208b3c9d2ca3653c6930454380f85 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 14 Sep 2022 11:05:24 +0800 Subject: [PATCH 34/49] fix test case --- tests/system-test/2-query/max_partition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py index 01c2677242..08bb7675ad 100644 --- a/tests/system-test/2-query/max_partition.py +++ b/tests/system-test/2-query/max_partition.py @@ -181,7 +181,7 @@ class TDTestCase: # bug need fix tdSql.checkData(0,1,None) - tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1") + tdSql.query(f"select c1 , twa(c1) from {dbname}.sub_stb_1 partition by c1 order by c1") tdSql.checkRows(11) tdSql.checkData(0,1,None) From 892d7861f08ef62da6f6748b4fd24df6c041b3f6 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 14 Sep 2022 11:06:37 +0800 Subject: [PATCH 35/49] fix(tsc): avoid mem leak --- source/client/src/clientImpl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 39b4b069a0..e7b34ab783 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1369,7 +1369,7 @@ int32_t doProcessMsgFromServer(void* param) { updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); SDataBuf buf = { - .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = pEpSet}; + .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = NULL}; if 
(pMsg->contLen > 0) { buf.pData = taosMemoryCalloc(1, pMsg->contLen); @@ -1384,6 +1384,8 @@ int32_t doProcessMsgFromServer(void* param) { pSendInfo->fp(pSendInfo->param, &buf, pMsg->code); rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); + + taosMemoryFree(arg->pEpset); taosMemoryFree(arg); return TSDB_CODE_SUCCESS; } From 0a6ec7c0d763e288118e6e88352c7d9115244ac0 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 14 Sep 2022 11:17:12 +0800 Subject: [PATCH 36/49] fix(query): restrict max/min function input type not including timestamp type --- source/libs/function/src/builtins.c | 20 ++------------------ source/libs/function/src/builtinsimpl.c | 6 +++--- 2 files changed, 5 insertions(+), 21 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 5844784ea4..f3d3393ac3 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -311,22 +311,6 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le return TSDB_CODE_SUCCESS; } -static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - if (!IS_TIMESTAMP_TYPE(paraType) && !IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } else if (IS_NULL_TYPE(paraType)) { - paraType = TSDB_DATA_TYPE_BIGINT; - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType}; - return TSDB_CODE_SUCCESS; -} - static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); @@ -2076,7 +2060,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "min", .type = FUNCTION_TYPE_MIN, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC, - .translateFunc = translateMinMax, + .translateFunc = translateInOutNum, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getMinmaxFuncEnv, .initFunc = minmaxFunctionSetup, @@ -2091,7 +2075,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "max", .type = FUNCTION_TYPE_MAX, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC, - .translateFunc = translateMinMax, + .translateFunc = translateInOutNum, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getMinmaxFuncEnv, .initFunc = minmaxFunctionSetup, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 0d7fd1a6da..c9e345f49f 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -1204,7 +1204,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } else { - if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { int64_t prev = 0; GET_TYPED_DATA(prev, int64_t, type, &pBuf->v); @@ -1263,7 +1263,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { int32_t start = pInput->startRowIndex; int32_t numOfRows = pInput->numOfRows; - if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + if 
(IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) { int8_t* pData = (int8_t*)pCol->pData; int8_t* val = (int8_t*)&pBuf->v; @@ -1357,7 +1357,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { numOfElems += 1; } - } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) { + } else if (type == TSDB_DATA_TYPE_BIGINT) { int64_t* pData = (int64_t*)pCol->pData; int64_t* val = (int64_t*)&pBuf->v; From 00908a649c2927e166362190b6c799caeaa22777 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 14 Sep 2022 11:44:39 +0800 Subject: [PATCH 37/49] fix(query): reset buffer after group results generated. --- source/libs/executor/src/executorimpl.c | 16 ---------------- source/libs/executor/src/timewindowoperator.c | 17 +++++++++++------ 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index cd1177794e..677fddc85c 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1709,22 +1709,6 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t static void doDestroyTableList(STableListInfo* pTableqinfoList); -static void doTableQueryInfoTimeWindowCheck(SExecTaskInfo* pTaskInfo, STableQueryInfo* pTableQueryInfo, int32_t order) { -#if 0 - if (order == TSDB_ORDER_ASC) { - assert( - (pTableQueryInfo->win.skey <= pTableQueryInfo->win.ekey) && - (pTableQueryInfo->lastKey >= pTaskInfo->window.skey) && - (pTableQueryInfo->win.skey >= pTaskInfo->window.skey && pTableQueryInfo->win.ekey <= pTaskInfo->window.ekey)); - } else { - assert( - (pTableQueryInfo->win.skey >= pTableQueryInfo->win.ekey) && - (pTableQueryInfo->lastKey <= pTaskInfo->window.skey) && - (pTableQueryInfo->win.skey <= pTaskInfo->window.skey && pTableQueryInfo->win.ekey >= pTaskInfo->window.ekey)); - } -#endif -} - typedef struct SFetchRspHandleWrapper { uint32_t exchangeId; int32_t sourceIndex; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index c24e04eab1..c6dca79e8d 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4959,6 +4959,12 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR pBlock->info.rows, pSup->numOfExprs); } +static void cleanupAfterGroupResultGen(SMergeAlignedIntervalAggOperatorInfo* pMiaInfo, SSDataBlock* pRes) { + pRes->info.groupId = pMiaInfo->groupId; + pMiaInfo->curTs = INT64_MIN; + pMiaInfo->groupId = 0; +} + static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -4987,8 +4993,8 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { // close last unclosed time window if (pMiaInfo->curTs != INT64_MIN) { finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); - pMiaInfo->curTs = INT64_MIN; - pRes->info.groupId = pMiaInfo->groupId; + resetResultRow(pMiaInfo->pResultRow, pIaInfo->aggSup.resultRowSize - sizeof(SResultRow)); + cleanupAfterGroupResultGen(pMiaInfo, pRes); } doSetOperatorCompleted(pOperator); @@ -5004,11 +5010,10 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { // if there are unclosed time window, close it firstly. 
ASSERT(pMiaInfo->curTs != INT64_MIN); finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo); - pMiaInfo->prefetchedBlock = pBlock; + resetResultRow(pMiaInfo->pResultRow, pIaInfo->aggSup.resultRowSize - sizeof(SResultRow)); - pRes->info.groupId = pMiaInfo->groupId; - pMiaInfo->curTs = INT64_MIN; - pMiaInfo->groupId = 0; + pMiaInfo->prefetchedBlock = pBlock; + cleanupAfterGroupResultGen(pMiaInfo, pRes); break; } else { // continue From 2c2d5c16ed0410ae58e78c8b5586dbd583f945b2 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 14 Sep 2022 13:29:06 +0800 Subject: [PATCH 38/49] fix: fix insert memory leak --- source/client/src/clientHb.c | 3 +++ source/libs/parser/src/parInsert.c | 4 +--- source/libs/parser/src/parUtil.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 84a827ed78..e484e3c59c 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -414,6 +414,9 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) { int32_t code = hbBuildQueryDesc(hbBasic, pTscObj); if (code) { releaseTscObj(connKey->tscRid); + if (hbBasic->queryDesc) { + taosArrayDestroyEx(hbBasic->queryDesc, tFreeClientHbQueryDesc); + } taosMemoryFree(hbBasic); return code; } diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 162161b67a..d351333c3e 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -1420,9 +1420,7 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa } static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) { - if (!pCxt->pComCxt->async) { - taosMemoryFreeClear(pCxt->pTableMeta); - } + taosMemoryFreeClear(pCxt->pTableMeta); destroyBoundColumnInfo(&pCxt->tags); tdDestroySVCreateTbReq(&pCxt->createTblReq); } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 32513fd0b6..daab80667c 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -1124,7 +1124,7 @@ int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* p int32_t reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo); SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex); if (TSDB_CODE_SUCCESS == pRes->code) { - *pMeta = pRes->pRes; + *pMeta = tableMetaDup(pRes->pRes); if (NULL == *pMeta) { return TSDB_CODE_OUT_OF_MEMORY; } From 8905d37b7065ffc0832ae96d9141e55e55ff76b8 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 14 Sep 2022 13:35:46 +0800 Subject: [PATCH 39/49] fix test cases --- tests/system-test/2-query/max.py | 21 ++------------- tests/system-test/2-query/min.py | 45 +++++--------------------------- 2 files changed, 8 insertions(+), 58 deletions(-) diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index 169b1c2c38..5cc9a2d9e8 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -38,18 +38,7 @@ class TDTestCase: elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query(f"select max(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select last(ts) from {dbname}.stb_1") - lastTs = tdSql.getData(0, 0) - tdSql.query(f"select max(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, lastTs) - - tdSql.query(f"select last(ts) from {dbname}.stb") - lastTs = tdSql.getData(0, 0) - tdSql.query(f"select max(ts) from {dbname}.stb") - tdSql.checkData(0, 0, lastTs) + 
tdSql.error(f"select max(now()) from {dbname}.stb_1") tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5") tdSql.checkData(0,0,5) @@ -78,13 +67,7 @@ class TDTestCase: elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query(f"select max(now()) from {dbname}.ntb") - tdSql.checkRows(1) - - tdSql.query(f"select last(ts) from {dbname}.ntb") - lastTs = tdSql.getData(0, 0) - tdSql.query(f"select max(ts) from {dbname}.ntb") - tdSql.checkData(0, 0, lastTs) + tdSql.error(f"select max(now()) from {dbname}.ntb") tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5") tdSql.checkData(0,0,5) diff --git a/tests/system-test/2-query/min.py b/tests/system-test/2-query/min.py index 3d46b7b222..d97c4340f4 100644 --- a/tests/system-test/2-query/min.py +++ b/tests/system-test/2-query/min.py @@ -37,6 +37,8 @@ class TDTestCase: floatData.append(i + 0.1) # max verifacation + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") tdSql.error(f"select min(col7) from {dbname}.stb_1") tdSql.error(f"select min(col8) from {dbname}.stb_1") tdSql.error(f"select min(col9) from {dbname}.stb_1") @@ -67,20 +69,9 @@ class TDTestCase: tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5") tdSql.checkData(0,0,5) - tdSql.query(f"select min(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") tdSql.error(f"select min(col7) from {dbname}.stb_1") tdSql.error(f"select min(col8) from {dbname}.stb_1") tdSql.error(f"select min(col9) from {dbname}.stb_1") @@ -111,19 +102,8 @@ class TDTestCase: tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5") tdSql.checkData(0,0,5) - tdSql.query(f"select min(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") tdSql.error(f"select min(col7) from {dbname}.ntb") tdSql.error(f"select min(col8) from {dbname}.ntb") tdSql.error(f"select min(col9) from {dbname}.ntb") @@ -154,19 +134,6 @@ class TDTestCase: tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5") tdSql.checkData(0,0,5) - tdSql.query(f"select min(now()) from {dbname}.stb_1") - tdSql.checkRows(1) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - - tdSql.query(f"select first(ts) from {dbname}.stb_1") - firstTs = tdSql.getData(0, 0) - tdSql.query(f"select min(ts) from {dbname}.stb_1") - tdSql.checkData(0, 0, firstTs) - def stop(self): tdSql.close() From d2f6a7928686556ea44d9c52f2b59aaeeac25f59 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 14 Sep 2022 13:38:39 +0800 Subject: [PATCH 40/49] fix docs --- 
docs/en/12-taos-sql/10-function.md | 44 +++++++++++++++--------------- docs/zh/12-taos-sql/10-function.md | 38 +++++++++++++------------- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index f74d0dbe5c..ab1d2f900b 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -126,7 +126,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded down value of a specific field +**Description**: The rounded down value of a specific field **More explanations**: The restrictions are same as those of the `CEIL` function. #### LOG @@ -173,7 +173,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded value of a specific field. +**Description**: The rounded value of a specific field. **More explanations**: The restrictions are same as those of the `CEIL` function. @@ -434,7 +434,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; **More explanations**: - You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00"). -- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp +- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp - If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use @@ -769,14 +769,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **Explanations**: - bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 -- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively: - - "user_input": "[1, 3, 5, 7]": +- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively: + - "user_input": "[1, 3, 5, 7]": User specified bin values. - + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" "start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins. The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. - + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" "start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. @@ -862,9 +862,9 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RA - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. - The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. 
If no `where` condition is specified then all original data is the input. -- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. -- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. -- Interpolation is performed based on `FILL` parameter. +- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. +- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. +- Interpolation is performed based on `FILL` parameter. - `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable. ### LAST @@ -917,7 +917,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric, Timestamp +**Applicable data types**: Numeric **Applicable table types**: standard tables and supertables @@ -932,7 +932,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric, Timestamp +**Applicable data types**: Numeric **Applicable table types**: standard tables and supertables @@ -968,7 +968,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **Applicable table types**: standard tables and supertables -**More explanations**: +**More explanations**: This function cannot be used in expression calculation. - Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline @@ -1046,10 +1046,10 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] **Applicable table types**: standard tables and supertables -**More explanations**: - +**More explanations**: + - Arithmetic operation can't be performed on the result of `csum` function -- Can only be used with aggregate functions This function can be used with supertables and standard tables. +- Can only be used with aggregate functions This function can be used with supertables and standard tables. - Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline @@ -1067,8 +1067,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **Applicable table types**: standard tables and supertables -**More explanation**: - +**More explanation**: + - It can be used together with `PARTITION BY tbname` against a STable. - It can be used together with a selected column. 
For example: select \_rowts, DERIVATIVE() from。 @@ -1086,7 +1086,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Applicable table types**: standard tables and supertables -**More explanation**: +**More explanation**: - The number of result rows is the number of rows subtracted by one, no output for the first row - It can be used together with a selected column. For example: select \_rowts, DIFF() from。 @@ -1123,9 +1123,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **Applicable table types**: standard tables and supertables -**More explanations**: - -- Arithmetic operation can't be performed on the result of `MAVG`. +**More explanations**: + +- Arithmetic operation can't be performed on the result of `MAVG`. - Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions. - Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 9c5b7f771e..86e9aaa80f 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -127,7 +127,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:获得指定字段的向下取整数的结果。 +**功能说明**:获得指定字段的向下取整数的结果。 其他使用说明参见 CEIL 函数描述。 #### LOG @@ -174,7 +174,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:获得指定字段的四舍五入的结果。 +**功能说明**:获得指定字段的四舍五入的结果。 其他使用说明参见 CEIL 函数描述。 @@ -435,7 +435,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; **使用说明**: - timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 -- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; +- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; - 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 @@ -770,14 +770,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **详细说明**: - bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 -- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): - - "user_input": "[1, 3, 5, 7]" +- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): + - "user_input": "[1, 3, 5, 7]" 用户指定 bin 的具体数值。 - + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 - + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 @@ -918,7 +918,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; **返回数据类型**:同应用的字段。 -**适用数据类型**:数值类型,时间戳类型。 +**适用数据类型**:数值类型。 **适用于**:表和超级表。 @@ -933,7 +933,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; **返回数据类型**:同应用的字段。 -**适用数据类型**:数值类型,时间戳类型。 +**适用数据类型**:数值类型。 **适用于**:表和超级表。 @@ -969,7 +969,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **适用于**:表和超级表。 -**使用说明**: +**使用说明**: - 不能参与表达式计算;该函数可以应用在普通表和超级表上; - 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。 @@ -1047,10 +1047,10 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] **适用于**:表和超级表。 -**使用说明**: - +**使用说明**: + - 不支持 +、-、*、/ 运算,如 csum(col1) + 
csum(col2)。 -- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 +- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 @@ -1068,8 +1068,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **适用于**:表和超级表。 -**使用说明**: - +**使用说明**: + - DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。 - 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 @@ -1087,7 +1087,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **适用于**:表和超级表。 -**使用说明**: +**使用说明**: - 输出结果行数是范围内总行数减一,第一行没有结果输出。 - 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 @@ -1124,9 +1124,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] **适用于**:表和超级表。 -**使用说明**: - -- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); +**使用说明**: + +- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 From 65ee59fb90a6c267fffed149d68621e849041796 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 14 Sep 2022 14:01:08 +0800 Subject: [PATCH 41/49] docs: update arch --- docs/zh/21-tdinternal/01-arch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md index 704524fd21..b128b9d438 100644 --- a/docs/zh/21-tdinternal/01-arch.md +++ b/docs/zh/21-tdinternal/01-arch.md @@ -26,7 +26,7 @@ TDengine 分布式架构的逻辑结构图如下: **管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、超级表等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M1,M2,M3)。mnode 支持多副本,采用 RAFT 一致性协议,保证系统的高可用与高可靠,任何数据更新操作只能在 Leader 上进行。mnode 集群的第一个节点在集群部署时自动完成,其他节点的创建与删除由用户通过 SQL 命令完成。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。 -**弹性计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。 +**计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。当一个查询执行时,依赖执行计划,调度器会安排一个或多个 qnode 来一起执行。qnode 能从 vnode 获取数据,也可以将自己的计算结果发给其他 qnode 做进一步的处理。通过引入独立的计算节点,TDengine 实现了存储和计算分离。 **流计算节点(snode):** 一个虚拟的逻辑单元,只运行流计算任务(图中 S)。集群中可配置多个 snode,在整个集群内部共享使用(图中 S1,S2,S3)。snode 不与具体的 stream 绑定,即一个 snode 可以同时执行多个 stream 的计算任务。每个 dnode 上至多有一个 snode,由所属的数据节点的 EP 来唯一标识。由 mnode 调度可用的 snode 完成流计算任务,当没有可用的 snode 时,流计算任务在 vnode 中执行。 From 63f02c5103cf4559ff8b9facc5c0521a6d516498 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 14 Sep 2022 14:26:21 +0800 Subject: [PATCH 42/49] fix: fix free meta rsp issue --- source/common/src/tmsg.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index f96462945f..46e29fc07b 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3344,7 +3344,13 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) { return 0; } -void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); } +void tFreeSTableMetaRsp(void *pRsp) { + if (NULL == pRsp) { + return; + } + + taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); +} 
void tFreeSTableIndexRsp(void *info) { if (NULL == info) { From 2c2d13d92839c362e0f0d959dcaa9dd6040cad3b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 14 Sep 2022 14:38:18 +0800 Subject: [PATCH 43/49] fix: detech taosadapter exist when make install (#16827) --- packaging/tools/install.sh | 20 ++++++++++++++++---- packaging/tools/make_install.sh | 20 ++++++++++++++++---- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index f2f72acafa..5088e9bdac 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -840,14 +840,20 @@ function updateProduct() { echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}" - echo -e "${GREEN_DARK}To configure Adapter (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi if [ ${openresty_work} = 'true' ]; then @@ -926,14 +932,20 @@ function installProduct() { # Ask if to start the service echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}" - echo -e "${GREEN_DARK}To configure ${adapterName} (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi if [ ! 
-z "$firstEp" ]; then diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index a6dceeeaad..d1e7a222cc 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -609,14 +609,20 @@ function update_TDengine() { echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" - echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}" @@ -649,14 +655,20 @@ function install_TDengine() { echo -e "\033[44;32;1m${productName} is installed successfully!${NC}" echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" - echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}" else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}" fi echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}" From 6e561a6c78954ee1dfc1c78cab98140fd64ba510 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 14 Sep 2022 14:46:14 +0800 Subject: [PATCH 44/49] fix: fix mem leak --- source/client/src/clientImpl.c | 3 +-- source/libs/index/src/indexTfile.c | 4 ---- source/libs/scheduler/src/schTask.c | 2 ++ 3 files changed, 3 insertions(+), 6 deletions(-) diff --git 
a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index e7b34ab783..70d7a49208 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1369,7 +1369,7 @@ int32_t doProcessMsgFromServer(void* param) { updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); SDataBuf buf = { - .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = NULL}; + .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = pEpSet}; if (pMsg->contLen > 0) { buf.pData = taosMemoryCalloc(1, pMsg->contLen); @@ -1385,7 +1385,6 @@ int32_t doProcessMsgFromServer(void* param) { rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); - taosMemoryFree(arg->pEpset); taosMemoryFree(arg); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c index c3be0ea6f5..1fc631e9f3 100644 --- a/source/libs/index/src/indexTfile.c +++ b/source/libs/index/src/indexTfile.c @@ -323,10 +323,6 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, while ((rt = stmStNextWith(st, NULL)) != NULL) { FstSlice* s = &rt->data; char* ch = (char*)fstSliceData(s, NULL); - // if (0 != strncmp(ch, tem->colName, tem->nColName)) { - // swsResultDestroy(rt); - // break; - //} TExeCond cond = cmpFn(ch, p, tem->colType); if (MATCH == cond) { diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index c5f161b66a..ee60d02ba2 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -430,12 +430,14 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32 code = schDoTaskRedirect(pJob, pTask, pData, rspCode); taosMemoryFree(pData->pData); + taosMemoryFree(pData->pEpSet); SCH_RET(code); _return: taosMemoryFree(pData->pData); + taosMemoryFree(pData->pEpSet); SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); } From 417571140f9b71b1f623099bbf528a8a940d9069 Mon Sep 17 00:00:00 2001 From: Pan YANG Date: Wed, 14 Sep 2022 15:00:54 +0800 Subject: [PATCH 45/49] docs: fix escape of backtick --- docs/zh/12-taos-sql/20-keywords.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md index ee36dc44f0..57a45b0a00 100644 --- a/docs/zh/12-taos-sql/20-keywords.md +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -6,7 +6,7 @@ description: TDengine 保留关键字的详细列表 ## 保留关键字 -目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写如果需要用作库名、表名、STable 名、数据列名及标签列名等,需要使用符合``将关键字括起来使用,例如`ADD`。 +目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写如果需要用作库名、表名、STable 名、数据列名及标签列名等,需要使用符号 `` ` `` 将关键字括起来使用,例如 \`ADD\`。 关键字列表如下: ### A From 64226e5e55131c39ae987f9a4553b03c8f2329d8 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 14 Sep 2022 15:01:14 +0800 Subject: [PATCH 46/49] fix error code number --- include/util/taoserror.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 0058b03b30..086de593d0 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -577,7 +577,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802) #define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803) #define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) -#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x012B) +#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x2805) //udf 
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901) From 43bdaa6c1906dd660f951c5d1ad30079958a6fab Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 14 Sep 2022 16:11:26 +0800 Subject: [PATCH 47/49] fix: fix invalid free issue --- source/client/src/clientRawBlockWrite.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 6e9711f57b..c135965f07 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -701,12 +701,6 @@ typedef struct SVgroupCreateTableBatch { static void destroyCreateTbReqBatch(void* data) { SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data; - size_t size = taosArrayGetSize(pTbBatch->req.pArray); - for (int32_t i = 0; i < size; ++i) { - SVCreateTbReq* pTableReq = taosArrayGet(pTbBatch->req.pArray, i); - tdDestroySVCreateTbReq(pTableReq); - } - taosArrayDestroy(pTbBatch->req.pArray); } From 4c6a8bbc95f0e76199dd0226118a3f25bde58e12 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 14 Sep 2022 16:50:42 +0800 Subject: [PATCH 48/49] feat(stream): stream state support tuple --- include/common/tcommon.h | 33 +++++- include/libs/function/function.h | 145 ++++++++++++------------ include/libs/stream/streamState.h | 78 +++++++++++++ include/libs/stream/tstream.h | 42 +------ source/libs/function/CMakeLists.txt | 2 +- source/libs/function/src/builtinsimpl.c | 74 +++++++----- source/libs/stream/src/streamState.c | 21 +++- 7 files changed, 251 insertions(+), 144 deletions(-) create mode 100644 include/libs/stream/streamState.h diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 37db574d98..ba4baa0130 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -45,8 +45,8 @@ enum { // clang-format on typedef struct { - TSKEY ts; uint64_t groupId; + TSKEY ts; } SWinKey; static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) { @@ -68,6 +68,37 @@ static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, i return 0; } +typedef struct { + uint64_t groupId; + TSKEY ts; + int32_t exprIdx; +} STupleKey; + +static inline int STupleKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) { + STupleKey* pTuple1 = (STupleKey*)pKey1; + STupleKey* pTuple2 = (STupleKey*)pKey2; + + if (pTuple1->groupId > pTuple2->groupId) { + return 1; + } else if (pTuple1->groupId < pTuple2->groupId) { + return -1; + } + + if (pTuple1->ts > pTuple2->ts) { + return 1; + } else if (pTuple1->ts < pTuple2->ts) { + return -1; + } + + if (pTuple1->exprIdx > pTuple2->exprIdx) { + return 1; + } else if (pTuple1->exprIdx < pTuple2->exprIdx) { + return -1; + } + + return 0; +} + enum { TMQ_MSG_TYPE__DUMMY = 0, TMQ_MSG_TYPE__POLL_RSP, diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 3f26eee86a..65ddc180d6 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -34,66 +34,69 @@ typedef struct SFuncExecEnv { int32_t calcMemSize; } SFuncExecEnv; -typedef bool (*FExecGetEnv)(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); -typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo); +typedef bool (*FExecGetEnv)(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv); +typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResultCellInfo); typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx); -typedef int32_t (*FExecFinalize)(struct 
SqlFunctionCtx *pCtx, SSDataBlock* pBlock); +typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock); typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx); typedef struct SScalarFuncExecFuncs { - FExecGetEnv getEnv; + FExecGetEnv getEnv; FScalarExecProcess process; } SScalarFuncExecFuncs; typedef struct SFuncExecFuncs { - FExecGetEnv getEnv; - FExecInit init; - FExecProcess process; + FExecGetEnv getEnv; + FExecInit init; + FExecProcess process; FExecFinalize finalize; - FExecCombine combine; + FExecCombine combine; } SFuncExecFuncs; -#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results +#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results #define TOP_BOTTOM_QUERY_LIMIT 100 #define FUNCTIONS_NAME_MAX_LENGTH 16 typedef struct SResultRowEntryInfo { - bool initialized:1; // output buffer has been initialized - bool complete:1; // query has completed - uint8_t isNullRes:6; // the result is null - uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT + bool initialized : 1; // output buffer has been initialized + bool complete : 1; // query has completed + uint8_t isNullRes : 6; // the result is null + uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT } SResultRowEntryInfo; // determine the real data need to calculated the result enum { - BLK_DATA_NOT_LOAD = 0x0, - BLK_DATA_SMA_LOAD = 0x1, + BLK_DATA_NOT_LOAD = 0x0, + BLK_DATA_SMA_LOAD = 0x1, BLK_DATA_DATA_LOAD = 0x3, - BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter + BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter }; enum { - MAIN_SCAN = 0x0u, - REVERSE_SCAN = 0x1u, // todo remove it - REPEAT_SCAN = 0x2u, //repeat scan belongs to the master scan - MERGE_STAGE = 0x20u, + MAIN_SCAN = 0x0u, + REVERSE_SCAN = 0x1u, // todo remove it + REPEAT_SCAN = 0x2u, // repeat scan belongs to the master scan + MERGE_STAGE = 0x20u, }; typedef struct SPoint1 { - int64_t key; - union{double val; char* ptr;}; + int64_t key; + union { + double val; + char *ptr; + }; } SPoint1; struct SqlFunctionCtx; struct SResultRowEntryInfo; -//for selectivity query, the corresponding tag value is assigned if the data is qualified +// for selectivity query, the corresponding tag value is assigned if the data is qualified typedef struct SSubsidiaryResInfo { - int16_t num; - int32_t rowLen; - char* buf; // serialize data buffer + int16_t num; + int32_t rowLen; + char *buf; // serialize data buffer struct SqlFunctionCtx **pCtx; } SSubsidiaryResInfo; @@ -106,69 +109,70 @@ typedef struct SResultDataInfo { } SResultDataInfo; #define GET_RES_INFO(ctx) ((ctx)->resultInfo) -#define GET_ROWCELL_INTERBUF(_c) ((void*) ((char*)(_c) + sizeof(SResultRowEntryInfo))) +#define GET_ROWCELL_INTERBUF(_c) ((void *)((char *)(_c) + sizeof(SResultRowEntryInfo))) typedef struct SInputColumnInfoData { - int32_t totalRows; // total rows in current columnar data - int32_t startRowIndex; // handle started row index - int32_t numOfRows; // the number of rows needs to be handled - int32_t numOfInputCols; // PTS is not included - bool colDataAggIsSet;// if agg is set or not - SColumnInfoData *pPTS; // primary timestamp column + int32_t totalRows; // total rows in current columnar data + int32_t startRowIndex; // handle started 
row index + int32_t numOfRows; // the number of rows needs to be handled + int32_t numOfInputCols; // PTS is not included + bool colDataAggIsSet; // if agg is set or not + SColumnInfoData *pPTS; // primary timestamp column SColumnInfoData **pData; SColumnDataAgg **pColumnDataAgg; - uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions. + uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions. } SInputColumnInfoData; typedef struct SSerializeDataHandle { - struct SDiskbasedBuf* pBuf; + struct SDiskbasedBuf *pBuf; int32_t currentPage; + void *pState; } SSerializeDataHandle; // sql function runtime context typedef struct SqlFunctionCtx { - SInputColumnInfoData input; - SResultDataInfo resDataInfo; - uint32_t order; // data block scanner order: asc|desc - uint8_t scanFlag; // record current running step, default: 0 - int16_t functionId; // function id - char *pOutput; // final result output buffer, point to sdata->data - int32_t numOfParams; - SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param - SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ - int32_t offset; - struct SResultRowEntryInfo *resultInfo; - SSubsidiaryResInfo subsidiaries; - SPoint1 start; - SPoint1 end; - SFuncExecFuncs fpSet; - SScalarFuncExecFuncs sfp; - struct SExprInfo *pExpr; - struct SSDataBlock *pSrcBlock; - struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity - SSerializeDataHandle saveHandle; - bool isStream; + SInputColumnInfoData input; + SResultDataInfo resDataInfo; + uint32_t order; // data block scanner order: asc|desc + uint8_t scanFlag; // record current running step, default: 0 + int16_t functionId; // function id + char *pOutput; // final result output buffer, point to sdata->data + int32_t numOfParams; + SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param + SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ + int32_t offset; + struct SResultRowEntryInfo *resultInfo; + SSubsidiaryResInfo subsidiaries; + SPoint1 start; + SPoint1 end; + SFuncExecFuncs fpSet; + SScalarFuncExecFuncs sfp; + struct SExprInfo *pExpr; + struct SSDataBlock *pSrcBlock; + struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity + SSerializeDataHandle saveHandle; + bool isStream; - char udfName[TSDB_FUNC_NAME_LEN]; + char udfName[TSDB_FUNC_NAME_LEN]; } SqlFunctionCtx; enum { - TEXPR_BINARYEXPR_NODE= 0x1, + TEXPR_BINARYEXPR_NODE = 0x1, TEXPR_UNARYEXPR_NODE = 0x2, }; typedef struct tExprNode { int32_t nodeType; union { - struct {// function node - char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor - int32_t functionId; - int32_t num; - struct SFunctionNode *pFunctNode; + struct { // function node + char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor + int32_t functionId; + int32_t num; + struct SFunctionNode *pFunctNode; } _function; struct { - struct SNode* pRootNode; + struct SNode *pRootNode; } _optrRoot; }; } tExprNode; @@ -182,17 +186,18 @@ struct SScalarParam { int32_t numOfRows; }; -void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell); -int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock); -bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry); -bool 
isRowEntryInitialized(struct SResultRowEntryInfo* pEntry); +void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell); +int32_t getNumOfResult(SqlFunctionCtx *pCtx, int32_t num, SSDataBlock *pResBlock); +bool isRowEntryCompleted(struct SResultRowEntryInfo *pEntry); +bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry); typedef struct SPoint { int64_t key; - void * val; + void *val; } SPoint; -int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType); +int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2, + int32_t inputType); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // udf api diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h new file mode 100644 index 0000000000..df19544396 --- /dev/null +++ b/include/libs/stream/streamState.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "tdatablock.h" +#include "tdbInt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _STREAM_STATE_H_ +#define _STREAM_STATE_H_ + +typedef struct SStreamTask SStreamTask; + +// incremental state storage +typedef struct { + SStreamTask* pOwner; + TDB* db; + TTB* pStateDb; + TTB* pFuncStateDb; + TXN txn; +} SStreamState; + +SStreamState* streamStateOpen(char* path, SStreamTask* pTask); +void streamStateClose(SStreamState* pState); +int32_t streamStateBegin(SStreamState* pState); +int32_t streamStateCommit(SStreamState* pState); +int32_t streamStateAbort(SStreamState* pState); + +typedef struct { + TBC* pCur; +} SStreamStateCur; + +#if 1 +int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen); +int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key); + +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateDel(SStreamState* pState, const SWinKey* key); +int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal); +void streamFreeVal(void* val); + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key); +void streamStateFreeCur(SStreamStateCur* pCur); + +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateSeekLast(SStreamState* pState, 
SStreamStateCur* pCur); + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ifndef _STREAM_STATE_H_ */ diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index afd8de6b1c..554d66d621 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -16,6 +16,7 @@ #include "executor.h" #include "os.h" #include "query.h" +#include "streamState.h" #include "tdatablock.h" #include "tdbInt.h" #include "tmsg.h" @@ -263,14 +264,6 @@ typedef struct { SArray* checkpointVer; } SStreamRecoveringState; -// incremental state storage -typedef struct { - SStreamTask* pOwner; - TDB* db; - TTB* pStateDb; - TXN txn; -} SStreamState; - typedef struct SStreamTask { int64_t streamId; int32_t taskId; @@ -540,39 +533,6 @@ int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaRollBack(SStreamMeta* pMeta); int32_t streamLoadTasks(SStreamMeta* pMeta); -SStreamState* streamStateOpen(char* path, SStreamTask* pTask); -void streamStateClose(SStreamState* pState); -int32_t streamStateBegin(SStreamState* pState); -int32_t streamStateCommit(SStreamState* pState); -int32_t streamStateAbort(SStreamState* pState); - -typedef struct { - TBC* pCur; -} SStreamStateCur; - -#if 1 -int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); -int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); -int32_t streamStateDel(SStreamState* pState, const SWinKey* key); -int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); -int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal); -void streamFreeVal(void* val); - -SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key); -SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); -SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key); -void streamStateFreeCur(SStreamStateCur* pCur); - -int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); - -int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); -int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur); - -int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); -int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); - -#endif - #ifdef __cplusplus } #endif diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index ea401e56e5..dd048a047a 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -14,7 +14,7 @@ target_include_directories( target_link_libraries( function - PRIVATE os util common nodes scalar qcom transport + PRIVATE os util common nodes scalar qcom transport stream PUBLIC uv_a ) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 0d7fd1a6da..411c898657 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -18,6 +18,7 @@ #include "function.h" #include "query.h" #include "querynodes.h" +#include "streamState.h" #include "tcompare.h" #include "tdatablock.h" #include "tdigest.h" @@ -56,8 +57,13 @@ typedef struct SAvgRes { } SAvgRes; typedef struct STuplePos { - int32_t pageId; - int32_t offset; + union { + struct { + 
int32_t pageId; + int32_t offset; + }; + STupleKey streamTupleKey; + }; } STuplePos; typedef struct SMinmaxResInfo { @@ -1146,7 +1152,8 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { return true; } -static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock); +static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, + const STupleKey* pKey); static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos); @@ -1201,7 +1208,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { pBuf->v = *(int64_t*)tval; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } else { if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) { @@ -1213,7 +1220,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(int64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { @@ -1225,7 +1232,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(uint64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } else if (type == TSDB_DATA_TYPE_DOUBLE) { @@ -1237,7 +1244,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { *(double*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } else if (type == TSDB_DATA_TYPE_FLOAT) { @@ -1251,7 +1258,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } } } @@ -1276,7 +1283,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1307,7 +1314,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1338,7 +1345,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, 
pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1369,7 +1376,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1402,7 +1409,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1433,7 +1440,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1464,7 +1471,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1495,7 +1502,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1527,7 +1534,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1558,7 +1565,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL); } pBuf->assign = true; } else { @@ -1581,7 +1588,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { _min_max_over: if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) { - pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pBuf->nullTupleSaved = true; } return numOfElems; @@ -2758,7 +2765,7 @@ static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowInde } if (!pInfo->hasResult) { - pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock); + pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL); } else { updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); } @@ -3426,7 +3433,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pRes->nullTupleSaved = true; } return TSDB_CODE_SUCCESS; @@ -3454,7 +3461,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - pRes->nullTuplePos = 
saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pRes->nullTupleSaved = true; } @@ -3506,7 +3513,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple if (pCtx->subsidiaries.num > 0) { - pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock); + pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL); } #ifdef BUF_PAGE_DEBUG qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId, @@ -3578,7 +3585,8 @@ void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsid return buf; } -static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) { +static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, + const STupleKey* pKey) { STuplePos p = {0}; if (pHandle->pBuf != NULL) { SFilePage* pPage = NULL; @@ -3604,12 +3612,16 @@ static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf releaseBufPage(pHandle->pBuf, pPage); } else { // other tuple save policy + if (streamStateFuncPut(pHandle->pState, pKey, pBuf, length) < 0) { + ASSERT(0); + } + p.streamTupleKey = *pKey; } return p; } -STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) { +STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, const STupleKey* pKey) { if (pCtx->subsidiaries.rowLen == 0) { int32_t rowLen = 0; for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { @@ -3622,7 +3634,7 @@ STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBloc } char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); - return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen); + return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pKey); } static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) { @@ -3632,6 +3644,7 @@ static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf setBufPageDirty(pPage, true); releaseBufPage(pHandle->pBuf, pPage); } else { + streamStateFuncPut(pHandle->pState, &pPos->streamTupleKey, pBuf, length); } return TSDB_CODE_SUCCESS; @@ -3650,7 +3663,10 @@ static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPo releaseBufPage(pHandle->pBuf, pPage); return p; } else { - return NULL; + void* value = NULL; + int32_t vLen; + streamStateFuncGet(pHandle->pState, &pPos->streamTupleKey, &value, &vLen); + return (char*)value; } } @@ -4981,7 +4997,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da if (pInfo->numSampled < pInfo->samples) { sampleAssignResult(pInfo, data, pInfo->numSampled); if (pCtx->subsidiaries.num > 0) { - pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock); + pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL); } pInfo->numSampled++; } else { @@ -5012,7 +5028,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { } if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) { - pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL); pInfo->nullTupleSaved = true; 
} diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 5efdbb4679..6cd5132bb9 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -35,6 +35,10 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { goto _err; } + if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->db, &pState->pFuncStateDb) < 0) { + goto _err; + } + if (streamStateBegin(pState) < 0) { goto _err; } @@ -44,8 +48,9 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { return pState; _err: - if (pState->pStateDb) tdbTbClose(pState->pStateDb); - if (pState->db) tdbClose(pState->db); + tdbTbClose(pState->pStateDb); + tdbTbClose(pState->pFuncStateDb); + tdbClose(pState->db); taosMemoryFree(pState); return NULL; } @@ -53,6 +58,7 @@ _err: void streamStateClose(SStreamState* pState) { tdbCommit(pState->db, &pState->txn); tdbTbClose(pState->pStateDb); + tdbTbClose(pState->pFuncStateDb); tdbClose(pState->db); taosMemoryFree(pState); @@ -101,6 +107,17 @@ int32_t streamStateAbort(SStreamState* pState) { return 0; } +int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) { + return tdbTbUpsert(pState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->txn); +} +int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) { + return tdbTbGet(pState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen); +} + +int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) { + return tdbTbDelete(pState->pFuncStateDb, key, sizeof(STupleKey), &pState->txn); +} + int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn); } From c7f986a656c4ba73e09bec2b338a9c0557a177bc Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Wed, 14 Sep 2022 17:11:50 +0800 Subject: [PATCH 49/49] refactor(tmq): add error code --- include/util/taoserror.h | 1 + source/dnode/mnode/impl/src/mndSubscribe.c | 1 + source/dnode/mnode/impl/src/mndTopic.c | 1 - source/util/src/terror.c | 1 + 4 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index d16a599811..d3e1416b85 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -285,6 +285,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB) #define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC) #define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED) +#define TSDB_CODE_MND_IN_REBALANCE TAOS_DEF_ERROR_CODE(0, 0x03EF) // mnode-stream #define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0) diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 1452c5ae2f..8a968712e0 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -900,6 +900,7 @@ int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName) // iter all vnode to delete handle if (taosHashGetSize(pSub->consumerHash) != 0) { sdbRelease(pSdb, pSub); + terrno = TSDB_CODE_MND_IN_REBALANCE; return -1; } int32_t sz = taosArrayGetSize(pSub->unassignedVgs); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index eb072d013d..7b36966d6c 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ 
b/source/dnode/mnode/impl/src/mndTopic.c @@ -713,7 +713,6 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { mndReleaseTopic(pMnode, pTopic); if (code != 0) { - terrno = code; mError("topic:%s, failed to drop since %s", dropReq.name, terrstr()); return -1; } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 044cdc86b4..dbe8be6a81 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -288,6 +288,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_READY, "Consumer not ready") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_SUBSCRIBED, "Topic subscribed cannot be dropped") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_MUST_BE_DELETED, "Topic must be dropped first") TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being used by some consumer") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_IN_REBALANCE, "Topic being rebalanced") // mnode-stream TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists")
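
Note on the tuple-storage change in builtinsimpl.c/streamState.c above: the patch turns STuplePos into a union, so a saved tuple position is either a (pageId, offset) pair in the disk-based buffer or an STupleKey held in the per-task stream state, and doSaveTupleData/doLoadTupleData pick the policy based on whether pHandle->pBuf is set. Below is a minimal, self-contained C sketch of that branching, not TDengine code; DemoTupleKey, KvStore, kvPut, kvGet and demoSaveTuple are hypothetical stand-ins for STupleKey, the tdb-backed func.state.db table, streamStateFuncPut/streamStateFuncGet and doSaveTupleData.

/*
 * Minimal sketch (not TDengine code): the dual tuple-save policy introduced
 * above.  With a disk-based buffer the saved position is a (pageId, offset)
 * pair; without one the tuple is written to a key-value state store and the
 * position records only the key, mirroring STuplePos.streamTupleKey.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int64_t groupId; int64_t ts; } DemoTupleKey;  /* stands in for STupleKey */

typedef struct {
  union {
    struct { int32_t pageId; int32_t offset; };  /* disk-based buffer position */
    DemoTupleKey streamKey;                      /* stream-state key */
  };
} DemoTuplePos;

/* toy key-value store standing in for the tdb-backed stream state */
typedef struct { DemoTupleKey key; char val[64]; int used; } KvSlot;
typedef struct { KvSlot slots[16]; } KvStore;

static int kvPut(KvStore* s, const DemoTupleKey* k, const void* v, size_t len) {
  for (int i = 0; i < 16; i++) {
    if (!s->slots[i].used) {
      s->slots[i].key = *k;
      memcpy(s->slots[i].val, v, len < sizeof(s->slots[i].val) ? len : sizeof(s->slots[i].val));
      s->slots[i].used = 1;
      return 0;
    }
  }
  return -1;
}

static const char* kvGet(const KvStore* s, const DemoTupleKey* k) {
  for (int i = 0; i < 16; i++) {
    if (s->slots[i].used && memcmp(&s->slots[i].key, k, sizeof(*k)) == 0) return s->slots[i].val;
  }
  return NULL;
}

/* choose the save policy from whether a page buffer exists, like doSaveTupleData */
static DemoTuplePos demoSaveTuple(char* pageBuf, KvStore* state, const DemoTupleKey* key,
                                  const void* data, size_t len) {
  DemoTuplePos pos = {0};
  if (pageBuf != NULL) {  /* batch query path: append into the buffered page */
    memcpy(pageBuf, data, len);
    pos.pageId = 0;
    pos.offset = 0;
  } else {                /* stream path: persist through the state store, keep only the key */
    if (kvPut(state, key, data, len) < 0) abort();
    pos.streamKey = *key;
  }
  return pos;
}

int main(void) {
  KvStore      state = {0};
  DemoTupleKey key = {.groupId = 1, .ts = 1663140000000};
  DemoTuplePos pos = demoSaveTuple(NULL, &state, &key, "tag-value", 10);  /* stream path */
  printf("loaded back: %s\n", kvGet(&state, &pos.streamKey));             /* prints tag-value */
  return 0;
}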
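
Note on patch 49/49: mndDropSubByTopic now sets terrno = TSDB_CODE_MND_IN_REBALANCE before returning -1, which lines up with dropping the `terrno = code` assignment in mndProcessDropTopicReq; the failing callee has presumably already recorded the reason, so the caller only logs terrstr() and propagates the failure. A minimal C sketch of that convention follows, not TDengine code; demoTerrno and demoDropSub are hypothetical stand-ins for terrno and mndDropSubByTopic.

/*
 * Minimal sketch (not TDengine code): the callee records the error reason in
 * a thread-local code and returns -1; the caller reports it without
 * overwriting it with its own -1 return value.
 */
#include <stdio.h>

#define DEMO_CODE_MND_IN_REBALANCE 0x03EF

static _Thread_local int demoTerrno = 0;  /* terrno is thread-local in the real code */

static int demoDropSub(int activeConsumers) {
  if (activeConsumers != 0) {              /* like taosHashGetSize(pSub->consumerHash) != 0 */
    demoTerrno = DEMO_CODE_MND_IN_REBALANCE;
    return -1;
  }
  return 0;
}

int main(void) {
  if (demoDropSub(3) != 0) {
    /* the caller does not reassign demoTerrno; it just reports and bubbles up the failure */
    printf("failed to drop since 0x%04X (topic being rebalanced)\n", (unsigned)demoTerrno);
  }
  return 0;
}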