From eb6250de4f85a0604f98111e40e0c93dbcb69df2 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 7 Jun 2022 17:41:11 +0800 Subject: [PATCH 01/14] feat: support multiway sort merge --- source/common/src/tdatablock.c | 2 +- source/libs/executor/inc/executorimpl.h | 2 +- source/libs/executor/src/executorimpl.c | 5 +++-- source/libs/executor/src/sortoperator.c | 13 ++++++++----- source/libs/executor/src/tsort.c | 18 +++++++++++++++--- source/libs/planner/src/planSpliter.c | 4 ++-- 6 files changed, 30 insertions(+), 14 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d9293433ea..fbe246434c 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1213,7 +1213,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) { pBlock->info.numOfCols = numOfCols; pBlock->info.hasVarCol = pDataBlock->info.hasVarCol; - pBlock->info.rowSize = pDataBlock->info.rows; + pBlock->info.rowSize = pDataBlock->info.rowSize; for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData colInfo = {0}; diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 1e0739e066..78ab34b304 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -785,7 +785,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SNode* pCondition, SExecTaskInfo* pTaskInfo); SOperatorInfo *createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols, SArray* pIndexMap, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, +SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, SSDataBlock* pInputBlock, SSDataBlock* pResBlock, SArray* pSortInfo, SArray* pColMatchColInfo, SExecTaskInfo* pTaskInfo); SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t num, SArray* pSortInfo, SArray* pGroupInfo, SExecTaskInfo* pTaskInfo); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index f08c2f95ba..c4b72be465 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4660,8 +4660,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo int32_t numOfOutputCols = 0; SArray* pColList = extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_SLOT_ID); - - pOptr = createMultiwaySortMergeOperatorInfo(ops, size, pResBlock, sortInfo, pColList, pTaskInfo); + SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, 0); + SSDataBlock* pInputDataBlock = createResDataBlock(pChildNode->pOutputDataBlockDesc); + pOptr = createMultiwaySortMergeOperatorInfo(ops, size, pInputDataBlock, pResBlock, sortInfo, pColList, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW == type) { SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index c84d4491af..b5195e65b8 100644 --- a/source/libs/executor/src/sortoperator.c +++ 
b/source/libs/executor/src/sortoperator.c @@ -230,6 +230,7 @@ typedef struct SMultiwaySortMergeOperatorInfo { SSortHandle* pSortHandle; SArray* pColMatchInfo; // for index map from table scan output + SSDataBlock* pInputBlock; int64_t startTs; // sort start time } SMultiwaySortMergeOperatorInfo; @@ -246,14 +247,14 @@ int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) { int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_MULTISOURCE_MERGE, - pInfo->bufPageSize, numOfBufPage, NULL, pTaskInfo->id.str); + pInfo->bufPageSize, numOfBufPage, pInfo->pInputBlock, pTaskInfo->id.str); tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL); for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) { - SSortSource ps = {0}; - ps.param = pOperator->pDownstream[i]; - tsortAddSource(pInfo->pSortHandle, &ps); + SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource)); + ps->param = pOperator->pDownstream[i]; + tsortAddSource(pInfo->pSortHandle, ps); } int32_t code = tsortOpen(pInfo->pSortHandle); @@ -296,6 +297,7 @@ SSDataBlock* doMultiwaySortMerge(SOperatorInfo* pOperator) { void destroyMultiwaySortMergeOperatorInfo(void* param, int32_t numOfOutput) { SMultiwaySortMergeOperatorInfo * pInfo = (SMultiwaySortMergeOperatorInfo*)param; pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); + pInfo->pInputBlock = blockDataDestroy(pInfo->pInputBlock); taosArrayDestroy(pInfo->pSortInfo); taosArrayDestroy(pInfo->pColMatchInfo); @@ -313,7 +315,7 @@ int32_t getMultiwaySortMergeExplainExecInfo(SOperatorInfo* pOptr, void** pOptrEx return TSDB_CODE_SUCCESS; } -SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, +SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, SSDataBlock* pInputBlock, SSDataBlock* pResBlock, SArray* pSortInfo, SArray* pColMatchColInfo, SExecTaskInfo* pTaskInfo) { SMultiwaySortMergeOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SMultiwaySortMergeOperatorInfo)); @@ -330,6 +332,7 @@ SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, pInfo->pSortInfo = pSortInfo; pInfo->pColMatchInfo = pColMatchColInfo; + pInfo->pInputBlock = pInputBlock; pOperator->name = "MultiwaySortMerge"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE; pOperator->blocking = true; diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 7581836d59..3705d0a57b 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -225,6 +225,10 @@ static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int for (int32_t i = 0; i < cmpParam->numOfSources; ++i) { SSortSource* pSource = cmpParam->pSources[i]; pSource->src.pBlock = pHandle->fetchfp(pSource->param); + if (pSource->src.pBlock == NULL) { + pSource->src.rowIndex = -1; + ++pHandle->numOfCompletedSources; + } } } @@ -361,13 +365,21 @@ int32_t msortComparFn(const void *pLeft, const void *pRight, void *param) { bool leftNull = false; if (pLeftColInfoData->hasNull) { - leftNull = colDataIsNull(pLeftColInfoData, pLeftBlock->info.rows, pLeftSource->src.rowIndex, pLeftBlock->pBlockAgg[pOrder->slotId]); + if (pLeftBlock->pBlockAgg == NULL) { + leftNull = colDataIsNull_s(pLeftColInfoData, pLeftSource->src.rowIndex); + } else { + leftNull = colDataIsNull(pLeftColInfoData, pLeftBlock->info.rows, pLeftSource->src.rowIndex, pLeftBlock->pBlockAgg[i]); + 
} } SColumnInfoData* pRightColInfoData = TARRAY_GET_ELEM(pRightBlock->pDataBlock, pOrder->slotId); bool rightNull = false; if (pRightColInfoData->hasNull) { - rightNull = colDataIsNull(pRightColInfoData, pRightBlock->info.rows, pRightSource->src.rowIndex, pRightBlock->pBlockAgg[pOrder->slotId]); + if (pLeftBlock->pBlockAgg == NULL) { + rightNull = colDataIsNull_s(pRightColInfoData, pRightSource->src.rowIndex); + } else { + rightNull = colDataIsNull(pRightColInfoData, pRightBlock->info.rows, pRightSource->src.rowIndex, pRightBlock->pBlockAgg[i]); + } } if (leftNull && rightNull) { @@ -408,7 +420,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { pHandle->totalElapsed = taosGetTimestampUs() - pHandle->startTs; qDebug("%s %d rounds mergesort required to complete the sort, first-round sorted data size:%"PRIzu", sort elapsed:%"PRId64", total elapsed:%"PRId64, - pHandle->idStr, (int32_t) (sortPass + 1), getTotalBufSize(pHandle->pBuf), pHandle->sortElapsed, pHandle->totalElapsed); + pHandle->idStr, (int32_t) (sortPass + 1), pHandle->pBuf ? getTotalBufSize(pHandle->pBuf) : 0, pHandle->sortElapsed, pHandle->totalElapsed); int32_t numOfRows = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize); blockDataEnsureCapacity(pHandle->pDataBlock, numOfRows); diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 7be33d54e3..5c8b41f87a 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -166,8 +166,8 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { } return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); } - // case QUERY_NODE_LOGIC_PLAN_SORT: - // return stbSplHasMultiTbScan(streamQuery, pNode); + case QUERY_NODE_LOGIC_PLAN_SORT: + return stbSplHasMultiTbScan(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_SCAN: return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode); default: From a9f019f6f24c91ed9e1fe02a38047cbd01d3b392 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 7 Jun 2022 18:52:54 +0800 Subject: [PATCH 02/14] feat: multi-way merge sort --- source/libs/executor/src/sortoperator.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index b5195e65b8..3034f409f7 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -84,7 +84,6 @@ void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle) { SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, SArray* pColMatchInfo) { blockDataCleanup(pDataBlock); - ASSERT(taosArrayGetSize(pColMatchInfo) == pDataBlock->info.numOfCols); SSDataBlock* p = tsortGetSortedDataBlock(pHandle); if (p == NULL) { From e8e68fab9a2d5206556467e292d0d1b136398d42 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 7 Jun 2022 20:59:44 +0800 Subject: [PATCH 03/14] feat: physical plan add clone interface --- include/libs/nodes/nodes.h | 14 +- source/libs/command/src/explain.c | 16 +- source/libs/executor/src/executorimpl.c | 23 +- source/libs/executor/src/joinoperator.c | 36 ++-- source/libs/executor/src/scanoperator.c | 199 +++++++++--------- source/libs/executor/src/timewindowoperator.c | 10 +- source/libs/nodes/src/nodesCloneFuncs.c | 51 +++++ source/libs/nodes/src/nodesCodeFuncs.c | 36 ++-- source/libs/nodes/src/nodesTraverseFuncs.c | 12 +- source/libs/nodes/src/nodesUtilFuncs.c | 20 +- 
source/libs/planner/src/planPhysiCreater.c | 15 +- source/libs/planner/src/planSpliter.c | 26 ++- 12 files changed, 266 insertions(+), 192 deletions(-) diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 36e9443ff9..7df731b95c 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -207,8 +207,8 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, QUERY_NODE_PHYSICAL_PLAN_PROJECT, - QUERY_NODE_PHYSICAL_PLAN_JOIN, - QUERY_NODE_PHYSICAL_PLAN_AGG, + QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN, + QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, QUERY_NODE_PHYSICAL_PLAN_EXCHANGE, QUERY_NODE_PHYSICAL_PLAN_MERGE, QUERY_NODE_PHYSICAL_PLAN_SORT, @@ -218,11 +218,11 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_FILL, - QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW, - QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW, - QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW, - QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW, - QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW, + QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION, + QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, + QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION, + QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, + QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, QUERY_NODE_PHYSICAL_PLAN_PARTITION, QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC, QUERY_NODE_PHYSICAL_PLAN_DISPATCH, diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 98ae5e2fb1..99ff4d406c 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -133,12 +133,12 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo pPhysiChildren = pPrjNode->node.pChildren; break; } - case QUERY_NODE_PHYSICAL_PLAN_JOIN: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode; pPhysiChildren = pJoinNode->node.pChildren; break; } - case QUERY_NODE_PHYSICAL_PLAN_AGG: { + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode; pPhysiChildren = pAggNode->node.pChildren; break; @@ -158,12 +158,12 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo pPhysiChildren = pIntNode->window.node.pChildren; break; } - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: { SSessionWinodwPhysiNode *pSessNode = (SSessionWinodwPhysiNode *)pNode; pPhysiChildren = pSessNode->window.node.pChildren; break; } - case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: { SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode; pPhysiChildren = pStateNode->window.node.pChildren; break; @@ -513,7 +513,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } - case QUERY_NODE_PHYSICAL_PLAN_JOIN: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode; EXPLAIN_ROW_NEW(level, EXPLAIN_JOIN_FORMAT, EXPLAIN_JOIN_STRING(pJoinNode->joinType)); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); @@ -553,7 +553,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } - case QUERY_NODE_PHYSICAL_PLAN_AGG: { + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode; EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); @@ -744,7 +744,7 @@ 
int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: { SSessionWinodwPhysiNode *pSessNode = (SSessionWinodwPhysiNode *)pNode; EXPLAIN_ROW_NEW(level, EXPLAIN_SESSION_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); @@ -782,7 +782,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } - case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: { SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode; EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT, diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 431aca3eb5..3e41885739 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3936,7 +3936,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* } pOperator->name = "TableAggregate"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_AGG; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_AGG; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->info = pInfo; @@ -4582,7 +4582,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SLimit slimit = {.limit = pProjPhyNode->slimit, .offset = pProjPhyNode->soffset}; pOptr = createProjectOperatorInfo(ops[0], pExprInfo, num, pResBlock, &limit, &slimit, pProjPhyNode->node.pConditions, pTaskInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_AGG == type) { + } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_AGG == type) { SAggPhysiNode* pAggNode = (SAggPhysiNode*)pPhyNode; SExprInfo* pExprInfo = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &num); SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); @@ -4662,7 +4662,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_SLOT_ID); pOptr = createMultiwaySortMergeOperatorInfo(ops, size, pResBlock, sortInfo, pColList, pTaskInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW == type) { + } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION == type) { SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode; STimeWindowAggSupp as = {.waterMark = pSessionNode->window.watermark, @@ -4674,7 +4674,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as, pTaskInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW == type) { + } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION == type) { SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode; STimeWindowAggSupp as = {.waterMark = pSessionNode->window.watermark, @@ -4694,7 +4694,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &num); pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW == type) { + } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) { SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode; STimeWindowAggSupp as = {.waterMark = pStateNode->window.watermark, .calTrigger = 
pStateNode->window.triggerType}; @@ -4706,9 +4706,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr; SColumn col = extractColumnFromColumnNode(pColNode); pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pTaskInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW == type) { + } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) { pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo); - } else if (QUERY_NODE_PHYSICAL_PLAN_JOIN == type) { + } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) { SJoinPhysiNode* pJoinNode = (SJoinPhysiNode*)pPhyNode; SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); @@ -5100,12 +5100,12 @@ int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) { return TDB_CODE_SUCCESS; } -int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo) { +int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pTaskInfo) { SExecTaskInfo* pTask = *(SExecTaskInfo**)pTaskInfo; - + switch (pNode->type) { case QUERY_NODE_PHYSICAL_PLAN_DELETE: { - SDeleterParam *pDeleterParam = taosMemoryCalloc(1, sizeof(SDeleterParam)); + SDeleterParam* pDeleterParam = taosMemoryCalloc(1, sizeof(SDeleterParam)); if (NULL == pDeleterParam) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -5116,7 +5116,7 @@ int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pT return TSDB_CODE_OUT_OF_MEMORY; } for (int32_t i = 0; i < tbNum; ++i) { - STableKeyInfo *pTable = taosArrayGet(pTask->tableqinfoList.pTableList, i); + STableKeyInfo* pTable = taosArrayGet(pTask->tableqinfoList.pTableList, i); taosArrayPush(pDeleterParam->pUidList, &pTable->uid); } @@ -5130,7 +5130,6 @@ int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pT return TSDB_CODE_SUCCESS; } - int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, EOPTR_EXEC_MODEL model) { uint64_t queryId = pPlan->id.queryId; diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index ad9e4d63f0..7c8ab244a1 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -13,20 +13,20 @@ * along with this program. If not, see . 
*/ +#include "executorimpl.h" #include "function.h" #include "os.h" #include "querynodes.h" -#include "tdatablock.h" -#include "tmsg.h" -#include "executorimpl.h" #include "tcompare.h" +#include "tdatablock.h" #include "thash.h" +#include "tmsg.h" #include "ttypes.h" -static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode); +static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode); static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator); -static void destroyMergeJoinOperator(void* param, int32_t numOfOutput); -static void extractTimeCondition(SJoinOperatorInfo *Info, SLogicConditionNode* pLogicConditionNode); +static void destroyMergeJoinOperator(void* param, int32_t numOfOutput); +static void extractTimeCondition(SJoinOperatorInfo* Info, SLogicConditionNode* pLogicConditionNode); SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, @@ -39,22 +39,22 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t initResultSizeInfo(pOperator, 4096); - pInfo->pRes = pResBlock; - pOperator->name = "MergeJoinOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_JOIN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExprInfo; + pInfo->pRes = pResBlock; + pOperator->name = "MergeJoinOperator"; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; pOperator->numOfExprs = numOfCols; - pOperator->info = pInfo; - pOperator->pTaskInfo = pTaskInfo; + pOperator->info = pInfo; + pOperator->pTaskInfo = pTaskInfo; if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) { SOperatorNode* pNode = (SOperatorNode*)pOnCondition; setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft); setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight); } else if (nodeType(pOnCondition) == QUERY_NODE_LOGIC_CONDITION) { - extractTimeCondition(pInfo, (SLogicConditionNode*) pOnCondition); + extractTimeCondition(pInfo, (SLogicConditionNode*)pOnCondition); } pOperator->fpSet = @@ -66,7 +66,7 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t return pOperator; - _error: +_error: taosMemoryFree(pInfo); taosMemoryFree(pOperator); pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; @@ -180,10 +180,10 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { return (pRes->info.rows > 0) ? 
pRes : NULL; } -static void extractTimeCondition(SJoinOperatorInfo *pInfo, SLogicConditionNode* pLogicConditionNode) { +static void extractTimeCondition(SJoinOperatorInfo* pInfo, SLogicConditionNode* pLogicConditionNode) { int32_t len = LIST_LENGTH(pLogicConditionNode->pParameterList); - for(int32_t i = 0; i < len; ++i) { + for (int32_t i = 0; i < len; ++i) { SNode* pNode = nodesListGetNode(pLogicConditionNode->pParameterList, i); if (nodeType(pNode) == QUERY_NODE_OPERATOR) { SOperatorNode* pn1 = (SOperatorNode*)pNode; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ba0397fd34..be17728daf 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -42,10 +42,11 @@ static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capac static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName); -static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock); -static bool processBlockWithProbability(const SSampleExecInfo *pInfo); +static void addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, + SSDataBlock* pBlock); +static bool processBlockWithProbability(const SSampleExecInfo* pInfo); -bool processBlockWithProbability(const SSampleExecInfo *pInfo) { +bool processBlockWithProbability(const SSampleExecInfo* pInfo) { #if 0 if (pInfo->sampleRatio == 1) { return true; @@ -261,7 +262,8 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca // currently only the tbname pseudo column if (pTableScanInfo->numOfPseudoExpr > 0) { - addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock); + addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, + pBlock); } int64_t st = taosGetTimestampMs(); @@ -295,7 +297,8 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow); } -void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) { +void addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, + SSDataBlock* pBlock) { // currently only the tbname pseudo column if (numOfPseudoExpr == 0) { return; @@ -311,7 +314,7 @@ void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_ int32_t dstSlotId = pExpr->base.resSchema.slotId; SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId); - + colInfoDataEnsureCapacity(pColInfoData, 0, pBlock->info.rows); colInfoDataCleanup(pColInfoData, pBlock->info.rows); @@ -391,10 +394,10 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, 0); int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals); - uint64_t *groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len); + uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len); if (groupId) { pBlock->info.groupId = *groupId; - }else if(len != 0){ + } else if (len != 0) { pBlock->info.groupId = calcGroupId(pTableScanInfo->keyBuf, len); 
taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &pBlock->info.groupId, sizeof(uint64_t)); } @@ -483,7 +486,8 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - qDebug("%s start to repeat descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + qDebug("%s start to repeat descending order scan data blocks due to query func required", + GET_TASKID(pTaskInfo)); for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); @@ -525,7 +529,7 @@ static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) { tsdbCleanupReadHandle(pTableScanInfo->dataReader); taosArrayDestroy(pTableScanInfo->pGroupCols); - for(int i = 0; i < taosArrayGetSize(pTableScanInfo->pGroupColVals); i++){ + for (int i = 0; i < taosArrayGetSize(pTableScanInfo->pGroupColVals); i++) { SGroupKeys key = *(SGroupKeys*)taosArrayGet(pTableScanInfo->pGroupColVals, i); taosMemoryFree(key.pData); } @@ -562,28 +566,28 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, } pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]}; -// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose + // pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose - pInfo->readHandle = *readHandle; - pInfo->interval = extractIntervalInfo(pTableScanNode); - pInfo->sample.sampleRatio= pTableScanNode->ratio; - pInfo->sample.seed = taosGetTimestampSec(); + pInfo->readHandle = *readHandle; + pInfo->interval = extractIntervalInfo(pTableScanNode); + pInfo->sample.sampleRatio = pTableScanNode->ratio; + pInfo->sample.seed = taosGetTimestampSec(); pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired; - pInfo->pResBlock = createResDataBlock(pDescNode); - pInfo->pFilterNode = pTableScanNode->scan.node.pConditions; - pInfo->dataReader = pDataReader; - pInfo->scanFlag = MAIN_SCAN; - pInfo->pColMatchInfo = pColList; - pInfo->curTWinIdx = 0; + pInfo->pResBlock = createResDataBlock(pDescNode); + pInfo->pFilterNode = pTableScanNode->scan.node.pConditions; + pInfo->dataReader = pDataReader; + pInfo->scanFlag = MAIN_SCAN; + pInfo->pColMatchInfo = pColList; + pInfo->curTWinIdx = 0; - pOperator->name = "TableScanOperator"; // for debug purpose + pOperator->name = "TableScanOperator"; // for debug purpose pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = numOfCols; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = numOfCols; + pOperator->pTaskInfo = pTaskInfo; // for table group pInfo->pGroupCols = groupKyes; @@ -604,7 +608,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, pOperator->cost.openCost = 0; return pOperator; - _error: +_error: taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); @@ -723,16 +727,19 @@ static void doClearBufferedBlocks(SStreamBlockScanInfo* pInfo) { } static bool isSessionWindow(SStreamBlockScanInfo* pInfo) { - return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW; + return 
pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION; } static bool isStateWindow(SStreamBlockScanInfo* pInfo) { - return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW; + return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE; } static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { SSDataBlock* pSDB = pInfo->pUpdateRes; - STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX,}; + STimeWindow win = { + .skey = INT64_MIN, + .ekey = INT64_MAX, + }; bool needRead = false; if (!isStateWindow(pInfo) && pInfo->updateResIndex < pSDB->info.rows) { SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, pInfo->primaryTsIndex); @@ -759,7 +766,7 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { SArray* pWins = pInfo->sessionSup.pStreamAggSup->pScanWindow; int32_t size = taosArrayGetSize(pWins); if (pInfo->scanWinIndex < size) { - win = *(STimeWindow *)taosArrayGet(pWins, pInfo->scanWinIndex); + win = *(STimeWindow*)taosArrayGet(pWins, pInfo->scanWinIndex); pInfo->scanWinIndex++; needRead = true; } else { @@ -790,11 +797,11 @@ static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo) { return pResult; } -static void getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool invertible, - SSDataBlock* pBlock, SSDataBlock* pUpdateBlock) { +static void getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool invertible, SSDataBlock* pBlock, + SSDataBlock* pUpdateBlock) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP); - TSKEY* ts = (TSKEY*)pColDataInfo->pData; + TSKEY* ts = (TSKEY*)pColDataInfo->pData; for (int32_t i = 0; i < pBlock->info.rows; i++) { if (updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, ts[i])) { taosArrayPush(pInfo->tsArray, ts + i); @@ -859,8 +866,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } return pInfo->pUpdateRes; } else { - if (isStateWindow(pInfo) && - taosArrayGetSize(pInfo->sessionSup.pStreamAggSup->pScanWindow) > 0) { + if (isStateWindow(pInfo) && taosArrayGetSize(pInfo->sessionSup.pStreamAggSup->pScanWindow) > 0) { pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; pInfo->updateResIndex = pInfo->pUpdateRes->info.rows; prepareDataScan(pInfo); @@ -974,9 +980,9 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } } -SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, - SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, - STimeWindowAggSupp* pTwSup) { +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, SArray* pTableIdList, + STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -987,12 +993,13 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan SScanPhysiNode* pScanPhyNode = &pTableScanNode->scan; SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; - SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, NULL, pTaskInfo); + SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, NULL, pTaskInfo); STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info; int32_t 
numOfCols = 0; - pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); + pInfo->pColMatchInfo = + extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo); SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t)); @@ -1025,8 +1032,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan } if (isSmaStream(pTableScanNode->triggerType)) { - pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval, - pTableScanNode->filesFactor); + pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval, pTableScanNode->filesFactor); } if (pSTInfo->interval.interval > 0 && pDataReader) { @@ -1040,27 +1046,27 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr); } - pInfo->readHandle = *pHandle; - pInfo->tableUid = pScanPhyNode->uid; + pInfo->readHandle = *pHandle; + pInfo->tableUid = pScanPhyNode->uid; pInfo->streamBlockReader = pHandle->reader; - pInfo->pRes = createResDataBlock(pDescNode); - pInfo->pCondition = pScanPhyNode->node.pConditions; - pInfo->pDataReader = pDataReader; - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - pInfo->pOperatorDumy = pTableScanDummy; - pInfo->interval = pSTInfo->interval; - pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->pCondition = pScanPhyNode->node.pConditions; + pInfo->pDataReader = pDataReader; + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + pInfo->pOperatorDumy = pTableScanDummy; + pInfo->interval = pSTInfo->interval; + pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; - pOperator->name = "StreamBlockScanOperator"; + pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pInfo->pRes->info.numOfCols; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pInfo->pRes->info.numOfCols; + pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, - NULL, operatorDummyCloseFn, NULL, NULL, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL); return pOperator; @@ -1445,8 +1451,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { } SRetrieveMetaTableRsp* pTableRsp = pInfo->pRsp; - setDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pTableRsp->numOfRows, pTableRsp->data, - pTableRsp->compLen, pOperator->numOfExprs, startTs, NULL, pInfo->scanCols); + setDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pTableRsp->numOfRows, pTableRsp->data, pTableRsp->compLen, + pOperator->numOfExprs, startTs, NULL, pInfo->scanCols); // todo log the filter info doFilterResult(pInfo); @@ -1519,7 +1525,8 @@ int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbT return numOfRows; } -SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode *pScanPhyNode, SExecTaskInfo* pTaskInfo) { +SOperatorInfo* 
createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, + SExecTaskInfo* pTaskInfo) { SSysTableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SSysTableScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -1529,16 +1536,16 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan SScanPhysiNode* pScanNode = &pScanPhyNode->scan; SDataBlockDescNode* pDescNode = pScanNode->node.pOutputDataBlockDesc; - SSDataBlock* pResBlock = createResDataBlock(pDescNode); + SSDataBlock* pResBlock = createResDataBlock(pDescNode); int32_t num = 0; SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, pTaskInfo, COL_MATCH_FROM_COL_ID); - pInfo->accountId = pScanPhyNode->accountId; + pInfo->accountId = pScanPhyNode->accountId; pInfo->showRewrite = pScanPhyNode->showRewrite; - pInfo->pRes = pResBlock; - pInfo->pCondition = pScanNode->node.pConditions; - pInfo->scanCols = colList; + pInfo->pRes = pResBlock; + pInfo->pCondition = pScanNode->node.pConditions; + pInfo->scanCols = colList; initResultSizeInfo(pOperator, 4096); @@ -1554,20 +1561,20 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan pInfo->readHandle = *(SReadHandle*)readHandle; } - pOperator->name = "SysTableScanOperator"; + pOperator->name = "SysTableScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pResBlock->info.numOfCols; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pResBlock->info.numOfCols; + pOperator->pTaskInfo = pTaskInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL, NULL, NULL); return pOperator; - _error: +_error: taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; @@ -1687,16 +1694,16 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { val.cid = pExprInfo[j].base.pParam[0].pCol->colId; const char* p = metaGetTableTagVal(&mr.me, pDst->info.type, &val); - char *data = NULL; - if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL){ - data = tTagValToData((const STagVal *)p, false); - }else { + char* data = NULL; + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { + data = tTagValToData((const STagVal*)p, false); + } else { data = (char*)p; } colDataAppend(pDst, count, data, (data == NULL)); - if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL - && IS_VAR_DATA_TYPE(((const STagVal *)p)->type) && data != NULL){ + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && + data != NULL) { taosMemoryFree(data); } } @@ -1726,7 +1733,8 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = blockDataDestroy(pInfo->pRes); } -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, + STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL 
|| pOperator == NULL) { @@ -1741,19 +1749,20 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi int32_t num = 0; SArray* colList = extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, pTaskInfo, COL_MATCH_FROM_COL_ID); - pInfo->pTableList = pTableListInfo; - pInfo->pColMatchInfo = colList; - pInfo->pRes = createResDataBlock(pDescNode);; - pInfo->readHandle = *pReadHandle; - pInfo->curPos = 0; - pOperator->name = "TagScanOperator"; + pInfo->pTableList = pTableListInfo; + pInfo->pColMatchInfo = colList; + pInfo->pRes = createResDataBlock(pDescNode); + ; + pInfo->readHandle = *pReadHandle; + pInfo->curPos = 0; + pOperator->name = "TagScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->pExpr = pExprInfo; - pOperator->numOfExprs = numOfExprs; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->pExpr = pExprInfo; + pOperator->numOfExprs = numOfExprs; + pOperator->pTaskInfo = pTaskInfo; initResultSizeInfo(pOperator, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index b621d729e0..9196a9095c 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1822,7 +1822,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf pInfo->tsSlotId = tsSlotId; pOperator->name = "StateWindowOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->pExpr = pExpr; @@ -1874,7 +1874,7 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo pInfo->winSup.prevTs = INT64_MIN; pInfo->reptScan = false; pOperator->name = "SessionWindowAggOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->pExpr = pExprInfo; @@ -2137,7 +2137,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SEx pInfo->pChildren = NULL; pOperator->name = "StreamSessionWindowAggOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->pExpr = pExprInfo; @@ -2624,7 +2624,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream goto _error; } pOperator->name = "StreamFinalSessionWindowAggOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION; int32_t numOfChild = 1; // Todo(liuyao) get it from phy plan pInfo = pOperator->info; pInfo->pChildren = taosArrayInit(8, sizeof(void*)); @@ -3015,7 +3015,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys pInfo->pChildren = NULL; pOperator->name = "StreamStateAggOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE; 
pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->numOfExprs = numOfCols; diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index b354470629..7e6a1f5e0c 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -456,6 +456,49 @@ static SNode* physiNodeCopy(const SPhysiNode* pSrc, SPhysiNode* pDst) { return (SNode*)pDst; } +static SNode* physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { + COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); + CLONE_NODE_LIST_FIELD(pScanCols); + CLONE_NODE_LIST_FIELD(pScanPseudoCols); + COPY_SCALAR_FIELD(uid); + COPY_SCALAR_FIELD(suid); + COPY_SCALAR_FIELD(tableType); + COPY_OBJECT_FIELD(tableName, sizeof(SName)); + return (SNode*)pDst; +} + +static SNode* physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { + return physiScanCopy(pSrc, pDst); +} + +static SNode* physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhysiNode* pDst) { + COPY_BASE_OBJECT_FIELD(scan, physiScanCopy); + COPY_OBJECT_FIELD(scanSeq[0], sizeof(uint8_t) * 2); + COPY_OBJECT_FIELD(scanRange, sizeof(STimeWindow)); + COPY_SCALAR_FIELD(ratio); + COPY_SCALAR_FIELD(dataRequired); + CLONE_NODE_LIST_FIELD(pDynamicScanFuncs); + CLONE_NODE_LIST_FIELD(pPartitionKeys); + COPY_SCALAR_FIELD(interval); + COPY_SCALAR_FIELD(offset); + COPY_SCALAR_FIELD(sliding); + COPY_SCALAR_FIELD(intervalUnit); + COPY_SCALAR_FIELD(slidingUnit); + COPY_SCALAR_FIELD(triggerType); + COPY_SCALAR_FIELD(watermark); + COPY_SCALAR_FIELD(tsColId); + COPY_SCALAR_FIELD(filesFactor); + return (SNode*)pDst; +} + +static SNode* physiSysTableScanCopy(const SSystemTableScanPhysiNode* pSrc, SSystemTableScanPhysiNode* pDst) { + COPY_BASE_OBJECT_FIELD(scan, physiScanCopy); + COPY_OBJECT_FIELD(mgmtEpSet, sizeof(SEpSet)); + COPY_SCALAR_FIELD(showRewrite); + COPY_SCALAR_FIELD(accountId); + return (SNode*)pDst; +} + static SNode* physiWindowCopy(const SWinodwPhysiNode* pSrc, SWinodwPhysiNode* pDst) { COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); CLONE_NODE_LIST_FIELD(pExprs); @@ -603,6 +646,14 @@ SNodeptr nodesCloneNode(const SNodeptr pNode) { return logicIndefRowsFuncCopy((const SIndefRowsFuncLogicNode*)pNode, (SIndefRowsFuncLogicNode*)pDst); case QUERY_NODE_LOGIC_SUBPLAN: return logicSubplanCopy((const SLogicSubplan*)pNode, (SLogicSubplan*)pDst); + case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return physiTagScanCopy((const STagScanPhysiNode*)pNode, (STagScanPhysiNode*)pDst); + case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: + case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN: + return physiTableScanCopy((const STableScanPhysiNode*)pNode, (STableScanPhysiNode*)pDst); + case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN: + return physiSysTableScanCopy((const SSystemTableScanPhysiNode*)pNode, (SSystemTableScanPhysiNode*)pDst); case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 2feb25d2bb..6b0c592cc0 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -220,9 +220,9 @@ const char* nodesNodeName(ENodeType type) { return "PhysiSystemTableScan"; case QUERY_NODE_PHYSICAL_PLAN_PROJECT: return "PhysiProject"; - case QUERY_NODE_PHYSICAL_PLAN_JOIN: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: return "PhysiJoin"; - case QUERY_NODE_PHYSICAL_PLAN_AGG: + 
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: return "PhysiAgg"; case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return "PhysiExchange"; @@ -242,13 +242,13 @@ const char* nodesNodeName(ENodeType type) { return "PhysiStreamSemiInterval"; case QUERY_NODE_PHYSICAL_PLAN_FILL: return "PhysiFill"; - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: return "PhysiSessionWindow"; - case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: return "PhysiStreamSessionWindow"; - case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: return "PhysiStateWindow"; - case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: return "PhysiStreamStateWindow"; case QUERY_NODE_PHYSICAL_PLAN_PARTITION: return "PhysiPartition"; @@ -3875,9 +3875,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return physiSysTableScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_PROJECT: return physiProjectNodeToJson(pObj, pJson); - case QUERY_NODE_PHYSICAL_PLAN_JOIN: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: return physiJoinNodeToJson(pObj, pJson); - case QUERY_NODE_PHYSICAL_PLAN_AGG: + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: return physiAggNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return physiExchangeNodeToJson(pObj, pJson); @@ -3893,11 +3893,11 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return physiIntervalNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_FILL: return physiFillNodeToJson(pObj, pJson); - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: - case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: return physiSessionWindowNodeToJson(pObj, pJson); - case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: - case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: return physiStateWindowNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_PARTITION: return physiPartitionNodeToJson(pObj, pJson); @@ -4008,9 +4008,9 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToPhysiSysTableScanNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_PROJECT: return jsonToPhysiProjectNode(pJson, pObj); - case QUERY_NODE_PHYSICAL_PLAN_JOIN: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: return jsonToPhysiJoinNode(pJson, pObj); - case QUERY_NODE_PHYSICAL_PLAN_AGG: + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: return jsonToPhysiAggNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return jsonToPhysiExchangeNode(pJson, pObj); @@ -4026,11 +4026,11 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToPhysiIntervalNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_FILL: return jsonToPhysiFillNode(pJson, pObj); - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: - case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: return jsonToPhysiSessionWindowNode(pJson, pObj); - case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: - case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: return jsonToPhysiStateWindowNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_PARTITION: return jsonToPhysiPartitionNode(pJson, pObj); diff --git 
a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c index b0169b7fd1..d8130d5650 100644 --- a/source/libs/nodes/src/nodesTraverseFuncs.c +++ b/source/libs/nodes/src/nodesTraverseFuncs.c @@ -467,7 +467,7 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk } break; } - case QUERY_NODE_PHYSICAL_PLAN_JOIN: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode; res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext); if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { @@ -478,7 +478,7 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk } break; } - case QUERY_NODE_PHYSICAL_PLAN_AGG: { + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { SAggPhysiNode* pAgg = (SAggPhysiNode*)pNode; res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext); if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { @@ -518,12 +518,12 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext); break; - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: - case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext); break; - case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: - case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: { SStateWinodwPhysiNode* pState = (SStateWinodwPhysiNode*)pNode; res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext); if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index a753871834..1ea9124839 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -255,9 +255,9 @@ SNodeptr nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SSystemTableScanPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_PROJECT: return makeNode(type, sizeof(SProjectPhysiNode)); - case QUERY_NODE_PHYSICAL_PLAN_JOIN: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: return makeNode(type, sizeof(SJoinPhysiNode)); - case QUERY_NODE_PHYSICAL_PLAN_AGG: + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: return makeNode(type, sizeof(SAggPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return makeNode(type, sizeof(SExchangePhysiNode)); @@ -277,13 +277,13 @@ SNodeptr nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SStreamSemiIntervalPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_FILL: return makeNode(type, sizeof(SFillPhysiNode)); - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: return makeNode(type, sizeof(SSessionWinodwPhysiNode)); - case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: return makeNode(type, sizeof(SStreamSessionWinodwPhysiNode)); - case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: return makeNode(type, sizeof(SStateWinodwPhysiNode)); - case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: return makeNode(type, sizeof(SStreamStateWinodwPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_PARTITION: return makeNode(type, sizeof(SPartitionPhysiNode)); @@ -657,14 +657,14 @@ 
void nodesDestroyNode(SNodeptr pNode) { nodesDestroyList(pPhyNode->pProjections); break; } - case QUERY_NODE_PHYSICAL_PLAN_JOIN: { + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode; destroyPhysiNode((SPhysiNode*)pPhyNode); nodesDestroyNode(pPhyNode->pOnConditions); nodesDestroyList(pPhyNode->pTargets); break; } - case QUERY_NODE_PHYSICAL_PLAN_AGG: { + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { SAggPhysiNode* pPhyNode = (SAggPhysiNode*)pNode; destroyPhysiNode((SPhysiNode*)pPhyNode); nodesDestroyList(pPhyNode->pExprs); @@ -689,8 +689,8 @@ void nodesDestroyNode(SNodeptr pNode) { case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode); break; - case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: - case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode); break; case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 6b3d82b644..52add839b4 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -556,7 +556,7 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SJoinLogicNode* pJoinLogicNode, SPhysiNode** pPhyNode) { SJoinPhysiNode* pJoin = - (SJoinPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pJoinLogicNode, QUERY_NODE_PHYSICAL_PLAN_JOIN); + (SJoinPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pJoinLogicNode, QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN); if (NULL == pJoin) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -738,7 +738,8 @@ static int32_t rewritePrecalcExpr(SPhysiPlanContext* pCxt, SNode* pNode, SNodeLi static int32_t createAggPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SAggLogicNode* pAggLogicNode, SPhysiNode** pPhyNode) { - SAggPhysiNode* pAgg = (SAggPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pAggLogicNode, QUERY_NODE_PHYSICAL_PLAN_AGG); + SAggPhysiNode* pAgg = + (SAggPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pAggLogicNode, QUERY_NODE_PHYSICAL_PLAN_HASH_AGG); if (NULL == pAgg) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -996,8 +997,7 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) { SSessionWinodwPhysiNode* pSession = (SSessionWinodwPhysiNode*)makePhysiNode( pCxt, (SLogicNode*)pWindowLogicNode, - (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW - : QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW)); + (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION : QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION)); if (NULL == pSession) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -1009,10 +1009,9 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) { - SStateWinodwPhysiNode* pState = - (SStateWinodwPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pWindowLogicNode, - (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE_WINDOW - : QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW)); + SStateWinodwPhysiNode* pState = (SStateWinodwPhysiNode*)makePhysiNode( + pCxt, (SLogicNode*)pWindowLogicNode, + (pCxt->pPlanCxt->streamQuery ? 
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE : QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE)); if (NULL == pState) { return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 7be33d54e3..e5c81eda4b 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -340,7 +340,7 @@ static int32_t stbSplCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pParent return code; } -static int32_t stbSplSplitWindowNodeForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) { +static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) { SLogicNode* pPartWindow = NULL; int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow); if (TSDB_CODE_SUCCESS == code) { @@ -363,7 +363,7 @@ static int32_t stbSplSplitWindowNodeForBatch(SSplitContext* pCxt, SStableSplitIn return code; } -static int32_t stbSplSplitWindowNodeForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) { +static int32_t stbSplSplitIntervalForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) { SLogicNode* pPartWindow = NULL; int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow); if (TSDB_CODE_SUCCESS == code) { @@ -379,14 +379,30 @@ static int32_t stbSplSplitWindowNodeForStream(SSplitContext* pCxt, SStableSplitI return code; } -static int32_t stbSplSplitWindowNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) { +static int32_t stbSplSplitInterval(SSplitContext* pCxt, SStableSplitInfo* pInfo) { if (pCxt->pPlanCxt->streamQuery) { - return stbSplSplitWindowNodeForStream(pCxt, pInfo); + return stbSplSplitIntervalForStream(pCxt, pInfo); } else { - return stbSplSplitWindowNodeForBatch(pCxt, pInfo); + return stbSplSplitIntervalForBatch(pCxt, pInfo); } } +static int32_t stbSplSplitSession(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + return TSDB_CODE_PLAN_INTERNAL_ERROR; +} + +static int32_t stbSplSplitWindowNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + switch (((SWindowLogicNode*)pInfo->pSplitNode)->winType) { + case WINDOW_TYPE_INTERVAL: + return stbSplSplitInterval(pCxt, pInfo); + case WINDOW_TYPE_SESSION: + return stbSplSplitSession(pCxt, pInfo); + default: + break; + } + return TSDB_CODE_PLAN_INTERNAL_ERROR; +} + static int32_t stbSplCreatePartAggNode(SAggLogicNode* pMergeAgg, SLogicNode** pOutput) { SNodeList* pFunc = pMergeAgg->pAggFuncs; pMergeAgg->pAggFuncs = NULL; From c94c760e134e01f0ffe9d626ff5c13dc428b0fb5 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 8 Jun 2022 09:31:28 +0800 Subject: [PATCH 04/14] add merge explain --- source/libs/command/inc/commandInt.h | 3 ++ source/libs/command/src/explain.c | 44 ++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 100e35bc3c..a3755a174e 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -47,6 +47,9 @@ extern "C" { #define EXPLAIN_TIME_WINDOWS_FORMAT "Time Window: interval=%" PRId64 "%c offset=%" PRId64 "%c sliding=%" PRId64 "%c" #define EXPLAIN_WINDOW_FORMAT "Window: gap=%" PRId64 #define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f" +#define EXPLAIN_MERGE_FORMAT "Merge" +#define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: " + #define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms" #define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms" diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 
98ae5e2fb1..1076dfa5b0 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -857,6 +857,50 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } + case QUERY_NODE_PHYSICAL_PLAN_MERGE: { + SMergePhysiNode *pMergeNode = (SMergePhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_MERGE_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + + SDataBlockDescNode *pDescNode = pMergeNode->node.pOutputDataBlockDesc; + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pMergeNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pMergeNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT); + for (int32_t i = 0; i < LIST_LENGTH(pMergeNode->pMergeKeys); ++i) { + SOrderByExprNode *ptn = nodesListGetNode(pMergeNode->pMergeKeys, i); + EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr)); + } + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pMergeNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pMergeNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } default: qError("not supported physical node type %d", pNode->type); return TSDB_CODE_QRY_APP_ERROR; From 262a221c1293e7fffd30315f52eaa9909f27d458 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Mon, 6 Jun 2022 13:23:04 +0800 Subject: [PATCH 05/14] distributed interval --- include/util/tpagedbuf.h | 7 + source/common/src/tdatablock.c | 2 +- source/libs/executor/inc/executorimpl.h | 7 +- source/libs/executor/src/executorimpl.c | 6 + source/libs/executor/src/scanoperator.c | 14 +- source/libs/executor/src/timewindowoperator.c | 293 ++++++++++++------ source/libs/function/inc/builtinsimpl.h | 1 + source/libs/function/src/builtins.c | 3 +- source/libs/function/src/builtinsimpl.c | 37 ++- source/libs/nodes/src/nodesCodeFuncs.c | 7 + source/util/src/tpagedbuf.c | 28 ++ tests/script/jenkins/basic.txt | 4 + tests/script/tsim/stream/session0.sim | 73 +++-- tests/script/tsim/stream/state0.sim | 107 +++---- 14 files changed, 388 insertions(+), 201 deletions(-) diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h index acaff759b7..af82e29ec5 100644 --- a/include/util/tpagedbuf.h +++ b/include/util/tpagedbuf.h @@ -188,6 +188,13 @@ SDiskbasedBufStatis getDBufStatis(const SDiskbasedBuf* pBuf); */ void dBufPrintStatis(const SDiskbasedBuf* pBuf); +/** + * Set all of page buffer are not need + * @param pBuf + * @return + */ +void clearDiskbasedBuf(SDiskbasedBuf* pBuf); + #ifdef __cplusplus } #endif diff 
--git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 9caa9a73a5..99ff4a4a42 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1500,7 +1500,7 @@ void blockDebugShowData(const SArray* dataBlocks, const char* flag) { for (int32_t k = 0; k < colNum; k++) { SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); - if (pColInfoData->hasNull) { + if (colDataIsNull(pColInfoData, rows, j, NULL)) { printf(" %15s |", "NULL"); continue; } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 4804984397..9b8b9dca26 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -488,6 +488,8 @@ typedef struct SStreamFinalIntervalOperatorInfo { int32_t order; // current SSDataBlock scan order STimeWindowAggSupp twAggSup; SArray* pChildren; + SSDataBlock* pUpdateRes; + SPhysiNode* pPhyNode; // create new child } SStreamFinalIntervalOperatorInfo; typedef struct SAggOperatorInfo { @@ -793,9 +795,8 @@ SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, + SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild); SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 431aca3eb5..e266178068 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4634,6 +4634,12 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo); + } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) { + int32_t children = 8; + pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children); + } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL == type) { + int32_t children = 0; + pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children); } else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) { SSortPhysiNode* pSortPhyNode = (SSortPhysiNode*)pPhyNode; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ba0397fd34..686add5508 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -956,16 +956,15 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { if (rows == 0) { pOperator->status = OP_EXEC_DONE; } else if (pInfo->pUpdateInfo) { - SSDataBlock* upRes = createOneDataBlock(pInfo->pRes, false); 
- getUpdateDataBlock(pInfo, true, pInfo->pRes, upRes); - if (upRes) { - pInfo->pUpdateRes = upRes; - if (upRes->info.type == STREAM_REPROCESS) { + blockDataCleanup(pInfo->pUpdateRes); + getUpdateDataBlock(pInfo, true, pInfo->pRes, pInfo->pUpdateRes); + if (pInfo->pUpdateRes->info.rows > 0) { + if (pInfo->pUpdateRes->info.type == STREAM_REPROCESS) { pInfo->updateResIndex = 0; pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES; - } else if (upRes->info.type == STREAM_INVERT) { + } else if (pInfo->pUpdateRes->info.type == STREAM_INVERT) { pInfo->scanMode = STREAM_SCAN_FROM_RES; - return upRes; + return pInfo->pUpdateRes; } } } @@ -1044,6 +1043,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan pInfo->tableUid = pScanPhyNode->uid; pInfo->streamBlockReader = pHandle->reader; pInfo->pRes = createResDataBlock(pDescNode); + pInfo->pUpdateRes = createResDataBlock(pDescNode); pInfo->pCondition = pScanPhyNode->node.pConditions; pInfo->pDataReader = pDataReader; pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index d7b47b9e50..95b7dc72b6 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -23,7 +23,6 @@ typedef enum SResultTsInterpType { RESULT_ROW_END_INTERP = 2, } SResultTsInterpType; -static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator); static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator); static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo); @@ -778,6 +777,22 @@ static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated return TSDB_CODE_SUCCESS; } +static void removeResult(SArray* pUpdated, TSKEY key) { + int32_t size = taosArrayGetSize(pUpdated); + int32_t index = binarySearch(pUpdated, size, key, TSDB_ORDER_DESC, getReskey); + if (index >= 0 && key == getReskey(pUpdated, index)) { + taosArrayRemove(pUpdated, index); + } +} + +static void removeResults(SArray* pWins, SArray* pUpdated) { + int32_t size = taosArrayGetSize(pWins); + for (int32_t i = 0; i < size; i++) { + STimeWindow* pW = taosArrayGet(pWins, i); + removeResult(pUpdated, pW->skey); + } +} + static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, int32_t scanFlag, SArray* pUpdated) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; @@ -1212,6 +1227,7 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, int SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId); SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + ASSERT(p1); doClearWindowImpl(p1, pSup->pResultBuf, pBinfo, numOfOutput); } @@ -1363,6 +1379,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(pChildOp); } } + nodesDestroyNode(pInfo->pPhyNode); } static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { @@ -1485,69 +1502,6 @@ _error: return NULL; } -SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, - int32_t primaryTsSlotId, STimeWindowAggSupp* pTwAggSupp, - SExecTaskInfo* pTaskInfo) { - SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo)); - SOperatorInfo* pOperator 
= taosMemoryCalloc(1, sizeof(SOperatorInfo)); - if (pInfo == NULL || pOperator == NULL) { - goto _error; - } - pInfo->order = TSDB_ORDER_ASC; - pInfo->interval = *pInterval; - pInfo->twAggSup = *pTwAggSupp; - pInfo->primaryTsIndex = primaryTsSlotId; - size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; - initResultSizeInfo(pOperator, 4096); - int32_t code = - initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); - initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); - int32_t numOfChild = 8; // Todo(liuyao) get it from phy plan - pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo)); - for (int32_t i = 0; i < numOfChild; i++) { - SSDataBlock* chRes = createOneDataBlock(pResBlock, false); - SOperatorInfo* pChildOp = createIntervalOperatorInfo(NULL, pExprInfo, numOfCols, chRes, pInterval, primaryTsSlotId, - pTwAggSupp, pTaskInfo); - if (pChildOp && chRes) { - taosArrayPush(pInfo->pChildren, &pChildOp); - continue; - } - goto _error; - } - - pOperator->name = "StreamFinalIntervalOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL; - pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExprInfo; - pOperator->pTaskInfo = pTaskInfo; - pOperator->numOfExprs = numOfCols; - pOperator->info = pInfo; - - pOperator->fpSet = - createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo, - aggEncodeResultRow, aggDecodeResultRow, NULL); - - code = appendDownstream(pOperator, &downstream, 1); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - return pOperator; - -_error: - destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols); - taosMemoryFreeClear(pInfo); - taosMemoryFreeClear(pOperator); - pTaskInfo->code = code; - return NULL; -} - SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { @@ -1913,12 +1867,12 @@ _error: return NULL; } -static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) { +static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, + int32_t tableGroupId, SArray* pUpdated) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info; SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo); SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; int32_t numOfOutput = pOperatorInfo->numOfExprs; - SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); int32_t step = 1; bool ascScan = true; TSKEY* tsCols = NULL; @@ -1929,7 +1883,7 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); tsCols = (int64_t*)pColDataInfo->pData; } else { - return pUpdated; + return ; } int32_t startPos = ascScan ? 
0 : (pSDataBlock->info.rows - 1); @@ -1946,13 +1900,13 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB pos->groupId = tableGroupId; pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; *(int64_t*)pos->key = pResult->win.skey; - taosArrayPush(pUpdated, &pos); - forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL, - TSDB_ORDER_ASC); + forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, + nextWin.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) { + saveResult(pResult, tableGroupId, pUpdated); + } // window start(end) key interpolation - // disable it temporarily - // doWindowBorderInterpolation(pInfo, pSDataBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, - // forwardRows); + // doWindowBorderInterpolation(pInfo, pSDataBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); @@ -1962,7 +1916,6 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB break; } } - return pUpdated; } bool isFinalInterval(SStreamFinalIntervalOperatorInfo* pInfo) { return pInfo->pChildren != NULL; } @@ -2006,24 +1959,72 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SArra } } +static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) { + taosHashClear(pInfo->aggSup.pResultRowHashTable); + clearDiskbasedBuf(pInfo->aggSup.pResultBuf); + cleanupResultRowInfo(&pInfo->binfo.resultRowInfo); + initResultRowInfo(&pInfo->binfo.resultRowInfo, 1); +} + +static void clearUpdateDataBlock(SSDataBlock* pBlock) { + if (pBlock->info.rows <= 0) { + return; + } + blockDataCleanup(pBlock); +} + +static void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex) { + ASSERT(pDest->info.capacity >= pSource->info.rows); + clearUpdateDataBlock(pDest); + SColumnInfoData* pDestCol = taosArrayGet(pDest->pDataBlock, 0); + SColumnInfoData* pSourceCol = taosArrayGet(pSource->pDataBlock, tsColIndex); + // copy timestamp column + colDataAssign(pDestCol, pSourceCol, pSource->info.rows); + for (int32_t i = 1; i < pDest->info.numOfCols; i++) { + SColumnInfoData* pCol = taosArrayGet(pDest->pDataBlock, i); + colDataAppendNNULL(pCol, 0, pSource->info.rows); + } + pDest->info.rows = pSource->info.rows; + blockDataUpdateTsWindow(pDest, 0); +} + +static int32_t getChildIndex(SSDataBlock* pBlock) { + // if (pBlock->info.type != STREAM_INVALID && pBlock->info.rows < 4) { // for test + // return pBlock->info.rows - 1; + // } + return 0; +} + static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info; SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = NULL; + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); + SArray* pClosed = taosArrayInit(4, POINTER_BYTES); if (pOperator->status == OP_EXEC_DONE) { return NULL; } else if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); - if (pInfo->binfo.pRes->info.rows == 0 || 
!hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + if (pInfo->binfo.pRes->info.rows == 0) { pOperator->status = OP_EXEC_DONE; + if (isFinalInterval(pInfo) || pInfo->pUpdateRes->info.rows == 0) { + if (!isFinalInterval(pInfo)) { + // semi interval operator clear disk buffer + clearStreamIntervalOperator(pInfo); + } + return NULL; + } + // process the rest of the data + pOperator->status = OP_OPENED; + return pInfo->pUpdateRes; } - return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes; + return pInfo->binfo.pRes; } while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { + clearUpdateDataBlock(pInfo->pUpdateRes); break; } @@ -2033,31 +2034,149 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock, pUpWins); if (isFinalInterval(pInfo)) { - int32_t childIndex = 0; // Todo(liuyao) get child id from SSDataBlock + int32_t childIndex = getChildIndex(pBlock); SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); SIntervalAggOperatorInfo* pChildInfo = pChildOp->info; - doClearWindows(&pChildInfo->aggSup, &pChildInfo->binfo, &pChildInfo->interval, pChildInfo->primaryTsIndex, - pChildOp->numOfExprs, pBlock, NULL); - rebuildIntervalWindow(pInfo, pUpWins, pInfo->binfo.pRes->info.groupId, pOperator->numOfExprs, - pOperator->pTaskInfo); + doClearWindows(&pChildInfo->aggSup, &pChildInfo->binfo, &pChildInfo->interval, + pChildInfo->primaryTsIndex, pChildOp->numOfExprs, pBlock, NULL); + rebuildIntervalWindow(pInfo, pUpWins, pInfo->binfo.pRes->info.groupId, + pOperator->numOfExprs, pOperator->pTaskInfo); + taosArrayDestroy(pUpWins); + continue; } + removeResults(pUpWins, pUpdated); + copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex); taosArrayDestroy(pUpWins); - continue; + break; } if (isFinalInterval(pInfo)) { - int32_t chIndex = 1; // Todo(liuyao) get it from SSDataBlock + int32_t chIndex = getChildIndex(pBlock); + int32_t size = taosArrayGetSize(pInfo->pChildren); + // if chIndex + 1 - size > 0, add new child + for (int32_t i = 0; i < chIndex + 1 - size; i++) { + SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0); + if (!pChildOp) { + longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + taosArrayPush(pInfo->pChildren, &pChildOp); + } SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex); - doStreamIntervalAgg(pChildOp); + SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info; + setInputDataBlock(pChildOp, pChInfo->binfo.pCtx, pBlock, pChInfo->order, MAIN_SCAN, true); + doHashInterval(pChildOp, pBlock, pBlock->info.groupId, NULL); } - pUpdated = doHashInterval(pOperator, pBlock, 0); + doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdated); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } + + if (isFinalInterval(pInfo)) { + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, + &pInfo->interval, pClosed); + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed, + pInfo->binfo.rowCellInfoOffset); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + } + taosArrayDestroy(pClosed); finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); 
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); pOperator->status = OP_RES_TO_RETURN; - return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes; + if (pInfo->binfo.pRes->info.rows == 0) { + pOperator->status = OP_EXEC_DONE; + if (pInfo->pUpdateRes->info.rows == 0) { + return NULL; + } + // process the rest of the data + pOperator->status = OP_OPENED; + return pInfo->pUpdateRes; + } + return pInfo->binfo.pRes; +} + +SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, + SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild) { + SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode; + SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + if (pInfo == NULL || pOperator == NULL) { + goto _error; + } + pInfo->order = TSDB_ORDER_ASC; + pInfo->interval = (SInterval) {.interval = pIntervalPhyNode->interval, + .sliding = pIntervalPhyNode->sliding, + .intervalUnit = pIntervalPhyNode->intervalUnit, + .slidingUnit = pIntervalPhyNode->slidingUnit, + .offset = pIntervalPhyNode->offset, + .precision = + ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; + pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pIntervalPhyNode->window.watermark, + .calTrigger = pIntervalPhyNode->window.triggerType, + .maxTs = INT64_MIN, + .winMap = NULL, }; + pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; + size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; + initResultSizeInfo(pOperator, 4096); + int32_t numOfCols = 0; + SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &numOfCols); + SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); + int32_t code = initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, + pResBlock, keyBufSize, pTaskInfo->id.str); + initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); + pInfo->pChildren = NULL; + if (numOfChild > 0) { + pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo)); + for (int32_t i = 0; i < numOfChild; i++) { + SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pPhyNode, pTaskInfo, 0); + if (pChildOp) { + taosArrayPush(pInfo->pChildren, &pChildOp); + continue; + } + goto _error; + } + } + // semi interval operator does not catch result + if (!isFinalInterval(pInfo)) { + pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; + } + pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);\ + pInfo->pUpdateRes->info.type = STREAM_REPROCESS; + blockDataEnsureCapacity(pInfo->pUpdateRes, 128); + pInfo->pPhyNode = nodesCloneNode(pPhyNode); + + pOperator->name = "StreamFinalIntervalOperator"; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL; + pOperator->blocking = true; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; + pOperator->pTaskInfo = pTaskInfo; + pOperator->numOfExprs = numOfCols; + pOperator->info = pInfo; + + pOperator->fpSet = createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, + destroyStreamFinalIntervalOperatorInfo, 
aggEncodeResultRow, aggDecodeResultRow, + NULL); + + code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + return pOperator; + +_error: + destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols); + taosMemoryFreeClear(pInfo); + taosMemoryFreeClear(pOperator); + pTaskInfo->code = code; + return NULL; } void destroyStreamAggSupporter(SStreamAggSupporter* pSup) { diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index 68b83f4a19..dec5f717fd 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -67,6 +67,7 @@ bool leastSQRFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInf int32_t leastSQRFunction(SqlFunctionCtx* pCtx); int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); int32_t leastSQRInvertFunction(SqlFunctionCtx* pCtx); +int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool getPercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); bool percentileFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 61944705c5..303a85d137 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1118,7 +1118,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .initFunc = leastSQRFunctionSetup, .processFunc = leastSQRFunction, .finalizeFunc = leastSQRFinalize, - .invertFunc = leastSQRInvertFunction, + .invertFunc = NULL, + .combineFunc = leastSQRCombine, }, { .name = "avg", diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 4c7984c602..5f7c7756cd 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -1799,17 +1799,17 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { param[1][1] = (double)pInfo->num; param[1][0] = param[0][1]; - param[0][0] -= param[1][0] * (param[0][1] / param[1][1]); - param[0][2] -= param[1][2] * (param[0][1] / param[1][1]); - param[0][1] = 0; - param[1][2] -= param[0][2] * (param[1][0] / param[0][0]); - param[1][0] = 0; - param[0][2] /= param[0][0]; + double param00 = param[0][0] - param[1][0] * (param[0][1] / param[1][1]); + double param02 = param[0][2] - param[1][2] * (param[0][1] / param[1][1]); + // param[0][1] = 0; + double param12 = param[1][2] - param02 * (param[1][0] / param00); + // param[1][0] = 0; + param02 /= param00; - param[1][2] /= param[1][1]; + param12 /= param[1][1]; char buf[64] = {0}; - size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param[0][2], param[1][2]); + size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param02, param12); varDataSetLen(buf, len); colDataAppend(pCol, currentRow, buf, pResInfo->isNullRes); @@ -1822,6 +1822,27 @@ int32_t leastSQRInvertFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } +int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + SLeastSQRInfo* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + int32_t type = pDestCtx->input.pData[0]->info.type; + double (*pDparam)[3] = pDBuf->matrix; + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + SLeastSQRInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + double (*pSparam)[3] = pSBuf->matrix; + for 
(int32_t i = 0; i < pSBuf->num; i++) { + pDparam[0][0] += pDBuf->startVal * pDBuf->startVal; + pDparam[0][1] += pDBuf->startVal; + pDBuf->startVal += pDBuf->stepVal; + } + pDparam[0][2] += pSparam[0][2] + pDBuf->num * pDBuf->stepVal * pSparam[1][2]; + pDparam[1][2] += pSparam[1][2]; + pDBuf->num += pSBuf->num; + pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes); + return TSDB_CODE_SUCCESS; +} + bool getPercentileFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(SPercentileInfo); return true; diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 2feb25d2bb..fa4d1ba171 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2441,6 +2441,7 @@ static const char* jkValueLiteralSize = "LiteralSize"; static const char* jkValueLiteral = "Literal"; static const char* jkValueDuration = "Duration"; static const char* jkValueTranslate = "Translate"; +static const char* jkValueNotReserved = "NotReserved"; static const char* jkValueDatum = "Datum"; static int32_t datumToJson(const void* pObj, SJson* pJson) { @@ -2513,6 +2514,9 @@ static int32_t valueNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkValueTranslate, pNode->translate); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkValueNotReserved, pNode->notReserved); + } if (TSDB_CODE_SUCCESS == code && pNode->translate) { code = datumToJson(pNode, pJson); } @@ -2634,6 +2638,9 @@ static int32_t jsonToValueNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkValueTranslate, &pNode->translate); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkValueNotReserved, &pNode->notReserved); + } if (TSDB_CODE_SUCCESS == code && pNode->translate) { code = jsonToDatum(pJson, pNode); } diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 101ac78e18..cdf2629671 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -651,3 +651,31 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) { ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages)); } + +void clearDiskbasedBuf(SDiskbasedBuf* pBuf) { + SArray** p = taosHashIterate(pBuf->groupSet, NULL); + while (p) { + size_t n = taosArrayGetSize(*p); + for (int32_t i = 0; i < n; ++i) { + SPageInfo* pi = taosArrayGetP(*p, i); + taosMemoryFreeClear(pi->pData); + taosMemoryFreeClear(pi); + } + taosArrayDestroy(*p); + p = taosHashIterate(pBuf->groupSet, p); + } + + tdListEmpty(pBuf->lruList); + tdListEmpty(pBuf->freePgList); + + taosArrayClear(pBuf->emptyDummyIdList); + taosArrayClear(pBuf->pFree); + + taosHashClear(pBuf->groupSet); + taosHashClear(pBuf->all); + + pBuf->numOfPages = 0; // all pages are in buffer in the first place + pBuf->totalBufSize = 0; + pBuf->allocateId = -1; + pBuf->fileSize = 0; +} \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 3249c02e88..28bc98a972 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -72,6 +72,10 @@ ./test.sh -f tsim/stream/basic2.sim # ./test.sh -f tsim/stream/session0.sim # ./test.sh -f tsim/stream/session1.sim +# ./test.sh -f tsim/stream/state0.sim +# ./test.sh -f tsim/stream/triggerInterval0.sim +# ./test.sh -f tsim/stream/triggerSession0.sim + # 
---- transaction ./test.sh -f tsim/trans/lossdata1.sim diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim index a5cd73d17e..a2fe773edb 100644 --- a/tests/script/tsim/stream/session0.sim +++ b/tests/script/tsim/stream/session0.sim @@ -23,89 +23,98 @@ sql insert into t1 values(1648791223001,10,2,3,1.1,2); sql insert into t1 values(1648791233002,3,2,3,2.1,3); sql insert into t1 values(1648791243003,NULL,NULL,NULL,NULL,4); sql insert into t1 values(1648791213002,NULL,NULL,NULL,NULL,5) (1648791233012,NULL,NULL,NULL,NULL,6); + +$loop_count = 0 +loop0: + sleep 300 sql select * from streamt order by s desc; +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + # row 0 if $data01 != 3 then - print ======$data01 - return -1 + print ======data01=$data01 + goto loop0 endi if $data02 != 3 then - print ======$data02 - return -1 + print ======data02=$data02 + goto loop0 endi if $data03 != 3 then - print ======$data03 - return -1 + print ======data03=$data03 + goto loop0 endi if $data04 != 2.100000000 then - print ======$data04 + print ======data04=$data04 return -1 endi if $data05 != 0.000000000 then - print ======$data05 + print ======data05=$data05 return -1 endi if $data06 != 3 then - print ======$data05 + print ======data06=$data06 return -1 endi if $data07 != 2.100000000 then - print ======$data05 + print ======data07=$data07 return -1 endi if $data08 != 6 then - print ======$data05 + print ======data08=$data08 return -1 endi # row 1 if $data11 != 3 then - print ======$data11 - return -1 + print ======data11=$data11 + goto loop0 endi if $data12 != 10 then - print ======$data12 - return -1 + print ======data12=$data12 + goto loop0 endi if $data13 != 10 then - print ======$data13 - return -1 + print ======data13=$data13 + goto loop0 endi if $data14 != 1.100000000 then - print ======$data14 + print ======data14=$data14 return -1 endi if $data15 != 0.000000000 then - print ======$data15 + print ======data15=$data15 return -1 endi if $data16 != 10 then - print ======$data15 + print ======data16=$data16 return -1 endi if $data17 != 1.100000000 then - print ======$data17 + print ======data17=$data17 return -1 endi if $data18 != 5 then - print ======$data18 + print ======data18=$data18 return -1 endi @@ -115,23 +124,31 @@ sql insert into t1 values(1648791233002,3,2,3,2.1,9); sql insert into t1 values(1648791243003,4,2,3,3.1,10); sql insert into t1 values(1648791213002,4,2,3,4.1,11) ; sql insert into t1 values(1648791213002,4,2,3,4.1,12) (1648791223009,4,2,3,4.1,13); + +$loop_count = 0 +loop1: sleep 300 sql select * from streamt order by s desc ; +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + # row 0 if $data01 != 7 then - print ======$data01 - return -1 + print =====data01=$data01 + goto loop1 endi if $data02 != 9 then - print ======$data02 - return -1 + print =====data02=$data02 + goto loop1 endi if $data03 != 4 then - print ======$data03 - return -1 + print =====data03=$data03 + goto loop1 endi if $data04 != 1.100000000 then diff --git a/tests/script/tsim/stream/state0.sim b/tests/script/tsim/stream/state0.sim index 3529f836f4..2f2038b914 100644 --- a/tests/script/tsim/stream/state0.sim +++ b/tests/script/tsim/stream/state0.sim @@ -20,21 +20,33 @@ sql create stream streams1 trigger at_once into streamt1 as select _wstartts, sql insert into t1 values(1648791213000,1,2,3,1.0,1); sql insert into t1 values(1648791213000,1,2,3,1.0,2); +$loop_count = 0 +loop0: sql select * from streamt1 order by c desc; sleep 300 
+$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi if $rows != 1 then - print ======$rows - return -1; + print =====rows=$rows + goto loop0 endi sql insert into t1 values(1648791214000,1,2,3,1.0,3); +$loop_count = 0 +loop00: sql select * from streamt1 order by c desc; sleep 300 +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi if $rows != 1 then - print ======$rows - return -1; + print =====rows=$rows + goto loop00 endi sql insert into t1 values(1648791213010,2,2,3,1.0,4); @@ -44,27 +56,25 @@ $loop_count = 0 loop1: sql select * from streamt1 where c >=4 order by `_wstartts`; sleep 300 - $loop_count = $loop_count + 1 if $loop_count == 10 then return -1 endi if $rows != 3 then - print ======$rows + print =====rows=$rows goto loop1 - return -1 endi # row 0 if $data01 != 1 then - print ======$data01 - return -1 + print =====data01=$data01 + goto loop1 endi if $data02 != 1 then - print ======$data02 - return -1 + print =====data02=$data02 + goto loop1 endi if $data03 != 1 then @@ -151,11 +161,10 @@ endi sql insert into t1 values(1648791213011,1,2,3,1.0,7); -loop2: $loop_count = 0 +loop2: sql select * from streamt1 where c in (5,4,7) order by `_wstartts`; sleep 300 - $loop_count = $loop_count + 1 if $loop_count == 10 then return -1 @@ -163,57 +172,51 @@ endi # row 2 if $data21 != 2 then - print ======$data21 + print =====data21=$data21 goto loop2 return -1 endi if $data22 != 2 then - print ======$data22 + print =====data22=$data22 goto loop2 return -1 endi if $data23 != 2 then print ======$data23 - goto loop2 return -1 endi if $data24 != 1 then print ======$data24 - goto loop2 return -1 endi if $data25 != 3 then print ======$data25 - goto loop2 return -1 endi if $data26 != 7 then print ======$data26 - goto loop2 return -1 endi sql insert into t1 values(1648791213011,1,2,3,1.0,8); -loop21: $loop_count = 0 +loop21: sql select * from streamt1 where c in (5,4,8) order by `_wstartts`; sleep 300 - $loop_count = $loop_count + 1 if $loop_count == 10 then return -1 endi if $data26 != 8 then - print ======$data26 + print =====data26=$data26 goto loop21 - return -1 endi @@ -222,11 +225,10 @@ sql insert into t1 values(1648791213020,3,2,3,1.0,10); sql insert into t1 values(1648791214000,1,2,3,1.0,11); sql insert into t1 values(1648791213011,10,20,10,10.0,12); -loop3: $loop_count = 0 +loop3: sql select * from streamt1 where c in (5,4,10,11,12) order by `_wstartts`; sleep 300 - $loop_count = $loop_count + 1 if $loop_count == 10 then return -1 @@ -234,112 +236,100 @@ endi # row 2 if $data21 != 1 then - print ======$data21 + print =====data21=$data21 goto loop3 return -1 endi if $data22 != 1 then - print ======$data22 + print =====data22=$data22 goto loop3 return -1 endi if $data23 != 10 then print ======$data23 - goto loop3 return -1 endi if $data24 != 10 then print ======$data24 - goto loop3 return -1 endi if $data25 != 10 then print ======$data25 - goto loop3 return -1 endi if $data26 != 12 then print ======$data26 - goto loop3 return -1 endi # row 3 if $data31 != 1 then - print ======$data31 + print =====data31=$data31 goto loop3 return -1 endi if $data32 != 1 then - print ======$data32 + print =====data32=$data32 goto loop3 return -1 endi if $data33 != 3 then print ======$data33 - goto loop3 return -1 endi if $data34 != 3 then print ======$data34 - goto loop3 return -1 endi if $data35 != 3 then print ======$data35 - goto loop3 return -1 endi if $data36 != 10 then print ======$data36 - goto loop3 return -1 endi # row 4 if $data41 != 1 then - print 
======$data41 + print =====data41=$data41 goto loop3 return -1 endi if $data42 != 1 then - print ======$data42 + print =====data42=$data42 goto loop3 return -1 endi if $data43 != 1 then print ======$data43 - goto loop3 return -1 endi if $data44 != 1 then print ======$data44 - goto loop3 return -1 endi if $data45 != 3 then print ======$data45 - goto loop3 return -1 endi if $data46 != 11 then print ======$data46 - goto loop3 return -1 endi @@ -347,8 +337,8 @@ sql insert into t1 values(1648791213030,3,12,12,12.0,13); sql insert into t1 values(1648791214040,1,13,13,13.0,14); sql insert into t1 values(1648791213030,3,14,14,14.0,15) (1648791214020,15,15,15,15.0,16); -loop4: $loop_count = 0 +loop4: sql select * from streamt1 where c in (14,15,16) order by `_wstartts`; sleep 300 @@ -358,119 +348,104 @@ if $loop_count == 10 then endi if $rows != 3 then - print ======$rows - goto loop4 - return -1; + print ====loop4=rows=$rows +# goto loop4 endi # row 0 if $data01 != 2 then - print ======$data01 + print =====data01=$data01 goto loop4 - return -1 endi if $data02 != 2 then print ======$data02 - goto loop4 return -1 endi if $data03 != 6 then print ======$data03 - goto loop4 return -1 endi if $data04 != 3 then print ======$data04 - goto loop4 return -1 endi if $data05 != 3 then print ======$data05 - goto loop4 return -1 endi if $data06 != 15 then print ======$data06 - goto loop4 return -1 endi # row 1 if $data11 != 1 then - print ======$data11 + print =====data11=$data11 goto loop4 return -1 endi if $data12 != 1 then - print ======$data12 + print =====data12=$data12 goto loop4 return -1 endi if $data13 != 15 then print ======$data13 - goto loop4 return -1 endi if $data14 != 15 then print ======$data14 - goto loop4 return -1 endi if $data15 != 15 then print ======$data15 - goto loop4 return -1 endi if $data16 != 16 then print ======$data16 - goto loop4 return -1 endi # row 2 if $data21 != 1 then - print ======$data21 + print =====data21=$data21 goto loop4 return -1 endi if $data22 != 1 then - print ======$data22 + print =====data22=$data22 goto loop4 return -1 endi if $data23 != 1 then print ======$data23 - goto loop4 return -1 endi if $data24 != 1 then print ======$data24 - goto loop4 return -1 endi if $data25 != 13 then print ======$data25 - goto loop4 return -1 endi if $data26 != 14 then print ======$data26 - goto loop4 return -1 endi From 626f682f2bb98e40ddc1b5a6cfd59f95f7f96696 Mon Sep 17 00:00:00 2001 From: Zhengmao Zhu <70138133+fenghuazzm@users.noreply.github.com> Date: Wed, 8 Jun 2022 11:46:47 +0800 Subject: [PATCH 06/14] docs: update index.md about Monitor parameter --- docs-cn/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-cn/14-reference/12-config/index.md b/docs-cn/14-reference/12-config/index.md index 89c414a5b8..2d1866d5dd 100644 --- a/docs-cn/14-reference/12-config/index.md +++ b/docs-cn/14-reference/12-config/index.md @@ -134,7 +134,7 @@ taos --dump-config | 适用范围 | 仅服务端适用 | | 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽、HTTP 请求量的监控记录,记录信息存储在`LOG`库中。 | | 取值范围 | 0:关闭监控服务, 1:激活监控服务。 | -| 缺省值 | 0 | +| 缺省值 | 1 | ### monitorInterval From e9ec7b1391ae0fa08a15057611b0f2359adb5c1f Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Wed, 8 Jun 2022 12:00:29 +0800 Subject: [PATCH 07/14] Update index.md --- docs-en/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-en/14-reference/12-config/index.md b/docs-en/14-reference/12-config/index.md 
index 8ad9a474a0..b6b535429b 100644 --- a/docs-en/14-reference/12-config/index.md +++ b/docs-en/14-reference/12-config/index.md @@ -134,7 +134,7 @@ TDengine uses 13 continuous ports, both TCP and UDP, starting with the port spec | Applicable | Server Only | | Meaning | The switch for monitoring inside server. The workload of the hosts, including CPU, memory, disk, network, TTP requests, are collected and stored in a system builtin database `LOG` | | Value Range | 0: monitoring disabled, 1: monitoring enabled | -| Default Value | 0 | +| Default Value | 1 | ### monitorInterval From 10a94132244713975d3228f442ec0b8501c4074c Mon Sep 17 00:00:00 2001 From: dingbo Date: Wed, 8 Jun 2022 12:18:50 +0800 Subject: [PATCH 08/14] docs: emqx doucmentation --- docs-cn/20-third-party/09-emq-broker.md | 150 +++++++++--------------- docs-en/20-third-party/09-emq-broker.md | 134 +++++++-------------- docs-examples/other/mock.js | 78 ++++++++++++ 3 files changed, 173 insertions(+), 189 deletions(-) create mode 100644 docs-examples/other/mock.js diff --git a/docs-cn/20-third-party/09-emq-broker.md b/docs-cn/20-third-party/09-emq-broker.md index 833fa97e2e..5e5a065497 100644 --- a/docs-cn/20-third-party/09-emq-broker.md +++ b/docs-cn/20-third-party/09-emq-broker.md @@ -8,31 +8,24 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em ## 前置条件 要让 EMQX 能正常添加 TDengine 数据源,需要以下几方面的准备工作。 + - TDengine 集群已经部署并正常运行 - taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter) -- 如果使用后文介绍的模拟写入程序,需要安装合适版本的 Node.js,推荐安装 v12。 +- 如果使用后文介绍的模拟写入程序,需要安装合适版本的 Node.js,推荐安装 v12 ## 安装并启动 EMQX 用户可以根据当前的操作系统,到 EMQX 官网下载安装包,并执行安装。下载地址如下:。安装后使用 `sudo emqx start` 或 `sudo systemctl start emqx` 启动 EMQX 服务。 -## 在 TDengine 中为接收 MQTT 数据创建相应数据库和表结构 -### 以 Docker 安装 TDengine 为例 +## 创建数据库和表 -```bash - docker exec -it tdengine bash - taos -``` - -### 创建数据库和表 +在 TDengine 中为接收 MQTT 数据创建相应数据库和表结构。进入 TDengine CLI 复制并执行以下 SQL 语句: ```sql - create database test; - use test; - create table: - - CREATE TABLE sensor_data (ts timestamp, temperature float, humidity float, volume float, PM10 float, pm25 float, SO2 float, NO2 float, CO float, sensor_id NCHAR(255), area TINYINT, coll_time timestamp); +CREATE DATABASE test; +USE test; +CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP); ``` 注:表结构以博客[数据传输、存储、展现,EMQX + TDengine 搭建 MQTT 物联网数据可视化平台](https://www.taosdata.com/blog/2020/08/04/1722.html)为例。后续操作均以此博客场景为例进行,请你根据实际应用场景进行修改。 @@ -43,7 +36,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em ### 登录 EMQX Dashboard -使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装用户名为 `admin` 密码为:`public` +使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装用户名为 `admin` 密码为:`public`。 ![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) @@ -55,6 +48,17 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em ### 编辑 SQL 字段 +复制以下内容输入到 SQL 编辑框: + +```sql +SELECT + payload +FROM + "sensor/data" +``` + +其中 `payload` 代表整个消息体, `sensor/data` 为本规则选取的消息主题。 + ![TDengine Database EMQX create rule](./emqx/create-rule.webp) ### 新增“动作(action handler)” @@ -65,101 +69,54 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em ![TDengine Database EMQX create resource](./emqx/create-resource.webp) -选择“发送数据到 Web 服务“并点击“新建资源”按钮: +选择“发送数据到 Web 服务”并点击“新建资源”按钮: ### 编辑“资源(Resource)” -选择“发送数据到 Web 服务“并填写 请求 URL 为 运行 taosAdapter 的服务器地址和端口(默认为 6041)。其他属性请保持默认值。 +选择“WebHook”并填写“请求 URL”为 taosAdapter 提供 REST 服务的地址,如果是本地启动的 
taosadapter, 那么默认地址为: + +``` +http://127.0.0.1:6041/rest/sql +``` + +其他属性请保持默认值。 ![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) ### 编辑“动作(action)” -编辑资源配置,增加 Authorization 认证的键/值配对项,相关文档请参考[ TDengine REST API 文档](https://docs.taosdata.com/reference/rest-api/)。在消息体中输入规则引擎替换模板。 +编辑资源配置,增加 Authorization 认证的键/值配对项。默认用户名和密码对应的 Authorization 值为: +``` +Basic cm9vdDp0YW9zZGF0YQ== +``` +相关文档请参考[ TDengine REST API 文档](/reference/rest-api/)。 + +在消息体中输入规则引擎替换模板: + +```sql +INSERT INTO test.sensor_data VALUES( + now, + ${payload.temperature}, + ${payload.humidity}, + ${payload.volume}, + ${payload.PM10}, + ${payload.pm25}, + ${payload.SO2}, + ${payload.NO2}, + ${payload.CO}, + '${payload.id}', + ${payload.area}, + ${payload.ts} +) +``` ![TDengine Database EMQX edit action](./emqx/edit-action.webp) +最后点击左下方的 “Create” 按钮,保存规则。 ## 编写模拟测试程序 ```javascript - // mock.js - const mqtt = require('mqtt') - const Mock = require('mockjs') - const EMQX_SERVER = 'mqtt://localhost:1883' - const CLIENT_NUM = 10 - const STEP = 5000 // 模拟采集时间间隔 ms - const AWAIT = 5000 // 每次发送完后休眠时间,防止消息速率过快 ms - const CLIENT_POOL = [] - startMock() - function sleep(timer = 100) { - return new Promise(resolve => { - setTimeout(resolve, timer) - }) - } - async function startMock() { - const now = Date.now() - for (let i = 0; i < CLIENT_NUM; i++) { - const client = await createClient(`mock_client_${i}`) - CLIENT_POOL.push(client) - } - // last 24h every 5s - const last = 24 * 3600 * 1000 - for (let ts = now - last; ts <= now; ts += STEP) { - for (const client of CLIENT_POOL) { - const mockData = generateMockData() - const data = { - ...mockData, - id: client.clientId, - area: 0, - ts, - } - client.publish('sensor/data', JSON.stringify(data)) - } - const dateStr = new Date(ts).toLocaleTimeString() - console.log(`${dateStr} send success.`) - await sleep(AWAIT) - } - console.log(`Done, use ${(Date.now() - now) / 1000}s`) - } - /** - * Init a virtual mqtt client - * @param {string} clientId ClientID - */ - function createClient(clientId) { - return new Promise((resolve, reject) => { - const client = mqtt.connect(EMQX_SERVER, { - clientId, - }) - client.on('connect', () => { - console.log(`client ${clientId} connected`) - resolve(client) - }) - client.on('reconnect', () => { - console.log('reconnect') - }) - client.on('error', (e) => { - console.error(e) - reject(e) - }) - }) - } - /** - * Generate mock data - */ - function generateMockData() { - return { - "temperature": parseFloat(Mock.Random.float(22, 100).toFixed(2)), - "humidity": parseFloat(Mock.Random.float(12, 86).toFixed(2)), - "volume": parseFloat(Mock.Random.float(20, 200).toFixed(2)), - "PM10": parseFloat(Mock.Random.float(0, 300).toFixed(2)), - "pm25": parseFloat(Mock.Random.float(0, 300).toFixed(2)), - "SO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)), - "NO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)), - "CO": parseFloat(Mock.Random.float(0, 50).toFixed(2)), - "area": Mock.Random.integer(0, 20), - "ts": 1596157444170, - } - } +${{#include docs-examples/other/mock.js}} ``` 注意:代码中 CLIENT_NUM 在开始测试中可以先设置一个较小的值,避免硬件性能不能完全处理较大并发客户端数量。 @@ -189,4 +146,3 @@ node mock.js TDengine 详细使用方法请参考 [TDengine 官方文档](https://docs.taosdata.com/)。 EMQX 详细使用方法请参考 [EMQX 官方文档](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html)。 - diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md index d3eafebc14..bd2fcbe9a3 100644 --- a/docs-en/20-third-party/09-emq-broker.md +++ b/docs-en/20-third-party/09-emq-broker.md @@ -16,22 +16,15 @@ 
The following preparations are required for EMQX to add TDengine data sources co Depending on the current operating system, users can download the installation package from the [EMQX official website](https://www.emqx.io/downloads) and execute the installation. After installation, use `sudo emqx start` or `sudo systemctl start emqx` to start the EMQX service. -## Create the appropriate database and table schema in TDengine for receiving MQTT data -### Take the Docker installation of TDengine as an example +## Create Database and Table -```bash - docker exec -it tdengine bash - taos -``` - -### Create Database and Table +In this step we create the appropriate database and table schema in TDengine for receiving MQTT data. Open TDengine CLI and execute SQL bellow: ```sql - CREATE DATABASE test; - USE test; - - CREATE TABLE sensor_data (ts timestamp, temperature float, humidity float, volume float, PM10 float, pm25 float, SO2 float, NO2 float, CO float, sensor_id NCHAR(255), area TINYINT, coll_time timestamp); +CREATE DATABASE test; +USE test; +CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP); ``` Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario. @@ -54,6 +47,15 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: ! ### Edit SQL fields +Copy SQL bellow and paste it to the SQL edit area: + +```sql +SELECT + payload +FROM + "sensor/data" +``` + ![TDengine Database EMQX create rule](./emqx/create-rule.webp) ### Add "action handler" @@ -68,97 +70,45 @@ Select "Data to Web Service" and click the "New Resource" button. ### Edit "Resource" -Select "Data to Web Service" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values. +Select "WebHook" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values. ![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) ### Edit "action" -Edit the resource configuration to add the key/value pairing for Authorization. Please refer to the [ TDengine REST API documentation ](https://docs.taosdata.com/reference/rest-api/) for the authorization in details. Enter the rule engine replacement template in the message body. +Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is: +``` +Basic cm9vdDp0YW9zZGF0YQ== +``` + +Please refer to the [ TDengine REST API documentation ](/reference/rest-api/) for the authorization in details. 
+ +Enter the rule engine replacement template in the message body: + +```sql +INSERT INTO test.sensor_data VALUES( + now, + ${payload.temperature}, + ${payload.humidity}, + ${payload.volume}, + ${payload.PM10}, + ${payload.pm25}, + ${payload.SO2}, + ${payload.NO2}, + ${payload.CO}, + '${payload.id}', + ${payload.area}, + ${payload.ts} +) +``` ![TDengine Database EMQX edit action](./emqx/edit-action.webp) +Finally, click the "Create" button at bottom left corner saving the rule. ## Compose program to mock data ```javascript - // mock.js - const mqtt = require('mqtt') - const Mock = require('mockjs') - const EMQX_SERVER = 'mqtt://localhost:1883' - const CLIENT_NUM = 10 - const STEP = 5000 // Data interval in ms - const AWAIT = 5000 // Sleep time after data be written once to avoid data writing too fast - const CLIENT_POOL = [] - startMock() - function sleep(timer = 100) { - return new Promise(resolve => { - setTimeout(resolve, timer) - }) - } - async function startMock() { - const now = Date.now() - for (let i = 0; i < CLIENT_NUM; i++) { - const client = await createClient(`mock_client_${i}`) - CLIENT_POOL.push(client) - } - // last 24h every 5s - const last = 24 * 3600 * 1000 - for (let ts = now - last; ts <= now; ts += STEP) { - for (const client of CLIENT_POOL) { - const mockData = generateMockData() - const data = { - ...mockData, - id: client.clientId, - area: 0, - ts, - } - client.publish('sensor/data', JSON.stringify(data)) - } - const dateStr = new Date(ts).toLocaleTimeString() - console.log(`${dateStr} send success.`) - await sleep(AWAIT) - } - console.log(`Done, use ${(Date.now() - now) / 1000}s`) - } - /** - * Init a virtual mqtt client - * @param {string} clientId ClientID - */ - function createClient(clientId) { - return new Promise((resolve, reject) => { - const client = mqtt.connect(EMQX_SERVER, { - clientId, - }) - client.on('connect', () => { - console.log(`client ${clientId} connected`) - resolve(client) - }) - client.on('reconnect', () => { - console.log('reconnect') - }) - client.on('error', (e) => { - console.error(e) - reject(e) - }) - }) - } - /** - * Generate mock data - */ - function generateMockData() { - return { - "temperature": parseFloat(Mock.Random.float(22, 100).toFixed(2)), - "humidity": parseFloat(Mock.Random.float(12, 86).toFixed(2)), - "volume": parseFloat(Mock.Random.float(20, 200).toFixed(2)), - "PM10": parseFloat(Mock.Random.float(0, 300).toFixed(2)), - "pm25": parseFloat(Mock.Random.float(0, 300).toFixed(2)), - "SO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)), - "NO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)), - "CO": parseFloat(Mock.Random.float(0, 50).toFixed(2)), - "area": Mock.Random.integer(0, 20), - "ts": 1596157444170, - } - } +${{#include docs-examples/other/mock.js}} ``` Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test to avoid hardware performance be not capable to handle a more significant number of concurrent clients. 
diff --git a/docs-examples/other/mock.js b/docs-examples/other/mock.js new file mode 100644 index 0000000000..136c5afa96 --- /dev/null +++ b/docs-examples/other/mock.js @@ -0,0 +1,78 @@ +// mock.js +const mqtt = require('mqtt') +const Mock = require('mockjs') +const EMQX_SERVER = 'mqtt://localhost:1883' +const CLIENT_NUM = 10 +const STEP = 5000 // Data interval in ms +const AWAIT = 5000 // Sleep time after data be written once to avoid data writing too fast +const CLIENT_POOL = [] +startMock() +function sleep(timer = 100) { + return new Promise(resolve => { + setTimeout(resolve, timer) + }) +} +async function startMock() { + const now = Date.now() + for (let i = 0; i < CLIENT_NUM; i++) { + const client = await createClient(`mock_client_${i}`) + CLIENT_POOL.push(client) + } + // last 24h every 5s + const last = 24 * 3600 * 1000 + for (let ts = now - last; ts <= now; ts += STEP) { + for (const client of CLIENT_POOL) { + const mockData = generateMockData() + const data = { + ...mockData, + id: client.clientId, + area: 0, + ts, + } + client.publish('sensor/data', JSON.stringify(data)) + } + const dateStr = new Date(ts).toLocaleTimeString() + console.log(`${dateStr} send success.`) + await sleep(AWAIT) + } + console.log(`Done, use ${(Date.now() - now) / 1000}s`) +} +/** + * Init a virtual mqtt client + * @param {string} clientId ClientID + */ +function createClient(clientId) { + return new Promise((resolve, reject) => { + const client = mqtt.connect(EMQX_SERVER, { + clientId, + }) + client.on('connect', () => { + console.log(`client ${clientId} connected`) + resolve(client) + }) + client.on('reconnect', () => { + console.log('reconnect') + }) + client.on('error', (e) => { + console.error(e) + reject(e) + }) + }) +} +/** +* Generate mock data +*/ +function generateMockData() { + return { + "temperature": parseFloat(Mock.Random.float(22, 100).toFixed(2)), + "humidity": parseFloat(Mock.Random.float(12, 86).toFixed(2)), + "volume": parseFloat(Mock.Random.float(20, 200).toFixed(2)), + "PM10": parseFloat(Mock.Random.float(0, 300).toFixed(2)), + "pm25": parseFloat(Mock.Random.float(0, 300).toFixed(2)), + "SO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)), + "NO2": parseFloat(Mock.Random.float(0, 50).toFixed(2)), + "CO": parseFloat(Mock.Random.float(0, 50).toFixed(2)), + "area": Mock.Random.integer(0, 20), + "ts": 1596157444170, + } +} \ No newline at end of file From 1302eaa3adb78945ce14d28dcd626340bb4cbdb7 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 8 Jun 2022 12:42:34 +0800 Subject: [PATCH 09/14] fix explain issue --- source/libs/command/src/explain.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 1076dfa5b0..a76a6e7811 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -173,6 +173,11 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo pPhysiChildren = partitionPhysiNode->node.pChildren; break; } + case QUERY_NODE_PHYSICAL_PLAN_MERGE: { + SMergePhysiNode *mergePhysiNode = (SMergePhysiNode *)pNode; + pPhysiChildren = mergePhysiNode->node.pChildren; + break; + } default: qError("not supported physical node type %d", pNode->type); QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR); From 5601729eabc35880745a986f37038f0708db22b8 Mon Sep 17 00:00:00 2001 From: dingbo Date: Wed, 8 Jun 2022 12:57:22 +0800 Subject: [PATCH 10/14] docs: fix emqx-broker.md --- docs-cn/20-third-party/09-emq-broker.md | 2 +- docs-en/20-third-party/09-emq-broker.md | 2 +- 2 files 
changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs-cn/20-third-party/09-emq-broker.md b/docs-cn/20-third-party/09-emq-broker.md
index 5e5a065497..2125545f39 100644
--- a/docs-cn/20-third-party/09-emq-broker.md
+++ b/docs-cn/20-third-party/09-emq-broker.md
@@ -116,7 +116,7 @@ INSERT INTO test.sensor_data VALUES(
## 编写模拟测试程序

```javascript
-${{#include docs-examples/other/mock.js}}
+{{#include docs-examples/other/mock.js}}
```

注意：代码中 CLIENT_NUM 在开始测试中可以先设置一个较小的值，避免硬件性能不能完全处理较大并发客户端数量。

diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md
index bd2fcbe9a3..7c6b83cf99 100644
--- a/docs-en/20-third-party/09-emq-broker.md
+++ b/docs-en/20-third-party/09-emq-broker.md
@@ -108,7 +108,7 @@ Finally, click the "Create" button at bottom left corner saving the rule.
## Compose program to mock data

```javascript
-${{#include docs-examples/other/mock.js}}
+{{#include docs-examples/other/mock.js}}
```

Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test to avoid hardware performance be not capable to handle a more significant number of concurrent clients.

From b00f6011ed4a19e6d5c8094307e02f73f4c8fa85 Mon Sep 17 00:00:00 2001
From: Yu Chen <74105241+yu285@users.noreply.github.com>
Date: Wed, 8 Jun 2022 13:39:49 +0800
Subject: [PATCH 11/14] docs:update taosBenchmark

---
 docs-en/14-reference/05-taosbenchmark.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs-en/14-reference/05-taosbenchmark.md b/docs-en/14-reference/05-taosbenchmark.md
index b029f3d3ee..7cf1f95eb1 100644
--- a/docs-en/14-reference/05-taosbenchmark.md
+++ b/docs-en/14-reference/05-taosbenchmark.md
@@ -21,7 +21,7 @@ There are two ways to install taosBenchmark:

### Configuration and running methods

-taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f ` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
+taosBenchmark needs to be executed in a terminal of the operating system. It supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f ` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.

taosBenchmark supports complete performance testing of TDengine. taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of the TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file.
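To make the `filetype` mechanism above more concrete, here is a rough sketch of what a query-test configuration file might look like. Only `filetype` is taken from the text above; the other key names and values are illustrative assumptions, so check the configuration file parameter sections of this reference for the exact fields supported by your taosBenchmark version.

```json
{
  "filetype": "query",
  "host": "localhost",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "databases": "test",
  "specified_table_query": {
    "query_times": 10,
    "sqls": [
      {
        "sql": "SELECT COUNT(*) FROM test.sensor_data",
        "result": "./query_result.txt"
      }
    ]
  }
}
```

Such a file would then be passed to taosBenchmark with `-f`, while the command-line mode remains limited to write tests as described above.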
From 9f5b21a841becd10781aa5229ee51210fba1c81d Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Wed, 8 Jun 2022 13:36:38 +0800 Subject: [PATCH 12/14] docs:update the taosBenchmark --- docs-cn/14-reference/05-taosbenchmark.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-cn/14-reference/05-taosbenchmark.md b/docs-cn/14-reference/05-taosbenchmark.md index f34d12a546..6b694543b1 100644 --- a/docs-cn/14-reference/05-taosbenchmark.md +++ b/docs-cn/14-reference/05-taosbenchmark.md @@ -21,7 +21,7 @@ taosBenchmark 有两种安装方式: ### 配置和运行方式 -taosBenchmark 支持两种配置方式:[命令行参数](#命令行参数详解) 和 [JSON 配置文件](#配置文件参数详解)。这两种方式是互斥的,在使用配置文件时只能使用一个命令行参数 `-f ` 指定配置文件。在使用命令行参数运行 taosBenchmark 并控制其行为时则不能使用 `-f` 参数而要用其它参数来进行配置。除此之外,taosBenchmark 还提供了一种特殊的运行方式,即无参数运行。 +taosBenchmark 需要在操作系统的终端执行,该工具支持两种配置方式:[命令行参数](#命令行参数详解) 和 [JSON 配置文件](#配置文件参数详解)。这两种方式是互斥的,在使用配置文件时只能使用一个命令行参数 `-f ` 指定配置文件。在使用命令行参数运行 taosBenchmark 并控制其行为时则不能使用 `-f` 参数而要用其它参数来进行配置。除此之外,taosBenchmark 还提供了一种特殊的运行方式,即无参数运行。 taosBenchmark 支持对 TDengine 做完备的性能测试,其所支持的 TDengine 功能分为三大类:写入、查询和订阅。这三种功能之间是互斥的,每次运行 taosBenchmark 只能选择其中之一。值得注意的是,所要测试的功能类型在使用命令行配置方式时是不可配置的,命令行配置方式只能测试写入性能。若要测试 TDengine 的查询和订阅性能,必须使用配置文件的方式,通过配置文件中的参数 `filetype` 指定所要测试的功能类型。 From 8e4690455d0695fb247401d8d9ad8268e07d9f6a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 8 Jun 2022 14:06:42 +0800 Subject: [PATCH 13/14] fix:return false in is ture operator if result is null --- source/common/src/tdatablock.c | 2 ++ source/libs/scalar/src/sclvector.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d9293433ea..23b9bd0f97 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1130,6 +1130,7 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, size_t existRows, ui if (IS_VAR_DATA_TYPE(pColumn->info.type)) { char* tmp = taosMemoryRealloc(pColumn->varmeta.offset, sizeof(int32_t) * numOfRows); + if (tmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -1155,6 +1156,7 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, size_t existRows, ui if (tmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } + memset(tmp + pColumn->info.bytes * existRows, 0, pColumn->info.bytes * (numOfRows - existRows)); pColumn->pData = tmp; } diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index 59208de3c4..7440a6ae5b 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -633,7 +633,8 @@ int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut) { if (IS_VAR_DATA_TYPE(inType)) { return vectorConvertFromVarData(pIn, pOut, inType, outType); } - + + pOut->numOfRows = pIn->numOfRows; switch (outType) { case TSDB_DATA_TYPE_BOOL: { for (int32_t i = 0; i < pIn->numOfRows; ++i) { From b2d066521b141ab4f4f1e2f4915f4e23f3c5653d Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Wed, 8 Jun 2022 14:58:50 +0800 Subject: [PATCH 14/14] feat: add group id to multiway-sort-merge operator --- source/libs/executor/inc/tsort.h | 6 ++ source/libs/executor/src/sortoperator.c | 74 ++++++++++++++++++++++++- source/libs/executor/src/tsort.c | 4 ++ tests/script/tsim/query/scalarNull.sim | 1 + 4 files changed, 84 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index c8b1b3ee51..fd3581e2bf 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -130,6 +130,12 @@ bool 
tsortIsNullVal(STupleHandle* pVHandle, int32_t colId); */ void* tsortGetValue(STupleHandle* pVHandle, int32_t colId); +/** + * + * @param pVHandle + * @return + */ +uint64_t tsortGetGroupId(STupleHandle* pVHandle); /** * * @param pSortHandle diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 3034f409f7..95f9514b07 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -231,6 +231,10 @@ typedef struct SMultiwaySortMergeOperatorInfo { SSDataBlock* pInputBlock; int64_t startTs; // sort start time + + bool hasGroupId; + uint64_t groupId; + STupleHandle *prefetchedTuple; } SMultiwaySortMergeOperatorInfo; int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) { @@ -269,6 +273,70 @@ int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) { return TSDB_CODE_SUCCESS; } +SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, + SArray* pColMatchInfo, SMultiwaySortMergeOperatorInfo* pInfo) { + blockDataCleanup(pDataBlock); + + SSDataBlock* p = tsortGetSortedDataBlock(pHandle); + if (p == NULL) { + return NULL; + } + + blockDataEnsureCapacity(p, capacity); + + while (1) { + + STupleHandle* pTupleHandle = NULL; + if (pInfo->prefetchedTuple == NULL) { + pTupleHandle = tsortNextTuple(pHandle); + } else { + pTupleHandle = pInfo->prefetchedTuple; + pInfo->prefetchedTuple = NULL; + } + + if (pTupleHandle == NULL) { + break; + } + + uint64_t tupleGroupId = tsortGetGroupId(pTupleHandle); + if (!pInfo->hasGroupId) { + pInfo->groupId = tupleGroupId; + pInfo->hasGroupId = true; + appendOneRowToDataBlock(p, pTupleHandle); + } else if (pInfo->groupId == tupleGroupId) { + appendOneRowToDataBlock(p, pTupleHandle); + } else { + pInfo->prefetchedTuple = pTupleHandle; + pInfo->groupId = tupleGroupId; + break; + } + + if (p->info.rows >= capacity) { + break; + } + + } + + if (p->info.rows > 0) { + int32_t numOfCols = taosArrayGetSize(pColMatchInfo); + for (int32_t i = 0; i < numOfCols; ++i) { + SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, i); + ASSERT(pmInfo->matchType == COL_MATCH_FROM_SLOT_ID); + + SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, pmInfo->srcSlotId); + SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->targetSlotId); + colDataAssign(pDst, pSrc, p->info.rows); + } + + pDataBlock->info.rows = p->info.rows; + pDataBlock->info.capacity = p->info.rows; + } + + blockDataDestroy(p); + return (pDataBlock->info.rows > 0) ? 
pDataBlock : NULL; +} + + SSDataBlock* doMultiwaySortMerge(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -283,7 +351,11 @@ SSDataBlock* doMultiwaySortMerge(SOperatorInfo* pOperator) { } SSDataBlock* pBlock = - getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo); + getMultiwaySortedBlockData(pInfo->pSortHandle, + pInfo->binfo.pRes, + pOperator->resultInfo.capacity, + pInfo->pColMatchInfo, + pInfo); if (pBlock != NULL) { pOperator->resultInfo.totalRows += pBlock->info.rows; diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 3705d0a57b..846da4a32a 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -709,6 +709,10 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) { return colDataGetData(pColInfo, pVHandle->rowIndex); } +uint64_t tsortGetGroupId(STupleHandle* pVHandle) { + return pVHandle->pBlock->info.groupId; +} + SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) { SSortExecInfo info = {0}; diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim index b08ac1d3d9..07bd5e57cd 100644 --- a/tests/script/tsim/query/scalarNull.sim +++ b/tests/script/tsim/query/scalarNull.sim @@ -66,6 +66,7 @@ if $rows != 0 then return -1 endi sql select * from tb1 where null; +print $rows if $rows != 0 then return -1 endi