From f8bd7789794407a6adcdb72041b563d7b43fbaa4 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 2 Aug 2022 16:18:53 +0800 Subject: [PATCH 01/11] enh: the fill physical scheme distinguishes between interpolated and non-interpolated expressions --- include/libs/nodes/plannodes.h | 5 +- include/libs/nodes/querynodes.h | 9 ++- source/libs/executor/src/executorimpl.c | 20 ++--- source/libs/nodes/src/nodesCloneFuncs.c | 2 + source/libs/nodes/src/nodesCodeFuncs.c | 22 ++++-- source/libs/nodes/src/nodesTraverseFuncs.c | 2 + source/libs/nodes/src/nodesUtilFuncs.c | 3 +- source/libs/planner/src/planLogicCreater.c | 78 ++++++++++++++++--- source/libs/planner/src/planPhysiCreater.c | 18 ++++- source/libs/planner/test/planIntervalTest.cpp | 5 +- source/util/src/terror.c | 1 - 11 files changed, 131 insertions(+), 34 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 2f6bb603c1..c06b4411e6 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -211,6 +211,8 @@ typedef struct SWindowLogicNode { typedef struct SFillLogicNode { SLogicNode node; EFillMode mode; + SNodeList* pFillExprs; + SNodeList* pNotFillExprs; SNode* pWStartTs; SNode* pValues; // SNodeListNode STimeWindow timeRange; @@ -435,9 +437,10 @@ typedef SIntervalPhysiNode SStreamSemiIntervalPhysiNode; typedef struct SFillPhysiNode { SPhysiNode node; EFillMode mode; + SNodeList* pFillExprs; + SNodeList* pNotFillExprs; SNode* pWStartTs; // SColumnNode SNode* pValues; // SNodeListNode - SNodeList* pTargets; STimeWindow timeRange; EOrder inputTsOrder; } SFillPhysiNode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index cd2a7d3f9f..a452388052 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -53,7 +53,13 @@ typedef struct SExprNode { bool orderAlias; } SExprNode; -typedef enum EColumnType { COLUMN_TYPE_COLUMN = 1, COLUMN_TYPE_TAG, COLUMN_TYPE_TBNAME } EColumnType; +typedef enum EColumnType { + COLUMN_TYPE_COLUMN = 1, + COLUMN_TYPE_TAG, + COLUMN_TYPE_TBNAME, + COLUMN_TYPE_WINDOW_PC, + COLUMN_TYPE_GROUP_KEY +} EColumnType; typedef struct SColumnNode { SExprNode node; // QUERY_NODE_COLUMN @@ -291,6 +297,7 @@ typedef enum ESqlClause { SQL_CLAUSE_WHERE, SQL_CLAUSE_PARTITION_BY, SQL_CLAUSE_WINDOW, + SQL_CLAUSE_FILL, SQL_CLAUSE_GROUP_BY, SQL_CLAUSE_HAVING, SQL_CLAUSE_DISTINCT, diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index e52cbf40a9..3c9969a3be 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3596,7 +3596,8 @@ void doDestroyExchangeOperatorInfo(void* param) { } static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t numOfCols, SNodeListNode* pValNode, - STimeWindow win, int32_t capacity, const char* id, SInterval* pInterval, int32_t fillType, int32_t order) { + STimeWindow win, int32_t capacity, const char* id, SInterval* pInterval, int32_t fillType, + int32_t order) { SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pValNode); STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey); @@ -3627,13 +3628,13 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* int32_t num = 0; SSDataBlock* pResBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc); - SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pTargets, NULL, &num); + SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &num); 
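/* For a query like
 *   SELECT _WSTART, TBNAME, COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s) FILL(PREV)
 * COUNT(*) goes into pFillExprs, since the fill algorithm must produce its value for
 * generated rows, while _WSTART (a window pseudo-column) and TBNAME (the partition
 * key) are intended for pNotFillExprs and are derived from the window/group rather
 * than filled (see partFillExprs in planLogicCreater.c and the planIntervalTest case
 * updated in this patch). */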
SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval : &((SIntervalAggOperatorInfo*)downstream->info)->interval; - int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC)? TSDB_ORDER_ASC:TSDB_ORDER_DESC; + int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; int32_t type = convertFillType(pPhyFillNode->mode); SResultInfo* pResultInfo = &pOperator->resultInfo; @@ -3641,7 +3642,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pInfo->primaryTsCol = ((SColumnNode*)pPhyFillNode->pWStartTs)->slotId; int32_t numOfOutputCols = 0; - SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pTargets, pPhyFillNode->node.pOutputDataBlockDesc, + SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID); int32_t code = initFillInfo(pInfo, pExprInfo, num, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, @@ -3835,7 +3836,7 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo, int32_t groupNum) return TDB_CODE_SUCCESS; } -bool groupbyTbname(SNodeList* pGroupList) { +bool groupbyTbname(SNodeList* pGroupList) { bool bytbname = false; if (LIST_LENGTH(pGroupList) > 0) { SNode* p = nodesListGetNode(pGroupList, 0); @@ -3877,7 +3878,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, bool assignUid = groupbyTbname(group); int32_t groupNum = 0; - size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); + size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); for (int32_t i = 0; i < numOfTables; i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); @@ -4619,7 +4620,7 @@ void releaseQueryBuf(size_t numOfTables) { } int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList) { - SExplainExecInfo execInfo = {0}; + SExplainExecInfo execInfo = {0}; SExplainExecInfo* pExplainInfo = taosArrayPush(pExecInfoList, &execInfo); pExplainInfo->numOfRows = operatorInfo->resultInfo.totalRows; @@ -4629,7 +4630,8 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf pExplainInfo->verboseInfo = NULL; if (operatorInfo->fpSet.getExplainFn) { - int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pExplainInfo->verboseInfo, &pExplainInfo->verboseLen); + int32_t code = + operatorInfo->fpSet.getExplainFn(operatorInfo, &pExplainInfo->verboseInfo, &pExplainInfo->verboseLen); if (code) { qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code)); return code; @@ -4640,7 +4642,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf for (int32_t i = 0; i < operatorInfo->numOfDownstream; ++i) { code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pExecInfoList); if (code != TSDB_CODE_SUCCESS) { -// taosMemoryFreeClear(*pRes); + // taosMemoryFreeClear(*pRes); return TSDB_CODE_QRY_OUT_OF_MEMORY; } } diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 79ef18eeb6..9449f66428 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -450,6 +450,8 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* p static int32_t logicFillCopy(const SFillLogicNode* pSrc, 
SFillLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); COPY_SCALAR_FIELD(mode); + CLONE_NODE_LIST_FIELD(pFillExprs); + CLONE_NODE_LIST_FIELD(pNotFillExprs); CLONE_NODE_FIELD(pWStartTs); CLONE_NODE_FIELD(pValues); COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow)); diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 9d15b01acf..0fb063891b 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2062,9 +2062,10 @@ static int32_t jsonToPhysiIntervalNode(const SJson* pJson, void* pObj) { } static const char* jkFillPhysiPlanMode = "Mode"; +static const char* jkFillPhysiPlanFillExprs = "FillExprs"; +static const char* jkFillPhysiPlanNotFillExprs = "NotFillExprs"; static const char* jkFillPhysiPlanWStartTs = "WStartTs"; static const char* jkFillPhysiPlanValues = "Values"; -static const char* jkFillPhysiPlanTargets = "Targets"; static const char* jkFillPhysiPlanStartTime = "StartTime"; static const char* jkFillPhysiPlanEndTime = "EndTime"; static const char* jkFillPhysiPlanInputTsOrder = "inputTsOrder"; @@ -2076,15 +2077,18 @@ static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanMode, pNode->mode); } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkFillPhysiPlanFillExprs, pNode->pFillExprs); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkFillPhysiPlanNotFillExprs, pNode->pNotFillExprs); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkFillPhysiPlanWStartTs, nodeToJson, pNode->pWStartTs); } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkFillPhysiPlanValues, nodeToJson, pNode->pValues); } - if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkFillPhysiPlanTargets, pNode->pTargets); - } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanStartTime, pNode->timeRange.skey); } @@ -2104,7 +2108,12 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) { int32_t code = jsonToPhysicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { tjsonGetNumberValue(pJson, jkFillPhysiPlanMode, pNode->mode, code); - ; + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkFillPhysiPlanFillExprs, &pNode->pFillExprs); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkFillPhysiPlanNotFillExprs, &pNode->pNotFillExprs); } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkFillPhysiPlanWStartTs, &pNode->pWStartTs); @@ -2112,9 +2121,6 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkFillPhysiPlanValues, &pNode->pValues); } - if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeList(pJson, jkFillPhysiPlanTargets, &pNode->pTargets); - } if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBigIntValue(pJson, jkFillPhysiPlanStartTime, &pNode->timeRange.skey); } diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c index 77681af1bc..2e23998aad 100644 --- a/source/libs/nodes/src/nodesTraverseFuncs.c +++ b/source/libs/nodes/src/nodesTraverseFuncs.c @@ -346,6 +346,7 @@ void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker wa if (NULL != pSelect->pWindow && QUERY_NODE_INTERVAL_WINDOW == nodeType(pSelect->pWindow)) { nodesWalkExpr(((SIntervalWindowNode*)pSelect->pWindow)->pFill, walker, 
pContext); } + case SQL_CLAUSE_FILL: nodesWalkExprs(pSelect->pGroupByList, walker, pContext); case SQL_CLAUSE_GROUP_BY: nodesWalkExpr(pSelect->pHaving, walker, pContext); @@ -379,6 +380,7 @@ void nodesRewriteSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeRewrit if (NULL != pSelect->pWindow && QUERY_NODE_INTERVAL_WINDOW == nodeType(pSelect->pWindow)) { nodesRewriteExpr(&(((SIntervalWindowNode*)pSelect->pWindow)->pFill), rewriter, pContext); } + case SQL_CLAUSE_FILL: nodesRewriteExprs(pSelect->pGroupByList, rewriter, pContext); case SQL_CLAUSE_GROUP_BY: nodesRewriteExpr(&(pSelect->pHaving), rewriter, pContext); diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index abab2126c0..638c600cef 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -931,9 +931,10 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_PHYSICAL_PLAN_FILL: { SFillPhysiNode* pPhyNode = (SFillPhysiNode*)pNode; destroyPhysiNode((SPhysiNode*)pPhyNode); + nodesDestroyList(pPhyNode->pFillExprs); + nodesDestroyList(pPhyNode->pNotFillExprs); nodesDestroyNode(pPhyNode->pWStartTs); nodesDestroyNode(pPhyNode->pValues); - nodesDestroyList(pPhyNode->pTargets); break; } case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index b51624336b..4020e92a42 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -38,6 +38,27 @@ typedef struct SRewriteExprCxt { SNodeList* pExprs; } SRewriteExprCxt; +static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol) { + switch (pFunc->funcType) { + case FUNCTION_TYPE_TBNAME: + pCol->colType = COLUMN_TYPE_TBNAME; + break; + case FUNCTION_TYPE_WSTART: + case FUNCTION_TYPE_WEND: + pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; + pCol->colType = COLUMN_TYPE_WINDOW_PC; + break; + case FUNCTION_TYPE_WDURATION: + pCol->colType = COLUMN_TYPE_WINDOW_PC; + break; + case FUNCTION_TYPE_GROUP_KEY: + pCol->colType = COLUMN_TYPE_GROUP_KEY; + break; + default: + break; + } +} + static EDealRes doRewriteExpr(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_OPERATOR: @@ -60,11 +81,7 @@ static EDealRes doRewriteExpr(SNode** pNode, void* pContext) { strcpy(pCol->node.aliasName, pToBeRewrittenExpr->aliasName); strcpy(pCol->colName, ((SExprNode*)pExpr)->aliasName); if (QUERY_NODE_FUNCTION == nodeType(pExpr)) { - if (FUNCTION_TYPE_WSTART == ((SFunctionNode*)pExpr)->funcType) { - pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; - } else if (FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pExpr)->funcType) { - pCol->colType = COLUMN_TYPE_TBNAME; - } + setColumnInfo((SFunctionNode*)pExpr, pCol); } nodesDestroyNode(*pNode); *pNode = (SNode*)pCol; @@ -746,6 +763,41 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele return TSDB_CODE_FAILED; } +static EDealRes needFillValueImpl(SNode* pNode, void* pContext) { + if (QUERY_NODE_COLUMN == nodeType(pNode)) { + SColumnNode* pCol = (SColumnNode*)pNode; + if (COLUMN_TYPE_WINDOW_PC != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) { + *(bool*)pContext = true; + return DEAL_RES_END; + } + } + return DEAL_RES_CONTINUE; +} + +static bool needFillValue(SNode* pNode) { + bool hasFillCol = false; + nodesWalkExpr(pNode, needFillValueImpl, &hasFillCol); + return hasFillCol; +} + +static int32_t partFillExprs(SNodeList* pProjectionList, SNodeList** pFillExprs, SNodeList** pNotFillExprs) { + 
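  // Split the SELECT list in two: an expression referencing anything other than a
  // window pseudo-column (_wstart/_wend/_wduration) or a group key needs a value
  // filled in for generated rows; everything else can simply be re-derived.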
int32_t code = TSDB_CODE_SUCCESS; + SNode* pProject = NULL; + FOREACH(pProject, pProjectionList) { + if (needFillValue(pProject)) { + code = nodesListMakeStrictAppend(pFillExprs, nodesCloneNode(pProject)); + } else { + code = nodesListMakeStrictAppend(pNotFillExprs, nodesCloneNode(pProject)); + } + if (TSDB_CODE_SUCCESS != code) { + NODES_DESTORY_LIST(*pFillExprs); + NODES_DESTORY_LIST(*pNotFillExprs); + break; + } + } + return code; +} + static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) { if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow) || NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) { @@ -767,10 +819,18 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pFill->node.resultDataOrder = DATA_ORDER_LEVEL_IN_GROUP; pFill->inputTsOrder = ORDER_ASC; - int32_t code = nodesCollectColumns(pSelect, SQL_CLAUSE_WINDOW, NULL, COLLECT_COL_TYPE_ALL, &pFill->node.pTargets); - if (TSDB_CODE_SUCCESS == code && NULL == pFill->node.pTargets) { - code = nodesListMakeStrictAppend(&pFill->node.pTargets, - nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0))); + int32_t code = partFillExprs(pSelect->pProjectionList, &pFill->pFillExprs, &pFill->pNotFillExprs); + if (TSDB_CODE_SUCCESS == code) { + code = rewriteExprsForSelect(pFill->pFillExprs, pSelect, SQL_CLAUSE_FILL); + } + if (TSDB_CODE_SUCCESS == code) { + code = rewriteExprsForSelect(pFill->pNotFillExprs, pSelect, SQL_CLAUSE_FILL); + } + if (TSDB_CODE_SUCCESS == code) { + code = createColumnByRewriteExprs(pFill->pFillExprs, &pFill->node.pTargets); + } + if (TSDB_CODE_SUCCESS == code) { + code = createColumnByRewriteExprs(pFill->pNotFillExprs, &pFill->node.pTargets); } pFill->mode = pFillNode->mode; diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 3771586b34..da46b402c8 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -311,6 +311,10 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) { static int32_t setNodeSlotId(SPhysiPlanContext* pCxt, int16_t leftDataBlockId, int16_t rightDataBlockId, SNode* pNode, SNode** pOutput) { + if (NULL == pNode) { + return TSDB_CODE_SUCCESS; + } + SNode* pRes = nodesCloneNode(pNode); if (NULL == pRes) { return TSDB_CODE_OUT_OF_MEMORY; @@ -332,6 +336,10 @@ static int32_t setNodeSlotId(SPhysiPlanContext* pCxt, int16_t leftDataBlockId, i static int32_t setListSlotId(SPhysiPlanContext* pCxt, int16_t leftDataBlockId, int16_t rightDataBlockId, const SNodeList* pList, SNodeList** pOutput) { + if (NULL == pList) { + return TSDB_CODE_SUCCESS; + } + SNodeList* pRes = nodesCloneList(pList); if (NULL == pRes) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1368,9 +1376,15 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren pFill->inputTsOrder = pFillNode->inputTsOrder; SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc); - int32_t code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->node.pTargets, &pFill->pTargets); + int32_t code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pFillExprs, &pFill->pFillExprs); if (TSDB_CODE_SUCCESS == code) { - code = addDataBlockSlots(pCxt, pFill->pTargets, pFill->node.pOutputDataBlockDesc); + code = addDataBlockSlots(pCxt, pFill->pFillExprs, pFill->node.pOutputDataBlockDesc); + } + if (TSDB_CODE_SUCCESS == code) { + code = 
setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pNotFillExprs, &pFill->pNotFillExprs); + } + if (TSDB_CODE_SUCCESS == code) { + code = addDataBlockSlots(pCxt, pFill->pNotFillExprs, pFill->node.pOutputDataBlockDesc); } if (TSDB_CODE_SUCCESS == code) { diff --git a/source/libs/planner/test/planIntervalTest.cpp b/source/libs/planner/test/planIntervalTest.cpp index 30f7051722..674603310a 100644 --- a/source/libs/planner/test/planIntervalTest.cpp +++ b/source/libs/planner/test/planIntervalTest.cpp @@ -45,8 +45,9 @@ TEST_F(PlanIntervalTest, fill) { "WHERE ts > TIMESTAMP '2022-04-01 00:00:00' and ts < TIMESTAMP '2022-04-30 23:59:59' " "INTERVAL(10s) FILL(VALUE, 10, 20)"); - run("SELECT COUNT(*) FROM st1 WHERE ts > TIMESTAMP '2022-04-01 00:00:00' and ts < TIMESTAMP '2022-04-30 23:59:59' " - "PARTITION BY TBNAME interval(10s) fill(prev)"); + run("SELECT _WSTART, TBNAME, COUNT(*) FROM st1 " + "WHERE ts > '2022-04-01 00:00:00' and ts < '2022-04-30 23:59:59' " + "PARTITION BY TBNAME INTERVAL(10s) FILL(PREV)"); } TEST_F(PlanIntervalTest, selectFunc) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 51dfa1ce13..b84b22c2e8 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -463,7 +463,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FS_APP_ERROR, "tfs out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_CTG_INTERNAL_ERROR, "catalog internal error") TAOS_DEFINE_ERROR(TSDB_CODE_CTG_INVALID_INPUT, "invalid catalog input parameters") TAOS_DEFINE_ERROR(TSDB_CODE_CTG_NOT_READY, "catalog is not ready") -TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_MEMORY, "catalog memory error") TAOS_DEFINE_ERROR(TSDB_CODE_CTG_SYS_ERROR, "catalog system error") TAOS_DEFINE_ERROR(TSDB_CODE_CTG_DB_DROPPED, "Database is dropped") TAOS_DEFINE_ERROR(TSDB_CODE_CTG_OUT_OF_SERVICE, "catalog is out of service") From bf08d7dd0b7a1d811a87d5e416a9f49fc44d6990 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 6 Aug 2022 15:23:03 +0800 Subject: [PATCH 02/11] fix(query): fix bug in fill --- source/libs/executor/inc/tfill.h | 12 +- source/libs/executor/src/executorimpl.c | 21 +- source/libs/executor/src/tfill.c | 236 ++++++++---------- source/libs/executor/src/timewindowoperator.c | 2 +- 4 files changed, 117 insertions(+), 154 deletions(-) diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index b604794dad..2d39cd8eb1 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -28,8 +28,9 @@ struct SSDataBlock; typedef struct SFillColInfo { SExprInfo *pExpr; - int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN - int16_t tagIndex; // index of current tag in SFillTagColInfo array list +// int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN + bool notFillCol; // denote if this column needs fill operation +// int16_t tagIndex; // index of current tag in SFillTagColInfo array list SVariant fillVal; } SFillColInfo; @@ -49,10 +50,7 @@ typedef struct SFillInfo { int32_t index; // active row index int32_t numOfTotal; // number of filled rows in one round int32_t numOfCurrent; // number of filled rows in current results - - int32_t numOfTags; // number of tags int32_t numOfCols; // number of columns, including the tags columns - int32_t rowSize; // size of each row SInterval interval; SArray *prev; @@ -71,10 +69,10 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey); void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp); void 
taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput); -struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const struct SNodeListNode* val); +struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, int32_t numOfNotFillCols, const struct SNodeListNode* val); bool taosFillHasMoreResults(struct SFillInfo* pFillInfo); -SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, +SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId, int32_t order, const char* id); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 3c9969a3be..0e575d3552 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3267,8 +3267,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { } } - blockDataEnsureCapacity(pResBlock, pOperator->resultInfo.capacity); - int32_t numOfResultRows = pOperator->resultInfo.capacity - pResBlock->info.rows; taosFillResultDataBlock(pInfo->pFillInfo, pResBlock, numOfResultRows); @@ -3595,16 +3593,16 @@ void doDestroyExchangeOperatorInfo(void* param) { taosMemoryFreeClear(param); } -static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t numOfCols, SNodeListNode* pValNode, - STimeWindow win, int32_t capacity, const char* id, SInterval* pInterval, int32_t fillType, - int32_t order) { - SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pValNode); +static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t numOfCols, SExprInfo* pNotFillExpr, + int32_t numOfNotFillCols, SNodeListNode* pValNode, STimeWindow win, int32_t capacity, + const char* id, SInterval* pInterval, int32_t fillType, int32_t order) { + SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode); STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey); w = getFirstQualifiedTimeWindow(win.skey, &w, pInterval, TSDB_ORDER_ASC); pInfo->pFillInfo = - taosCreateFillInfo(w.skey, 0, capacity, numOfCols, pInterval, fillType, pColInfo, pInfo->primaryTsCol, order, id); + taosCreateFillInfo(w.skey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, pInfo->primaryTsCol, order, id); pInfo->win = win; pInfo->p = taosMemoryCalloc(numOfCols, POINTER_BYTES); @@ -3626,9 +3624,11 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* goto _error; } - int32_t num = 0; + int32_t num = 0, num1 = 0; SSDataBlock* pResBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc); SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &num); + SExprInfo* pCopyColumnExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &num1); + SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType ? 
&((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval @@ -3639,13 +3639,16 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* SResultInfo* pResultInfo = &pOperator->resultInfo; initResultSizeInfo(&pOperator->resultInfo, 4096); + blockDataEnsureCapacity(pResBlock, pOperator->resultInfo.capacity); + pInfo->primaryTsCol = ((SColumnNode*)pPhyFillNode->pWStartTs)->slotId; int32_t numOfOutputCols = 0; SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID); - int32_t code = initFillInfo(pInfo, pExprInfo, num, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, + int32_t code = initFillInfo(pInfo, pExprInfo, num, pCopyColumnExprInfo, num1, + (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order); if (code != TSDB_CODE_SUCCESS) { goto _error; diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index c5d68676d2..c1bcf12cb2 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -33,7 +33,13 @@ #define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) \ ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1)))) +#define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId) +#define GET_SRC_SLOT_ID(_p) ((_p)->pExpr->base.pParam[0].pCol->slotId) + +static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); + static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) { +#if 0 for (int32_t j = 0; j < pFillInfo->numOfCols; ++j) { SFillColInfo* pCol = &pFillInfo->pFillCol[j]; if (TSDB_COL_IS_NORMAL_COL(pCol->flag) || TSDB_COL_IS_UD_COL(pCol->flag)) { @@ -47,25 +53,28 @@ static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) { SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; assignVal(val1, pTag->tagVal, pSchema->bytes, pSchema->type); } +#endif } -static void setNullRow(SSDataBlock* pBlock, int64_t ts, int32_t rowIndex) { - // the first are always the timestamp column, so start from the second column. - for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) { - SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i); - if (p->info.type == TSDB_DATA_TYPE_TIMESTAMP) { // handle timestamp - colDataAppend(p, rowIndex, (const char*)&ts, false); +static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowIndex) { + for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + int32_t dstSlotId = GET_DEST_SLOT_ID(pCol); + SColumnInfoData* pDstColInfo = taosArrayGet(pBlock->pDataBlock, dstSlotId); + if (pCol->notFillCol) { + if (pDstColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + colDataAppend(pDstColInfo, rowIndex, (const char*)&pFillInfo->currentKey, false); + } else { + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev : pFillInfo->next; + SGroupKeys* pKey = taosArrayGet(p, i); + doSetVal(pDstColInfo, rowIndex, pKey); + } } else { - colDataAppendNULL(p, rowIndex); + colDataAppendNULL(pDstColInfo, rowIndex); } } } -#define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId) -#define GET_SRC_SLOT_ID(_p) ((_p)->pExpr->base.pParam[0].pCol->slotId) - -static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); - static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32_t rowIndex, int64_t currentKey) { if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) { float v = 0; @@ -100,9 +109,6 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { - continue; - } SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); @@ -118,10 +124,6 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* // todo refactor: start from 0 not 1 for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { - continue; - } - SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { @@ -134,59 +136,70 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* } else if (pFillInfo->type == TSDB_FILL_LINEAR) { // TODO : linear interpolation supports NULL value if (outOfBound) { - setNullRow(pBlock, pFillInfo->currentKey, index); + setNullRow(pBlock, pFillInfo, index); } else { for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { - continue; - } int32_t dstSlotId = GET_DEST_SLOT_ID(pCol); SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); + int16_t type = pDstCol->info.type; - int16_t type = pDstCol->info.type; - if (type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false); - continue; + if (pCol->notFillCol) { + if (type == TSDB_DATA_TYPE_TIMESTAMP) { + colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false); + } else { + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev : pFillInfo->next; + SGroupKeys* pKey = taosArrayGet(p, i); + doSetVal(pDstCol, index, pKey); + } + } else { + SGroupKeys* pKey = taosArrayGet(pFillInfo->prev, i); + if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pKey->isNull) { + colDataAppendNULL(pDstCol, index); + continue; + } + + SGroupKeys* pKey1 = taosArrayGet(pFillInfo->prev, pFillInfo->tsSlotId); + + int64_t prevTs = *(int64_t*)pKey1->pData; + int32_t srcSlotId = GET_SRC_SLOT_ID(pCol); + + SColumnInfoData* pSrcCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId); + char* data = colDataGetData(pSrcCol, pFillInfo->index); + + point1 = (SPoint){.key = prevTs, .val = pKey->pData}; + point2 = (SPoint){.key = ts, .val = data}; + + int64_t out = 0; + point = (SPoint){.key = pFillInfo->currentKey, .val = &out}; + taosGetLinearInterpolationVal(&point, type, &point1, &point2, type); + + colDataAppend(pDstCol, index, (const char*)&out, false); } - - SGroupKeys* pKey = taosArrayGet(pFillInfo->prev, i); - if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pKey->isNull) { - colDataAppendNULL(pDstCol, index); - continue; - } - - SGroupKeys* pKey1 = taosArrayGet(pFillInfo->prev, pFillInfo->tsSlotId); - - int64_t prevTs = *(int64_t*)pKey1->pData; - int32_t srcSlotId = GET_SRC_SLOT_ID(pCol); - - SColumnInfoData* pSrcCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId); - char* data = colDataGetData(pSrcCol, pFillInfo->index); - - point1 = (SPoint){.key = prevTs, .val = pKey->pData}; - point2 = (SPoint){.key = ts, .val = data}; - - int64_t out = 0; - point = (SPoint){.key = pFillInfo->currentKey, .val = &out}; - taosGetLinearInterpolationVal(&point, type, &point1, &point2, type); - - colDataAppend(pDstCol, index, (const char*)&out, false); } } } else if (pFillInfo->type == TSDB_FILL_NULL) { // fill with NULL - setNullRow(pBlock, pFillInfo->currentKey, index); + setNullRow(pBlock, pFillInfo, index); } else { // fill with user specified value for each column for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { - continue; - } - SVariant* pVar = &pFillInfo->pFillCol[i].fillVal; - SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i); - doSetUserSpecifiedValue(pDst, pVar, index, pFillInfo->currentKey); + int32_t slotId = GET_DEST_SLOT_ID(pCol); + SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, slotId); + + if (pCol->notFillCol) { + if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false); + } else { + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev : pFillInfo->next; + SGroupKeys* pKey = taosArrayGet(p, i); + doSetVal(pDst, index, pKey); + } + } else { + SVariant* pVar = &pFillInfo->pFillCol[i].fillVal; + doSetUserSpecifiedValue(pDst, pVar, index, pFillInfo->currentKey); + } } } @@ -284,12 +297,9 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t copyCurrentRowIntoBuf(pFillInfo, nextRowIndex, pFillInfo->next); } - // assign rows to dst buffer + // copy rows to dst buffer for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag) /* || IS_VAR_DATA_TYPE(pCol->schema.type)*/) { - continue; - } int32_t srcSlotId = GET_SRC_SLOT_ID(pCol); int32_t dstSlotId = GET_DEST_SLOT_ID(pCol); @@ -298,11 +308,10 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t SColumnInfoData* pSrc = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, srcSlotId); char* src = colDataGetData(pSrc, pFillInfo->index); - if (/*i == 0 || (*/ !colDataIsNull_s(pSrc, pFillInfo->index)) { - bool isNull = colDataIsNull_s(pSrc, pFillInfo->index); - colDataAppend(pDst, index, src, isNull); - saveColData(pFillInfo->prev, i, src, isNull); - } else { + if (!colDataIsNull_s(pSrc, pFillInfo->index)) { + colDataAppend(pDst, index, src, false); + saveColData(pFillInfo->prev, i, src, false); + } else { // the value is null if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false); } else { // i > 0 and data is null , do interpolation @@ -357,7 +366,11 @@ static void saveColData(SArray* rowBuf, int32_t columnIndex, const char* src, bo if (isNull) { pKey->isNull = true; } else { - memcpy(pKey->pData, src, pKey->bytes); + if (IS_VAR_DATA_TYPE(pKey->type)) { + memcpy(pKey->pData, src, varDataTLen(src)); + } else { + memcpy(pKey->pData, src, pKey->bytes); + } pKey->isNull = false; } } @@ -378,53 +391,6 @@ static int64_t appendFilledResult(SFillInfo* pFillInfo, SSDataBlock* pBlock, int return resultCapacity; } -// there are no duplicated tags in the SFillTagColInfo list -static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t capacity) { - int32_t rowsize = 0; - int32_t numOfTags = 0; - - int32_t k = 0; - for (int32_t i = 0; i < numOfCols; ++i) { - SFillColInfo* pColInfo = &pFillInfo->pFillCol[i]; - SResSchema* pSchema = &pColInfo->pExpr->base.resSchema; - - if (TSDB_COL_IS_TAG(pColInfo->flag) || pSchema->type == TSDB_DATA_TYPE_BINARY) { - numOfTags += 1; - - bool exists = false; - int32_t index = -1; - for (int32_t j = 0; j < k; ++j) { - if (pFillInfo->pTags[j].col.colId == pSchema->slotId) { - exists = true; - index = j; - break; - } - } - - if (!exists) { - SSchema* pSchema1 = &pFillInfo->pTags[k].col; - pSchema1->colId = pSchema->slotId; - pSchema1->type = pSchema->type; - pSchema1->bytes = pSchema->bytes; - - pFillInfo->pTags[k].tagVal = taosMemoryCalloc(1, pSchema->bytes); - pColInfo->tagIndex = k; - - k += 1; - } else { - pColInfo->tagIndex = index; - } - } - - rowsize += pSchema->bytes; - } - - pFillInfo->numOfTags = numOfTags; - - assert(k <= pFillInfo->numOfTags); - return rowsize; -} - static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { if (pFillInfo->numOfRows == 0 || (pFillInfo->numOfRows > 0 && pFillInfo->index >= pFillInfo->numOfRows)) { return 0; @@ -433,7 +399,7 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { return pFillInfo->numOfRows - pFillInfo->index; } -struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t 
numOfTags, int32_t capacity, int32_t numOfCols, +struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t primaryTsSlotId, int32_t order, const char* id) { if (fillType == TSDB_FILL_NONE) { @@ -476,26 +442,15 @@ struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfTags, int32_t capa pFillInfo->type = fillType; pFillInfo->pFillCol = pCol; - pFillInfo->numOfTags = numOfTags; - pFillInfo->numOfCols = numOfCols; + pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols; pFillInfo->alloc = capacity; pFillInfo->id = id; pFillInfo->interval = *pInterval; - // if (numOfTags > 0) { - pFillInfo->pTags = taosMemoryCalloc(numOfCols, sizeof(SFillTagColInfo)); - for (int32_t i = 0; i < numOfCols; ++i) { - pFillInfo->pTags[i].col.colId = -2; // TODO - } - // } - - pFillInfo->next = taosArrayInit(numOfCols, sizeof(SGroupKeys)); - pFillInfo->prev = taosArrayInit(numOfCols, sizeof(SGroupKeys)); + pFillInfo->next = taosArrayInit(pFillInfo->numOfCols, sizeof(SGroupKeys)); + pFillInfo->prev = taosArrayInit(pFillInfo->numOfCols, sizeof(SGroupKeys)); initBeforeAfterDataBuf(pFillInfo); - - pFillInfo->rowSize = setTagColumnInfo(pFillInfo, pFillInfo->numOfCols, pFillInfo->alloc); - assert(pFillInfo->rowSize > 0); return pFillInfo; } @@ -524,9 +479,9 @@ void* taosDestroyFillInfo(SFillInfo* pFillInfo) { } taosArrayDestroy(pFillInfo->next); - for (int32_t i = 0; i < pFillInfo->numOfTags; ++i) { - taosMemoryFreeClear(pFillInfo->pTags[i].tagVal); - } +// for (int32_t i = 0; i < pFillInfo->numOfTags; ++i) { +// taosMemoryFreeClear(pFillInfo->pTags[i].tagVal); +// } taosMemoryFreeClear(pFillInfo->pTags); taosMemoryFreeClear(pFillInfo->pFillCol); @@ -642,17 +597,18 @@ int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, SSDataBlock* p, int32_t ca int64_t getFillInfoStart(struct SFillInfo* pFillInfo) { return pFillInfo->start; } -SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const struct SNodeListNode* pValNode) { - SFillColInfo* pFillCol = taosMemoryCalloc(numOfOutput, sizeof(SFillColInfo)); +SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, + int32_t numOfNotFillExpr, const struct SNodeListNode* pValNode) { + SFillColInfo* pFillCol = taosMemoryCalloc(numOfFillExpr + numOfNotFillExpr, sizeof(SFillColInfo)); if (pFillCol == NULL) { return NULL; } size_t len = (pValNode != NULL) ? 
LIST_LENGTH(pValNode->pNodeList) : 0; - for (int32_t i = 0; i < numOfOutput; ++i) { + for (int32_t i = 0; i < numOfFillExpr; ++i) { SExprInfo* pExprInfo = &pExpr[i]; pFillCol[i].pExpr = pExprInfo; - pFillCol[i].tagIndex = -2; + pFillCol[i].notFillCol = false; // todo refactor if (len > 0) { @@ -664,9 +620,15 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const str } if (pExprInfo->base.numOfParams > 0) { - pFillCol[i].flag = pExprInfo->base.pParam[0].pCol->flag; // always be the normal column for table query +// pFillCol[i].flag = pExprInfo->base.pParam[0].pCol->flag; // always be the normal column for table query } } + for(int32_t i = 0; i < numOfNotFillExpr; ++i) { + SExprInfo* pExprInfo = &pNotFillExpr[i]; + pFillCol[i + numOfFillExpr].pExpr = pExprInfo; + pFillCol[i + numOfFillExpr].notFillCol = true; + } + return pFillCol; } \ No newline at end of file diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 802e1f2306..928a276c29 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -2359,7 +2359,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode pInfo->fillType = convertFillType(pInterpPhyNode->fillMode); initResultSizeInfo(&pOperator->resultInfo, 4096); - pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, (SNodeListNode*)pInterpPhyNode->pFillValues); + pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, NULL, 0, (SNodeListNode*)pInterpPhyNode->pFillValues); pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc); pInfo->win = pInterpPhyNode->timeRange; pInfo->interval.interval = pInterpPhyNode->interval; From c054ed889b8e163d569e0f77a915242e625b094b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 6 Aug 2022 15:26:34 +0800 Subject: [PATCH 03/11] other: merge 3.0 --- include/common/systable.h | 60 ++++++++-------- include/common/tdatablock.h | 1 + include/common/tgrant.h | 8 +-- include/common/tmsg.h | 12 ++-- include/libs/executor/executor.h | 7 ++ include/libs/nodes/cmdnodes.h | 5 +- include/libs/nodes/plannodes.h | 3 + include/libs/nodes/querynodes.h | 1 + include/libs/qcom/query.h | 1 + include/libs/stream/tstream.h | 119 +++++++++++++------------------ include/libs/sync/sync.h | 8 +-- include/os/osEnv.h | 2 + include/os/osFile.h | 1 + include/util/taoserror.h | 1 + tools/shell/src/shellCommand.c | 6 +- tools/shell/src/shellEngine.c | 31 +++++--- tools/shell/src/shellWebsocket.c | 34 +++++---- 17 files changed, 166 insertions(+), 134 deletions(-) diff --git a/include/common/systable.h b/include/common/systable.h index ca097cd8d2..ed2e6a46c3 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -23,38 +23,38 @@ extern "C" { #define TDENGINE_SYSTABLE_H #define TSDB_INFORMATION_SCHEMA_DB "information_schema" -#define TSDB_INS_TABLE_DNODES "dnodes" -#define TSDB_INS_TABLE_MNODES "mnodes" -#define TSDB_INS_TABLE_MODULES "modules" -#define TSDB_INS_TABLE_QNODES "qnodes" -#define TSDB_INS_TABLE_BNODES "bnodes" -#define TSDB_INS_TABLE_SNODES "snodes" -#define TSDB_INS_TABLE_CLUSTER "cluster" -#define TSDB_INS_TABLE_USER_DATABASES "user_databases" -#define TSDB_INS_TABLE_USER_FUNCTIONS "user_functions" -#define TSDB_INS_TABLE_USER_INDEXES "user_indexes" -#define TSDB_INS_TABLE_USER_STABLES "user_stables" -#define TSDB_INS_TABLE_USER_TABLES "user_tables" -#define TSDB_INS_TABLE_USER_TAGS "user_tags" -#define TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED 
"user_table_distributed" -#define TSDB_INS_TABLE_USER_USERS "user_users" -#define TSDB_INS_TABLE_LICENCES "grants" -#define TSDB_INS_TABLE_VGROUPS "vgroups" -#define TSDB_INS_TABLE_VNODES "vnodes" -#define TSDB_INS_TABLE_CONFIGS "configs" -#define TSDB_INS_TABLE_DNODE_VARIABLES "dnode_variables" +#define TSDB_INS_TABLE_DNODES "ins_dnodes" +#define TSDB_INS_TABLE_MNODES "ins_mnodes" +#define TSDB_INS_TABLE_MODULES "ins_modules" +#define TSDB_INS_TABLE_QNODES "ins_qnodes" +#define TSDB_INS_TABLE_BNODES "ins_bnodes" +#define TSDB_INS_TABLE_SNODES "ins_snodes" +#define TSDB_INS_TABLE_CLUSTER "ins_cluster" +#define TSDB_INS_TABLE_DATABASES "ins_databases" +#define TSDB_INS_TABLE_FUNCTIONS "ins_functions" +#define TSDB_INS_TABLE_INDEXES "ins_indexes" +#define TSDB_INS_TABLE_STABLES "ins_stables" +#define TSDB_INS_TABLE_TABLES "ins_tables" +#define TSDB_INS_TABLE_TAGS "ins_tags" +#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed" +#define TSDB_INS_TABLE_USERS "ins_users" +#define TSDB_INS_TABLE_LICENCES "ins_grants" +#define TSDB_INS_TABLE_VGROUPS "ins_vgroups" +#define TSDB_INS_TABLE_VNODES "ins_vnodes" +#define TSDB_INS_TABLE_CONFIGS "ins_configs" +#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables" #define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" -#define TSDB_PERFS_TABLE_SMAS "smas" -#define TSDB_PERFS_TABLE_CONNECTIONS "connections" -#define TSDB_PERFS_TABLE_QUERIES "queries" -#define TSDB_PERFS_TABLE_TOPICS "topics" -#define TSDB_PERFS_TABLE_CONSUMERS "consumers" -#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions" -#define TSDB_PERFS_TABLE_OFFSETS "offsets" -#define TSDB_PERFS_TABLE_TRANS "trans" -#define TSDB_PERFS_TABLE_STREAMS "streams" -#define TSDB_PERFS_TABLE_APPS "apps" +#define TSDB_PERFS_TABLE_SMAS "perf_smas" +#define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections" +#define TSDB_PERFS_TABLE_QUERIES "perf_queries" +#define TSDB_PERFS_TABLE_TOPICS "perf_topics" +#define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers" +#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions" +#define TSDB_PERFS_TABLE_OFFSETS "perf_offsets" +#define TSDB_PERFS_TABLE_TRANS "perf_trans" +#define TSDB_PERFS_TABLE_STREAMS "perf_streams" +#define TSDB_PERFS_TABLE_APPS "perf_apps" typedef struct SSysDbTableSchema { const char* name; diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 22aac46560..7839859e8b 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -184,6 +184,7 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u int32_t getJsonValueLen(const char* data); int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull); +int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows); int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity, const SColumnInfoData* pSource, int32_t numOfRow2); int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows, diff --git a/include/common/tgrant.h b/include/common/tgrant.h index 6392fcf517..97fb773044 100644 --- a/include/common/tgrant.h +++ b/include/common/tgrant.h @@ -49,9 +49,9 @@ int32_t grantCheck(EGrantType grant); #ifndef GRANTS_CFG #define GRANTS_SCHEMA static const SSysDbTableSchema grantsSchema[] = { \ {.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ - {.name = "expire time", .bytes = 19 + VARSTR_HEADER_SIZE, 
.type = TSDB_DATA_TYPE_VARCHAR}, \ + {.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ {.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ - {.name = "storage(GB)", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ + {.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ {.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ {.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ {.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ @@ -59,8 +59,8 @@ int32_t grantCheck(EGrantType grant); {.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ {.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ {.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ - {.name = "cpu cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ - {.name = "speed(PPS)", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ + {.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ + {.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ {.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \ } #define GRANT_CFG_ADD diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 7eafc4c3d8..bfb80ec8f8 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -227,8 +227,7 @@ typedef struct SSubmitBlk { int32_t sversion; // data schema version int32_t dataLen; // data part length, not including the SSubmitBlk head int32_t schemaLen; // schema length, if length is 0, no schema exists - int16_t numOfRows; // total number of rows in current submit block - int16_t padding; // TODO just for padding here + int32_t numOfRows; // total number of rows in current submit block char data[]; } SSubmitBlk; @@ -256,7 +255,7 @@ typedef struct { int32_t sversion; // data schema version int32_t dataLen; // data part length, not including the SSubmitBlk head int32_t schemaLen; // schema length, if length is 0, no schema exists - int16_t numOfRows; // total number of rows in current submit block + int32_t numOfRows; // total number of rows in current submit block // head of SSubmitBlk int32_t numOfBlocks; const void* pMsg; @@ -337,8 +336,10 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p } static FORCE_INLINE void tDeleteSSchemaWrapper(SSchemaWrapper* pSchemaWrapper) { - taosMemoryFree(pSchemaWrapper->pSchema); - taosMemoryFree(pSchemaWrapper); + if (pSchemaWrapper) { + taosMemoryFree(pSchemaWrapper->pSchema); + taosMemoryFree(pSchemaWrapper); + } } static FORCE_INLINE int32_t taosEncodeSSchema(void** buf, const SSchema* pSchema) { @@ -2223,6 +2224,7 @@ typedef struct SAppClusterSummary { uint64_t insertBytes; // submit to tsdb since launched. 
uint64_t fetchBytes; + uint64_t numOfQueryReq; uint64_t queryElapsedTime; uint64_t numOfSlowQueries; uint64_t totalRequests; diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index e15708e357..ab33af6acf 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -76,6 +76,13 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n */ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type); +/** + * @brief Cleanup SSDataBlock for StreamScanInfo + * + * @param tinfo + */ +void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo); + /** * Update the table id list, add or remove. * diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 32a53c9f4e..21d3fa92a8 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -253,7 +253,8 @@ typedef struct SShowCreateTableStmt { ENodeType type; char dbName[TSDB_DB_NAME_LEN]; char tableName[TSDB_TABLE_NAME_LEN]; - void* pCfg; // STableCfg + void* pDbCfg; // SDbCfgInfo + void* pTableCfg; // STableCfg } SShowCreateTableStmt; typedef struct SShowTableDistributedStmt { @@ -282,6 +283,7 @@ typedef struct SCreateIndexStmt { ENodeType type; EIndexType indexType; bool ignoreExists; + char indexDbName[TSDB_DB_NAME_LEN]; char indexName[TSDB_INDEX_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN]; char tableName[TSDB_TABLE_NAME_LEN]; @@ -292,6 +294,7 @@ typedef struct SCreateIndexStmt { typedef struct SDropIndexStmt { ENodeType type; bool ignoreNotExists; + char indexDbName[TSDB_DB_NAME_LEN]; char indexName[TSDB_INDEX_NAME_LEN]; } SDropIndexStmt; diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index c06b4411e6..82235033e1 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -114,6 +114,7 @@ typedef struct SAggLogicNode { SNodeList* pAggFuncs; bool hasLastRow; bool hasTimeLineFunc; + bool onlyHasKeepOrderFunc; } SAggLogicNode; typedef struct SProjectLogicNode { @@ -555,6 +556,8 @@ typedef struct SQueryPlan { void nodesWalkPhysiPlan(SNode* pNode, FNodeWalker walker, void* pContext); +const char* dataOrderStr(EDataOrderLevel order); + #ifdef __cplusplus } #endif diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index a452388052..f0a226db0a 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -275,6 +275,7 @@ typedef struct SSelectStmt { bool hasInterpFunc; bool hasLastRowFunc; bool hasTimeLineFunc; + bool onlyHasKeepOrderFunc; bool groupSort; } SSelectStmt; diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index b62f822313..34d870397f 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -71,6 +71,7 @@ typedef struct SIndexMeta { typedef struct SExecResult { int32_t code; uint64_t numOfRows; + uint64_t numOfBytes; int32_t msgType; void* res; } SExecResult; diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 240415b66b..103ca6a4f0 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -46,9 +46,10 @@ enum { }; enum { - TASK_EXEC_STATUS__IDLE = 1, - TASK_EXEC_STATUS__EXECUTING, - TASK_EXEC_STATUS__CLOSING, + TASK_SCHED_STATUS__INACTIVE = 1, + TASK_SCHED_STATUS__WAITING, + TASK_SCHED_STATUS__ACTIVE, + TASK_SCHED_STATUS__FAILED, }; enum { @@ -65,6 +66,25 @@ enum { TASK_OUTPUT_STATUS__BLOCKED, }; +enum { + TASK_TRIGGER_STATUS__INACTIVE = 1, + TASK_TRIGGER_STATUS__ACTIVE, +}; + +enum { + 
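  // level of the task within the stream topology (inferred from usage below:
  // source tasks scan input, agg tasks merge results from child tasks via
  // childEpInfo, sink tasks deliver output)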
TASK_LEVEL__SOURCE = 1, + TASK_LEVEL__AGG, + TASK_LEVEL__SINK, +}; + +enum { + TASK_OUTPUT__FIXED_DISPATCH = 1, + TASK_OUTPUT__SHUFFLE_DISPATCH, + TASK_OUTPUT__TABLE, + TASK_OUTPUT__SMA, + TASK_OUTPUT__FETCH, +}; + typedef struct { int8_t type; } SStreamQueueItem; @@ -201,41 +221,6 @@ typedef struct { int8_t reserved; } STaskSinkFetch; -enum { - TASK_SOURCE__SCAN = 1, - TASK_SOURCE__PIPE, - TASK_SOURCE__MERGE, -}; - -enum { - TASK_EXEC__NONE = 1, - TASK_EXEC__PIPE, - TASK_EXEC__MERGE, -}; - -enum { - TASK_DISPATCH__NONE = 1, - TASK_DISPATCH__FIXED, - TASK_DISPATCH__SHUFFLE, -}; - -enum { - TASK_SINK__NONE = 1, - TASK_SINK__TABLE, - TASK_SINK__SMA, - TASK_SINK__FETCH, -}; - -enum { - TASK_INPUT_TYPE__SUMBIT_BLOCK = 1, - TASK_INPUT_TYPE__DATA_BLOCK, -}; - -enum { - TASK_TRIGGER_STATUS__IN_ACTIVE = 1, - TASK_TRIGGER_STATUS__ACTIVE, -}; - typedef struct { int32_t nodeId; int32_t childId; @@ -248,28 +233,24 @@ typedef struct { typedef struct SStreamTask { int64_t streamId; int32_t taskId; - int8_t isDataScan; - int8_t execType; - int8_t sinkType; - int8_t dispatchType; - int8_t isStreamDistributed; + int8_t taskLevel; + int8_t outputType; int16_t dispatchMsgType; int8_t taskStatus; - int8_t execStatus; + int8_t schedStatus; // node info int32_t selfChildId; int32_t nodeId; SEpSet epSet; - // used for semi or single task, - // while final task should have processedVer for each child + // used for task source and sink, + // while task agg should have processedVer for each child int64_t recoverSnapVer; int64_t startVer; int64_t checkpointVer; int64_t processedVer; - // int32_t numOfVgroups; // children info SArray* childEpInfo; // SArray @@ -277,19 +258,13 @@ typedef struct SStreamTask { // exec STaskExec exec; - // TODO: unify sink and dispatch - - // local sink - union { - STaskSinkTb tbSink; - STaskSinkSma smaSink; - STaskSinkFetch fetchSink; - }; - - // remote dispatcher + // output union { STaskDispatcherFixedEp fixedEpDispatcher; STaskDispatcherShuffle shuffleDispatcher; + STaskSinkTb tbSink; + STaskSinkSma smaSink; + STaskSinkFetch fetchSink; }; int8_t inputStatus; @@ -303,9 +278,6 @@ typedef struct SStreamTask { int64_t triggerParam; void* timer; - // application storage - // void* ahandle; - // msg handle SMsgCb* pMsgCb; } SStreamTask; @@ -342,7 +314,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem } if (pItem->type != STREAM_INPUT__GET_RES && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { - atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__IN_ACTIVE, TASK_TRIGGER_STATUS__ACTIVE); + atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE); } #if 0 @@ -357,18 +329,15 @@ static FORCE_INLINE void streamTaskInputFail(SStreamTask* pTask) { } static FORCE_INLINE int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock) { - if (pTask->sinkType == TASK_SINK__TABLE) { - ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE); + if (pTask->outputType == TASK_OUTPUT__TABLE) { pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pBlock->blocks); taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); taosFreeQitem(pBlock); - } else if (pTask->sinkType == TASK_SINK__SMA) { - ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE); + } else if (pTask->outputType == TASK_OUTPUT__SMA) { pTask->smaSink.smaSink(pTask->smaSink.vnode, pTask->smaSink.smaId, pBlock->blocks); taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes); taosFreeQitem(pBlock); 
} else { - ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE); taosWriteQitem(pTask->outputQueue->queue, pBlock); } return 0; @@ -475,11 +444,10 @@ typedef struct { int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq); int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq); -int32_t streamLaunchByWrite(SStreamTask* pTask, int32_t vgId); int32_t streamSetupTrigger(SStreamTask* pTask); int32_t streamProcessRunReq(SStreamTask* pTask); -int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg); +int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec); int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp); int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg); int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp); @@ -487,6 +455,21 @@ int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg); int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp); +int32_t streamTryExec(SStreamTask* pTask); +int32_t streamSchedExec(SStreamTask* pTask); + +typedef struct SStreamMeta SStreamMeta; + +SStreamMeta* streamMetaOpen(); +void streamMetaClose(SStreamMeta* streamMeta); + +int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask); +int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); + +int32_t streamMetaBegin(SStreamMeta* pMeta); +int32_t streamMetaCommit(SStreamMeta* pMeta); +int32_t streamMetaRollBack(SStreamMeta* pMeta); + #ifdef __cplusplus } #endif diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index aec8a1f73e..d96a55c74c 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -28,10 +28,10 @@ extern bool gRaftDetailLog; #define SYNC_RESP_TTL_MS 10000000 -#define SYNC_MAX_BATCH_SIZE 500 -#define SYNC_INDEX_BEGIN 0 -#define SYNC_INDEX_INVALID -1 -#define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF +#define SYNC_MAX_BATCH_SIZE 1 +#define SYNC_INDEX_BEGIN 0 +#define SYNC_INDEX_INVALID -1 +#define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF typedef enum { SYNC_STRATEGY_NO_SNAPSHOT = 0, diff --git a/include/os/osEnv.h b/include/os/osEnv.h index a3f92a0b29..798bfc197e 100644 --- a/include/os/osEnv.h +++ b/include/os/osEnv.h @@ -49,6 +49,8 @@ void osDefaultInit(); void osUpdate(); void osCleanup(); bool osLogSpaceAvailable(); +bool osDataSpaceAvailable(); +bool osTempSpaceAvailable(); void osSetTimezone(const char *timezone); void osSetSystemLocale(const char *inLocale, const char *inCharSet); diff --git a/include/os/osFile.h b/include/os/osFile.h index 2f6a6ba480..21e3d2e6cf 100644 --- a/include/os/osFile.h +++ b/include/os/osFile.h @@ -54,6 +54,7 @@ typedef struct TdFile *TdFilePtr; #define TD_FILE_EXCL 0x0080 #define TD_FILE_STREAM 0x0100 // Only support taosFprintfFile, taosGetLineFile, taosEOFFile TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions); +TdFilePtr taosCreateFile(const char *path, int32_t tdFileOptions); #define TD_FILE_ACCESS_EXIST_OK 0x1 #define TD_FILE_ACCESS_READ_OK 0x2 diff --git a/include/util/taoserror.h b/include/util/taoserror.h index b15be12490..97a664d776 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -608,6 +608,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3151) #define 
TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152) #define TSDB_CODE_RSMA_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x3153) +#define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154) //index #define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200) diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c index 1af6ee4c06..d87e10fd08 100644 --- a/tools/shell/src/shellCommand.c +++ b/tools/shell/src/shellCommand.c @@ -525,7 +525,11 @@ int32_t shellReadCommand(char *command) { switch (c) { case 'A': // Up arrow hist_counter = (hist_counter + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE; - shellResetCommand(&cmd, (pHistory->hist[hist_counter] == NULL) ? "" : pHistory->hist[hist_counter]); + if (pHistory->hist[hist_counter] == NULL) { + hist_counter = (hist_counter + SHELL_MAX_HISTORY_SIZE + 1) % SHELL_MAX_HISTORY_SIZE; + } else { + shellResetCommand(&cmd, pHistory->hist[hist_counter]); + } break; case 'B': // Down arrow if (hist_counter != pHistory->hend) { diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 4526ff2230..f0bda82172 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -22,7 +22,8 @@ static bool shellIsEmptyCommand(const char *cmd); static int32_t shellRunSingleCommand(char *command); -static int32_t shellRunCommand(char *command); +static void shellRecordCommandToHistory(char *command); +static int32_t shellRunCommand(char *command, bool recordHistory); static void shellRunSingleCommandImp(char *command); static char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision); static int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres); @@ -101,11 +102,7 @@ int32_t shellRunSingleCommand(char *command) { return 0; } -int32_t shellRunCommand(char *command) { - if (shellIsEmptyCommand(command)) { - return 0; - } - +void shellRecordCommandToHistory(char *command) { SShellHistory *pHistory = &shell.history; if (pHistory->hstart == pHistory->hend || pHistory->hist[(pHistory->hend + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE] == NULL || @@ -120,6 +117,14 @@ int32_t shellRunCommand(char *command) { pHistory->hstart = (pHistory->hstart + 1) % SHELL_MAX_HISTORY_SIZE; } } +} + +int32_t shellRunCommand(char *command, bool recordHistory) { + if (shellIsEmptyCommand(command)) { + return 0; + } + + if (recordHistory) shellRecordCommandToHistory(command); char quote = 0, *cmd = command; for (char c = *command++; c != 0; c = *command++) { @@ -826,11 +831,15 @@ void shellSourceFile(const char *file) { size_t cmd_len = 0; char *line = NULL; char fullname[PATH_MAX] = {0}; + char sourceFileCommand[PATH_MAX + 8] = {0}; if (taosExpandDir(file, fullname, PATH_MAX) != 0) { tstrncpy(fullname, file, PATH_MAX); } + sprintf(sourceFileCommand, "source %s;",fullname); + shellRecordCommandToHistory(sourceFileCommand); + TdFilePtr pFile = taosOpenFile(fullname, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) { fprintf(stderr, "failed to open file %s\r\n", fullname); @@ -853,9 +862,13 @@ void shellSourceFile(const char *file) { continue; } + if (line[read_len - 1] == '\r') { + line[read_len - 1] = ' '; + } + memcpy(cmd + cmd_len, line, read_len); printf("%s%s\r\n", shell.info.promptHeader, cmd); - shellRunCommand(cmd); + shellRunCommand(cmd, false); memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); cmd_len = 0; } @@ -977,7 +990,7 @@ void *shellThreadLoop(void *arg) { } taosResetTerminalMode(); - } while (shellRunCommand(command) == 0); + } while (shellRunCommand(command, true) == 
0); taosMemoryFreeClear(command); shellWriteHistory(); @@ -1019,7 +1032,7 @@ int32_t shellExecute() { if (pArgs->commands != NULL) { printf("%s%s\r\n", shell.info.promptHeader, pArgs->commands); char *cmd = strdup(pArgs->commands); - shellRunCommand(cmd); + shellRunCommand(cmd, true); taosMemoryFree(cmd); } diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index fee2325c34..2dcab04b3f 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -33,10 +33,11 @@ int shell_conn_ws_server(bool first) { return 0; } -static int horizontalPrintWebsocket(WS_RES* wres) { +static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { const void* data = NULL; int rows; ws_fetch_block(wres, &data, &rows); + *execute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { return 0; } @@ -72,10 +73,11 @@ static int horizontalPrintWebsocket(WS_RES* wres) { return numOfRows; } -static int verticalPrintWebsocket(WS_RES* wres) { +static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) { int rows = 0; const void* data = NULL; ws_fetch_block(wres, &data, &rows); + *pexecute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { return 0; } @@ -112,7 +114,7 @@ static int verticalPrintWebsocket(WS_RES* wres) { return numOfRows; } -static int dumpWebsocketToFile(const char* fname, WS_RES* wres) { +static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute_time) { char fullname[PATH_MAX] = {0}; if (taosExpandDir(fname, fullname, PATH_MAX) != 0) { tstrncpy(fullname, fname, PATH_MAX); @@ -127,6 +129,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres) { int rows = 0; const void* data = NULL; ws_fetch_block(wres, &data, &rows); + *pexecute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { taosCloseFile(&pFile); return 0; @@ -162,14 +165,14 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres) { return numOfRows; } -static int shellDumpWebsocket(WS_RES *wres, char *fname, int *error_no, bool vertical) { +static int shellDumpWebsocket(WS_RES *wres, char *fname, int *error_no, bool vertical, double* pexecute_time) { int numOfRows = 0; if (fname != NULL) { - numOfRows = dumpWebsocketToFile(fname, wres); + numOfRows = dumpWebsocketToFile(fname, wres, pexecute_time); } else if (vertical) { - numOfRows = verticalPrintWebsocket(wres); + numOfRows = verticalPrintWebsocket(wres, pexecute_time); } else { - numOfRows = horizontalPrintWebsocket(wres); + numOfRows = horizontalPrintWebsocket(wres, pexecute_time); } *error_no = ws_errno(wres); return numOfRows; @@ -225,6 +228,8 @@ void shellRunSingleCommandWebsocketImp(char *command) { return; } + double execute_time = ws_take_timing(res)/1E6; + if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { fprintf(stdout, "Database changed.\r\n\r\n"); fflush(stdout); @@ -236,22 +241,27 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (ws_is_update_query(res)) { numOfRows = ws_affected_rows(res); et = taosGetTimestampUs(); - printf("Query Ok, %d of %d row(s) in database (%.6fs)\n", numOfRows, numOfRows, - (et - st)/1E6); + double total_time = (et - st)/1E3; + double net_time = total_time - (double)execute_time; + printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } else { int error_no = 0; - numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode); + numOfRows = 
shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time); if (numOfRows < 0) { ws_free_result(res); return; } et = taosGetTimestampUs(); + double total_time = (et - st) / 1E3; + double net_time = total_time - execute_time; if (error_no == 0 && !shell.stop_query) { - printf("Query OK, %d row(s) in set (%.6fs)\n", numOfRows, - (et - st)/1E6); + printf("Query OK, %d row(s) in set\n", numOfRows); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } else { printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows, (et - st)/1E6); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } } printf("\n"); From d1c6835eda13e4e3e7f64af0cba578aba7ec5b13 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 6 Aug 2022 15:26:58 +0800 Subject: [PATCH 04/11] other: merge 3.0 --- Jenkinsfile2 | 1 + docs/en/07-develop/01-connect/index.md | 12 +- .../{12-interval.md => 12-distinguished.md} | 4 +- docs/en/12-taos-sql/13-tmq.md | 66 + docs/en/12-taos-sql/14-limit.md | 53 - docs/en/12-taos-sql/14-stream.md | 122 ++ .../{13-operators.md => 16-operators.md} | 0 .../en/12-taos-sql/{16-json.md => 17-json.md} | 0 docs/en/12-taos-sql/19-limit.md | 59 + docs/en/12-taos-sql/21-node.md | 154 ++ docs/en/12-taos-sql/22-meta.md | 247 ++++ .../23-show.md => en/12-taos-sql/24-show.md} | 0 docs/en/12-taos-sql/25-grant.md | 94 ++ docs/en/12-taos-sql/26-udf.md | 28 + docs/en/12-taos-sql/27-index.md | 47 + docs/en/12-taos-sql/28-recovery.md | 38 + .../03-connector/_verify_linux.mdx | 19 +- .../03-connector/_verify_windows.mdx | 25 +- docs/en/14-reference/06-taosdump.md | 7 +- docs/en/20-third-party/09-emq-broker.md | 3 +- docs/examples/R/connect_native.r | 4 +- docs/examples/csharp/AsyncQueryExample.cs | 206 +-- docs/examples/csharp/QueryExample.cs | 98 +- docs/examples/csharp/SQLInsertExample.cs | 8 +- docs/examples/csharp/StmtInsertExample.cs | 4 +- docs/examples/csharp/SubscribeDemo.cs | 98 +- docs/examples/csharp/asyncquery.csproj | 2 +- docs/examples/csharp/connect.csproj | 2 +- docs/examples/csharp/influxdbline.csproj | 2 +- docs/examples/csharp/optsjson.csproj | 2 +- docs/examples/csharp/optstelnet.csproj | 2 +- docs/examples/csharp/query.csproj | 2 +- docs/examples/csharp/sqlinsert.csproj | 2 +- docs/examples/csharp/stmtinsert.csproj | 2 +- docs/examples/csharp/subscribe.csproj | 4 +- docs/zh/02-intro.md | 2 +- docs/zh/07-develop/01-connect/index.md | 12 +- docs/zh/07-develop/07-tmq.md | 495 +++---- docs/zh/07-develop/09-udf.md | 188 +-- docs/zh/12-taos-sql/22-meta.md | 202 ++- docs/zh/12-taos-sql/24-show.md | 270 ++++ docs/zh/12-taos-sql/26-udf.md | 21 +- .../03-connector/01-error-code.md | 481 ++++++ .../03-connector/_verify_linux.mdx | 19 +- .../03-connector/_verify_windows.mdx | 25 +- docs/zh/14-reference/03-connector/csharp.mdx | 31 +- docs/zh/14-reference/05-taosbenchmark.md | 51 +- docs/zh/14-reference/06-taosdump.md | 5 +- docs/zh/17-operation/17-diagnose.md | 202 +-- docs/zh/20-third-party/09-emq-broker.md | 3 +- examples/c/tmq.c | 760 ++++------ tests/pytest/cluster/TD-3693/insert1Data.json | 2 +- tests/pytest/cluster/TD-3693/insert2Data.json | 2 +- tests/pytest/crash_gen/crash_gen_main.py | 3 +- tests/pytest/dockerCluster/insert.json | 2 +- .../TD-5114/insertDataDb3Replica2.json | 2 +- tests/pytest/perfbenchmark/bug3433.py | 2 +- tests/pytest/perfbenchmark/joinPerformance.py | 2 +- tests/pytest/perfbenchmark/taosdemoInsert.py | 2 +- .../pytest/query/nestedQuery/insertData.json | 2 +- 
tests/pytest/query/query1970YearsAf.py | 2 +- tests/pytest/tools/insert-interlace.json | 2 +- .../insert-tblimit-tboffset-createdb.json | 2 +- .../insert-tblimit-tboffset-insertrec.json | 2 +- .../pytest/tools/insert-tblimit-tboffset.json | 2 +- .../tools/insert-tblimit-tboffset0.json | 2 +- .../tools/insert-tblimit1-tboffset.json | 2 +- tests/pytest/tools/insert.json | 2 +- .../NanoTestCase/taosdemoInsertMSDB.json | 2 +- .../NanoTestCase/taosdemoInsertNanoDB.json | 2 +- .../NanoTestCase/taosdemoInsertUSDB.json | 2 +- .../taosdemoTestNanoDatabase.json | 2 +- .../taosdemoTestNanoDatabaseInsertForSub.json | 2 +- .../taosdemoTestNanoDatabaseNow.json | 2 +- .../taosdemoTestNanoDatabasecsv.json | 2 +- .../TD-3453/query-interrupt.json | 2 +- .../TD-4985/query-limit-offset.json | 2 +- .../TD-5213/insertSigcolumnsNum4096.json | 2 +- .../taosdemoAllTest/insert-1s1tnt1r.json | 2 +- .../taosdemoAllTest/insert-1s1tntmr.json | 2 +- .../taosdemoAllTest/insert-disorder.json | 2 +- .../insert-drop-exist-auto-N00.json | 2 +- .../insert-drop-exist-auto-Y00.json | 2 +- .../tools/taosdemoAllTest/insert-illegal.json | 2 +- .../taosdemoAllTest/insert-interlace-row.json | 2 +- .../insert-interval-speed.json | 2 +- .../tools/taosdemoAllTest/insert-newdb.json | 2 +- .../taosdemoAllTest/insert-newtable.json | 2 +- .../taosdemoAllTest/insert-nodbnodrop.json | 2 +- .../tools/taosdemoAllTest/insert-offset.json | 2 +- .../tools/taosdemoAllTest/insert-renewdb.json | 2 +- .../tools/taosdemoAllTest/insert-sample.json | 2 +- .../taosdemoAllTest/insert-timestep.json | 2 +- ...sertBinaryLenLarge16374AllcolLar49151.json | 2 +- .../taosdemoAllTest/insertChildTab0.json | 2 +- .../taosdemoAllTest/insertChildTabLess0.json | 2 +- .../insertColumnsAndTagNum4096.json | 2 +- .../insertColumnsAndTagNumLarge4096.json | 2 +- .../taosdemoAllTest/insertColumnsNum0.json | 2 +- .../insertInterlaceRowsLarge1M.json | 2 +- .../taosdemoAllTest/insertMaxNumPerReq.json | 2 +- .../insertNumOfrecordPerReq0.json | 2 +- .../insertNumOfrecordPerReqless0.json | 2 +- .../tools/taosdemoAllTest/insertRestful.json | 2 +- .../insertSigcolumnsNum4096.json | 2 +- .../insertTagsNumLarge128.json | 2 +- .../insertTimestepMulRowsLargeint16.json | 2 +- .../tools/taosdemoAllTest/insert_5M_rows.json | 2 +- .../taosdemoAllTest/manual_block1_comp.json | 2 +- .../tools/taosdemoAllTest/manual_block2.json | 2 +- .../manual_change_time_1_1_A.json | 2 +- .../manual_change_time_1_1_B.json | 2 +- .../moredemo-offset-limit1.json | 2 +- .../moredemo-offset-limit5.json | 2 +- .../moredemo-offset-limit94.json | 2 +- .../moredemo-offset-newdb.json | 2 +- .../taosdemoAllTest/query-interrupt.json | 2 +- .../taosdemoAllTest/queryInsertdata.json | 2 +- .../taosdemoAllTest/queryInsertrestdata.json | 2 +- .../taosdemoAllTest/stmt/1174-large-stmt.json | 2 +- .../stmt/1174-large-taosc.json | 2 +- .../stmt/1174-small-stmt-random.json | 2 +- .../taosdemoAllTest/stmt/1174-small-stmt.json | 2 +- .../stmt/1174-small-taosc.json | 2 +- .../stmt/insert-1s1tnt1r-stmt.json | 2 +- .../stmt/insert-1s1tntmr-stmt.json | 2 +- .../stmt/insert-disorder-stmt.json | 2 +- .../stmt/insert-drop-exist-auto-N00-stmt.json | 2 +- .../stmt/insert-drop-exist-auto-Y00-stmt.json | 2 +- .../stmt/insert-interlace-row-stmt.json | 2 +- .../stmt/insert-interval-speed-stmt.json | 2 +- .../stmt/insert-newdb-stmt.json | 2 +- .../stmt/insert-newtable-stmt.json | 2 +- .../stmt/insert-nodbnodrop-stmt.json | 2 +- .../stmt/insert-offset-stmt.json | 2 +- .../stmt/insert-renewdb-stmt.json | 2 +- .../stmt/insert-sample-stmt.json | 2 
+- .../stmt/insert-timestep-stmt.json | 2 +- ...inaryLenLarge16374AllcolLar49151-stmt.json | 2 +- .../stmt/insertChildTab0-stmt.json | 2 +- .../stmt/insertChildTabLess0-stmt.json | 2 +- .../stmt/insertColumnsAndTagNum4096-stmt.json | 2 +- .../stmt/insertColumnsNum0-stmt.json | 2 +- .../stmt/insertInterlaceRowsLarge1M-stmt.json | 2 +- .../stmt/insertMaxNumPerReq-stmt.json | 2 +- .../stmt/insertNumOfrecordPerReq0-stmt.json | 2 +- .../insertNumOfrecordPerReqless0-stmt.json | 2 +- .../stmt/insertSigcolumnsNum4096-stmt.json | 2 +- .../stmt/insertTagsNumLarge128-stmt.json | 2 +- .../insertTimestepMulRowsLargeint16-stmt.json | 2 +- .../nsertColumnsAndTagNumLarge4096-stmt.json | 2 +- .../tools/taosdemoAllTest/subInsertdata.json | 2 +- .../subInsertdataMaxsql100.json | 2 +- .../taosdemoAllTest/taosdemoInsertMSDB.json | 2 +- .../taosdemoAllTest/taosdemoInsertNanoDB.json | 2 +- .../taosdemoAllTest/taosdemoInsertUSDB.json | 2 +- .../taosdemoTestNanoDatabase.json | 2 +- .../taosdemoTestNanoDatabaseInsertForSub.json | 2 +- .../taosdemoTestNanoDatabaseNow.json | 2 +- .../taosdemoTestNanoDatabasecsv.json | 2 +- tests/pytest/tools/taosdemoPerformance.py | 2 +- tests/pytest/tools/taosdumpTest.py | 77 +- tests/pytest/tools/taosdumpTest2.py | 70 +- tests/pytest/tools/taosdumpTestNanoSupport.py | 261 ++-- tests/pytest/tsdb/insertDataDb1.json | 2 +- tests/pytest/tsdb/insertDataDb1Replica2.json | 2 +- tests/pytest/tsdb/insertDataDb2.json | 2 +- tests/pytest/tsdb/insertDataDb2Newstab.json | 2 +- .../tsdb/insertDataDb2NewstabReplica2.json | 2 +- tests/pytest/tsdb/insertDataDb2Replica2.json | 2 +- tests/pytest/util/taosdemoCfg.py | 2 +- tests/pytest/wal/insertDataDb1.json | 2 +- tests/pytest/wal/insertDataDb1Replica2.json | 2 +- tests/pytest/wal/insertDataDb2.json | 2 +- tests/pytest/wal/insertDataDb2Newstab.json | 2 +- .../wal/insertDataDb2NewstabReplica2.json | 2 +- tests/pytest/wal/insertDataDb2Replica2.json | 2 +- tests/script/jenkins/basic.txt | 12 +- tests/script/sh/bit_and.c | 61 + tests/script/sh/compile_udf.sh | 10 + tests/script/sh/sqr_sum.c | 80 + tests/script/tmp/r1.sim | 47 + tests/script/tsim/parser/join_multitables.sim | 79 +- tests/script/tsim/parser/nestquery.sim | 4 +- tests/script/tsim/query/explain.sim | 10 +- tests/script/tsim/query/udf.sim | 161 ++ tests/script/tsim/show/basic.sim | 52 +- tests/script/tsim/sync/3Replica1VgElect.sim | 8 +- tests/script/tsim/sync/3Replica5VgElect.sim | 2 + tests/script/tsim/sync/start3replica.sim | 19 + .../tsim/sync/vnodesnapshot-rsma-test.sim | 20 +- tests/script/tsim/table/delete_writing.sim | 4 - tests/script/tsim/table/smallint.sim | 1 - .../script/tsim/tmq/basic2Of2ConsOverlap.sim | 4 +- tests/script/tsim/tmq/snapshot1.sim | 4 +- tests/script/tsim/valgrind/basic3.sim | 13 +- tests/script/tsim/valgrind/checkError1.sim | 14 +- tests/script/tsim/valgrind/checkError6.sim | 11 +- tests/system-test/0-others/user_control.py | 274 ++-- .../system-test/1-insert/db_tb_name_check.py | 88 ++ tests/system-test/1-insert/manyVgroups.json | 2 +- .../1-insert/performanceInsert.json | 2 +- tests/system-test/1-insert/time_range_wise.py | 206 +-- tests/system-test/2-query/check_tsdb.py | 43 +- tests/system-test/2-query/csum.py | 4 +- tests/system-test/2-query/irate.py | 8 +- tests/system-test/2-query/json_tag.py | 2 +- tests/system-test/2-query/qnodeCluster.py | 290 ++++ tests/system-test/2-query/queryQnode.py | 1 - tests/system-test/2-query/tsbsQuery.py | 141 +- tests/system-test/5-taos-tools/TD-12478.py | 151 ++ .../taosdump/taosdumpTestColTag.py | 1291 +++++++++++++++++ 
.../5dnode3mnodeSep1VnodeStopMnodeCreateDb.py | 10 +- ...ode3mnodeSep1VnodeStopMnodeCreateDbRep3.py | 4 +- tests/system-test/fulltest.sh | 21 +- 215 files changed, 5767 insertions(+), 2226 deletions(-) rename docs/en/12-taos-sql/{12-interval.md => 12-distinguished.md} (98%) create mode 100644 docs/en/12-taos-sql/13-tmq.md delete mode 100644 docs/en/12-taos-sql/14-limit.md create mode 100644 docs/en/12-taos-sql/14-stream.md rename docs/en/12-taos-sql/{13-operators.md => 16-operators.md} (100%) rename docs/en/12-taos-sql/{16-json.md => 17-json.md} (100%) create mode 100644 docs/en/12-taos-sql/19-limit.md create mode 100644 docs/en/12-taos-sql/21-node.md create mode 100644 docs/en/12-taos-sql/22-meta.md rename docs/{zh/12-taos-sql/23-show.md => en/12-taos-sql/24-show.md} (100%) create mode 100644 docs/en/12-taos-sql/25-grant.md create mode 100644 docs/en/12-taos-sql/26-udf.md create mode 100644 docs/en/12-taos-sql/27-index.md create mode 100644 docs/en/12-taos-sql/28-recovery.md create mode 100644 docs/zh/12-taos-sql/24-show.md create mode 100644 docs/zh/14-reference/03-connector/01-error-code.md create mode 100644 tests/script/sh/bit_and.c create mode 100755 tests/script/sh/compile_udf.sh create mode 100644 tests/script/sh/sqr_sum.c create mode 100644 tests/script/tmp/r1.sim create mode 100644 tests/script/tsim/query/udf.sim create mode 100644 tests/script/tsim/sync/start3replica.sim create mode 100644 tests/system-test/1-insert/db_tb_name_check.py create mode 100644 tests/system-test/2-query/qnodeCluster.py create mode 100644 tests/system-test/5-taos-tools/TD-12478.py create mode 100644 tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 4b47d56a6c..423169c007 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -40,6 +40,7 @@ def pre_test(){ git reset --hard cd ${WKC} git reset --hard + git clean -fxd ''' script { if (env.CHANGE_TARGET == 'master') { diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md index 720f8e2384..1318f4619b 100644 --- a/docs/en/07-develop/01-connect/index.md +++ b/docs/en/07-develop/01-connect/index.md @@ -73,7 +73,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding com.taosdata.jdbc taos-jdbcdriver - 2.0.38 + 3.0.0 ``` @@ -102,7 +102,7 @@ module goexample go 1.17 -require github.com/taosdata/driver-go/v2 develop +require github.com/taosdata/driver-go/v3 latest ``` :::note @@ -137,7 +137,7 @@ Node.js connector provides different ways of establishing connections by providi 1. Install Node.js Native Connector ``` -npm i td2.0-connector +npm install @tdengine/client ``` :::note @@ -147,7 +147,7 @@ It's recommend to use Node whose version is between `node-v12.8.0` and `node-v13 2. Install Node.js REST Connector ``` -npm i td2.0-rest-connector +npm install @tdengine/rest ``` @@ -167,7 +167,7 @@ Just need to add the reference to [TDengine.Connector](https://www.nuget.org/pac - + @@ -187,7 +187,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo -1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/). +1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/). 2. 
Install the dependency package `RJDBC`:

```R
diff --git a/docs/en/12-taos-sql/12-interval.md b/docs/en/12-taos-sql/12-distinguished.md
similarity index 98%
rename from docs/en/12-taos-sql/12-interval.md
rename to docs/en/12-taos-sql/12-distinguished.md
index acfb0de0e1..d2f7cf66b6 100644
--- a/docs/en/12-taos-sql/12-interval.md
+++ b/docs/en/12-taos-sql/12-distinguished.md
@@ -1,6 +1,6 @@
 ---
-sidebar_label: Interval
-title: Aggregate by Time Window
+sidebar_label: Distinguished
+title: Distinguished Query for Time Series Database
 ---
 
 Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window.
diff --git a/docs/en/12-taos-sql/13-tmq.md b/docs/en/12-taos-sql/13-tmq.md
new file mode 100644
index 0000000000..4d9c475a38
--- /dev/null
+++ b/docs/en/12-taos-sql/13-tmq.md
@@ -0,0 +1,66 @@
+---
+sidebar_label: Message Queue
+title: Message Queue
+---
+
+Starting from TDengine 3.0.0.0, the message queue has been substantially optimized and enhanced to simplify user solutions.
+
+## Create a Topic
+
+```sql
+CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name };
+```
+
+There are three kinds of topics: column subscription, supertable subscription, and database subscription.
+
+**Column subscription** is described by a subquery. It supports filtering, scalar functions, and scalar UDFs, but not JOIN, GROUP BY, window clauses, aggregate functions, or aggregate UDFs. The rules for column subscription are:
+
+1. Once a TOPIC is created, the columns of the returned results are fixed
+2. Columns that are subscribed or used in computation cannot be dropped or modified
+3. Columns can still be added, but newly added columns do not appear in the subscription results
+4. For select \*, the subscription expands to all columns present at creation time (data columns for subtables and normal tables; data columns plus tag columns for supertables)
+
+The rules for **supertable subscription and database subscription** are:
+
+1. Schema changes of the subscribed object are not restricted
+2. The schema in returned messages is block-level, and the schema of each block may differ
+3. Data written after a column change is returned with the write-time schema if it has not yet been flushed to disk
+4. Data written after a column change is returned with the flush-time schema once it has been flushed to disk
+
+## Drop a Topic
+
+```sql
+DROP TOPIC [IF EXISTS] topic_name;
+```
+
+If a consumer exists on the topic at that moment, the consumer receives an error.
+
+## Show Topics
+
+## SHOW TOPICS
+
+```sql
+SHOW TOPICS;
+```
+
+Shows information about all topics in the current database.
+
+## Create a Consumer Group
+
+A consumer group can only be created through the APIs provided by the TDengine client driver or a connector.
+
+## Drop a Consumer Group
+
+```sql
+DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
+```
+
+Drops the consumer group cgroup_name on the topic topic_name.
+
+## Show Consumer Groups
+
+```sql
+SHOW CONSUMERS;
+```
+
+Shows information about all active consumers in the current database.
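For illustration, a minimal C sketch of the topic lifecycle above, driven entirely through `taos_query`. It is a sketch, not part of the patch: the server address, the supertable `power.meters`, and the topic and consumer-group names are all assumptions.

```c
#include <stdio.h>
#include <taos.h>

// Run one SQL statement and report any error.
static int exec_sql(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  int code = taos_errno(res);
  if (code != 0) {
    fprintf(stderr, "\"%s\" failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
  return code;
}

int main() {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  // Column subscription: the result columns are fixed once the topic exists.
  exec_sql(conn, "CREATE TOPIC IF NOT EXISTS topic_meters AS "
                 "SELECT ts, current, voltage FROM power.meters");
  exec_sql(conn, "SHOW TOPICS");

  // Drop the consumer group first: a consumer still attached to the topic
  // would otherwise receive an error when the topic disappears.
  exec_sql(conn, "DROP CONSUMER GROUP IF EXISTS cg1 ON topic_meters");
  exec_sql(conn, "DROP TOPIC IF EXISTS topic_meters");

  taos_close(conn);
  return 0;
}
```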
diff --git a/docs/en/12-taos-sql/14-limit.md b/docs/en/12-taos-sql/14-limit.md
deleted file mode 100644
index e8bb77fc27..0000000000
--- a/docs/en/12-taos-sql/14-limit.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Naming & Restrictions
----
-
-## Naming Rules
-
-1. Only characters from the English alphabet, digits and underscore are allowed
-2. Names cannot start with a digit
-3. Case insensitive without escape character "\`"
-4. Identifier with escape character "\`"
-   To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape).
-
-## Password Rule
-
-The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.
-
-## General Limits
-
-- Maximum length of database name is 32 bytes, and it can't include "." or special characters.
-- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
-- Maximum length of each data row is 48K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
-- Maximum length of column name is 64.
-- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
-- Maximum length of tag name is 64.
-- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes.
-- Maximum length of singe SQL statement is 1048576, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
-- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
-- Maximum numbers of databases, STables, tables are dependent only on the system resources.
-- Maximum number of replicas for a database is 3.
-- Maximum length of user name is 23 bytes.
-- Maximum length of password is 15 bytes.
-- Maximum number of rows depends only on the storage space.
-- Maximum number of vnodes for a single database is 1024.
-
-## Restrictions of Table/Column Names
-
-### Name Restrictions of Table/Column
-
-The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
-
-### Name Restrictions After Escaping
-
-To support more flexible table or column names, new escape character "\`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table names. The escape character is not counted in the length of table name.
-
-With escaping, the string inside escape characters are case sensitive, i.e. will not be converted to lower case internally.
-
-For example:
-\`aBc\` and \`abc\` are different table or column names, but "abc" and "aBc" are same names because internally they are all "abc".
-
-:::note
-The characters inside escape characters must be printable characters.
-
-:::
diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md
new file mode 100644
index 0000000000..7ff7da2bfb
--- /dev/null
+++ b/docs/en/12-taos-sql/14-stream.md
@@ -0,0 +1,122 @@
+---
+sidebar_label: Stream Processing
+title: Stream Processing
+---
+
+When processing time-series data, raw data often has to be cleaned and preprocessed before it is stored long-term in a time-series database. Users usually have to deploy stream processing engines such as Kafka, Flink, or Spark alongside the time-series database, which increases development and maintenance costs.
+
+The stream processing engine of TDengine 3.0 minimizes the dependency on such additional middleware and truly unifies data ingestion, preprocessing, long-term storage, complex analytics, real-time computation, and real-time alerting. All of these tasks are accomplished with SQL alone, which greatly reduces learning and operating costs.
+
+## Create a Stream
+
+```sql
+CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery
+stream_options: {
+ TRIGGER    [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
+ WATERMARK   time
+}
+
+```
+
+Here, subquery is a subset of the ordinary SELECT query syntax:
+
+```sql
+subquery: SELECT [DISTINCT] select_list
+    from_clause
+    [WHERE condition]
+    [PARTITION BY tag_list]
+    [window_clause]
+    [group_by_clause]
+```
+
+The order_by, limit, slimit, and fill clauses are not supported.
+
+For example, the following statement creates a stream and automatically creates a supertable named avg_vol. Using a one-minute time window that slides forward every 30 seconds, the stream computes the average voltage of the meters and writes the results computed from the data of the meters table into the avg_vol table; data of different partitions is written into separately created subtables.
+
+```sql
+CREATE STREAM avg_vol_s INTO avg_vol AS
+SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+```
+
+## Drop a Stream
+
+```sql
+DROP STREAM [IF NOT EXISTS] stream_name
+```
+
+This only deletes the stream processing task; data already written by the stream is not deleted.
+
+## Show Streams
+
+```sql
+SHOW STREAMS;
+```
+
+## Trigger Modes of Stream Processing
+
+When creating a stream, the trigger mode can be specified with the TRIGGER option.
+
+For non-windowed computation, stream processing is triggered in real time. For windowed computation, three trigger modes are currently provided:
+
+1. AT_ONCE: triggered immediately on write
+
+2. WINDOW_CLOSE: triggered when the window closes (window closing is determined by event time and can be combined with a watermark; see "Tolerance Policy for Out-of-Order Data" below)
+
+3. MAX_DELAY time: triggered when the window closes; if the window has not closed and the time it has stayed open exceeds the time specified by max delay, computation is triggered as well.
+
+Because window closing is determined by event time, event time cannot advance if the event stream is interrupted or persistently delayed, which may make it impossible to obtain up-to-date results.
+
+Therefore, stream processing provides the MAX_DELAY trigger mode, which combines event time with processing time.
+
+In MAX_DELAY mode, computation is triggered immediately when a window closes. In addition, once the time elapsed after data is written exceeds the time specified by max delay, computation is triggered immediately.
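For illustration, a hedged C sketch that creates the example stream above with a MAX_DELAY trigger through `taos_query`; it assumes an existing database `power` containing the `meters` supertable, and the 5s/10s values are arbitrary choices.

```c
#include <stdio.h>
#include <taos.h>

int main() {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (conn == NULL) return 1;

  // MAX_DELAY bounds result latency even if the event stream stalls: each
  // window pushes results when it closes, or after 5s, whichever comes first.
  TAOS_RES *res = taos_query(conn,
      "CREATE STREAM IF NOT EXISTS avg_vol_s "
      "TRIGGER MAX_DELAY 5s WATERMARK 10s "
      "INTO avg_vol AS "
      "SELECT _wstartts, count(*), avg(voltage) FROM meters "
      "PARTITION BY tbname INTERVAL(1m) SLIDING(30s)");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "create stream failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  taos_close(conn);
  return 0;
}
```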
+
+## Tolerance Policy for Out-of-Order Data
+
+When creating a stream, a watermark can be specified in stream_option.
+
+Stream processing uses the watermark to measure its tolerance for out-of-order data; the watermark defaults to 0.
+
+T = latest event time - watermark
+
+Each arriving batch of data updates the window-closing threshold with the formula above and closes every open window whose end time is earlier than T. If the trigger mode is WINDOW_CLOSE or MAX_DELAY, the aggregation results of the closed windows are pushed.
+
+Expired-data handling policy of stream processing:
+For a window that has already closed, data that falls into it afterwards is marked as expired. Stream processing offers two ways to handle expired data:
+
+1. Discard: the default (and sometimes the only) mode provided by common stream processing engines
+
+2. Recompute: re-read all data of the corresponding window from the TSDB and recompute to obtain the latest result
+
+In either mode the watermark should be set carefully, to obtain correct results (discard mode) or to avoid the performance overhead of frequently triggered recomputation (recompute mode).
+
+## Data Fill Policy of Stream Processing
+
+TODO
+
+## Stream Processing and Session Windows
+
+```sql
+window_clause: {
+    SESSION(ts_col, tol_val)
+  | STATE_WINDOW(col)
+  | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)]
+}
+```
+
+Here SESSION is the session window and tol_val is the maximum gap of the time interval. Data whose gaps stay within tol_val belongs to the same window; if the time between two consecutive rows exceeds tol_val, the next window starts automatically.
+
+## Monitoring Streams and Querying Task Distribution
+
+TODO
+
+## Memory Control and Storage/Compute Separation for Stream Processing
+
+TODO
+
+## Pausing and Resuming a Stream
+
+```sql
+STOP STREAM stream_name;
+
+RESUME STREAM stream_name;
+```
diff --git a/docs/en/12-taos-sql/13-operators.md b/docs/en/12-taos-sql/16-operators.md
similarity index 100%
rename from docs/en/12-taos-sql/13-operators.md
rename to docs/en/12-taos-sql/16-operators.md
diff --git a/docs/en/12-taos-sql/16-json.md b/docs/en/12-taos-sql/17-json.md
similarity index 100%
rename from docs/en/12-taos-sql/16-json.md
rename to docs/en/12-taos-sql/17-json.md
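As a worked illustration of the rule T = latest event time - watermark above, a small self-contained C sketch (timestamps in milliseconds; the values are made up):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Returns true if a window ending at win_end should be closed once an event
// with timestamp max_event_ts (the latest seen so far) has arrived.
static bool window_should_close(int64_t max_event_ts, int64_t watermark,
                                int64_t win_end) {
  int64_t t = max_event_ts - watermark;  // closing threshold T
  return win_end < t;
}

int main() {
  // With a 10s watermark, a window ending at 60s only closes once the
  // latest event time has passed 70s.
  printf("%d\n", window_should_close(70001, 10000, 60000));  // 1: closes
  printf("%d\n", window_should_close(69000, 10000, 60000));  // 0: stays open
  return 0;
}
```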
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
new file mode 100644
index 0000000000..ff552fc977
--- /dev/null
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -0,0 +1,59 @@
+---
+sidebar_label: Naming & Restrictions
+title: Naming & Restrictions
+---
+
+## Naming Rules
+
+1. Legal characters: English letters, digits, and underscore
+2. Names may start with a letter or an underscore, but not with a digit
+3. Case insensitive
+4. Rules for escaped table (column) names:
+   To support more flexible table (column) names, TDengine introduces a new escape character "`". It keeps table names from conflicting with keywords and exempts them from the validity checks above.
+   Escaped table (column) names are still subject to the length limits, and the escape characters do not count toward the length. With escaping, the content between the escape characters is no longer unified to lower case.
+
+   For example: \`aBc\` and \`abc\` are different table (column) names, while abc and aBc are the same name.
+   Note that the content between the escape characters must consist of printable characters.
+
+## Legal Character Set for Passwords
+
+`[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`
+
+Excluded are `` ‘“`\ `` (single and double quotes, apostrophe, backslash, and space)
+
+## General Limits
+
+- The maximum length of a database name is 32
+- The maximum length of a table name is 192, excluding the database name prefix and the separator
+- The maximum length of a data row is 48 KB (note: each BINARY/NCHAR column additionally occupies 2 bytes of storage)
+- The maximum length of a column name is 64
+- At most 4096 columns are allowed; at least 2 are required, and the first column must be a timestamp
+- The maximum length of a tag name is 64
+- At most 128 tags are allowed and at least 1 is required; the total length of tag values in a table must not exceed 16 KB
+- The maximum length of a SQL statement is 1048576 characters; it can also be changed with the client configuration parameter maxSQLLength, within the range 65480 to 1048576
+- A SELECT statement may return at most 4096 columns (function calls in the statement may also occupy column slots); when the limit is exceeded, explicitly specify fewer result columns to avoid execution errors
+- The numbers of databases, supertables, and tables are not limited by the system; they are constrained only by system resources
+- The number of replicas of a database can only be set to 1 or 3
+- The maximum length of a user name is 23 bytes
+- The maximum length of a user password is 15 bytes
+- The total number of data rows depends only on the available resources
+- The maximum number of vnodes for a single database is 1024
+
+## Validity of Table (Column) Names
+
+### Table (column) names in TDengine follow these rules:
+
+Names may consist only of letters, digits, and underscores, may not start with a digit, may not exceed 192 bytes, and are case insensitive. The table name here excludes the database name prefix and the separator.
+
+### Rules for escaped table (column) names:
+
+To support more flexible table (column) names, TDengine introduces a new escape character "`", which avoids conflicts between table names and keywords and exempts names from the validity checks above; the escape character does not count toward the name length.
+Escaped table (column) names are still subject to the length limits, and the escape characters are not counted in the length. With escaping, the content between the escape characters is no longer unified to lower case.
+
+For example:
+\`aBc\` and \`abc\` are different table (column) names, while abc and aBc are the same name.
+
+:::note
+The content between the escape characters must consist of printable characters.
+
+:::
diff --git a/docs/en/12-taos-sql/21-node.md b/docs/en/12-taos-sql/21-node.md
new file mode 100644
index 0000000000..4816daf420
--- /dev/null
+++ b/docs/en/12-taos-sql/21-node.md
@@ -0,0 +1,154 @@
+---
+sidebar_label: Cluster Management
+title: Cluster Management
+---
+
+The physical entity that makes up a TDengine cluster is the dnode (short for data node), a process running on top of the operating system. A dnode can host vnodes (virtual nodes) that are responsible for time-series data storage. In a multi-node cluster, when a database's replica is 3, each vgroup of that database consists of 3 vnodes; when the replica is 1, each vgroup consists of 1 vnode. To configure a database with multiple replicas, the cluster must contain at least 3 dnodes. A dnode can also host an mnode (management node); a single cluster can have at most three mnodes. To support the separation of storage and compute, TDengine 3.0.0.0 introduces a new kind of logical node, the qnode (query node); a qnode and vnodes can either coexist in one dnode or be fully separated onto different dnodes.
+
+## Create a Data Node
+
+```sql
+CREATE DNODE {dnode_endpoint | dnode_host_name PORT port_val}
+```
+
+Here `dnode_endpoint` takes the form `hostname:port`. The hostname and port can also be specified separately.
+
+In practice it is recommended to create the dnode first and then start the corresponding dnode process, so that the dnode can immediately join the cluster according to the firstEp in its configuration file. Each dnode is assigned an ID after it joins successfully.
+
+## Show Data Nodes
+
+```sql
+SHOW DNODES;
+```
+
+Lists all data nodes in the cluster; the listed fields include the dnode's ID, endpoint, and status.
+
+## Drop a Data Node
+
+```sql
+DROP DNODE {dnode_id | dnode_endpoint}
+```
+
+A dnode can be removed from the cluster either by dnode_id or by endpoint. Note that dropping a dnode is not the same as stopping the corresponding process; in practice it is recommended to drop a dnode first and then stop the process it corresponds to.
+
+## Modify Data Node Configuration
+
+```sql
+ALTER DNODE dnode_id dnode_option
+
+ALTER ALL DNODES dnode_option
+
+dnode_option: {
+    'resetLog'
+  | 'balance' value
+  | 'monitor' value
+  | 'debugFlag' value
+  | 'monDebugFlag' value
+  | 'vDebugFlag' value
+  | 'mDebugFlag' value
+  | 'cDebugFlag' value
+  | 'httpDebugFlag' value
+  | 'qDebugflag' value
+  | 'sdbDebugFlag' value
+  | 'uDebugFlag' value
+  | 'tsdbDebugFlag' value
+  | 'sDebugflag' value
+  | 'rpcDebugFlag' value
+  | 'dDebugFlag' value
+  | 'mqttDebugFlag' value
+  | 'wDebugFlag' value
+  | 'tmrDebugFlag' value
+  | 'cqDebugFlag' value
+}
+```
+
+The modifiable configuration items above are configured the same way as in the dnode configuration file; the difference is that modification is dynamic, takes effect immediately, and does not require restarting the dnode.
+
+## Add a Management Node
+
+```sql
+CREATE MNODE ON DNODE dnode_id
+```
+
+At startup the system creates one MNODE on the firstEp node by default. Users can use this statement to create more MNODEs to improve system availability. A cluster can have at most three MNODEs, and at most one MNODE can be created on a given DNODE.
+
+## Show Management Nodes
+
+```sql
+SHOW MNODES;
+```
+
+Lists all management nodes in the cluster, including their ID, hosting DNODE, and status.
+
+## Drop a Management Node
+
+```sql
+DROP MNODE ON DNODE dnode_id;
+```
+
+Drops the MNODE on the DNODE specified by dnode_id.
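For illustration, a hedged C sketch of the rollout recommended above: register the dnode first, then start its process so it joins via firstEp. The endpoint `node2:6030` and dnode ID 2 are hypothetical.

```c
#include <stdio.h>
#include <taos.h>

int main() {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  const char *steps[] = {
      "CREATE DNODE 'node2:6030'",  // register first, then start taosd on node2
      "CREATE MNODE ON DNODE 2",    // optional: up to three mnodes per cluster
      "SHOW DNODES",                // verify id, endpoint, status
  };
  for (int i = 0; i < 3; i++) {
    TAOS_RES *res = taos_query(conn, steps[i]);
    if (taos_errno(res) != 0) {
      fprintf(stderr, "\"%s\" failed: %s\n", steps[i], taos_errstr(res));
    }
    taos_free_result(res);
  }

  taos_close(conn);
  return 0;
}
```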
+
+## Create a Query Node
+
+```sql
+CREATE QNODE ON DNODE dnode_id;
+```
+
+There is no QNODE by default at system startup; users can create QNODEs to separate compute from storage. At most one QNODE can be created on a DNODE. If a DNODE's `supportVnodes` parameter is not 0 and a QNODE is also created on it, that dnode contains both vnodes responsible for storage management and a qnode responsible for query computation; if an mnode is created on that dnode as well, all three kinds of logical nodes can coexist on one dnode. Through configuration they can also be separated completely: with a dnode's `supportVnodes` set to 0, one can choose to create either an mnode or a qnode on it, which achieves complete physical separation of the three kinds of logical nodes.
+
+## Show Query Nodes
+
+```sql
+SHOW QNODES;
+```
+
+Lists all query nodes in the cluster, including their ID and hosting DNODE.
+
+## Drop a Query Node
+
+```sql
+DROP QNODE ON DNODE dnode_id;
+```
+
+Drops the QNODE on the DNODE whose ID is dnode_id, without affecting the status of that dnode.
+
+## Modify Client Configuration
+
+If the client is also regarded as part of the cluster in a broad sense, client configuration parameters can be modified dynamically with the following command.
+
+```sql
+ALTER LOCAL local_option
+
+local_option: {
+    'resetLog'
+  | 'rpcDebugFlag' value
+  | 'tmrDebugFlag' value
+  | 'cDebugFlag' value
+  | 'uDebugFlag' value
+  | 'debugFlag' value
+}
+```
+
+The parameters above are used the same way as when configuring the client in its configuration file, but no client restart is needed; changes take effect immediately.
+
+## Show Client Configuration
+
+```sql
+SHOW LOCAL VARIABLES;
+```
+
+## Merge Vgroups
+
+```sql
+MERGE VGROUP vgroup_no1 vgroup_no2;
+```
+
+If, after the system has been running for some time, data and load are distributed unevenly among vgroups because different time lines have different data characteristics, load balancing can be achieved gradually by merging or splitting vgroups.
+
+## Split a Vgroup
+
+```sql
+SPLIT VGROUP vgroup_no;
+```
+
+Creates a new vgroup and migrates part of the data of the specified vgroup into it by consistent hashing. While this happens, the original vgroup continues to serve reads and writes normally.
diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md
new file mode 100644
index 0000000000..1e17870685
--- /dev/null
+++ b/docs/en/12-taos-sql/22-meta.md
@@ -0,0 +1,247 @@
+---
+sidebar_label: Metadata
+title: Metadata
+---
+
+TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata and to system information and status, such as the names of databases and tables and the SQL statements currently being executed. It stores information about all the other databases maintained by TDengine and contains multiple read-only tables. These tables are in fact views rather than base tables, so no files are associated with them; they can only be queried, and write operations such as INSERT are not allowed. The `INFORMATION_SCHEMA` database is intended to provide, in a more consistent way, access to the information exposed by the various SHOW statements supported by TDengine (such as SHOW TABLES and SHOW DATABASES). Compared with SHOW statements, using SELECT ... FROM INFORMATION_SCHEMA.tablename has the following advantages:
+
+1. INFORMATION_SCHEMA can be made the default database with a USE statement
+2. The familiar SELECT syntax can be used; only some table and column names need to be learned
+3. Query results can be filtered, sorted, and so on; in fact, any SELECT statement supported by TDengine can be used to query the tables in INFORMATION_SCHEMA
+4. TDengine can flexibly add columns to existing INFORMATION_SCHEMA tables in future releases without worrying about breaking existing applications
+5. It is more interoperable with other database systems; for example, Oracle users are familiar with querying the tables of the Oracle data dictionary
+
+Note: since SHOW statements are already familiar to developers and widely used, they are still retained.
+
+This chapter describes in detail the tables and their schemas in the built-in metadata database `INFORMATION_SCHEMA`.
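For illustration, a minimal C sketch of advantage 3 above: filtering and sorting metadata with an ordinary SELECT rather than SHOW DATABASES. The connection parameters are the usual defaults and are assumptions here.

```c
#include <stdio.h>
#include <taos.h>

int main() {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  // The same information as SHOW DATABASES, but filterable and sortable.
  TAOS_RES *res = taos_query(conn,
      "SELECT name, ntables, replica FROM information_schema.ins_databases "
      "WHERE name NOT IN ('information_schema', 'performance_schema') "
      "ORDER BY name");
  if (taos_errno(res) == 0) {
    int cols = taos_field_count(res);
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      char line[4096] = {0};
      taos_print_row(line, row, taos_fetch_fields(res), cols);
      printf("%s\n", line);
    }
  } else {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```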
+
+## INS_DNODES
+
+Provides information about dnodes. The same information can be obtained with SHOW DNODES.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------------: | ------------ | ------------------------- |
+| 1 | vnodes | SMALLINT | actual number of vnodes in the dnode |
+| 2 | support_vnodes | SMALLINT | maximum number of vnodes supported |
+| 3 | status | BINARY(10) | current status |
+| 4 | note | BINARY(256) | information such as the reason for being offline |
+| 5 | id | SMALLINT | dnode id |
+| 6 | endpoint | BINARY(134) | address of the dnode |
+| 7 | create | TIMESTAMP | creation time |
+
+## INS_MNODES
+
+Provides information about mnodes. The same information can be obtained with SHOW MNODES.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | ------------------ |
+| 1 | id | SMALLINT | mnode id |
+| 2 | endpoint | BINARY(134) | address of the mnode |
+| 3 | role | BINARY(10) | current role |
+| 4 | role_time | TIMESTAMP | time when it assumed the current role |
+| 5 | create_time | TIMESTAMP | creation time |
+
+## INS_MODULES
+
+Provides information about modules. The same information can be obtained with SHOW MODULES.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------: | ------------ | ---------- |
+| 1 | id | SMALLINT | module id |
+| 2 | endpoint | BINARY(134) | address of the module |
+| 3 | module | BINARY(10) | module status |
+
+## INS_QNODES
+
+Information about QNODEs in the current system. The same information can be obtained with SHOW QNODES.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | ------------ |
+| 1 | id | SMALLINT | qnode id |
+| 2 | endpoint | BINARY(134) | address of the qnode |
+| 3 | create_time | TIMESTAMP | creation time |
+
+## INS_CLUSTER
+
+Stores cluster-related information.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | ---------- |
+| 1 | id | BIGINT | cluster id |
+| 2 | name | BINARY(134) | cluster name |
+| 3 | create_time | TIMESTAMP | creation time |
+
+## INS_DATABASES
+
+Provides information about user-created databases. The same information can be obtained with SHOW DATABASES.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------------------: | ------------- | ------------------------------------------------ |
+| 1 | name | BINARY(32) | database name |
+| 2 | create_time | TIMESTAMP | creation time |
+| 3 | ntables | INT | number of tables in the database, including subtables and normal tables but excluding supertables |
+| 4 | vgroups | INT | number of vgroups in the database |
+| 5 | replica | INT | number of replicas |
+| 6 | quorum | BINARY(3) | strong consistency |
+| 7 | duration | INT | time span of the data stored in a single file |
+| 8 | keep | INT | data retention period |
+| 9 | buffer | INT | size of the write cache per vnode, in MB |
+| 10 | pagesize | INT | page size of the metadata storage engine in each vnode, in KB |
+| 11 | pages | INT | number of cached pages in the metadata storage engine per vnode |
+| 12 | minrows | INT | minimum number of records in a file block |
+| 13 | maxrows | INT | maximum number of records in a file block |
+| 14 | comp | INT | data compression method |
+| 15 | precision | BINARY(2) | time resolution |
+| 16 | status | BINARY(10) | database status |
+| 17 | retention | BINARY(60) | aggregation period and retention duration of the data |
+| 18 | single_stable | BOOL | whether only one supertable may be created in this database |
+| 19 | cachemodel | BINARY(60) | whether the most recent data of subtables is cached in memory |
+| 20 | cachesize | INT | memory size per vnode for caching the most recent subtable data |
+| 21 | wal_level | INT | WAL level |
+| 22 | wal_fsync_period | INT | data flush-to-disk period |
+| 23 | wal_retention_period | INT | WAL retention period |
+| 24 | wal_retention_size | INT | WAL retention size limit |
+| 25 | wal_roll_period | INT | WAL file rotation period |
+| 26 | wal_segment_size | | size of a single WAL file |
+
+## INS_FUNCTIONS
+
+Information about user-created user-defined functions.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | -------------- |
+| 1 | name | BINARY(64) | function name |
+| 2 | comment | BINARY(255) | additional description |
+| 3 | aggregate | INT | whether it is an aggregate function |
+| 4 | output_type | BINARY(31) | output type |
+| 5 | create_time | TIMESTAMP | creation time |
+| 6 | code_len | INT | code length |
+| 7 | bufsize | INT | buffer size |
+
+## INS_INDEXES
+
+Provides information about user-created indexes. The same information can be obtained with SHOW INDEX.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :--------------: | ------------ | ---------------------------------------------------------------------------------- |
+| 1 | db_name | BINARY(32) | name of the database containing the indexed table |
+| 2 | table_name | BINARY(192) | name of the table containing the index |
+| 3 | index_name | BINARY(192) | index name |
+| 4 | column_name | BINARY(64) | name of the indexed column |
+| 5 | index_type | BINARY(10) | currently SMA or FULLTEXT |
+| 6 | index_extensions | BINARY(256) | additional index information: for SMA indexes, a list of function names; NULL for FULLTEXT indexes |
+
+## INS_STABLES
+
+Provides information about user-created supertables.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :-----------: | ------------ | ------------------------ |
+| 1 | stable_name | BINARY(192) | supertable name |
+| 2 | db_name | BINARY(64) | name of the database the supertable is in |
+| 3 | create_time | TIMESTAMP | creation time |
+| 4 | columns | INT | number of columns |
+| 5 | tags | INT | number of tags |
+| 6 | last_update | TIMESTAMP | last update time |
+| 7 | table_comment | BINARY(1024) | table comment |
+| 8 | watermark | BINARY(64) | window closing time |
+| 9 | max_delay | BINARY(64) | maximum delay for pushing computed results |
+| 10 | rollup | BINARY(128) | rollup aggregate functions |
+
+## INS_TABLES
+
+Provides information about user-created normal tables and subtables.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :-----------: | ------------ | ---------------- |
+| 1 | table_name | BINARY(192) | table name |
+| 2 | db_name | BINARY(64) | database name |
+| 3 | create_time | TIMESTAMP | creation time |
+| 4 | columns | INT | number of columns |
+| 5 | stable_name | BINARY(192) | name of the supertable it belongs to |
+| 6 | uid | BIGINT | table id |
+| 7 | vgroup_id | INT | vgroup id |
+| 8 | ttl | INT | lifetime of the table |
+| 9 | table_comment | BINARY(1024) | table comment |
+| 10 | type | BINARY(20) | table type |
+
+## INS_TAGS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------- | ---------------------- |
+| 1 | table_name | BINARY(192) | table name |
+| 2 | db_name | BINARY(64) | name of the database the table is in |
+| 3 | stable_name | BINARY(192) | name of the supertable it belongs to |
+| 4 | tag_name | BINARY(64) | tag name |
+| 5 | tag_type | BINARY(64) | tag type |
+| 6 | tag_value | BINARY(16384) | tag value |
+
+## INS_USERS
+
+Provides information about users created in the system.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | -------- |
+| 1 | user_name | BINARY(23) | user name |
+| 2 | privilege | BINARY(256) | privileges |
+| 3 | create_time | TIMESTAMP | creation time |
+
+## INS_GRANTS
+
+Provides information about Enterprise Edition licenses.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | -------------------------------------------------- |
+| 1 | version | BINARY(9) | license type: official (officially licensed) or trial |
+| 2 | cpu_cores | BINARY(9) | number of CPU cores licensed |
+| 3 | dnodes | BINARY(10) | number of dnodes licensed |
+| 4 | streams | BINARY(10) | number of streams licensed |
+| 5 | users | BINARY(10) | number of users licensed |
+| 6 | accounts | BINARY(10) | number of accounts licensed |
+| 7 | storage | BINARY(21) | storage space licensed |
+| 8 | connections | BINARY(21) | number of client connections licensed |
+| 9 | databases | BINARY(11) | number of databases licensed |
+| 10 | speed | BINARY(9) | licensed write rate, in data points per second |
+| 11 | querytime | BINARY(9) | total query time licensed |
+| 12 | timeseries | BINARY(21) | number of time series licensed |
+| 13 | expired | BINARY(5) | whether expired: true (expired) or false (not expired) |
+| 14 | expire_time | BINARY(19) | trial expiration time |
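A follow-on sketch under the same assumptions (the database `power` is hypothetical): because these system tables are queried like ordinary tables, the client can discover the result schema at run time with `taos_fetch_fields`, so the loop below keeps working even if columns are later added to INS_STABLES.

```c
#include <stdio.h>
#include <taos.h>

int main() {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn,
      "SELECT stable_name, columns, tags FROM information_schema.ins_stables "
      "WHERE db_name = 'power'");
  if (taos_errno(res) == 0) {
    int ncols = taos_field_count(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    for (int i = 0; i < ncols; i++) printf("%s\t", fields[i].name);
    printf("\n");
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      char buf[1024] = {0};
      taos_print_row(buf, row, fields, ncols);
      printf("%s\n", buf);
    }
  } else {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```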
+
+## INS_VGROUPS
+
+Information about all vgroups in the system.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :-------: | ------------ | ------------------------------------------------------ |
+| 1 | vgroup_id | INT | vgroup id |
+| 2 | db_name | BINARY(32) | database name |
+| 3 | tables | INT | number of tables in this vgroup |
+| 4 | status | BINARY(10) | status of this vgroup |
+| 5 | v1_dnode | INT | id of the dnode hosting the first member |
+| 6 | v1_status | BINARY(10) | status of the first member |
+| 7 | v2_dnode | INT | id of the dnode hosting the second member |
+| 8 | v2_status | BINARY(10) | status of the second member |
+| 9 | v3_dnode | INT | id of the dnode hosting the third member |
+| 10 | v3_status | BINARY(10) | status of the third member |
+| 11 | nfiles | INT | number of data/metadata files in this vgroup |
+| 12 | file_size | INT | size of the data/metadata files in this vgroup |
+| 13 | tsma | TINYINT | whether this vgroup is dedicated to time-range-wise SMA, 1: yes, 0: no |
+
+## INS_CONFIGS
+
+System configuration parameters.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------: | ------------ | ------------ |
+| 1 | name | BINARY(32) | name of the configuration item |
+| 2 | value | BINARY(64) | value of the configuration item |
+
+## INS_DNODE_VARIABLES
+
+Configuration parameters of each dnode in the system.
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------: | ------------ | ------------ |
+| 1 | dnode_id | INT | ID of the dnode |
+| 2 | name | BINARY(32) | name of the configuration item |
+| 3 | value | BINARY(64) | value of the configuration item |
diff --git a/docs/zh/12-taos-sql/23-show.md b/docs/en/12-taos-sql/24-show.md
similarity index 100%
rename from docs/zh/12-taos-sql/23-show.md
rename to docs/en/12-taos-sql/24-show.md
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
new file mode 100644
index 0000000000..0c290350cc
--- /dev/null
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -0,0 +1,94 @@
+---
+sidebar_label: Permissions Management
+title: Permissions Management
+---
+
+This section describes how to perform permission management in TDengine.
+
+## Create a User
+
+```sql
+CREATE USER user_name PASS password;
+```
+
+Creates a user.
+
+user_name may be at most 23 bytes long.
+
+password may be at most 128 bytes long. Legal characters are "a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/"; single and double quotes, apostrophes, backslashes, and spaces are not allowed, and the password may not be empty.
+
+## Drop a User
+
+```sql
+DROP USER user_name;
+```
+
+## Modify User Information
+
+```sql
+ALTER USER user_name alter_user_clause
+
+alter_user_clause: {
+    PASS 'literal'
+  | ENABLE value
+  | SYSINFO value
+}
+```
+
+- PASS: change the user's password.
+- ENABLE: enable or disable the user. 1 enables the user, 0 disables it.
+- SYSINFO: whether the user may view system information. 1 allows viewing system information, 0 does not.
+
+## Grant Privileges
+
+```sql
+GRANT privileges ON priv_level TO user_name
+
+privileges : {
+    ALL
+  | priv_type [, priv_type] ...
+}
+
+priv_type : {
+    READ
+  | WRITE
+}
+
+priv_level : {
+    dbname.*
+  | *.*
+}
+```
+
+Grants privileges to a user.
+
+Privileges can be granted down to the DATABASE level; the available privileges are READ and WRITE.
+
+TDengine has two classes of users: the super user and ordinary users. The super user is created as root by default and has all privileges. Users created by the super user are ordinary users. Without additional grants, an ordinary user can create DATABASEs and holds all privileges on the DATABASEs it creates, including dropping and altering those databases and querying and writing their time-series data. The super user can grant an ordinary user read and write privileges on other DATABASEs, so that the user can read and write data in those DATABASEs, but cannot drop or alter them.
+
+For non-DATABASE objects such as USER, DNODE, UDF, and QNODE, ordinary users have read access only (generally via SHOW commands) and cannot create or modify them.
+
+## Revoke Privileges
+
+```sql
+REVOKE privileges ON priv_level FROM user_name
+
+privileges : {
+    ALL
+  | priv_type [, priv_type] ...
+}
+
+priv_type : {
+    READ
+  | WRITE
+}
+
+priv_level : {
+    dbname.*
+  | *.*
+}
+
+```
+
+Revokes privileges from a user.
\ No newline at end of file
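For illustration, the create/grant/revoke round trip above from the C client; the user name, password, and database `power` are hypothetical, and only the super user (root by default) may issue the grants.

```c
#include <stdio.h>
#include <taos.h>

static void exec_sql(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "\"%s\" failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main() {
  // Connect as the super user, which may grant access to other databases.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  exec_sql(conn, "CREATE USER app_reader PASS 'Ab1!xyz'");
  exec_sql(conn, "GRANT READ ON power.* TO app_reader");    // read-only on one db
  exec_sql(conn, "REVOKE READ ON power.* FROM app_reader");
  exec_sql(conn, "DROP USER app_reader");

  taos_close(conn);
  return 0;
}
```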
diff --git a/docs/en/12-taos-sql/26-udf.md b/docs/en/12-taos-sql/26-udf.md
new file mode 100644
index 0000000000..bd8d61a584
--- /dev/null
+++ b/docs/en/12-taos-sql/26-udf.md
@@ -0,0 +1,28 @@
+---
+sidebar_label: User-Defined Functions
+title: User-Defined Functions
+---
+
+Besides TDengine's built-in functions, users can write their own function logic and add it to the TDengine system.
+
+## Create a Function
+
+```sql
+CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value]
+```
+
+Syntax notes:
+
+AGGREGATE: indicates whether the function is a scalar function or an aggregate function.
+func_name: the function name; it must match the actual name of udfNormalFunc in the function implementation.
+library_path: the absolute path, on the client-side host, of the dynamic library containing the UDF implementation.
+OUTPUTTYPE: the return type of the function.
+BUFSIZE: buffer size for intermediate results, in bytes. Defaults to 0 if not set; must not exceed 512 bytes.
+
+For how to develop user-defined functions, see [UDF Usage Notes](../../develop/udf).
+
+## Drop a User-Defined Function
+
+```sql
+DROP FUNCTION func_name
+```
\ No newline at end of file
diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-index.md
new file mode 100644
index 0000000000..2c0907723e
--- /dev/null
+++ b/docs/en/12-taos-sql/27-index.md
@@ -0,0 +1,47 @@
+---
+sidebar_label: Index
+title: Using Indexes
+---
+
+Starting from version 3.0.0.0, TDengine introduces indexing and supports SMA indexes and FULLTEXT indexes.
+
+## Create an Index
+
+```sql
+CREATE FULLTEXT INDEX index_name ON tb_name (col_name [, col_name] ...)
+
+CREATE SMA INDEX index_name ON tb_name index_option
+
+index_option:
+    FUNCTION(functions) INTERVAL(interval_val [, interval_offset]) [SLIDING(sliding_val)] [WATERMARK(watermark_val)] [MAX_DELAY(max_delay_val)]
+
+functions:
+    function [, function] ...
+```
+
+### SMA Indexes
+
+Pre-aggregates the specified columns over the time windows defined by the INTERVAL clause; the pre-aggregation types are given by functions_string. SMA indexes improve the performance of aggregate queries over given time ranges. Currently a supertable is limited to one SMA INDEX.
+
+- Supported functions are MAX, MIN, and SUM.
+- WATERMARK: smallest unit is milliseconds, valid range [0ms, 900000ms], default 5 seconds; only available for supertables.
+- MAX_DELAY: smallest unit is milliseconds, valid range [1ms, 900000ms], default is the interval value (but not above the maximum); only available for supertables. Note: do not set MAX_DELAY too small, otherwise results are pushed too frequently, affecting storage and query performance; keep the default unless there is a specific need.
+
+### FULLTEXT Indexes
+
+Builds a text index on the specified columns to speed up queries that filter on text. FULLTEXT indexes do not support the index_option syntax. Currently FULLTEXT indexes can only be created on tag columns of JSON type. Composite multi-column indexes are not supported, but a FULLTEXT index can be created separately for each column.
+
+## Drop an Index
+
+```sql
+DROP INDEX index_name;
+```
+
+## Show Indexes
+
+```sql
+SHOW INDEXES FROM tbl_name [FROM db_name];
+```
+
+Shows the indexes created on the specified database or table.
diff --git a/docs/en/12-taos-sql/28-recovery.md b/docs/en/12-taos-sql/28-recovery.md
new file mode 100644
index 0000000000..72b220b8ff
--- /dev/null
+++ b/docs/en/12-taos-sql/28-recovery.md
@@ -0,0 +1,38 @@
+---
+sidebar_label: Error Recovery
+title: Error Recovery
+---
+
+In complex application scenarios, connections or query tasks may enter an error state or take so long that they never finish. In such cases there must be a way to terminate these connections or tasks.
+
+## Kill a Connection
+
+```sql
+KILL CONNECTION conn_id;
+```
+
+conn_id can be obtained with `SHOW CONNECTIONS`.
+
+## Kill a Query
+
+```sql
+KILL QUERY query_id;
+```
+
+query_id can be obtained with `SHOW QUERIES`.
+
+## Kill a Transaction
+
+```sql
+KILL TRANSACTION trans_id
+```
+
+trans_id can be obtained with `SHOW TRANSACTIONS`.
+
+## Reset Client Cache
+
+```sql
+RESET QUERY CACHE;
+```
+
+If metadata becomes out of sync across multiple clients, this command can force-clear the client cache; the clients will then pull the latest metadata from the server.
diff --git a/docs/en/14-reference/03-connector/_verify_linux.mdx b/docs/en/14-reference/03-connector/_verify_linux.mdx
index a6e5455224..875c9e132b 100644
--- a/docs/en/14-reference/03-connector/_verify_linux.mdx
+++ b/docs/en/14-reference/03-connector/_verify_linux.mdx
@@ -2,13 +2,18 @@ Execute TDengine CLI program `taos` directly from the Linux shell to connect to
 
 ```text
 $ taos
-Welcome to the TDengine shell from Linux, Client Version:2.0.5.0
-Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
+Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
+Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
+
+Server is Community Edition.
+ taos> show databases; -name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB)| blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | -========================================================================================================================================================================================================================= -test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16| 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | -log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1| 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | -Query OK, 2 row(s) in set (0.001198s) + name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size | +========================================================================================================================================================================================================================================================================================================================================================================================================================================================================= + information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + db | 2022-08-04 14:14:49.385 | 2 | 4 | 1 | off | 14400m | 5254560m,5254560m,5254560m | 96 | 4 | 256 | 100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 | +Query OK, 3 rows in database (0.019154s) + taos> ``` diff --git a/docs/en/14-reference/03-connector/_verify_windows.mdx b/docs/en/14-reference/03-connector/_verify_windows.mdx index daeb151bb1..4813bd24c3 100644 --- a/docs/en/14-reference/03-connector/_verify_windows.mdx +++ b/docs/en/14-reference/03-connector/_verify_windows.mdx @@ -1,14 +1,19 @@ Go to the `C:\TDengine` directory from `cmd` and execute TDengine CLI program `taos.exe` directly to connect to the TDengine service and enter the TDengine CLI interface, for example, as follows: ```text - C:\TDengine>taos - Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 - Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
- taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | - =================================================================================================================================================================================================================================================================== - test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | - log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | - Query OK, 2 row(s) in set (0.045000s) - taos> +Welcome to the TDengine shell from Windows, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. + +Server is Community Edition. + +taos> show databases; + name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size | +========================================================================================================================================================================================================================================================================================================================================================================================================================================================================= + information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 | +100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 | +Query OK, 3 rows in database (0.123000s) + +taos> ``` diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md index 96e68d0edb..2105ba83fa 100644 --- a/docs/en/14-reference/06-taosdump.md +++ b/docs/en/14-reference/06-taosdump.md @@ -29,7 +29,7 @@ There are two ways to install taosdump: 1. backing up all databases: specify `-A` or `-all-databases` parameter. 2. backup multiple specified databases: use `-D db1,db2,... ` parameters; -3. back up some super or normal tables in the specified database: use `-dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. +3. back up some super or normal tables in the specified database: use `dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. 4. 
back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and the taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `-allow-sys` command-line parameter.
5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters.

@@ -104,7 +104,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
                              use letter and number only. Default is NOT.
   -n, --no-escape            No escape char '`'. Default is using it.
   -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
-                             5.
+                             8.
+  -C, --cloud=CLOUD_DSN      specify a DSN to access TDengine cloud service
+  -R, --restful              Use RESTful interface to connect TDengine
+  -t, --timeout=SECONDS      The timeout seconds for websocket to interact.
   -g, --debug                Print debug info.
   -?, --help                 Give this help list
       --usage                Give a short usage message
diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md
index 8dfa09e6c7..0900dd3d75 100644
--- a/docs/en/20-third-party/09-emq-broker.md
+++ b/docs/en/20-third-party/09-emq-broker.md
@@ -16,6 +16,7 @@ The following preparations are required for EMQX to add TDengine data sources c
 
 Depending on the current operating system, users can download the installation package from the [EMQX official website](https://www.emqx.io/downloads) and execute the installation. After installation, use `sudo emqx start` or `sudo systemctl start emqx` to start the EMQX service.
 
+Note: this chapter is based on EMQX v4.4.5. Other versions of EMQX may differ in their user interface, configuration methods, or features.
 
 ## Create Database and Table
 
@@ -31,7 +32,7 @@ Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage
 
 ## Configuring EMQX Rules
 
-Since the configuration interface of EMQX differs from version to version, here is v4.4.3 as an example. For other versions, please refer to the corresponding official documentation.
+Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation.
### Login EMQX Dashboard diff --git a/docs/examples/R/connect_native.r b/docs/examples/R/connect_native.r index 18c142872b..3c5c9e199b 100644 --- a/docs/examples/R/connect_native.r +++ b/docs/examples/R/connect_native.r @@ -8,9 +8,9 @@ library("rJava") library("RJDBC") args<- commandArgs(trailingOnly = TRUE) -driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-2.0.37-dist.jar" +driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.0.0-dist.jar" driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path) conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata") dbGetQuery(conn, "SELECT server_version()") dbDisconnect(conn) -# ANCHOR_END: demo \ No newline at end of file +# ANCHOR_END: demo diff --git a/docs/examples/csharp/AsyncQueryExample.cs b/docs/examples/csharp/AsyncQueryExample.cs index 3dabbebd16..0d47325932 100644 --- a/docs/examples/csharp/AsyncQueryExample.cs +++ b/docs/examples/csharp/AsyncQueryExample.cs @@ -1,4 +1,7 @@ +using System; +using System.Collections.Generic; using TDengineDriver; +using TDengineDriver.Impl; using System.Runtime.InteropServices; namespace TDengineExample @@ -19,8 +22,8 @@ namespace TDengineExample { if (code == 0 && taosRes != IntPtr.Zero) { - FetchRowAsyncCallback fetchRowAsyncCallback = new FetchRowAsyncCallback(FetchRowCallback); - TDengine.FetchRowAsync(taosRes, fetchRowAsyncCallback, param); + FetchRawBlockAsyncCallback fetchRowAsyncCallback = new FetchRawBlockAsyncCallback(FetchRawBlockCallback); + TDengine.FetchRawBlockAsync(taosRes, fetchRowAsyncCallback, param); } else { @@ -28,179 +31,44 @@ namespace TDengineExample } } - static void FetchRowCallback(IntPtr param, IntPtr taosRes, int numOfRows) + // Iteratively call this interface until "numOfRows" is no greater than 0. 
+ static void FetchRawBlockCallback(IntPtr param, IntPtr taosRes, int numOfRows) { if (numOfRows > 0) { Console.WriteLine($"{numOfRows} rows async retrieved"); - DisplayRes(taosRes); - TDengine.FetchRowAsync(taosRes, FetchRowCallback, param); + IntPtr pdata = TDengine.GetRawBlock(taosRes); + List metaList = TDengine.FetchFields(taosRes); + List dataList = LibTaos.ReadRawBlock(pdata, metaList, numOfRows); + + for (int i = 0; i < dataList.Count; i++) + { + if (i != 0 && (i+1) % metaList.Count == 0) + { + Console.WriteLine("{0}\t|", dataList[i]); + } + else + { + Console.Write("{0}\t|", dataList[i]); + } + } + Console.WriteLine(""); + TDengine.FetchRawBlockAsync(taosRes, FetchRawBlockCallback, param); } else { if (numOfRows == 0) { Console.WriteLine("async retrieve complete."); - } else { - Console.WriteLine($"FetchRowAsync callback error, error code {numOfRows}"); + Console.WriteLine($"FetchRawBlockCallback callback error, error code {numOfRows}"); } TDengine.FreeResult(taosRes); } } - public static void DisplayRes(IntPtr res) - { - if (!IsValidResult(res)) - { - TDengine.Cleanup(); - System.Environment.Exit(1); - } - - List metaList = TDengine.FetchFields(res); - int fieldCount = metaList.Count; - // metaList.ForEach((item) => { Console.Write("{0} ({1}) \t|\t", item.name, item.size); }); - - List dataList = QueryRes(res, metaList); - for (int index = 0; index < dataList.Count; index++) - { - if (index % fieldCount == 0 && index != 0) - { - Console.WriteLine(""); - } - Console.Write("{0} \t|\t", dataList[index].ToString()); - - } - Console.WriteLine(""); - } - - public static bool IsValidResult(IntPtr res) - { - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - if (res != IntPtr.Zero) - { - Console.Write("reason: " + TDengine.Error(res)); - return false; - } - Console.WriteLine(""); - return false; - } - return true; - } - - private static List QueryRes(IntPtr res, List meta) - { - IntPtr taosRow; - List dataRaw = new(); - while ((taosRow = TDengine.FetchRows(res)) != IntPtr.Zero) - { - dataRaw.AddRange(FetchRow(taosRow, res)); - } - if (TDengine.ErrorNo(res) != 0) - { - Console.Write("Query is not complete, Error {0} {1}", TDengine.ErrorNo(res), TDengine.Error(res)); - } - TDengine.FreeResult(res); - Console.WriteLine(""); - return dataRaw; - } - - public static List FetchRow(IntPtr taosRow, IntPtr taosRes)//, List metaList, int numOfFiled - { - List metaList = TDengine.FetchFields(taosRes); - int numOfFiled = TDengine.FieldCount(taosRes); - - - List dataRaw = new(); - - IntPtr colLengthPrt = TDengine.FetchLengths(taosRes); - int[] colLengthArr = new int[numOfFiled]; - Marshal.Copy(colLengthPrt, colLengthArr, 0, numOfFiled); - - for (int i = 0; i < numOfFiled; i++) - { - TDengineMeta meta = metaList[i]; - IntPtr data = Marshal.ReadIntPtr(taosRow, IntPtr.Size * i); - - if (data == IntPtr.Zero) - { - dataRaw.Add("NULL"); - continue; - } - switch ((TDengineDataType)meta.type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - bool v1 = Marshal.ReadByte(data) != 0; - dataRaw.Add(v1); - break; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - sbyte v2 = (sbyte)Marshal.ReadByte(data); - dataRaw.Add(v2); - break; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - short v3 = Marshal.ReadInt16(data); - dataRaw.Add(v3); - break; - case TDengineDataType.TSDB_DATA_TYPE_INT: - int v4 = Marshal.ReadInt32(data); - dataRaw.Add(v4); - break; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - long v5 = Marshal.ReadInt64(data); - dataRaw.Add(v5); - break; - case 
TDengineDataType.TSDB_DATA_TYPE_FLOAT: - float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); - dataRaw.Add(v6); - break; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); - dataRaw.Add(v7); - break; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[i]); - dataRaw.Add(v8); - break; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - long v9 = Marshal.ReadInt64(data); - dataRaw.Add(v9); - break; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - string v10 = Marshal.PtrToStringUTF8(data, colLengthArr[i]); - dataRaw.Add(v10); - break; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - byte v12 = Marshal.ReadByte(data); - dataRaw.Add(v12.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - ushort v13 = (ushort)Marshal.ReadInt16(data); - dataRaw.Add(v13); - break; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - uint v14 = (uint)Marshal.ReadInt32(data); - dataRaw.Add(v14); - break; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - ulong v15 = (ulong)Marshal.ReadInt64(data); - dataRaw.Add(v15); - break; - case TDengineDataType.TSDB_DATA_TYPE_JSONTAG: - string v16 = Marshal.PtrToStringUTF8(data, colLengthArr[i]); - dataRaw.Add(v16); - break; - default: - dataRaw.Add("nonsupport data type"); - break; - } - - } - return dataRaw; - } - static IntPtr GetConnection() { string host = "localhost"; @@ -223,16 +91,16 @@ namespace TDengineExample } } -//output: -// Connect to TDengine success -// 8 rows async retrieved +// //output: +// // Connect to TDengine success +// // 8 rows async retrieved -// 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | -// 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | -// 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | -// 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | -// 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | -// 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | -// 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | -// 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | -// async retrieve complete. \ No newline at end of file +// // 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | +// // 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | +// // 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | +// // 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | +// // 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | +// // 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | +// // 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | +// // 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | +// // async retrieve complete. 
\ No newline at end of file diff --git a/docs/examples/csharp/QueryExample.cs b/docs/examples/csharp/QueryExample.cs index 97f0c456d4..c90a8cd0b7 100644 --- a/docs/examples/csharp/QueryExample.cs +++ b/docs/examples/csharp/QueryExample.cs @@ -1,4 +1,5 @@ using TDengineDriver; +using TDengineDriver.Impl; using System.Runtime.InteropServices; namespace TDengineExample @@ -23,7 +24,7 @@ namespace TDengineExample Console.WriteLine("fieldCount=" + fieldCount); // print column names - List metas = TDengine.FetchFields(res); + List metas = LibTaos.GetMeta(res); for (int i = 0; i < metas.Count; i++) { Console.Write(metas[i].name + "\t"); @@ -31,98 +32,17 @@ namespace TDengineExample Console.WriteLine(); // print values - IntPtr row; - while ((row = TDengine.FetchRows(res)) != IntPtr.Zero) + List resData = LibTaos.GetData(res); + for (int i = 0; i < resData.Count; i++) { - List metaList = TDengine.FetchFields(res); - int numOfFiled = TDengine.FieldCount(res); - - List dataRaw = new List(); - - IntPtr colLengthPrt = TDengine.FetchLengths(res); - int[] colLengthArr = new int[numOfFiled]; - Marshal.Copy(colLengthPrt, colLengthArr, 0, numOfFiled); - - for (int i = 0; i < numOfFiled; i++) + Console.Write($"|{resData[i].ToString()} \t"); + if (((i + 1) % metas.Count == 0)) { - TDengineMeta meta = metaList[i]; - IntPtr data = Marshal.ReadIntPtr(row, IntPtr.Size * i); - - if (data == IntPtr.Zero) - { - Console.Write("NULL\t"); - continue; - } - switch ((TDengineDataType)meta.type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - bool v1 = Marshal.ReadByte(data) == 0 ? false : true; - Console.Write(v1.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - sbyte v2 = (sbyte)Marshal.ReadByte(data); - Console.Write(v2.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - short v3 = Marshal.ReadInt16(data); - Console.Write(v3.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_INT: - int v4 = Marshal.ReadInt32(data); - Console.Write(v4.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - long v5 = Marshal.ReadInt64(data); - Console.Write(v5.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); - Console.Write(v6.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); - Console.Write(v7.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[i]); - Console.Write(v8 + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - long v9 = Marshal.ReadInt64(data); - Console.Write(v9.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - string v10 = Marshal.PtrToStringUTF8(data, colLengthArr[i]); - Console.Write(v10 + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - byte v12 = Marshal.ReadByte(data); - Console.Write(v12.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - ushort v13 = (ushort)Marshal.ReadInt16(data); - Console.Write(v13.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - uint v14 = (uint)Marshal.ReadInt32(data); - Console.Write(v14.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - ulong v15 = (ulong)Marshal.ReadInt64(data); - Console.Write(v15.ToString() + "\t"); - break; - case TDengineDataType.TSDB_DATA_TYPE_JSONTAG: - string v16 = 
Marshal.PtrToStringUTF8(data, colLengthArr[i]); - Console.Write(v16 + "\t"); - break; - default: - Console.Write("nonsupport data type value"); - break; - } - + Console.WriteLine(""); } - Console.WriteLine(); } + Console.WriteLine(); + if (TDengine.ErrorNo(res) != 0) { Console.WriteLine($"Query is not complete, Error {TDengine.ErrorNo(res)} {TDengine.Error(res)}"); diff --git a/docs/examples/csharp/SQLInsertExample.cs b/docs/examples/csharp/SQLInsertExample.cs index d5462c1062..192ea96d57 100644 --- a/docs/examples/csharp/SQLInsertExample.cs +++ b/docs/examples/csharp/SQLInsertExample.cs @@ -15,10 +15,10 @@ namespace TDengineExample CheckRes(conn, res, "failed to change database"); res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); CheckRes(conn, res, "failed to create stable"); - var sql = "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + - "d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + - "d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + - "d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; res = TDengine.Query(conn, sql); CheckRes(conn, res, "failed to insert data"); int affectedRows = TDengine.AffectRows(res); diff --git a/docs/examples/csharp/StmtInsertExample.cs b/docs/examples/csharp/StmtInsertExample.cs index 6ade424b95..0a4098091f 100644 --- a/docs/examples/csharp/StmtInsertExample.cs +++ b/docs/examples/csharp/StmtInsertExample.cs @@ -21,7 +21,7 @@ namespace TDengineExample CheckStmtRes(res, "failed to prepare stmt"); // 2. bind table name and tags - TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("California.SanFrancisco"), TaosBind.BindInt(2) }; + TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[]{"California.SanFrancisco"}), TaosMultiBind.MultiBindInt(new int?[] {2}) }; res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags); CheckStmtRes(res, "failed to bind table name and tags"); @@ -44,7 +44,7 @@ namespace TDengineExample CheckStmtRes(res, "faild to execute"); // 6. 
free - TaosBind.FreeTaosBind(tags); + TaosMultiBind.FreeTaosBind(tags); TaosMultiBind.FreeTaosBind(values); TDengine.Close(conn); TDengine.Cleanup(); diff --git a/docs/examples/csharp/SubscribeDemo.cs b/docs/examples/csharp/SubscribeDemo.cs index 34509215da..b62ff12e5e 100644 --- a/docs/examples/csharp/SubscribeDemo.cs +++ b/docs/examples/csharp/SubscribeDemo.cs @@ -1,12 +1,100 @@ using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; +using TDengineTMQ; +using TDengineDriver; +using System.Runtime.InteropServices; -namespace csharp +namespace TMQExample { internal class SubscribeDemo { + static void Main(string[] args) + { + IntPtr conn = GetConnection(); + string topic = "topic_example"; + Console.WriteLine($"create topic if not exists {topic} as select * from meters"); + // create topic + IntPtr res = TDengine.Query(conn, $"create topic if not exists {topic} as select * from meters"); + + if (res == IntPtr.Zero) + { + throw new Exception($"create topic failed, reason:{TDengine.Error(res)}"); + } + + var cfg = new ConsumerConfig + { + GourpId = "group_1", + TDConnectUser = "root", + TDConnectPasswd = "taosdata", + MsgWithTableName = "true", + TDConnectIp = "127.0.0.1", + }; + + // create consumer + var consumer = new ConsumerBuilder(cfg) + .Build(); + + // subscribe + consumer.Subscribe(topic); + + // consume + for (int i = 0; i < 5; i++) + { + var consumeRes = consumer.Consume(300); + // print consumeResult + foreach (KeyValuePair kv in consumeRes.Message) + { + Console.WriteLine("topic partitions:\n{0}", kv.Key.ToString()); + + kv.Value.Metas.ForEach(meta => + { + Console.Write("{0} {1}({2}) \t|", meta.name, meta.TypeName(), meta.size); + }); + Console.WriteLine(""); + kv.Value.Datas.ForEach(data => + { + Console.WriteLine(data.ToString()); + }); + } + + consumer.Commit(consumeRes); + Console.WriteLine("\n================ {0} done ", i); + + } + + // retrieve topic list + List topics = consumer.Subscription(); + topics.ForEach(t => Console.WriteLine("topic name:{0}", t)); + + + // unsubscribe + consumer.Unsubscribe(); + + // Close the consumer after use; otherwise it will leak memory. 
+ consumer.Close(); + TDengine.Close(conn); + + + } + + static IntPtr GetConnection() + { + string host = "localhost"; + short port = 6030; + string username = "root"; + string password = "taosdata"; + string dbname = "power"; + var conn = TDengine.Connect(host, username, password, dbname, port); + if (conn == IntPtr.Zero) + { + Console.WriteLine("Connect to TDengine failed"); + System.Environment.Exit(0); + } + else + { + Console.WriteLine("Connect to TDengine success"); + } + return conn; + } } + } diff --git a/docs/examples/csharp/asyncquery.csproj b/docs/examples/csharp/asyncquery.csproj index 7a952fe7ab..045969edd7 100644 --- a/docs/examples/csharp/asyncquery.csproj +++ b/docs/examples/csharp/asyncquery.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/connect.csproj b/docs/examples/csharp/connect.csproj index 27cffa30ae..3a912f8987 100644 --- a/docs/examples/csharp/connect.csproj +++ b/docs/examples/csharp/connect.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/influxdbline.csproj b/docs/examples/csharp/influxdbline.csproj index a8b197dc71..58bca48508 100644 --- a/docs/examples/csharp/influxdbline.csproj +++ b/docs/examples/csharp/influxdbline.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/optsjson.csproj b/docs/examples/csharp/optsjson.csproj index b1bd83405e..da16025dcd 100644 --- a/docs/examples/csharp/optsjson.csproj +++ b/docs/examples/csharp/optsjson.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/optstelnet.csproj b/docs/examples/csharp/optstelnet.csproj index 1ab4106771..194de21bcc 100644 --- a/docs/examples/csharp/optstelnet.csproj +++ b/docs/examples/csharp/optstelnet.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/query.csproj b/docs/examples/csharp/query.csproj index 63f13c3ddb..39fc135d5a 100644 --- a/docs/examples/csharp/query.csproj +++ b/docs/examples/csharp/query.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/sqlinsert.csproj b/docs/examples/csharp/sqlinsert.csproj index 0380395a5a..ab0e5e717a 100644 --- a/docs/examples/csharp/sqlinsert.csproj +++ b/docs/examples/csharp/sqlinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/stmtinsert.csproj b/docs/examples/csharp/stmtinsert.csproj index 8defb895eb..3d459fbeda 100644 --- a/docs/examples/csharp/stmtinsert.csproj +++ b/docs/examples/csharp/stmtinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/subscribe.csproj b/docs/examples/csharp/subscribe.csproj index 8286922c6f..eff29b3bf4 100644 --- a/docs/examples/csharp/subscribe.csproj +++ b/docs/examples/csharp/subscribe.csproj @@ -5,11 +5,11 @@ net6.0 enable enable - TDengineExample.SubscribeDemo + TMQExample.SubscribeDemo - + diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md index cd777b7d87..91f0376d8c 100644 --- a/docs/zh/02-intro.md +++ b/docs/zh/02-intro.md @@ -37,7 +37,7 @@ TDengine的主要功能如下: - **云原生**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。 -- **极简时序数据平台**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降系统的复杂度,降低应用开发和运营维护成本。 +- **极简时序数据平台**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。 - **分析能力**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。 diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index b1857b9739..89faf812ff 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -74,7 +74,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 com.taosdata.jdbc 
taos-jdbcdriver - 2.0.38 + 3.0.0 ``` @@ -103,7 +103,7 @@ module goexample go 1.17 -require github.com/taosdata/driver-go/v2 develop +require github.com/taosdata/driver-go/v3 latest ``` :::note @@ -138,7 +138,7 @@ Node.js 连接器通过不同的包提供不同的连接方式。 1. 安装 Node.js 原生连接器 ``` - npm i td2.0-connector + npm install @tdengine/client ``` :::note @@ -148,7 +148,7 @@ Node.js 连接器通过不同的包提供不同的连接方式。 2. 安装 Node.js REST 连接器 ``` - npm i td2.0-rest-connector + npm install @tdengine/rest ``` @@ -168,7 +168,7 @@ Node.js 连接器通过不同的包提供不同的连接方式。 - + @@ -188,7 +188,7 @@ dotnet add package TDengine.Connector -1. 下载 [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/)。 +1. 下载 [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/)。 2. 安装 R 的依赖包`RJDBC`: ```R diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md index 0f531e07c9..358c824ffa 100644 --- a/docs/zh/07-develop/07-tmq.md +++ b/docs/zh/07-develop/07-tmq.md @@ -1,254 +1,241 @@ ---- -sidebar_label: 数据订阅 -description: "轻量级的数据订阅与推送服务。连续写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。" -title: 数据订阅 ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import Java from "./_sub_java.mdx"; -import Python from "./_sub_python.mdx"; -import Go from "./_sub_go.mdx"; -import Rust from "./_sub_rust.mdx"; -import Node from "./_sub_node.mdx"; -import CSharp from "./_sub_cs.mdx"; -import CDemo from "./_sub_c.mdx"; - -基于数据天然的时间序列特性,TDengine 的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine 在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine 中每一张表均可视为一个标准的消息队列。 - -TDengine 内嵌支持轻量级的消息订阅与推送服务。使用系统提供的 API,用户可使用普通查询语句订阅数据库中的一张或多张表。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 - -TDengine 的订阅与推送服务的状态是由客户端维持,TDengine 服务端并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。 - -TDengine 的 API 中,与订阅相关的主要有以下三个: - -```c -taos_subscribe -taos_consume -taos_unsubscribe -``` - -这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面仍以智能电表场景为例介绍一下它们的具体用法(超级表和子表结构请参考上一节“连续查询”),完整的示例代码可以在 [这里](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c) 找到。 - -如果我们希望当某个电表的电流超过一定限制(比如 10A)后能得到通知并进行一些处理, 有两种方法:一是分别对每张子表进行查询,每次查询后记录最后一条数据的时间戳,后续只查询这个时间戳之后的数据: - -```sql -select * from D1001 where ts > {last_timestamp1} and current > 10; -select * from D1002 where ts > {last_timestamp2} and current > 10; -... 
-``` - -这确实可行,但随着电表数量的增加,查询数量也会增加,客户端和服务端的性能都会受到影响,当电表数增长到一定的程度,系统就无法承受了。 - -另一种方法是对超级表进行查询。这样,无论有多少电表,都只需一次查询: - -```sql -select * from meters where ts > {last_timestamp} and current > 10; -``` - -但是,如何选择 `last_timestamp` 就成了一个新的问题。因为,一方面数据的产生时间(也就是数据时间戳)和数据入库的时间一般并不相同,有时偏差还很大;另一方面,不同电表的数据到达 TDengine 的时间也会有差异。所以,如果我们在查询中使用最慢的那台电表的数据的时间戳作为 `last_timestamp`,就可能重复读入其它电表的数据;如果使用最快的电表的时间戳,其它电表的数据就可能被漏掉。 - -TDengine 的订阅功能为上面这个问题提供了一个彻底的解决方案。 - -首先是使用 `taos_subscribe` 创建订阅: - -```c -TAOS_SUB* tsub = NULL; -if (async) { -  // create an asynchronized subscription, the callback function will be called every 1s -  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); -} else { -  // create an synchronized subscription, need to call 'taos_consume' manually -  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); -} -``` - -TDengine 中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数 `async` 的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用 `taos_consume` 来拉取数据,而异步则由 API 在内部的另一个线程中调用 `taos_consume`,然后把拉取到的数据交给回调函数 `subscribe_callback`去处理。(注意,`subscribe_callback` 中不宜做较为耗时的操作,否则有可能导致客户端阻塞等不可控的问题。) - -参数 `taos` 是一个已经建立好的数据库连接,在同步模式下无特殊要求。但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误,因为回调函数在 API 的内部线程中被调用,而 TDengine 的部分 API 不是线程安全的。 - -参数 `sql` 是查询语句,可以在其中使用 where 子句指定过滤条件。在我们的例子中,如果只想订阅电流超过 10A 时的数据,可以这样写: - -```sql -select * from meters where current > 10; -``` - -注意,这里没有指定起始时间,所以会读到所有时间的数据。如果只想从一天前的数据开始订阅,而不需要更早的历史数据,可以再加上一个时间条件: - -```sql -select * from meters where ts > now - 1d and current > 10; -``` - -订阅的 `topic` 实际上是它的名字,因为订阅功能是在客户端 API 中实现的,所以没必要保证它全局唯一,但需要它在一台客户端机器上唯一。 - -如果名为 `topic` 的订阅不存在,参数 `restart` 没有意义;但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个 `topic` 时,`restart` 就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。本例中,如果 `restart` 是 **true**(非零值),用户程序肯定会读到所有数据。但如果这个订阅之前就存在了,并且已经读取了一部分数据,且 `restart` 是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。 - -`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。在同步模式下,如果前后两次调用 `taos_consume` 的时间间隔小于此时间,`taos_consume` 会阻塞,直到间隔超过此时间。异步模式下,这个时间是两次调用回调函数的最小时间间隔。 - -`taos_subscribe` 的倒数第二个参数用于用户程序向回调函数传递附加参数,订阅 API 不对其做任何处理,只原样传递给回调函数。此参数在同步模式下无意义。 - -订阅创建以后,就可以消费其数据了,同步模式下,示例代码是下面的 else 部分: - -```c -if (async) { -  getchar(); -} else while(1) { -  TAOS_RES* res = taos_consume(tsub); -  if (res == NULL) { -    printf("failed to consume data."); -    break; -  } else { -    print_result(res, blockFetch); -    getchar(); -  } -} -``` - -这里是一个 **while** 循环,用户每按一次回车键就调用一次 `taos_consume`,而 `taos_consume` 的返回值是查询到的结果集,与 `taos_use_result` 完全相同,例子中使用这个结果集的代码是函数 `print_result`: - -```c -void print_result(TAOS_RES* res, int blockFetch) { -  TAOS_ROW row = NULL; -  int num_fields = taos_num_fields(res); -  TAOS_FIELD* fields = taos_fetch_fields(res); -  int nRows = 0; -  if (blockFetch) { -    nRows = taos_fetch_block(res, &row); -    for (int i = 0; i < nRows; i++) { -      char temp[256]; -      taos_print_row(temp, row + i, fields, num_fields); -      puts(temp); -    } -  } else { -    while ((row = taos_fetch_row(res))) { -      char temp[256]; -      taos_print_row(temp, row, fields, num_fields); -      puts(temp); -      nRows++; -    } -  } -  printf("%d rows consumed.\n", nRows); -} -``` - -其中的 `taos_print_row` 用于处理订阅到数据,在我们的例子中,它会打印出所有符合条件的记录。而异步模式下,消费订阅到的数据则显得更为简单: - -```c -void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { -  print_result(res, *(int*)param); -} -``` - -当要结束一次数据订阅时,需要调用 `taos_unsubscribe`: - -```c -taos_unsubscribe(tsub, keep); -``` - -其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 
_{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录"),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 - -代码介绍完毕,我们来看一下实际的运行效果。假设: - -- 示例代码已经下载到本地 -- TDengine 也已经在同一台机器上安装好 -- 示例所需的数据库、超级表、子表已经全部创建好 - -则可以在示例代码所在目录执行以下命令来编译并启动示例程序: - -```bash -make -./subscribe -sql='select * from meters where current > 10;' -``` - -示例程序启动后,打开另一个终端窗口,启动 TDengine CLI 向 **D1001** 插入一条电流为 12A 的数据: - -```sql -$ taos -> use test; -> insert into D1001 values(now, 12, 220, 1); -``` - -这时,因为电流超过了 10A,您应该可以看到示例程序将它输出到了屏幕上。您可以继续插入一些数据观察示例程序的输出。 - -## 示例程序 - -下面的示例程序展示是如何使用连接器订阅所有电流超过 10A 的记录。 - -### 准备数据 - -``` -# create database "power" -taos> create database power; -# use "power" as the database in following operations -taos> use power; -# create super table "meters" -taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); -# create tabes using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("California.SanFrancisco", 2); -taos> create table d1002 using meters tags ("California.LosAngeles", 2); -# insert some rows -taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); -taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); -# filter out the rows in which current is bigger than 10A -taos> select * from meters where current > 10; - ts | current | voltage | phase | location | groupid | -=========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | -Query OK, 5 row(s) in set (0.004896s) -``` - -### 示例代码 - - - - - - - - - {/* - - */} - - - - {/* - - - - - */} - - - - - -### 运行示例程序 - -示例程序会先消费符合查询条件的所有历史数据: - -```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 -``` - -接着,使用 TDengine CLI 向表中新增一条数据: - -``` -# taos -taos> use power; -taos> insert into d1001 values(now, 12.4, 220, 1); -``` - -因为这条数据的电流大于 10A,示例程序会将其消费: - -``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 -``` +--- +sidebar_label: 消息队列 +description: "数据订阅与推送服务。连续写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。" +title: 消息队列 +--- + +基于数据天然的时间序列特性,TDengine 的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine 在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine 中每一张表均可视为一个标准的消息队列。 + +TDengine 内嵌支持消息订阅与推送服务(下文都简称TMQ)。使用系统提供的 API,用户可使用普通查询语句订阅数据库中的一张或多张表,或整个库。客户端启动订阅后,定时或按需轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 + 
+TMQ提供了提交机制来保证消息队列的可靠性和正确性。在调用方法上,支持自动提交和手动提交。 + +TMQ 的 API 中,与订阅相关的主要数据结构和API如下: + +```c +typedef struct tmq_t tmq_t; +typedef struct tmq_conf_t tmq_conf_t; +typedef struct tmq_list_t tmq_list_t; + +typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + +DLL_EXPORT tmq_list_t *tmq_list_new(); +DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); +DLL_EXPORT void tmq_list_destroy(tmq_list_t *); +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); +DLL_EXPORT const char *tmq_err2str(int32_t code); + +DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); +DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); +DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); +DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); +DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); +DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + +enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +}; +typedef enum tmq_conf_res_t tmq_conf_res_t; + +DLL_EXPORT tmq_conf_t *tmq_conf_new(); +DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); +DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); +DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); +``` + +这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码可以在 [tmq.c](https://github.com/taosdata/TDengine/blob/3.0/examples/c/tmq.c) 看到。 + +一、首先完成建库、建一张超级表和多张子表,并向每个子表插入若干条数据记录: + +```sql +drop database if exists tmqdb; +create database tmqdb; +create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16)); +create table tmqdb.ctb0 using tmqdb.stb tags(0, "subtable0"); +create table tmqdb.ctb1 using tmqdb.stb tags(1, "subtable1"); +create table tmqdb.ctb2 using tmqdb.stb tags(2, "subtable2"); +create table tmqdb.ctb3 using tmqdb.stb tags(3, "subtable3"); +insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00'); +insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11'); +insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22'); +insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33'); +``` + +二、创建topic: + +```sql +create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; +``` + +注:TMQ支持多种订阅类型: +1、列订阅 + +语法:CREATE TOPIC topic_name as subquery +通过select语句订阅(包括select *,或select ts, c1等指定列描述订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合) + +- TOPIC一旦创建则schema确定 +- 被订阅或用于计算的column和tag不可被删除、修改 +- 若发生schema变更,新增的column不出现在结果中 + +2、超级表订阅 +语法:CREATE TOPIC topic_name AS STABLE stbName + +- 订阅某超级表的全部数据,schema变更不受限,schema变更后写入的数据将以最新schema返回 +- 在tmq的返回消息中schema是块级别的,每块的schema可能不一样 +- 列变更后写入的数据若未落盘,将以写入时的schema返回 +- 列变更后写入的数据若已落盘,将以落盘时的schema返回 + +3、db订阅 +语法:CREATE TOPIC topic_name AS DATABASE db_name + +- 订阅某一db的全部数据,schema变更不受限 +- 在tmq的返回消息中schema是块级别的,每块的schema可能不一样 +- 列变更后写入的数据若未落盘,将以写入时的schema返回 +- 列变更后写入的数据若已落盘,将以落盘时的schema返回 + +三、创建consumer + +目前支持的config: + +| 参数名称 | 参数值 | 备注 | +| ---------------------------- | ------------------------------ | ------------------------------------------------------ | +| group.id | 最大长度:192 | | +| enable.auto.commit | 合法值:true, false | | +| auto.commit.interval.ms | | | +| auto.offset.reset | 合法值:earliest, latest, none | | +| td.connect.ip | 用于连接,同taos_connect的参数 | | +| td.connect.user | 用于连接,同taos_connect的参数 | | +| td.connect.pass | 
用于连接,同taos_connect的参数 | | +| td.connect.port | 用于连接,同taos_connect的参数 | | +| enable.heartbeat.background | 合法值:true, false | 开启后台心跳,即consumer不会因为长时间不poll而认为离线 | +| experimental.snapshot.enable | 合法值:true, false | 从 wal 开始消费,还是从 tsdb 开始消费 | +| msg.with.table.name | 合法值:true, false | 从消息中能否解析表名 | + +```c +/* 根据需要,设置消费组(group.id)、自动提交(enable.auto.commit)、自动提交时间间隔(auto.commit.interval.ms)、用户名(td.connect.user)、密码(td.connect.pass)等参数 */ + tmq_conf_t* conf = tmq_conf_new(); + tmq_conf_set(conf, "enable.auto.commit", "true"); + tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); + tmq_conf_set(conf, "group.id", "cgrpName"); + tmq_conf_set(conf, "td.connect.user", "root"); + tmq_conf_set(conf, "td.connect.pass", "taosdata"); + tmq_conf_set(conf, "auto.offset.reset", "earliest"); + tmq_conf_set(conf, "experimental.snapshot.enable", "true"); + tmq_conf_set(conf, "msg.with.table.name", "true"); + tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + return tmq; +``` + +四、创建订阅主题列表 + +```c + tmq_list_t* topicList = tmq_list_new(); + tmq_list_append(topicList, "topicName"); + return topicList; +``` + +单个consumer支持同时订阅多个topic。 + +五、启动订阅并开始消费 + +```c + /* 启动订阅 */ + tmq_subscribe(tmq, topicList); + tmq_list_destroy(topicList); + + /* 循环poll消息 */ + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t consumeDelay = 5000; + while (running) { + TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, consumeDelay); + if (tmqmsg) { + msgCnt++; + totalRows += msg_process(tmqmsg); + taos_free_result(tmqmsg); + } else { + break; + } + } + + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +``` + +这里是一个 **while** 循环,每调用一次 tmq_consumer_poll() 即获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析 API 完成消息内容的解析: + +```c + static int32_t msg_process(TAOS_RES* msg) { + char buf[1024]; + int32_t rows = 0; + + const char* topicName = tmq_get_topic_name(msg); + const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); + + printf("topic: %s\n", topicName); + printf("db: %s\n", dbName); + printf("vgroup id: %d\n", vgroupId); + + while (1) { + TAOS_ROW row = taos_fetch_row(msg); + if (row == NULL) break; + + TAOS_FIELD* fields = taos_fetch_fields(msg); + int32_t numOfFields = taos_field_count(msg); + int32_t* length = taos_fetch_lengths(msg); + int32_t precision = taos_result_precision(msg); + const char* tbName = tmq_get_table_name(msg); + rows++; + taos_print_row(buf, row, fields, numOfFields); + printf("row content from %s: %s\n", (tbName != NULL ? tbName : "null table"), buf); + } + + return rows; +} +```
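+
+上面的示例在创建 consumer 时开启了自动提交(enable.auto.commit)。如需手动控制提交时机,可关闭自动提交,在消息处理完成后调用 tmq_commit_sync 或 tmq_commit_async 提交消费进度。下面是一个手动提交的最小示意(非完整示例,接口签名见上文 API 列表):
+
+```c
+    /* 手动提交示意:假设已将 enable.auto.commit 设置为 false */
+    TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, 5000);
+    if (tmqmsg) {
+      msg_process(tmqmsg);
+      /* 处理完成后同步提交该消息对应的进度,也可改用 tmq_commit_async */
+      int32_t code = tmq_commit_sync(tmq, tmqmsg);
+      if (code) {
+        fprintf(stderr, "commit failed: %s\n", tmq_err2str(code));
+      }
+      taos_free_result(tmqmsg);
+    }
+```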
tbName : "null table"), buf); + } + + return rows; +} +``` + +五、结束消费 + +```sql + /* 取消订阅 */ + tmq_unsubscribe(tmq); + + /* 关闭消费 */ + tmq_consumer_close(tmq); +``` + +六、删除topic + +如果不再需要,可以删除创建topic,但注意:只有没有被订阅的topic才能别删除。 + +```sql + /* 删除topic */ + drop topic topicName; +``` + +七、状态查看 + +1、topics:查询已经创建的topic + +```sql + show topics; +``` + +2、consumers:查询consumer的状态及其订阅的topic + +```sql + show consumers; +``` + +3、subscriptions:查询consumer与vgroup之间的分配关系 + +```sql + show subscriptions; +``` + + diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index 09681650db..b8ae618105 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -16,72 +16,96 @@ description: "支持用户编码的聚合函数和标量函数,在查询中嵌 用户可以按照下列函数模板定义自己的标量计算函数 - `void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` + `int32_t udf(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)` - 其中 udfNormalFunc 是函数名的占位符,以上述模板实现的函数对行数据块进行标量计算,其参数项是固定的,用于按照约束完成与引擎之间的数据交换。 + 其中 udf 是函数名的占位符,以上述模板实现的函数对行数据块进行标量计算。 -- udfNormalFunc 中各参数的具体含义是: - - data:输入数据。 - - itype:输入数据的类型。这里采用的是短整型表示法,与各种数据类型对应的值可以参见 [column_meta 中的列类型说明](/reference/rest-api/)。例如 4 用于表示 INT 型。 - - iBytes:输入数据中每个值会占用的字节数。 - - numOfRows:输入数据的总行数。 - - ts:主键时间戳在输入中的列数据(只读)。 - - dataOutput:输出数据的缓冲区,缓冲区大小为用户指定的输出类型大小 \* numOfRows。 - - interBuf:中间计算结果的缓冲区,大小为用户在创建 UDF 时指定的 BUFSIZE 大小。通常用于计算中间结果与最终结果不一致时使用,由引擎负责分配与释放。 - - tsOutput:主键时间戳在输出时的列数据,如果非空可用于输出结果对应的时间戳。 - - numOfOutput:输出结果的个数(行数)。 - - oType:输出数据的类型。取值含义与 itype 参数一致。 - - oBytes:输出数据中每个值占用的字节数。 - - buf:用于在 UDF 与引擎间的状态控制信息传递块。 - - [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) 是结构最简单的 UDF 实现,也即上面定义的 udfNormalFunc 函数的一个具体实现。其功能为:对传入的一个数据列(可能因 WHERE 子句进行了筛选)中的每一项,都输出 +1 之后的值,并且要求输入的列数据类型为 INT。 +- scalarFunction 中各参数的具体含义是: + - inputDataBlock: 输入的数据块 + - resultColumn: 输出列 ### 聚合函数 用户可以按照如下函数模板定义自己的聚合函数。 -`void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)` +`int32_t udf_start(SUdfInterBuf *interBuf)` -其中 udfMergeFunc 是函数名的占位符,以上述模板实现的函数用于对计算中间结果进行聚合,只有针对超级表的聚合查询才需要调用该函数。其中各参数的具体含义是: +`int32_t udf(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)` - - data:udfNormalFunc 的输出数据数组,如果使用了 interBuf 那么 data 就是 interBuf 的数组。 - - numOfRows:data 中数据的行数。 - - dataOutput:输出数据的缓冲区,大小等于一条最终结果的大小。如果此时输出还不是最终结果,可以选择输出到 interBuf 中即 data 中。 - - numOfOutput:输出结果的个数(行数)。 - - buf:用于在 UDF 与引擎间的状态控制信息传递块。 +`int32_t udf_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)` +其中 udf 是函数名的占位符。其中各参数的具体含义是: -[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) 实现的是一个聚合函数,功能是对一组数据按绝对值取最大值。 + - interBuf:中间结果 buffer。 + - inputBlock:输入的数据块。 + - newInterBuf:新的中间结果buffer。 + - result:最终结果。 -其计算过程为:与所在查询语句相关的数据会被分为多个行数据块,对每个行数据块调用 udfNormalFunc(在本例的实现代码中,实际函数名是 `abs_max`)来生成每个子表的中间结果,再将子表的中间结果调用 udfMergeFunc(本例中,其实际的函数名是 `abs_max_merge`)进行聚合,生成超级表的最终聚合结果或中间结果。聚合查询最后还会通过 udfFinalizeFunc(本例中,其实际的函数名是 `abs_max_finalize`)再把超级表的中间结果处理为最终结果,最终结果只能含 0 或 1 条结果数据。 -其他典型场景,如协方差的计算,也可通过定义聚合 UDF 的方式实现。 +其计算过程为:首先调用udf_start生成结果buffer,然后相关的数据会被分为多个行数据块,对每个行数据块调用 udf 用数据块更新中间结果,最后再调用 udf_finish 从中间结果产生最终结果,最终结果只能含 0 或 1 条结果数据。 -### 最终计算 +### UDF 初始化和销毁 +`int32_t udf_init()` -用户可以按下面的函数模板实现自己的函数对计算结果进行最终计算,通常用于有 interBuf 使用的场景。 +`int32_t udf_destroy()` -`void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)` - 
-其中 udfFinalizeFunc 是函数名的占位符 ,其中各参数的具体含义是: - - dataOutput:输出数据的缓冲区。 - - interBuf:中间结算结果缓冲区,可作为输入。 - - numOfOutput:输出数据的个数,对聚合函数来说只能是 0 或者 1。 - - buf:用于在 UDF 与引擎间的状态控制信息传递块。 - -## UDF 实现方式的规则总结 - -三类 UDF 函数: udfNormalFunc、udfMergeFunc、udfFinalizeFunc ,其函数名约定使用相同的前缀,此前缀即 udfNormalFunc 的实际函数名,也即 udfNormalFunc 函数不需要在实际函数名后添加后缀;而udfMergeFunc 的函数名要加上后缀 `_merge`、udfFinalizeFunc 的函数名要加上后缀 `_finalize`,这是 UDF 实现规则的一部分,系统会按照这些函数名后缀来调用相应功能。 - -根据 UDF 函数类型的不同,用户所要实现的功能函数也不同: - -- 标量函数:UDF 中需实现 udfNormalFunc。 -- 聚合函数:UDF 中需实现 udfNormalFunc、udfMergeFunc(对超级表查询)、udfFinalizeFunc。 +其中 udf 是函数名的占位符。udf_init 完成初始化工作。 udf_destroy 完成清理工作。 :::note 如果对应的函数不需要具体的功能,也需要实现一个空函数。 ::: +### UDF 数据结构 +```c +typedef struct SUdfColumnMeta { + int16_t type; + int32_t bytes; + uint8_t precision; + uint8_t scale; +} SUdfColumnMeta; + +typedef struct SUdfColumnData { + int32_t numOfRows; + int32_t rowsAlloc; + union { + struct { + int32_t nullBitmapLen; + char *nullBitmap; + int32_t dataLen; + char *data; + } fixLenCol; + + struct { + int32_t varOffsetsLen; + int32_t *varOffsets; + int32_t payloadLen; + char *payload; + int32_t payloadAllocLen; + } varLenCol; + }; +} SUdfColumnData; + +typedef struct SUdfColumn { + SUdfColumnMeta colMeta; + bool hasNull; + SUdfColumnData colData; +} SUdfColumn; + +typedef struct SUdfDataBlock { + int32_t numOfRows; + int32_t numOfCols; + SUdfColumn **udfCols; +} SUdfDataBlock; + +typedef struct SUdfInterBuf { + int32_t bufLen; + char* buf; + int8_t numOfResult; //zero or one +} SUdfInterBuf; +``` + +为了更好的操作以上数据结构,提供了一些便利函数,定义在 taosudf.h。 + ## 编译 UDF 用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为 动态链接库,之后才能载入 TDengine 系统。 @@ -100,52 +124,49 @@ gcc -g -O0 -fPIC -shared add_one.c -o add_one.so 用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 -在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外, UDF 支持输入与输出类型不一致,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。 +在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。 - 创建标量函数 ```sql -CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; +CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type; ``` - - ids(X):标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - - ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; - - typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; - - B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 512,如果不使用可以不设置。 + - function_name:标量函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udf 的实际名称一致; + - library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; + - output_type:此函数计算结果的数据类型名称; - 例如,如下语句可以把 add_one.so 创建为系统中可用的 UDF: + 例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF: ```sql - CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT; + CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT; ``` - 创建聚合函数: ```sql -CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; +CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ]; ``` - - ids(X):聚合函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - - ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; - - typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; - - B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 
512,如果不使用可以不设置。 + - function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udf 的实际名称一致; + - library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; + - output_type:此函数计算结果的数据类型名称; + - buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。 - 关于中间计算结果的使用,可以参考示例程序[demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) - - 例如,如下语句可以把 demo.so 创建为系统中可用的 UDF: + 例如,如下语句可以把 libsqrsum.so 创建为系统中可用的 UDF: ```sql - CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14; + CREATE AGGREGATE FUNCTION sqr_sum AS "/home/taos/udf_example/libsqrsum.so" OUTPUTTYPE DOUBLE bufsize 8; ``` ### 管理 UDF - 删除指定名称的用户定义函数: ``` -DROP FUNCTION ids(X); +DROP FUNCTION function_name; ``` -- ids(X):此参数的含义与 CREATE 指令中的 ids(X) 参数一致,也即要删除的函数的名字,例如 +- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如 ```sql -DROP FUNCTION add_one; +DROP FUNCTION bit_and; ``` - 显示系统中当前可用的所有 UDF: ```sql SHOW FUNCTIONS; ``` 在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如: ```sql -SELECT X(c) FROM table/stable; +SELECT X(c1,c2) FROM table/stable; ``` -表示对名为 c 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 +表示对名为 c1, c2 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 -## UDF 的一些使用限制 - -在当前版本下,使用 UDF 存在如下这些限制: - -1. 在创建和调用 UDF 时,服务端和客户端都只支持 Linux 操作系统; -2. UDF 不能与系统内建的 SQL 函数混合使用,暂不支持在一条 SQL 语句中使用多个不同名的 UDF ; -3. UDF 只支持以单个数据列作为输入; -4. UDF 只要创建成功,就会被持久化存储到 MNode 节点中; -5. 无法通过 RESTful 接口来创建 UDF; -6. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。 ## 示例代码 -### 标量函数示例 [add_one](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) +### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
-add_one.c +bit_and.c ```c -{{#include tests/script/sh/add_one.c}} +{{#include tests/script/sh/bit_and.c}} ```
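+
+除引用完整的 bit_and.c 外,这里再给出一个标量 UDF 的最小骨架,仅作示意(函数名 my_copy 为假设,作用是把输入第一列按行原样复制到输出列,并假设输入列为 INT 等定长类型;udfColDataIsNull 等便利函数以 taosudf.h 的实际定义为准):
+
+```c
+#include "taosudf.h"
+
+DLL_EXPORT int32_t my_copy_init() { return 0; }
+DLL_EXPORT int32_t my_copy_destroy() { return 0; }
+
+/* 标量计算入口:逐行读取输入块第一列,写入输出列 */
+DLL_EXPORT int32_t my_copy(SUdfDataBlock* block, SUdfColumn* resultCol) {
+  for (int32_t i = 0; i < block->numOfRows; ++i) {
+    if (udfColDataIsNull(block->udfCols[0], i)) {
+      udfColDataSetNull(resultCol, i);  /* 输入为 NULL 则输出 NULL */
+      continue;
+    }
+    char* data = udfColDataGetData(block->udfCols[0], i);
+    udfColDataSet(resultCol, i, data, false);
+  }
+  resultCol->colData.numOfRows = block->numOfRows;
+  return 0;  /* 0 表示成功 */
+}
+```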
-### 向量函数示例 [abs_max](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) +### 聚合函数示例 [sqr_sum](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sqr_sum.c)
-abs_max.c +sqr_sum.c ```c -{{#include tests/script/sh/abs_max.c}} -``` - -
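+
+同样给出一个聚合 UDF 的最小骨架,仅作示意(函数名 my_sum 为假设,对第一列 INT 值求和并输出 BIGINT;中间结果为 8 字节累加器,需与创建函数时的 BUFSIZE 对应;numOfResult 的具体语义请以 sqr_sum.c 等官方示例为准):
+
+```c
+#include "taosudf.h"
+
+DLL_EXPORT int32_t my_sum_init() { return 0; }
+DLL_EXPORT int32_t my_sum_destroy() { return 0; }
+
+/* udf_start:生成初始中间结果 */
+DLL_EXPORT int32_t my_sum_start(SUdfInterBuf* buf) {
+  *(int64_t*)(buf->buf) = 0;
+  buf->bufLen = sizeof(int64_t);
+  buf->numOfResult = 0;  /* 尚未产生结果 */
+  return 0;
+}
+
+/* udf:用一个数据块更新中间结果 */
+DLL_EXPORT int32_t my_sum(SUdfDataBlock* block, SUdfInterBuf* interBuf, SUdfInterBuf* newInterBuf) {
+  int64_t sum = *(int64_t*)(interBuf->buf);
+  for (int32_t i = 0; i < block->numOfRows; ++i) {
+    if (udfColDataIsNull(block->udfCols[0], i)) continue;  /* 跳过 NULL */
+    sum += *(int32_t*)udfColDataGetData(block->udfCols[0], i);
+  }
+  *(int64_t*)(newInterBuf->buf) = sum;
+  newInterBuf->bufLen = sizeof(int64_t);
+  newInterBuf->numOfResult = 1;
+  return 0;
+}
+
+/* udf_finish:从中间结果产生最终结果(0 或 1 条) */
+DLL_EXPORT int32_t my_sum_finish(SUdfInterBuf* interBuf, SUdfInterBuf* result) {
+  *(int64_t*)(result->buf) = *(int64_t*)(interBuf->buf);
+  result->bufLen = sizeof(int64_t);
+  result->numOfResult = 1;
+  return 0;
+}
+```
+
+对应的创建语句形如(假设动态库名为 libmysum.so):CREATE AGGREGATE FUNCTION my_sum AS "/path/to/libmysum.so" OUTPUTTYPE BIGINT BUFSIZE 8;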
- -### 使用中间计算结果示例 [demo](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) - -
-demo.c - -```c -{{#include tests/script/sh/demo.c}} +{{#include tests/script/sh/sqr_sum.c}} ```
diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md index e5bc800de7..1e17870685 100644 --- a/docs/zh/12-taos-sql/22-meta.md +++ b/docs/zh/12-taos-sql/22-meta.md @@ -15,21 +15,21 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们 本章将详细介绍 `INFORMATION_SCHEMA` 这个内置元数据库中的表和表结构。 -## DNODES +## INS_DNODES 提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 -| # | **列名** | **数据类型** | **说明** | -| --- | :------------: | ------------ | --------------------- | -| 1 | vnodes | SMALLINT | dnode 中的 vnode 个数 | -| 2 | support_vnodes | SMALLINT | 支持的 vnode 个数 | -| 3 | status | BINARY(10) | 当前状态 | -| 4 | note | BINARY(256) | 离线原因等信息 | -| 5 | id | SMALLINT | dnode id | -| 6 | endpoint | BINARY(134) | dnode 的地址 | -| 7 | create | TIMESTAMP | 创建时间 | +| # | **列名** | **数据类型** | **说明** | +| --- | :------------: | ------------ | ------------------------- | +| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数 | +| 2 | support_vnodes | SMALLINT | 最多支持的 vnode 个数 | +| 3 | status | BINARY(10) | 当前状态 | +| 4 | note | BINARY(256) | 离线原因等信息 | +| 5 | id | SMALLINT | dnode id | +| 6 | endpoint | BINARY(134) | dnode 的地址 | +| 7 | create | TIMESTAMP | 创建时间 | -## MNODES +## INS_MNODES 提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。 @@ -41,7 +41,7 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们 | 4 | role_time | TIMESTAMP | 成为当前角色的时间 | | 5 | create_time | TIMESTAMP | 创建时间 | -## MODULES +## INS_MODULES 提供组件的相关信息。也可以使用 SHOW MODULES 来查询这些信息 @@ -51,46 +51,74 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们 | 2 | endpoint | BINARY(134) | 组件的地址 | | 3 | module | BINARY(10) | 组件状态 | -## QNODES +## INS_QNODES 当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。 | # | **列名** | **数据类型** | **说明** | | --- | :---------: | ------------ | ------------ | -| 1 | id | SMALLINT | module id | +| 1 | id | SMALLINT | qnode id | | 2 | endpoint | BINARY(134) | qnode 的地址 | | 3 | create_time | TIMESTAMP | 创建时间 | -## USER_DATABASES +## INS_CLUSTER + +存储集群相关信息。 + +| # | **列名** | **数据类型** | **说明** | +| --- | :---------: | ------------ | ---------- | +| 1 | id | BIGINT | cluster id | +| 2 | name | BINARY(134) | 集群名称 | +| 3 | create_time | TIMESTAMP | 创建时间 | + +## INS_DATABASES 提供用户创建的数据库对象的相关信息。也可以使用 SHOW DATABASES 来查询这些信息。 -TODO +| # | **列名** | **数据类型** | **说明** | +| --- | :------------------: | ---------------- | ------------------------------------------------ | +| 1 | name | BINARY(32) | 数据库名 | +| 2 | create_time | TIMESTAMP | 创建时间 | +| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 | +| 4 | vgroups | INT | 数据库中有多少个 vgroup | +| 6 | replica | INT | 副本数 | +| 7 | quorum | BINARY(3) | 强一致性 | +| 8 | duration | INT | 单文件存储数据的时间跨度 | +| 9 | keep | INT | 数据保留时长 | +| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB | +| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB | +| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数 | +| 13 | minrows | INT | 文件块中记录的最大条数 | +| 14 | maxrows | INT | 文件块中记录的最小条数 | +| 15 | comp | INT | 数据压缩方式 | +| 16 | precision | BINARY(2) | 时间分辨率 | +| 17 | status | BINARY(10) | 数据库状态 | +| 18 | retention | BINARY (60) | 数据的聚合周期和保存时长 | +| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表 | +| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据 | +| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小 | +| 22 | wal_level | INT | WAL 级别 | +| 23 | wal_fsync_period | INT | 数据落盘周期 | +| 24 | wal_retention_period | INT | WAL 的保存时长 | +| 25 | wal_retention_size | INT | WAL 的保存上限 | +| 26 | wal_roll_period | INT | wal 文件切换时长 | +| 27 | wal_segment_size | wal 单个文件大小 | -| # | **列名** | **数据类型** | **说明** | -| --- | :---------: | ------------ | ------------------------------------------------ | -| 1 | name | BINARY(32) | 
数据库名 | -| 2 | create_time | TIMESTAMP | 创建时间 | -| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 | -| 4 | vgroups | INT | 数据库中有多少个 vgroup | -| 5 | replica | INT | 副本数 | -| 6 | quorum | INT | 写成功的确认数 | -| 7 | days | INT | 单文件存储数据的时间跨度 | -| 8 | keep | INT | 数据保留时长 | -| 9 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB | -| 10 | minrows | INT | 文件块中记录的最大条数 | -| 11 | maxrows | INT | 文件块中记录的最小条数 | -| 12 | wal_level | INT | WAL 级别 | -| 13 | walfsync_period | INT | 数据落盘周期 | -| 14 | comp | INT | 数据压缩方式 | -| 15 | precision | BINARY(2) | 时间分辨率 | -| 16 | status | BINARY(10) | 数据库状态 | +## INS_FUNCTIONS -## USER_FUNCTIONS +用户创建的自定义函数的信息。 -TODO +| # | **列名** | **数据类型** | **说明** | +| --- | :---------: | ------------ | -------------- | +| 1 | name | BINARY(64) | 函数名 | +| 2 | comment | BINARY(255) | 补充说明 | +| 3 | aggregate | INT | 是否为聚合函数 | +| 4 | output_type | BINARY(31) | 输出类型 | +| 5 | create_time | TIMESTAMP | 创建时间 | +| 6 | code_len | INT | 代码长度 | +| 7 | bufsize | INT | buffer 大小 | -## USER_INDEXES +## INS_INDEXES 提供用户创建的索引的相关信息。也可以使用 SHOW INDEX 来查询这些信息。 @@ -103,7 +131,7 @@ TODO | 5 | index_type | BINARY(10) | 目前有 SMA 和 FULLTEXT | | 6 | index_extensions | BINARY(256) | 索引的额外信息。对 SMA 类型的索引,是函数名的列表。对 FULLTEXT 类型的索引为 NULL。 | -## USER_STABLES +## INS_STABLES 提供用户创建的超级表的相关信息。 @@ -120,19 +148,7 @@ TODO | 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟 | | 10 | rollup | BINARY(128) | rollup 聚合函数 | -## USER_STREAMS - -提供用户创建的流计算的相关信息。 - -| # | **列名** | **数据类型** | **说明** | -| --- | :---------: | ------------ | --------------------------- | -| 1 | stream_name | BINARY(192) | 流计算名称 | -| 2 | user_name | BINARY(23) | 创建流计算的用户 | -| 3 | dest_table | BINARY(192) | 流计算写入的目标表 | -| 4 | create_time | TIMESTAMP | 创建时间 | -| 5 | sql | BLOB | 创建流计算时提供的 SQL 语句 | - -## USER_TABLES +## INS_TABLES 提供用户创建的普通表和子表的相关信息 @@ -149,7 +165,18 @@ TODO | 9 | table_comment | BINARY(1024) | 表注释 | | 10 | type | BINARY(20) | 表类型 | -## USER_USERS +## INS_TAGS + +| # | **列名** | **数据类型** | **说明** | +| --- | :---------: | ------------- | ---------------------- | +| 1 | table_name | BINARY(192) | 表名 | +| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 | +| 3 | stable_name | BINARY(192) | 所属的超级表表名 | +| 4 | tag_name | BINARY(64) | tag 的名称 | +| 5 | tag_type | BINARY(64) | tag 的类型 | +| 6 | tag_value | BINARY(16384) | tag 的值 | + +## INS_USERS 提供系统中创建的用户的相关信息。 @@ -159,21 +186,62 @@ TODO | 2 | privilege | BINARY(256) | 权限 | | 3 | create_time | TIMESTAMP | 创建时间 | -## VGROUPS +## INS_GRANTS + +提供企业版授权的相关信息。 + +| # | **列名** | **数据类型** | **说明** | +| --- | :---------: | ------------ | -------------------------------------------------- | +| 1 | version | BINARY(9) | 企业版授权说明:official(官方授权的)/trial(试用的) | +| 2 | cpu_cores | BINARY(9) | 授权使用的 CPU 核心数量 | +| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量 | +| 4 | streams | BINARY(10) | 授权创建的流数量 | +| 5 | users | BINARY(10) | 授权创建的用户数量 | +| 6 | accounts | BINARY(10) | 授权创建的帐户数量 | +| 7 | storage | BINARY(21) | 授权使用的存储空间大小 | +| 8 | connections | BINARY(21) | 授权使用的客户端连接数量 | +| 9 | databases | BINARY(11) | 授权使用的数据库数量 | +| 10 | speed | BINARY(9) | 授权使用的数据点每秒写入数量 | +| 11 | querytime | BINARY(9) | 授权使用的查询总时长 | +| 12 | timeseries | BINARY(21) | 授权使用的测点数量 | +| 13 | expired | BINARY(5) | 是否到期,true:到期,false:未到期 | +| 14 | expire_time | BINARY(19) | 试用期到期时间 | + +## INS_VGROUPS 系统中所有 vgroups 的信息。 -| # | **列名** | **数据类型** | **说明** | -| --- | :--------: | ------------ | ---------------------------- | -| 1 | vg_id | INT | vgroup id | -| 2 | db_name | BINARY(32) | 数据库名 | -| 3 | tables | INT | 此 vgroup 内有多少表 | -| 4 | status | BINARY(10) | 此 vgroup 的状态 | -| 5 | 
onlines | INT | 在线的成员数目 | -| 6 | v1_dnode | INT | 第一个成员所在的 dnode 的 id | -| 7 | v1_status | BINARY(10) | 第一个成员的状态 | -| 8 | v2_dnode | INT | 第二个成员所在的 dnode 的 id | -| 9 | v2_status | BINARY(10) | 第二个成员的状态 | -| 10 | v3_dnode | INT | 第三个成员所在的 dnode 的 id | -| 11 | v3_status | BINARY(10) | 第三个成员的状态 | -| 12 | compacting | INT | compact 状态 | +| # | **列名** | **数据类型** | **说明** | +| --- | :-------: | ------------ | ------------------------------------------------------ | +| 1 | vgroup_id | INT | vgroup id | +| 2 | db_name | BINARY(32) | 数据库名 | +| 3 | tables | INT | 此 vgroup 内有多少表 | +| 4 | status | BINARY(10) | 此 vgroup 的状态 | +| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id | +| 6 | v1_status | BINARY(10) | 第一个成员的状态 | +| 7 | v2_dnode | INT | 第二个成员所在的 dnode 的 id | +| 8 | v2_status | BINARY(10) | 第二个成员的状态 | +| 9 | v3_dnode | INT | 第三个成员所在的 dnode 的 id | +| 10 | v3_status | BINARY(10) | 第三个成员的状态 | +| 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 | +| 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 | +| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是, 0: 否 | + +## INS_CONFIGS + +系统配置参数。 + +| # | **列名** | **数据类型** | **说明** | +| --- | :------: | ------------ | ------------ | +| 1 | name | BINARY(32) | 配置项名称 | +| 2 | value | BINARY(64) | 该配置项的值 | + +## INS_DNODE_VARIABLES + +系统中每个 dnode 的配置参数。 + +| # | **列名** | **数据类型** | **说明** | +| --- | :------: | ------------ | ------------ | +| 1 | dnode_id | INT | dnode 的 ID | +| 2 | name | BINARY(32) | 配置项名称 | +| 3 | value | BINARY(64) | 该配置项的值 | diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md new file mode 100644 index 0000000000..781f94324c --- /dev/null +++ b/docs/zh/12-taos-sql/24-show.md @@ -0,0 +1,270 @@ +--- +sidebar_label: SHOW 命令 +title: 使用 SHOW 命令查看系统元数据 +--- + +除了使用 `select` 语句查询 `INFORMATION_SCHEMA` 数据库中的表获得系统中的各种元数据、系统信息和状态之外,也可以用 `SHOW` 命令来实现同样的目的。 + +## SHOW ACCOUNTS + +```sql +SHOW ACCOUNTS; +``` + +显示当前系统中所有租户的信息。 + +注:企业版独有 + +## SHOW APPS + +```sql +SHOW APPS; +``` + +显示接入集群的应用(客户端)信息。 + +## SHOW BNODES + +```sql +SHOW BNODES; +``` + +显示当前系统中存在的 BNODE (backup node, 即备份节点)的信息。 + +## SHOW CLUSTER + +```sql +SHOW CLUSTER; +``` + +显示当前集群的信息 + +## SHOW CONNECTIONS + +```sql +SHOW CONNECTIONS; +``` + +显示当前系统中存在的连接的信息。 + +## SHOW CONSUMERS + +```sql +SHOW CONSUMERS; +``` + +显示当前数据库下所有活跃的消费者的信息。 + +## SHOW CREATE DATABASE + +```sql +SHOW CREATE DATABASE db_name; +``` + +显示 db_name 指定的数据库的创建语句。 + +## SHOW CREATE STABLE + +```sql +SHOW CREATE STABLE [db_name.]stb_name; +``` + +显示 tb_name 指定的超级表的创建语句 + +## SHOW CREATE TABLE + +```sql +SHOW CREATE TABLE [db_name.]tb_name +``` + +显示 tb_name 指定的表的创建语句。支持普通表、超级表和子表。 + +## SHOW DATABASES + +```sql +SHOW DATABASES; +``` + +显示用户定义的所有数据库。 + +## SHOW DNODES + +```sql +SHOW DNODES; +``` + +显示当前系统中 DNODE 的信息。 + +## SHOW FUNCTIONS + +```sql +SHOW FUNCTIONS; +``` + +显示用户定义的自定义函数。 + +## SHOW LICENSE + +```sql +SHOW LICENSE; +SHOW GRANTS; +``` + +显示企业版许可授权的信息。 + +注:企业版独有 + +## SHOW INDEXES + +```sql +SHOW INDEXES FROM tbl_name [FROM db_name]; +``` + +显示已创建的索引。 + +## SHOW LOCAL VARIABLES + +```sql +SHOW LOCAL VARIABLES; +``` + +显示当前客户端配置参数的运行值。 + +## SHOW MNODES + +```sql +SHOW MNODES; +``` + +显示当前系统中 MNODE 的信息。 + +## SHOW MODULES + +```sql +SHOW MODULES; +``` + +显示当前系统中所安装的组件的信息。 + +## SHOW QNODES + +```sql +SHOW QNODES; +``` + +显示当前系统中 QNODE (查询节点)的信息。 + +## SHOW SCORES + +```sql +SHOW SCORES; +``` + +显示系统被许可授权的容量的信息。 + +注:企业版独有 + +## SHOW SNODES + +```sql +SHOW SNODES; +``` + +显示当前系统中 SNODE (流计算节点)的信息。 + +## SHOW STABLES + +```sql +SHOW [db_name.]STABLES [LIKE 'pattern']; +``` + 
+显示当前数据库下的所有超级表的信息。可以使用 LIKE 对表名进行模糊匹配。 + +## SHOW STREAMS + +```sql +SHOW STREAMS; +``` + +显示当前系统内所有流计算的信息。 + +## SHOW SUBSCRIPTIONS + +```sql +SHOW SUBSCRIPTIONS; +``` + +显示当前数据库下的所有的订阅关系 + +## SHOW TABLES + +```sql +SHOW [db_name.]TABLES [LIKE 'pattern']; +``` + +显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。 + +## SHOW TABLE DISTRIBUTED + +```sql +SHOW TABLE DISTRIBUTED table_name; +``` + +显示表的数据分布信息。 + +## SHOW TAGS + +```sql +SHOW TAGS FROM child_table_name [FROM db_name]; +``` + +显示子表的标签信息。 + +## SHOW TOPICS + +```sql +SHOW TOPICS; +``` + +显示当前数据库下的所有主题的信息。 + +## SHOW TRANSACTIONS + +```sql +SHOW TRANSACTIONS; +``` + +显示当前系统中正在执行的事务的信息 + +## SHOW USERS + +```sql +SHOW USERS; +``` + +显示当前系统中所有用户的信息。包括用户自定义的用户和系统默认用户。 + +## SHOW VARIABLES + +```sql +SHOW VARIABLES; +SHOW DNODE dnode_id VARIABLES; +``` + +显示当前系统中各节点需要相同的配置参数的运行值,也可以指定 DNODE 来查看其的配置参数。 + +## SHOW VGROUPS + +```sql +SHOW [db_name.]VGROUPS; +``` + +显示当前系统中所有 VGROUP 或某个 db 的 VGROUPS 的信息。 + +## SHOW VNODES + +```sql +SHOW VNODES [dnode_name]; +``` + +显示当前系统中所有 VNODE 或某个 DNODE 的 VNODE 的信息。 diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md index bd8d61a584..1292206311 100644 --- a/docs/zh/12-taos-sql/26-udf.md +++ b/docs/zh/12-taos-sql/26-udf.md @@ -8,21 +8,30 @@ title: 用户自定义函数 ## 创建函数 ```sql -CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value] +CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE buffer_size] ``` 语法说明: AGGREGATE:标识此函数是标量函数还是聚集函数。 -func_name:函数名,必须与函数实现中udfNormalFunc的实际名称一致。 +func_name:函数名,必须与函数实现中 udf 的实际名称一致。 library_path:包含UDF函数实现的动态链接库的绝对路径,是在客户端侧主机上的绝对路径。 -OUTPUTTYPE:标识此函数的返回类型。 -BUFSIZE:中间结果的缓冲区大小,单位是字节。不设置则默认为0。最大不可超过512字节。 +type_name:标识此函数的返回类型。 +buffer_size:中间结果的缓冲区大小,单位是字节。不设置则默认为0。 关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf)。 ## 删除自定义函数 +``` +DROP FUNCTION function_name; +``` + +- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如 + + +## 显示 UDF + ```sql -DROP FUNCTION func_name -``` \ No newline at end of file +SHOW FUNCTION; +``` diff --git a/docs/zh/14-reference/03-connector/01-error-code.md b/docs/zh/14-reference/03-connector/01-error-code.md new file mode 100644 index 0000000000..53e006e108 --- /dev/null +++ b/docs/zh/14-reference/03-connector/01-error-code.md @@ -0,0 +1,481 @@ +--- +sidebar_label: 错误码 +title: TDengine C/C++ 连接器错误码 +--- + +本文中详细列举了在使用 TDengine C/C++ 连接器时客户端可能得到的错误码以及所要采取的相应动作。其它语言的连接器在使用原生连接方式时也会所得到的返回码返回给连接器的调用者。 + + | **Error Code** | **说明** | 如何处理错误 | + | -------------- | ------------------ | ------------ | + | 0 | 请求处理成功 | None | + | -1 | 请求失败,未知原因 | TODO | + | 0x0003 | RPC 认证失败 | TODO | + | 0x0004 | RPC 重定向 |TODO| + | 0x000B | 无法建立连接 |TODO| + | 0x0015 | FQDN 解析失败 |检查各个dnode配置的FQDN是否正确,并且能够从其它节点 ping 到 | + | 0x0017 | 端口已经被占用 | 检查所配置的端口被哪个进程占用,关闭该进程以释放端口;或者更改配置使用另一端口号 | + | 0x0018 | 连接被断开 | 检查 dnode 进程是否还在,如果无异常检查网络情况 | + | 0x0013 | 客户端和服务端的时间未同步 | 检查客户端和服务端的时间设置是否同步| + | 0x0014 | 数据库不可用 |检查数据库是否存在,检查数据库的vgroups的状态 | + | 0x0100 | 该操作不支持 | 参考SQL手册,使用正确的操作 | + | 0x0102 | 无法分配内存,系统内存耗尽 | 检查系统中内存被耗尽的原因,采取措施释放内存。如果内存是被 dnode 耗尽的话重启该进程 | + | 0x0104 | 文件被破坏 | TODO | + | 0x0111 | 操作在进行中 | 等待该操作完成 | + | 0x0115 | 无效的消息 | TODO | + | 0x0116 | 无效的消息长度 | TODO | + | 0x0117 | 无效指针 | TODO | + | 0x0118 | 无效参数 | | + | 0x0119 | 无效配置 | 检查配置参数的值是否在合法值域范围内 | + | 0x011A | 无效选项 | 检查配置中是否有不支持的无效配置项 | + | 0x011B | 无效的JSON 格式 | 修正插入数据中的JSON值| + | 0x011C | 无效版本号 | 检查客户端版本是否匹配,更换正确的客户端 | + | 0x011D | 无效版本信息 | 检查客户端版本是否匹配,更换正确的客户端 | + | 0x011E | 版本不匹配 | TODO | + | 0x011F | 校验和错误 | TODO | + | 0x0120 
| 数据压缩失败 | TODO | + | 0x0121 | 消息未处理 | TODO | + | 0x0122 | 配置未找到 | TODO| + | 0x0123 | 重复初始化 | TODO | + | 0x0124 | 无法将重复的key加入hash| TODO | + | 0x0125 | 需要重试 | 重试 | + | 0x0126 | RPC 队列内存耗尽 | TODO | + | 0x0127 | 无效的时间戳 | 修正插入数据 | + | 0x0128 | 消息解码失败 | 检查客户端版本是否匹配和兼容 | + | 0x0129 | 磁盘耗尽 | TODO | + | 0x012A | TODO | TODO | + | 0x0200 | 无效操作 | 检查SQL命令,根据SQL手册使用正确的操作| + | 0x0201 | 无效的查询句柄 | TODO | + | 0x0202 | 无效的客户端/服务端时间组合 | TODO | + | 0x0203 | 无效值 | TODO | + | 0x0204 | 无效的客户端版本 | 检查客户端版本是否匹配,更换为正确的客户端版本 | + | 0x0205 | TODO | TODO | + | 0x0206 | 无效的 FQDN | 检查服务器 fqdn 配置是否正确,是否能够从其它服务器访问到该 fqdn | + | 0x0207 | 用户名长度过长 | 检查用户名长度是否过长 | + | 0x0208 | 密码长度过长 | 检查密码长度是否过长 | + | 0x0209 | 数据库名字长度过长 | 检查数据库名是否过长 | + | 0x020A | 表名长度过长 | 检查表名是否过长 | + | 0x020B | 无效连接 | 检查该连接是否已经断开 | + | 0x020C | 系统内存耗尽 | 检查系统中内存被耗尽的原因,尝试释放出内存 | + | 0x020D | 磁盘空间耗尽 | 检查系统中磁盘耗尽的原因,尝试释放或增加磁盘空间 | + | 0x020E | 查询缓存被清除 |TODO | + | 0x020F | 查询已经被结束 | 尝试优化查询条件再重启查询 | + | 0x0210 | 结果集过大无法存储 | 尝试优化查询条件,缩小结果集,再重启查询 | + | 0x0211 | 数据库不可用 | 查看数据库是否存在,查看数据库的 vgroups 状态 | + | 0x0212 | 操作正在进行中 | 等待操作完成 | + | 0x0213 | 连接被服务端断开 | 查看服务端进程是否发生 crash 等异常终止情况 | + | 0x0214 | 没有写权限 | 只进行读操作或者尝试获取写权限 | + | 0x0215 | 连接被 kill | 重新建立连接 | + | 0x0216 | SQL 语法错误 | 参考 SQL手册,纠正语法错误再重试 | + | 0x0217 | 未指定数据库或者指定的数据库不可用 | 指定数据库,或者检查指定数据库的状态 | + | 0x0218 | 所查询的表不存在 | 确认表名后纠正查询语句 | + | 0x0219 | SQL 语句超长 | 根据 maxSQLLength 缩短SQL语句或者加大maxSQLLength (如果还未配置到上限)| + | 0x021A | 空文件 | TODO | + | 0x021B | Line 协议语法错误 | 纠正插入语句 | + | 0x021C | 元数据未被缓存 | TODO | + | 0x021D | 重复的列名 | 纠正SQL语句中的相应错误 | + | 0x021E | tag 过长 | 纠正过长的 tag | + | 0x021F | 列名过长 | 纠正过长的列名 | + | 0x0220 | 重复的表名**TODO** | TODO| + | 0x0221 | JSON 格式错误 | 纠正错误的 JSON 结构 | + | 0x0222 | JSON 中使用了无效的数据类型 | 纠正 JSON 结构中的错误 | + | 0x0224 | 超出了所支持的值域 | 纠正到值域范围内 | + | 0x0229 | 无效的 tsc 输入 **TODO** | TODO | + | 0x022A | stmt API 使用错误 | 根据参考手册正确使用 | + | 0x022B | stmt 使用时未指定表名 | 指定表名 | + | 0x022C | 不支持 stmt 子名 | 根据参考手册纠正错误用法 | + | 0x022D | 查询被 kill | 优化查询语句,尽量减小计算量和结果集,然后重新启动查询 | + | 0x022E | 在当前配置的查询策略下没有可用的计算节点 | 创建新的 qnode | + | 0x022F | 所指定的表不是超级表 | 确认下该查询场景适用于超级表还是子表/普通表,如果是前者则纠正为超级表名 | + | 0x0303 | 没有权限进行所发起的操作 | 申请权限或调整操作 | + | 0x0304 | 管理节点内部错误 | TODO | + | 0x0305 | 无效连接 | TODO | + | 0x030B | 所要展示的操作其数据已经因为超时而被删除 | 更换想要展示操作或者放弃此次操作 | + | 0x030C | 无效的查询ID| 确认正确的查询ID再重新发起 | + | 0x030D | 无效的流ID| 确认正确的流ID再重新发起| + | 0x030E | 无效的连接ID| 确认正确的连接ID再重新发起| + | 0x0310 | mnode已经在运行 | 无须采取任何动作 | + | 0x0311 | 配置同步失败 | TODO | + | 0x0312 | 无法启动同步 | TODO | + | 0x0313 | 无法创建 mnode 对应的目录 | 确认磁盘上是否有可用空间以及是否有相应的写权限 | + | 0x0314 | 启动组件失败 | TODO | + | 0x0315 | 用户帐号被禁用 | 联系管理员激活该帐号 | + | 0x0320 | 元数据中已经存在所要创建的对象 | 检查所要创建的对象,比如超级表或表,是否已经存在 | + | 0x0321 | 元数据库中非预期的一般性错误 | TODO | + | 0x0322 | 无效的表类型 | TODO | + | 0x0323 | 所要查找的对象不存在 | TODO | + | 0x0325 | 无效的 key 类型 | TODO | + | 0x0326 | 无效的动作类型 | TODO | + | 0x0327 | 无效的状态类型 | TODO | + | 0x0328 | 无效的原始数据版本 | TODO | + | 0x0329 | 无效的原始数据长度 | TODO | + | 0x032A | 无效的原始数据内容 | TODO | + | 0x032B | 无效的 wal 版本 | TODO | + | 0x032C | 对象创建中 | TODO | + | 0x032D | 对象停止中 | TODO | + | 0x0330 | dnode 已经存在 | 无需任何动作,放弃重复创建dnode的操作 | + | 0x0331 | dnode 不存在 | 确认所要查询或者操作的dnode ID 或者 end point 是否正确 | + | 0x0332 | vgroup 不存在 | 确认所要查询或者操作的vgroup ID 是否正确 | + | 0x0333 | 系统拒绝 drop 其角色是 leader 的 mnode | 放弃该操作 | + | 0x0334 | 没有足够的 dnode 创建所指定的 vgroups | 增加 dnode 或者修改现有dnode的配置参数 `supportVgroups` | + | 0x0335 | 集群中各个dnode的配置不一致 | 检查各个dnode的配置参数确保其一致 | + | 0x0338 | 所要查询或操作的vgroup不在所指定的dnode中| 检查vgroup ID 和 dnode ID是否正确 | + | 0x0339 | 所要查询或操作的 vgroup 已经在所指定的dnode中 | 检查 vgroup ID 和 dnode ID是否正确 | + | 0x033B | 集群 ID 不匹配 | TODO | + | 
0x0340 | 该帐户已经存在 | 放弃重复创建帐户的操作 | + | 0x0342 | 无效的帐户选项 | 检查创建帐户时的参数选项是否正确 | + | 0x0343 | 帐户授权已经过期 | 联系管理员重新授权 | + | 0x0344 | 无效帐户 | 联系管理员确认帐户 | + | 0x0345 | 操作的帐户过多,无法支持 | 减少同时操作的帐户数 | + | 0x0350 | 用户已经存在 | 放弃重复创建用户的操作 | + | 0x0351 | 无效用户 | 检查并确认正确用户 | + | 0x0352 | 无效的用户名格式 | 查看参考手册修改用户名 | + | 0x0353 | 无效的密码格式 | 查看参考手册修改密码 | + | 0x0354 | 无法从连接中获取用户名 | 检查客户端环境初始化是否使用了正确的用户名 | + | 0x0355 | 一次尝试操作的用户过多 | 查看参考手册减少同时操作的用户数 | + | 0x0356 | 无效的修改操作 | 查看参考手册使用正确的操作 | + | 0x0357 | 认证失败 | 使用正确的用户名和密码 | + | 0x0360 | 要创建的超级表已经存在 | 放弃重复创建的操作或者删除该超级表再重新创建 | + | 0x0362 | 所使用或查询的超级表不存在 | 确认超级表名是否正确,如果正确则需要先创建超级表 | + | 0x0364 | 标签过多 | 查看参考手册减少标签数量 | + | 0x0365 | 列过多 | 查看参考手册减少列数量 | + | 0x0369 | 要添加的标签已经存在 | 修改标签名 | + | 0x036A | 要查询或修改的标签不存在 | 确认标签名是否正确 | + | 0x036B | 要添加的列已经存在 | 修改列名或者放弃该操作 | + | 0x036C | 要查询或修改的列不存在 | 确认列名是否正确 | + | 0x036E | 无效的超级表操作 | 查看参考手册进行正确的操作 | + | 0x036F | 错误的行字节数 | TODO | + | 0x0370 | 无效的函数名 | 确认函数名是否正确 | + | 0x0372 | 无效的函数代码 | 无效的函数编码 | + | 0x0373 | 该函数已经存在 | 修改函数名或者放弃该操作| + | 0x0374 | 所引用的函数不存在 | 确认函数名是否正确 | + | 0x0375 | 无效的 bufSize | 查看参考手册修改 bufSize | + | 0x0378 | 无效的函数注释 | 查看参考手册修改函数注释 | + | 0x0379 | 无效的函数检索消息 | TODO | + | 0x0380 | 未指定数据库或者指定的数据库不可用 | 指定数据库,或者检查所指定的数据库的状态 | + | 0x0381 | 数据库已经存在 | 放弃重复创建,或者修改数据库名 | + | 0x0382 | 无效的数据库参数 | 查看参考手册使用正确的参数 | + | 0x0383 | 无效的数据库名称 | 查看参考手册使用正确的数据库名 | + | 0x0385 | 该帐号下的数据库过多 | 删除旧的数据库再尝试创建新数据库 | + | 0x0388 | 数据库不存在 | 确认数据库名是否正确 | + | 0x0389 | 无效的数据库帐户 | 确认帐户是否正确 | + | 0x038A | 数据库参数未修改 | 查看参考手册确认修改的参数和值是否正确 | + | 0x038B | 索引不存在 |确认索引名称是否正确 | + | 0x039A | 无效的系统表名 | 查看参考手册确认表名是否正确 | + | 0x03A0 | mnode 已经存在 | 放弃该操作 | + | 0x03A1 | mnode 不存在 | 确认要查看或操作的 mnode ID | + | 0x03A2 | qnode 已经存在 | 放弃该操作 | + | 0x03A3 | qnode 不存在 | 确认要查看或操作的 qnode ID 是否正确 | + | 0x03A8 | mnode 的 replica 不能小于1 | 停止 drop mnode | + | 0x03A9 | mnode 的 replica 不能大于3 | 停止 create mnode | + | 0x03B0 | dnode 数量过多 | 停止添加新的 dnode | + | 0x03B1 | dnode 没有足够的可用内存 | 检查所在系统的内存使用情况 ,尝试释放出内存 | + | 0x03B2 | 无效的 dnode 配置 | 查看参考手册纠正配置 | + | 0x03B3 | 无效的 dnode 地址 | 确认 dnode 的 FQDN 和 serverPort参数是否正确 | + | 0x03B4 | 无效的 dnode ID | 确认正确的 dnode ID | + | 0x03B5 | vgroup 的分布未发生变化 | TODO | + | 0x03B6 | 存在状态为 offline 的 dnode | drop 这些 dnode 或者启动相应的 dnode 使其状态为 ready | + | 0x03B7 | 无效的 vgroup 副本 | TODO | + | 0x03C0 | topic 与超级表冲突 | TODO | + | 0x03C1 | 订阅了过多的超级表 | 查看参考手册减少超级表数量 | + | 0x03C2 | 无效的 超级表修改参数 | 查看参考手册进行纠正 | + | 0x03C3 | 超级表参数未被修改 | 查看参考手册确认参数是否正确 | + | 0x03C4 | 该字段被某个主题所使用 | TODO | + | 0x03C5 | 该数据库是单超级表模式 | 修改数据库为多超级表模式或者放弃创建新的超级表 | + | 0x03C6 | 修改超级表使用了无效的 schema 版本 | TODO | + | 0x03C7 | 修改超级表使用了无效的超级表 ID | 确认超级表使用是否正确 | + | 0x03C8 | 该字段被 tsma 所使用 | TODO | + | 0x03D0 | 该事务已经存在 | TODO | + | 0x03D1 | 该事务不存在 | TODO | + | 0x03D2 | 要 kill 的 stage 不存在 | TODO | + | 0x03D3 | 冲突的事务没有完成 | TODO | + | 0x03D4 | 未知的事务错误 | TODO | + | 0x03D5 | 事务提交日志已满 | TODO | + | 0x03DF | 在执行事务时无法建立连接 | 等待事务完成尝试重新建立连接 | + | 0x03E0 | Topic 已经存在 | 修改 topic 名字或者放弃创建重复的 topic | + | 0x03E1 | Topic 不存在 | 确认 Topic 名字是否正确 | + | 0x03E2 | Topic 过多 | 尝试删除不用的 topic 再建立新的,或者放弃此次操作 | + | 0x03E3 | 无效的 Topic | 确认 Topic 是否正确 | + | 0x03E4 | 建立 Topic 的查询子名无效 | 查看参考手册纠正查询子名 | + | 0x03E5 | 建立 Topic 的参数无效 | 查看参考手册使用正确的参数 | + | 0x03E6 | 消费者不存在 | 确认正确的消费者 ID | + | 0x03E7 | 消费者未修改 | TODO | + | 0x03E8 | 订阅不存在 | 确认正确的订阅 ID | + | 0x03E9 | 偏移量不存在 | 纠正偏移量 | + | 0x03EA | 消费者不可用 | TODO | + | 0x03EB | 无法删除已经被订阅的 Topic | 先取消订阅再尝试删除 | + | 0x03EC | Consumer group正在被某些消费者使用 | TODO | + | 0x03F0 | 流已经存在 | 修改流名称或者放弃创建该流 | + | 0x03F1 | 要查询或操作的流不存在 | 确认正确的流 ID | + | 0x03F2 | 无效的流参数 | 查看参考手册纠正错误的参数 | + | 0x0480 | SMA 已经存在 | 修改 SMA 名称或者放弃创建 | + | 0x0481 | 
SMA 不存在 | 确认正确的 SMA 名称或者 ID |
+ | 0x0482 | SMA 参数错误 | 查看参考手册纠正参数 |
+ | 0x0408 | 节点不在线 | TODO |
+ | 0x0409 | 节点已经部署 | TODO |
+ | 0x040A | 节点未部署 | TODO |
+ | 0x0500 | 该动作正在进行中 | TODO |
+ | 0x0501 | 消息未被处理 | TODO |
+ | 0x0502 | 该动作需要被重新处理 | TODO |
+ | 0x0503 | 无效的 vgroup ID | 检查确认正确的 vgroup ID |
+ | 0x0504 | vnode 初始化失败 | TODO |
+ | 0x0505 | 系统磁盘空间耗尽 | 尝试释放或者增加磁盘空间 |
+ | 0x0506 | 对磁盘文件没有写权限 | 检查启动 TDengine 的系统帐号的写权限 |
+ | 0x0507 | 数据文件缺失 | TODO |
+ | 0x0508 | vnode 没有可用内存 | TODO |
+ | 0x0509 | vnode 中未预期的一般性错误 | TODO |
+ | 0x050C | 数据库无空闲内存 | TODO |
+ | 0x050D | 数据库正在删除中 | TODO |
+ | 0x050E | 数据库正在更新中 | TODO |
+ | 0x0510 | 数据库正在关闭中 | TODO |
+ | 0x0511 | 数据库被暂停操作 | TODO |
+ | 0x0512 | 数据库写操作被拒绝 | 检查用户权限,申请写操作授权 |
+ | 0x0513 | 数据库正在同步中 | TODO |
+ | 0x0514 | 无效的 tsdb 状态 | TODO |
+ | 0x0520 | 指定的表不存在 | 检查确认正确的表名 |
+ | 0x0521 | 指定的 SMA 不存在 | 检查确认正确的 SMA 名称 |
+ | 0x0522 | Hash 值不匹配 | TODO |
+ | 0x0523 | 指定的表不存在 | 检查确认正确的表名 |
+ | 0x0524 | 无效的表动作 | TODO |
+ | 0x0525 | 列名已经存在 | 修改列名或放弃操作 |
+ | 0x0526 | 列名不存在 | 确认正确的列名 |
+ | 0x0527 | 该列已经被订阅 | 先取消订阅再操作或者放弃操作 |
+ | 0x0528 | 无效的配置文件 | 检查配置文件的路径和访问权限 |
+ | 0x0529 | 无效的 term 文件 | TODO |
+ | 0x0600 | 无效的表 ID | 确认表名是否正确 |
+ | 0x0601 | 无效的表类型 | TODO |
+ | 0x0602 | 无效的 schema 版本 | TODO |
+ | 0x0603 | 表已经存在 | 修改表名或放弃操作 |
+ | 0x0604 | 配置无效 | 查看参考手册纠正配置 |
+ | 0x0605 | TSDB 初始化失败 | TODO |
+ | 0x0606 | 磁盘空间耗尽 | 查看磁盘空间耗尽的原因,尝试释放或增加磁盘空间 |
+ | 0x0607 | 磁盘文件没有访问权限 | 确认启动集群的系统帐户是否有相应的写权限 |
+ | 0x0608 | 数据文件被破坏 | TODO |
+ | 0x0609 | 内存耗尽 | 检查内存被耗尽的原因,尝试释放内存 |
+ | 0x060A | 标签版本过老 | TODO |
+ | 0x060B | 时间戳不在允许范围内 | 查看参考手册了解允许写入的时间戳规则 |
+ | 0x060C | 提交消息被破坏 | TODO |
+ | 0x060D | 无效操作 | TODO |
+ | 0x060E | 建表消息无效 | TODO |
+ | 0x060F | 内存跳表中没有表的数据 | TODO |
+ | 0x0610 | 文件已经存在 | TODO |
+ | 0x0611 | 需要重新配置该表 | TODO |
+ | 0x0612 | 建表的信息无效 | TODO |
+ | 0x0613 | 磁盘空间耗尽 | 尝试释放或增加磁盘空间 |
+ | 0x0614 | 消息被破坏 | TODO |
+ | 0x0615 | 无效的标签值 | 修正标签值 |
+ | 0x0616 | 未缓存最后一行的原始数据 | 修改数据库的 cacheModel 参数 |
+ | 0x0618 | 该表不存在 | 检查表名是否正确 |
+ | 0x0619 | 超级表已经存在 | 修改超级表名再次尝试 |
+ | 0x061A | 超级表不存在 | 检查超级表名是否正确 |
+ | 0x061B | 表被重新创建 | TODO |
+ | 0x061C | TDB 环境打开错误 | N/A |
+ | 0x0700 | 无效的查询句柄 | N/A |
+ | 0x0701 | 无效的消息 | TODO |
+ | 0x0702 | 磁盘空间耗尽 | 尝试释放或增加磁盘空间 |
+ | 0x0703 | 系统内存耗尽 | 尝试释放内存 |
+ | 0x0704 | 未知错误 | TODO |
+ | 0x0705 | 重复的 Join Key | 修正查询语句中的 Join Key |
+ | 0x0706 | 标签过滤条件过多 | 减小查询语句中的标签过滤条件 |
+ | 0x0707 | 查询不可用 | TODO |
+ | 0x0708 | TODO | TODO |
+ | 0x0709 | TODO | TODO |
+ | 0x070A | 查询中的时间窗口过多 | 修改查询语句以减小时间窗口的数量 |
+ | 0x070B | 查询缓冲区达到上限 | TODO |
+ | 0x070C | 多副本数据不一致 | TODO |
+ | 0x070D | 系统错误 | TODO |
+ | 0x070E | 无效的时间范围 | 修正查询语句中的时间范围 |
+ | 0x070F | 无效输入 | 修正查询语句 |
+ | 0x0720 | 调度器不存在 | TODO |
+ | 0x0721 | 任务不存在 | TODO |
+ | 0x0722 | 任务已经存在 | TODO |
+ | 0x0723 | 任务上下文不存在 | TODO |
+ | 0x0724 | 任务被取消 | TODO |
+ | 0x0725 | 任务被停止 | TODO |
+ | 0x0726 | 任务正在取消中 | TODO |
+ | 0x0727 | 任务正在停止中 | TODO |
+ | 0x0728 | 重复操作 | TODO |
+ | 0x0729 | 任务消息错误 | TODO |
+ | 0x072A | 作业已经被释放 | TODO |
+ | 0x072B | 任务状态错误 | TODO |
+ | 0x072C | in 和 not in 操作符不支持 JSON 类型 | 修正查询语句 |
+ | 0x072D | 此处不支持 JSON | 修正查询语句 |
+ | 0x072E | group 和 partition by 不支持 JSON 类型 | 修正查询语句 |
+ | 0x072F | 查询作业不存在 | TODO |
+ | 0x0800 | License 已经过期 | 重新激活或获取 License |
+ | 0x0801 | 受限于 License 无法创建 dnode | 获取新的 License |
+ | 0x0802 | 受限于 License 无法创建帐户 | 获取新的 License |
+ | 0x0803 | 受限于 License 无法创建表 | 获取新的 License |
+ | 0x0804 | 受限于 License 无法创建数据库 | 获取新的 License |
+ | 0x0805 | 受限于 License 无法创建用户 | 获取新的 License |
+ | 0x0806 | 受限于 License 无法创建连接 | 获取新的 License |
+ | 0x0807 | 受限于 License 无法创建流 | 获取新的 License |
+ | 0x0808 | 写入速度受限于 License | 
获取新的 License | + | 0x0809 | 存储容量受限于 License | 获取新的 License | + | 0x080A | 查询时间受限于 License | 获取新的 License | + | 0x080B | CPU 核数受限于 License | 获取新的 License | + | 0x080C | 受限于 License 无法创建超级表 | 获取新的 License | + | 0x080D | 受限于 License 无法创建表 | 获取新的 License | + | 0x0A00 | TQ 无效配置 | TODO | + | 0x0A01 | TQ 初始化失败 | TODO | + | 0x0A02 | TQ 磁盘空间耗尽 | 尝试释放或增加磁盘空间 | + | 0x0A03 | TQ 没有写磁盘权限 | 确认启动集群的系统帐号是否具有写磁盘权限 | + | 0x0A04 | TQ 文件被破坏 | TODO | + | 0x0A05 | TQ 内存耗尽 | 尝试释放内存 | + | 0x0A06 | TQ 文件已经存在 | TODO | + | 0x0A07 | TQ 创建目录失败 | TODO | + | 0x0A08 | TQ meta 中不存在该 key | TODO | + | 0x0A09 | meta key在事务中不存在 | TODO | + | 0x0A0A | meta key在事务中重复 | TODO | + | 0x0A0B | 消费组不存在 | 指定正确的消费组 | + | 0x0A0C | 该表的 schema 不存在 | 确认表名是否正确 | + | 0x0A0D | 没有已经提交的 offset | TODO | + | 0x1000 | WAL 未知错误 | TODO | + | 0x1001 | WAL 文件被破坏 | TODO | + | 0x1002 | WAL 大小超出上限 | TODO | + | 0x1003 | WAL 使用了错误的版本号 | TODO | + | 0x1004 | 系统内存耗尽 | 尝试释放内存 | + | 0x1005 | WAL 日志不存在 | TODO | + | 0x2201 | 无效的 mount 配置 | 修正 mount 配置参数 | + | 0x2202 | mount 点过多 | TODO | + | 0x2203 | 重复的 primary mount | TODO | + | 0x2204 | primary mount 缺失 | TODO | + | 0x2205 | no mount at tier: TODO | TODO | + | 0x2206 | 文件已经存在 | 更改文件名或者删除该文件 | + | 0x2207 | 无效的级别 | TODO | + | 0x2208 | 没有可用磁盘 | TODO | + | 0x220F | 系统内存耗尽 | TODO | + | 0x2400 | catalog 内部错误 | TODO | + | 0x2401 | 无效的 catalog 输入参数 | TODO | + | 0x2402 | catalog 不可用 | TODO | + | 0x2403 | catalog 系统错误 | TODO | + | 0x2404 | 数据库被删除 | TODO | + | 0x2405 | catalog 不可用 | TODO | + | 0x2406 | 表元数据和 vgroup 不匹配 | TODO | + | 0x2407 | catalog 不存在 | TODO | + | 0x2550 | 无效的消息顺序 | TODO | + | 0x2501 | 调度器状态错误 | TODO | + | 0x2502 | 调度器内部错误 | TODO | + | 0x2504 | 任务超时 | TODO | + | 0x2505 | 作业正在停止中 | TODO | + | 0x2600 | 语法错误 | 参考 SQL 手册纠正 | + | 0x2601 | 不完整的 SQL 语句 | 参考 SQL 手册纠正 | + | 0x2602 | 无效列名 | 使用正确的列名 | + | 0x2603 | 表不存在 | 使用正确的表名 | + | 0x2604 | 表名定义有二义性 | 参考 SQL 手册纠正 | + | 0x2605 | 无效的值类型 | 参考 SQL 手册纠正 | + | 0x2608 | 此处不能使用聚合查询 | 参考 SQL 手册纠正 | + | 0x2609 | ORDER BY 只能用于查询语句中的结果列 | 参考 SQL 手册纠正 | + | 0x260A | GROUP BY 缺失表达式 (TODO) | 参考 SQL 手册纠正 | + | 0x260B | 不是 SELECT 表达式 | 参考 SQL 手册纠正 | + | 0x260C | 不是单一分组的分组函数 (TODO) | 参考 SQL 手册纠正 | + | 0x260D | 标签数量不匹配 | 参考 SQL 手册纠正 | + | 0x260E | 无效的标签名 | 改用正确的标签名 | + | 0x2610 | 名字或密码过长 | 参考 SQL 手册纠正 | + | 0x2611 | 密码不能为空 | 提供非空密码 | + | 0x2612 | 端口无效 | 端口号必须在 (0,65535) 范围内 | + | 0x2613 | 地址格式错误 | 正确格式是 "fqdn: port" | + | 0x2614 | 该语句不再支持 | 参考 SQL 手册纠正 | + | 0x2615 | 时间窗口过小 | 参考 SQL 手册纠正 | + | 0x2616 | 未指定数据库 | 在表名或超级表名前添加 "." 
指定数据库 |
+ | 0x2617 | 标识符无效 | 参考 SQL 手册纠正 |
+ | 0x2618 | 该数据库中不存在对应的超级表 | 使用正确的数据库名或者超级表名 |
+ | 0x2619 | 数据库参数无效 | 参考 SQL 手册纠正 |
+ | 0x261A | 建表参数无效 | 参考 SQL 手册纠正 |
+ | 0x2624 | GROUP BY 和窗口子句不能共用 | 参考 SQL 手册纠正 |
+ | 0x2627 | 聚合函数不支持嵌套 | 参考 SQL 手册纠正 |
+ | 0x2628 | 在 integer/bool/varchar 类型的列上只支持状态窗口 | 参考 SQL 手册纠正 |
+ | 0x2629 | 标签列上不支持状态窗口 | 参考 SQL 手册纠正 |
+ | 0x262A | 状态窗口查询不支持超级表 | 参考 SQL 手册纠正 |
+ | 0x262B | 会话之间的 gap 应该是大于 0 的固定大小的窗口 | 参考 SQL 手册纠正 |
+ | 0x262C | 只在主键时间戳列上支持会话 | 参考 SQL 手册纠正 |
+ | 0x262D | 窗口偏移量不能是负值 | 参考 SQL 手册纠正 |
+ | 0x262E | 当 interval 的单位是 "year" 时 offset 的单位不能是 "month" | 参考 SQL 手册纠正 |
+ | 0x262F | offset 所指定的时间长度应该小于 interval 所指定的时间长度 | 参考 SQL 手册纠正 |
+ | 0x2630 | 当 interval 是自然年/月时不能使用 sliding | 参考 SQL 手册纠正 |
+ | 0x2631 | sliding 所指定的时间长度不能大于 interval 所指定的时间长度 | 参考 SQL 手册纠正 |
+ | 0x2632 | sliding 不能小于 interval 的 1% | 参考 SQL 手册纠正 |
+ | 0x2633 | 当使用 JSON 类型的 tag 时只允许这一个 tag 的存在 | 去除其它 tag |
+ | 0x2634 | 查询块中包含的结果列的数量不正确 | TODO |
+ | 0x2635 | 时间戳不正确 | TODO |
+ | 0x2637 | offset/soffset 不能小于 0 | 纠正 offset/soffset |
+ | 0x2638 | offset/soffset 只能用于 partition by | 参考 SQL 手册纠正 |
+ | 0x2639 | 无效的 topic 查询 | TODO |
+ | 0x263A | 不能批量删除超级表 | 请逐个删除 |
+ | 0x263B | 查询时间范围未指定起止时间或者时间范围过大 | 参考 SQL 手册纠正 |
+ | 0x263C | 重复的列表 | 参考 SQL 手册纠正 |
+ | 0x263D | 标签长度超过上限 | 参考 SQL 手册纠正 |
+ | 0x263E | 行长度超过上限 | 参考 SQL 手册纠正 |
+ | 0x263F | 不合法的列数量 | 参考 SQL 手册纠正 |
+ | 0x2640 | 列数过多 | 参考 SQL 手册纠正 |
+ | 0x2641 | 首列必须是时间戳 | 参考 SQL 手册纠正 |
+ | 0x2642 | binary/nchar 类型的列长度无效 | 参考 SQL 手册纠正 |
+ | 0x2643 | 标签列数量无效 | 参考 SQL 手册纠正 |
+ | 0x2644 | 无权进行该操作 | 参考 SQL 手册纠正 |
+ | 0x2645 | 无效的流查询 | 参考 SQL 手册纠正 |
+ | 0x2646 | 无效的 _c0 或 _rowts 表达式 | 参考 SQL 手册纠正 |
+ | 0x2647 | 无效的时间线函数 | 参考 SQL 手册纠正 |
+ | 0x2648 | 无效的密码 | 参考 SQL 手册纠正 |
+ | 0x2649 | 无效的 alter table 语句 | 参考 SQL 手册纠正 |
+ | 0x264A | 不能删除时间戳主列 | 参考 SQL 手册纠正 |
+ | 0x264B | 只有 binary/nchar 类型的列能够修改长度 | 参考 SQL 手册纠正 |
+ | 0x264C | 无效的 tbname 伪列 | 参考 SQL 手册纠正 |
+ | 0x264D | 无效的函数名 | 参考 SQL 手册纠正 |
+ | 0x264E | 注释过长 | 参考 SQL 手册纠正 |
+ | 0x264F | 有些函数只能用在查询的 SELECT 列表中,且不能与其它非标量函数或列混用 | 参考 SQL 手册纠正 |
+ | 0x2650 | 不支持窗口查询,因为子查询的结果不包含时间戳列 | 参考 SQL 手册纠正 |
+ | 0x2651 | 任何列都不能被删除 | 参考 SQL 手册纠正 |
+ | 0x2652 | 只有标签列可以是 JSON 类型 | 参考 SQL 手册纠正 |
+ | 0x2653 | 列或标签的值过长 | 参考 SQL 手册纠正 |
+ | 0x2655 | DELETE 语句必须有一个确定的时间范围 | 参考 SQL 手册纠正 |
+ | 0x2656 | REDISTRIBUTE VGROUP 语句只支持 1 到 3 个 vgroup | 参考 SQL 手册纠正 |
+ | 0x2657 | 不支持 Fill | 参考 SQL 手册纠正 |
+ | 0x2658 | 无效的窗口伪列 | 参考 SQL 手册纠正 |
+ | 0x2659 | 不允许做窗口查询: TODO | 参考 SQL 手册纠正 |
+ | 0x265A | 不允许做流计算: TODO | 参考 SQL 手册纠正 |
+ | 0x265B | 不允许做 Group By | 参考 SQL 手册纠正 |
+ | 0x265D | interp 子句错误 | 参考 SQL 手册纠正 |
+ | 0x265E | 窗口查询中不支持该函数 | 参考 SQL 手册纠正 |
+ | 0x265F | 只支持单表 | 参考 SQL 手册纠正 |
+ | 0x2660 | 无效的 SMA 索引 | 参考 SQL 手册纠正 |
+ | 0x2661 | 无效的 SELECT 表达式 | 参考 SQL 手册纠正 |
+ | 0x2662 | 获取表的元数据失败 | TODO |
+ | 0x2663 | 表名/别名不唯一 | 参考 SQL 手册纠正 |
+ | 0x266F | 解析器内部错误 | TODO |
+ | 0x2700 | 计划器内部错误 | TODO |
+ | 0x2701 | TODO | TODO |
+ | 0x2702 | 不支持 cross join | 参考 SQL 手册纠正 |
+ | 0x2800 | 函数内部错误 | 参考 SQL 手册纠正 |
+ | 0x2801 | 函数参数个数错误 | 参考 SQL 手册纠正 |
+ | 0x2802 | 函数参数类型错误 | 参考 SQL 手册纠正 |
+ | 0x2803 | 函数参数值错误 | 参考 SQL 手册纠正 |
+ | 0x2804 | 非内置函数 | 参考 SQL 手册纠正 |
+ | 0x2901 | UDF 正在停止 | TODO |
+ | 0x2902 | UDF 管道读取错误 | TODO |
+ | 0x2903 | UDF 连接错误 | TODO |
+ | 0x2904 | UDF 管道缺失 | TODO |
+ | 0x2905 | UDF 加载失败 | TODO |
+ | 0x2906 | UDF 无效状态 | TODO |
+ | 0x2907 | UDF 无效输入 | TODO |
+ | 0x2908 | UDF 没有函数句柄 | TODO |
+ | 0x2909 | UDF 无效的 bufsize | TODO |
+ | 0x290A | UDF 无效的输出类型 | TODO |
+ | 0x3000 | 无效的行协议类型 | 修正数据中的协议类型 |
+ | 0x3001 | 无效的时间戳精度类型 | 修正时间戳精度类型 |
+ | 
0x3002 | 无效的数据格式 | 修正数据格式 | + | 0x3003 | 无效的无模式数据库配置 | 修改配置 | + | 0x3004 | 写入类型与之前的不同 | 修正写入类型 | + | 0x3100 | TSMA 初始化失败 | TODO | + | 0x3101 | TSMA 已经存在 | 放弃重复建立 TSMA | + | 0x3102 | 元数据中没有 TSMA 索引 | TODO | + | 0x3103 | 无效的 TSMA 环境 | TODO | + | 0x3104 | 无效的 TSMA 状态 | TODO | + | 0x3105 | 无效的 TSMA 指针 | TODO | + | 0x3106 | 无效的 TSMA 参数 | 参考 SQL 手册纠正 | + | 0x3107 | cache 中没有该 TSMA 的索引 | TODO | + | 0x3150 | 无效的 RSMA 索引 | TODO | + | 0x3151 | 无效的 RSMA 状态 | TODO | + | 0x3152 | RSMA 创建 qtaskinfo 失败 | TODO | + | 0x3153 | RSMA 文件被破坏 | TODO | + | 0x3200 | 索引正在重建中 |TODO | + | 0x3201 | 无效的索引文件 | TODO | + | 0x4000 | 无效消息 | TODO | \ No newline at end of file diff --git a/docs/zh/14-reference/03-connector/_verify_linux.mdx b/docs/zh/14-reference/03-connector/_verify_linux.mdx index fcb8aae6ae..4979b6f72f 100644 --- a/docs/zh/14-reference/03-connector/_verify_linux.mdx +++ b/docs/zh/14-reference/03-connector/_verify_linux.mdx @@ -2,13 +2,18 @@ ```text $ taos -Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 -Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. +Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. + +Server is Community Edition. + taos> show databases; -name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB)| blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | -========================================================================================================================================================================================================================= -test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16| 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | -log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1| 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | -Query OK, 2 row(s) in set (0.001198s) + name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size | +========================================================================================================================================================================================================================================================================================================================================================================================================================================================================= + information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + db | 2022-08-04 14:14:49.385 | 2 | 4 | 1 | off | 14400m | 5254560m,5254560m,5254560m | 96 | 4 | 256 | 100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 | +Query OK, 3 rows in database (0.019154s) + taos> ``` diff --git a/docs/zh/14-reference/03-connector/_verify_windows.mdx b/docs/zh/14-reference/03-connector/_verify_windows.mdx index 87c9fbd024..8873d2928a 100644 --- 
a/docs/zh/14-reference/03-connector/_verify_windows.mdx +++ b/docs/zh/14-reference/03-connector/_verify_windows.mdx @@ -1,14 +1,19 @@ 在 cmd 下进入到 C:\TDengine 目录下直接执行 `taos.exe`,连接到 TDengine 服务,进入到 TDengine CLI 界面,示例如下: ```text - C:\TDengine>taos - Welcome to the TDengine shell from Linux, Client Version:2.0.5.0 - Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. - taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | - =================================================================================================================================================================================================================================================================== - test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | - log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | - Query OK, 2 row(s) in set (0.045000s) - taos> +Welcome to the TDengine shell from Windows, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. + +Server is Community Edition. + +taos> show databases; + name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size | +========================================================================================================================================================================================================================================================================================================================================================================================================================================================================= + information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | + test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 | +100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 | +Query OK, 3 rows in database (0.123000s) + +taos> ``` diff --git a/docs/zh/14-reference/03-connector/csharp.mdx b/docs/zh/14-reference/03-connector/csharp.mdx index 1e23df9286..723c12932b 100644 --- a/docs/zh/14-reference/03-connector/csharp.mdx +++ b/docs/zh/14-reference/03-connector/csharp.mdx @@ -22,7 +22,9 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" 本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 -`TDengine.Connector` 的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-dotnet)。 +注意:`TDengine.Connector` 3.x 不兼容 TDengine 2.x,如果在运行 TDengine 2.x 版本的环境下需要使用 C# 连接器请使用 TDengine.Connector 的 1.x 版本 。 + +`TDengine.Connector` 的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-dotnet/tree/3.0)。 ## 支持的平台 @@ -63,15 +65,15 @@ dotnet add package TDengine.Connector
-可以下载 TDengine 的源码,直接引用最新版本的 TDengine.Connector 库
+也可以[下载源码](https://github.com/taosdata/taos-connector-dotnet/tree/3.0),直接引用 TDengine.Connector 库
 
 ```bash
-git clone https://github.com/taosdata/TDengine.git
-cd TDengine/src/connector/C#/src/
-cp -r TDengineDriver/ myProject
+git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git
+cd taos-connector-dotnet
+cp -r src/ myProject
 
 cd myProject
-dotnet add TDengineDriver/TDengineDriver.csproj
+dotnet add example.csproj reference src/TDengine.csproj
 ```
 
@@ -145,20 +147,19 @@ namespace TDengineExample
 
 |示例程序 | 示例程序描述 |
 |--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
-| [C#checker](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/C%23checker) | 使用 TDengine.Connector 可以通过 help 命令中提供的参数,测试 C# Driver 的同步写入和查询 |
-| [TDengineTest](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/TDengineTest) | 使用 TDengine.Connector 实现的简单写入和查询的示例 |
-| [insertCn](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/insertCn) | 使用 TDengine.Connector 实现的写入和查询中文字符的示例 |
-| [jsonTag](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/jsonTag) | 使用 TDengine.Connector 实现的写入和查询 json tag 类型数据的示例 |
-| [stmt](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/stmt) | 使用 TDengine.Connector 实现的参数绑定的示例 |
-| [schemaless](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
-| [benchmark](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/taosdemo) | 使用 TDengine.Connector 实现的简易 Benchmark |
-| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/QueryAsyncSample.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
-| [subscribe](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/SubscribeSample.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
+| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 |
+| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 |
+| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
+| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
+| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
+| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
 
 ## 重要更新记录
 
 | TDengine.Connector | 说明 |
 |--------------------|--------------------------------|
+| 3.0.0 | 支持 TDengine 3.0.0.0,不兼容 2.x。新增接口 TDengine.Impl.GetData(),用于解析查询结果。 |
+| 1.0.7 | 修复 TDengine.Query() 内存泄露。 |
 | 1.0.6 | 修复 schemaless 在 1.0.4 和 1.0.5 中失效 bug。 |
 | 1.0.5 | 修复 Windows 同步查询中文报错 bug。 |
 | 1.0.4 | 新增异步查询,订阅等功能。修复绑定参数 bug。 |
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md
index 6b694543b1..f84ec65b4c 100644
--- a/docs/zh/14-reference/05-taosbenchmark.md
+++ b/docs/zh/14-reference/05-taosbenchmark.md
@@ -227,45 +227,34 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
 
 #### 数据库相关配置参数
 
-创建数据库时的相关参数在 json 配置文件中的 `dbinfo` 中配置,具体参数如下。这些参数与 TDengine 中 `create database` 时所指定的数据库参数相对应。
+创建数据库时的相关参数在 json 配置文件中的 `dbinfo` 中配置,其中个别参数说明如下。其余参数均与 TDengine 中 `create database` 时所指定的数据库参数相对应,详见[数据库管理](../../taos-sql/database)。
 
 - **name** : 数据库名。
 
 - **drop** : 插入前是否删除数据库,默认为 true。
 
-- **replica** : 创建数据库时指定的副本数。
+#### 流式计算相关配置参数
 
-- **days** : 单个数据文件中存储数据的时间跨度,默认值为 10。
+创建流式计算的相关参数在 json 配置文件中的 `stream` 中配置,具体参数如下。
 
-- **cache** : 缓存块的大小,单位是 MB,默认值是 16。
+- **stream_name** : 流式计算的名称,必填项。
 
-- **blocks** : 每个 vnode 中缓存块的数量,默认为 6。
+- **stream_stb** : 流式计算对应的超级表名称,必填项。
 
-- **precision** : 数据库时间精度,默认值为 "ms"。
+- **stream_sql** : 流式计算的 SQL 语句,必填项。
 
-- **keep** : 保留数据的天数,默认值为 3650。
+- **trigger_mode** : 流式计算的触发模式,可选项。
 
-- **minRows** : 文件块中的最小记录数,默认值为 100。
+- **watermark** : 流式计算的水印,可选项。
 
-- **maxRows** : 文件块中的最大记录数,默认值为 4096。
-
-- **comp** : 文件压缩标志,默认值为 2。
-
-- **walLevel** : WAL 级别,默认为 1。
-
-- **cacheLast** : 是否允许将每个表的最后一条记录保留在内存中,默认值为 0,可选值为 0,1,2,3。
-
-- **quorum** : 多副本模式下的写确认数量,默认值为 1。
-
-- **fsync** : 当 wal 设置为 2 时,fsync 的间隔时间,单位为 ms,默认值为 3000。
-
-- **update** : 是否支持数据更新,默认值为 0, 可选值为 0, 1, 2。
+- **drop** : 是否创建流式计算,可选项为 "yes" 或者 "no",为 "no" 时不创建。
 
 #### 超级表相关配置参数
 
-创建超级表时的相关参数在 json 配置文件中的 `super_tables` 中配置,具体参数如下表。
+创建超级表时的相关参数在 json 配置文件中的 `super_tables` 中配置,具体参数如下。
 
 - **name**: 超级表名,必须配置,没有默认值。
+
 - **child_table_exists** : 子表是否已经存在,默认值为 "no",可选值为 "yes" 或 "no"。
 
 - **child_table_count** : 子表的数量,默认值为 10。
 
@@ -316,6 +305,22 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
 
 - **tags_file** : 仅当 insert_mode 为 taosc, rest 的模式下生效。 最终的 tag 的数值与 childtable_count 有关,如果 csv 文件内的 tag 数据行小于给定的子表数量,那么会循环读取 csv 文件数据直到生成 childtable_count 指定的子表数量;否则则只会读取 childtable_count 行 tag 数据。也即最终生成的子表数量为二者取小。
 
+#### TSMA 配置参数
+
+指定 TSMA 的配置参数在 `super_tables` 中的 `tsmas` 中,具体参数如下。
+
+- **name** : 指定 TSMA 的名字,必选项。
+
+- **function** : 指定 TSMA 的函数,必选项。
+
+- **interval** : 指定 TSMA 的时间间隔,必选项。
+
+- **sliding** : 指定 TSMA 的窗口时间位移,必选项。
+
+- **custom** : 指定 TSMA 的创建语句结尾追加的自定义配置,可选项。
+
+- **start_when_inserted** : 指定当插入多少行时创建 TSMA,可选项,默认为 0。
+
 #### 标签列与数据列配置参数
 
 指定超级表标签列与数据列的配置参数分别在 `super_tables` 中的 `columns` 和 `tag` 中。
 
@@ -335,6 +340,8 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
 
 - **values** : nchar/binary 列/标签的值域,将从值中随机选择。
 
+- **sma** : 将该列加入 BSMA 中,值为 "yes" 或者 "no",默认为 "no"。
+
 #### 插入行为配置参数
 
 - **thread_count** : 插入数据的线程数量,默认为 8。
diff --git a/docs/zh/14-reference/06-taosdump.md b/docs/zh/14-reference/06-taosdump.md
index 95ee20bfba..625499a949 100644
--- a/docs/zh/14-reference/06-taosdump.md
+++ b/docs/zh/14-reference/06-taosdump.md
@@ -107,7 +107,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
                              use letter and number only. Default is NOT.
   -n, --no-escape            No escape char '`'. Default is using it.
   -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
-                             5.
+                             8.
+  -C, --cloud=CLOUD_DSN      Specify a DSN to access TDengine cloud service
+  -R, --restful              Use RESTful interface to connect TDengine
+  -t, --timeout=SECONDS      The timeout seconds for websocket to interact.
   -g, --debug                Print debug info.
   -?, --help                 Give this help list
       --usage                Give a short usage message
diff --git a/docs/zh/17-operation/17-diagnose.md b/docs/zh/17-operation/17-diagnose.md
index e2a2ef035a..e6e9be7153 100644
--- a/docs/zh/17-operation/17-diagnose.md
+++ b/docs/zh/17-operation/17-diagnose.md
@@ -1,131 +1,71 @@
----
-title: 诊断及其他
----
-
-## 网络连接诊断
-
-当出现客户端应用无法访问服务端时,需要确认客户端与服务端之间网络的各端口连通情况,以便有针对性地排除故障。
-
-目前网络连接诊断支持在:Linux 与 Linux,Linux 与 Windows 之间进行诊断测试。
-
-诊断步骤:
-
-1. 如拟诊断的端口范围与服务器 taosd 实例的端口范围相同,须先停掉 taosd 实例
-2. 服务端命令行输入:`taos -n server -P -l ` 以服务端身份启动对端口 port 为基准端口的监听
-3.
客户端命令行输入:`taos -n client -h -P -l ` 以客户端身份启动对指定的服务器、指定的端口发送测试包 - --l : 测试网络包的大小(单位:字节)。最小值是 11、最大值是 64000,默认值为 1000。 -注:两端命令行中指定的测试包长度必须一致,否则测试显示失败。 - -服务端运行正常的话会输出以下信息: - -```bash -# taos -n server -P 6000 -12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000 - -12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening -12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening -... -... -... -12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening -12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening -12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening -12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000 -12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000 -12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000 -12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000 -12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001 -12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001 -12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001 -12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001 -... -... -... -12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011 -12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011 -12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011 -12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011 -``` - -客户端运行正常会输出以下信息: - -```bash -# taos -n client -h 172.27.0.7 -P 6000 -12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000 - -12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7 -12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000 -12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000 -... -... -... 
-12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010
-12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010
-12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011
-12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
-```
-
-仔细阅读打印出来的错误信息,可以帮助管理员找到原因,以解决问题。
-
-## 启动状态及 RPC 诊断
-
-`taos -n startup -h `
-
-判断 taosd 服务端是否成功启动,是数据库管理员经常遇到的一种情形。特别当若干台服务器组成集群时,判断每个服务端实例是否成功启动就会是一个重要问题。除检索 taosd 服务端日志文件进行问题定位、分析外,还可以通过 `taos -n startup -h ` 来诊断一个 taosd 进程的启动状态。
-
-针对多台服务器组成的集群,当服务启动过程耗时较长时,可通过该命令行来诊断每台服务器的 taosd 实例的启动状态,以准确定位问题。
-
-`taos -n rpc -h `
-
-该命令用来诊断已经启动的 taosd 实例的端口是否可正常访问。如果 taosd 程序异常或者失去响应,可以通过 `taos -n rpc -h ` 来发起一个与指定 fqdn 的 rpc 通信,看看 taosd 是否能收到,以此来判定是网络问题还是 taosd 程序异常问题。
-
-## sync 及 arbitrator 诊断
-
-```
-taos -n sync -P 6040 -h 
-taos -n sync -P 6042 -h 
-```
-
-用来诊断 sync 端口是否工作正常,判断服务端 sync 模块是否成功工作。另外,-P 6042 用来诊断 arbitrator 是否配置正常,判断指定服务器的 arbitrator 是否能正常工作。
-
-## 网络速度诊断
-
-`taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP`
-
-从 2.2.0.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
-
--n:设为“speed”时,表示对网络速度进行诊断。
--h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
--P:所连接服务端的网络端口。默认值为 6030。
--N:诊断过程中使用的网络包总数。最小值是 1、最大值是 10000,默认值为 100。
--l:单个网络包的大小(单位:字节)。最小值是 1024、最大值是 1024 `*` 1024 `*` 1024,默认值为 1024。
--S:网络封包的类型。可以是 TCP 或 UDP,默认值为 TCP。
-
-## FQDN 解析速度诊断
-
-`taos -n fqdn -h `
-
-从 2.2.0.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下:
-
--n:设为“fqdn”时,表示对 FQDN 解析进行诊断。
--h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
-
-## 服务端日志
-
-taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。
-
-一旦设定为 135 或 143,日志文件增长很快,特别是写入、查询请求量较大时,增长速度惊人。如合并保存日志,很容易把日志内的关键信息(如配置信息、错误信息等)冲掉。为此,服务端将重要信息日志与其他日志分开存放:
-
-- taosinfo 存放重要信息日志, 包括:INFO/ERROR/WARNING 级别的日志信息。不记录 DEBUG、TRACE 级别的日志。
-- taosdlog 服务器端生成的日志,记录 taosinfo 中全部信息外,还根据设置的日志输出级别,记录 DEBUG(日志级别 135)、TRACE(日志级别是 143)。
-
-## 客户端日志
-
-每个独立运行的客户端(一个进程)生成一个独立的客户端日志,其命名方式采用 taoslog+<序号> 的方式命名。文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。
-
-- taoslog 客户端(driver)生成的日志,默认记录客户端 INFO/ERROR/WARNING 级别日志,还根据设置的日志输出级别,记录 DEBUG(日志级别 135)、TRACE(日志级别是 143)。
-
-其中,日志文件最大长度由 numOfLogLines 来进行配置,一个 taosd 实例最多保留两个文件。
-
-taosd 服务端日志采用异步落盘写入机制,优点是可以避免硬盘写入压力太大,对性能造成很大影响。缺点是,在极端情况下,存在少量日志行数丢失的可能。
+---
+title: 诊断及其他
+---
+
+## 网络连接诊断
+
+当出现客户端应用无法访问服务端时,需要确认客户端与服务端之间网络的各端口连通情况,以便有针对性地排除故障。
+
+目前网络连接诊断支持在:Linux 与 Linux,Linux 与 Windows 之间进行诊断测试。
+
+诊断步骤:
+
+1. 如拟诊断的端口范围与服务器 taosd 实例的端口范围相同,须先停掉 taosd 实例
+2. 服务端命令行输入:`taos -n server -P <port> -l <pktlen>` 以服务端身份启动对端口 port 为基准端口的监听
+3. 客户端命令行输入:`taos -n client -h <fqdn of server> -P <port> -l <pktlen>` 以客户端身份启动对指定的服务器、指定的端口发送测试包
+
+-l <pktlen>: 测试网络包的大小(单位:字节)。最小值是 11、最大值是 64000,默认值为 1000。
+注:两端命令行中指定的测试包长度必须一致,否则测试显示失败。
+
+服务端运行正常的话会输出以下信息:
+
+```bash
+# taos -n server -P 6030 -l 1000
+network test server is initialized, port:6030
+request is received, size:1000
+request is received, size:1000
+...
+...
+...
+request is received, size:1000
+request is received, size:1000
+```
+
+客户端运行正常会输出以下信息:
+
+```bash
+# taos -n client -h v3s2 -P 6030 -l 1000
+network test client is initialized, the server is v3s2:6030
+request is sent, size:1000
+response is received, size:1000
+request is sent, size:1000
+response is received, size:1000
+...
+...
+...
+request is sent, size:1000
+response is received, size:1000
+request is sent, size:1000
+response is received, size:1000
+
+total succ: 100/100 cost: 16.23 ms speed: 5.87 MB/s
+```
+
+仔细阅读打印出来的错误信息,可以帮助管理员找到原因,以解决问题。
+
+## 服务端日志
+
+taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143。
+
+一旦设定为 135 或 143,日志文件增长很快,特别是写入、查询请求量较大时,增长速度惊人。请注意日志文件目录所在磁盘的空间大小。
+
+## 客户端日志
+
+每个独立运行的客户端(一个进程)生成一个独立的客户端日志,其命名方式采用 taoslog+<序号> 的方式命名。文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143。
+
+- taoslog 客户端(driver)生成的日志,默认记录客户端 INFO/ERROR/WARNING 级别日志,还根据设置的日志输出级别,记录 DEBUG(日志级别 135)、TRACE(日志级别是 143)。
+
+其中,日志文件最大长度由 numOfLogLines 来进行配置,一个 taosd 实例最多保留两个文件。
+
+taosd 服务端日志采用异步落盘写入机制,优点是可以避免硬盘写入压力太大,对性能造成很大影响。缺点是,在极端情况下,存在少量日志行数丢失的可能。当问题分析需要的时候,可以考虑将参数 asynclog 设置成 0,改为同步落盘写入机制,保证日志不会丢失。
diff --git a/docs/zh/20-third-party/09-emq-broker.md b/docs/zh/20-third-party/09-emq-broker.md
index 84b1027f6b..dd98374558 100644
--- a/docs/zh/20-third-party/09-emq-broker.md
+++ b/docs/zh/20-third-party/09-emq-broker.md
@@ -17,6 +17,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
 
 用户可以根据当前的操作系统,到 EMQX 官网下载安装包,并执行安装。下载地址如下:。安装后使用 `sudo emqx start` 或 `sudo systemctl start emqx` 启动 EMQX 服务。
 
+注意:本文基于 EMQX v4.4.5 版本,其他版本的配置界面、配置方法以及功能可能随着版本升级有所变化。
 
 ## 创建数据库和表
 
@@ -32,7 +33,7 @@ CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volum
 
 ## 配置 EMQX 规则
 
-由于 EMQX 不同版本配置界面所有不同,这里仅以 v4.4.3 为例,其他版本请参考相应官网文档。
+由于 EMQX 不同版本配置界面有所不同,这里仅以 v4.4.5 为例,其他版本请参考相应官网文档。
 
 ### 登录 EMQX Dashboard
 
diff --git a/examples/c/tmq.c b/examples/c/tmq.c
index 3686251b4b..1cdd4c02da 100644
--- a/examples/c/tmq.c
+++ b/examples/c/tmq.c
@@ -1,473 +1,287 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */ - -#include -#include -#include -#include -#include -#include "taos.h" - -static int running = 1; -static void msg_process(TAOS_RES* msg) { - char buf[1024]; - /*memset(buf, 0, 1024);*/ - printf("topic: %s\n", tmq_get_topic_name(msg)); - printf("db: %s\n", tmq_get_db_name(msg)); - printf("vg: %d\n", tmq_get_vgroup_id(msg)); - if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) { - tmq_raw_data raw = {0}; - int32_t code = tmq_get_raw(msg, &raw); - if (code == 0) { - TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return; - } - - TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 5"); - if (taos_errno(pRes) != 0) { - printf("error in create db, reason:%s\n", taos_errstr(pRes)); - return; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - return; - } - taos_free_result(pRes); - - int32_t ret = tmq_write_raw(pConn, raw); - printf("write raw data: %s\n", tmq_err2str(ret)); - taos_close(pConn); - } - char* result = tmq_get_json_meta(msg); - if (result) { - printf("meta result: %s\n", result); - } - tmq_free_json_meta(result); - return; - } - while (1) { - TAOS_ROW row = taos_fetch_row(msg); - if (row == NULL) break; - TAOS_FIELD* fields = taos_fetch_fields(msg); - int32_t numOfFields = taos_field_count(msg); - taos_print_row(buf, row, fields, numOfFields); - printf("%s\n", buf); - - const char* tbName = tmq_get_table_name(msg); - if (tbName) { - printf("from tb: %s\n", tbName); - } - } -} - -int32_t init_env() { - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return -1; - } - - TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 5"); - if (taos_errno(pRes) != 0) { - printf("error in create db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, - "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 " - "nchar(8), t4 bool)"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct0 values(now, 1, 2, 'a')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct1 values(now, 3, 4, 'b')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct1, reason:%s\n", 
taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct3 values(now, 5, 6, 'c')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - -#if 0 - pRes = taos_query(pConn, "alter table st1 add column c4 bigint"); - if (taos_errno(pRes) != 0) { - printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)"); - if (taos_errno(pRes) != 0) { - printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)"); - if (taos_errno(pRes) != 0) { - printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table ct3 set tag t1=5000"); - if (taos_errno(pRes) != 0) { - printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table ct3 ct1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table st1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))"); - if (taos_errno(pRes) != 0) { - printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 add column c3 bigint"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 rename column c3 cc3"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 comment 'hello'"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 drop column c1"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table n1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes)); - 
return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table jt2 using jt tags('')"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, - "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 " - "nchar(8), t4 bool)"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table st1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); -#endif - - return 0; -} - -int32_t create_topic() { - printf("create topic\n"); - TAOS_RES* pRes; - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return -1; - } - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - // pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1"); - pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1"); - if (taos_errno(pRes) != 0) { - printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create topic topic2 as select ts, c1, c2, c3 from st1"); - if (taos_errno(pRes) != 0) { - printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - -#if 0 - pRes = taos_query(pConn, "insert into tu1 values(now, 1, 1.0, 'bi1')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - pRes = taos_query(pConn, "insert into tu1 values(now+1d, 1, 1.0, 'bi1')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - pRes = taos_query(pConn, "insert into tu2 values(now, 2, 2.0, 'bi2')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - pRes = taos_query(pConn, "insert into tu2 values(now+1d, 2, 2.0, 'bi2')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); -#endif - - taos_close(pConn); - return 0; -} - -void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - printf("commit %d tmq %p param %p\n", code, tmq, param); -} - -tmq_t* build_consumer() { -#if 0 - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - assert(pConn != NULL); - - TAOS_RES* pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - } - taos_free_result(pRes); -#endif - - tmq_conf_t* conf = tmq_conf_new(); - tmq_conf_set(conf, "group.id", "tg2"); - tmq_conf_set(conf, "client.id", "my app 1"); - tmq_conf_set(conf, "td.connect.user", "root"); - tmq_conf_set(conf, 
"td.connect.pass", "taosdata"); - tmq_conf_set(conf, "msg.with.table.name", "true"); - tmq_conf_set(conf, "enable.auto.commit", "true"); - - /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/ - - tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); - tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); - assert(tmq); - tmq_conf_destroy(conf); - return tmq; -} - -tmq_list_t* build_topic_list() { - tmq_list_t* topic_list = tmq_list_new(); - tmq_list_append(topic_list, "topic_ctb_column"); - /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/ - return topic_list; -} - -void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { - int32_t code; - - if ((code = tmq_subscribe(tmq, topics))) { - fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code)); - printf("subscribe err\n"); - return; - } - int32_t cnt = 0; - while (running) { - TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, -1); - if (tmqmessage) { - cnt++; - msg_process(tmqmessage); - /*if (cnt >= 2) break;*/ - /*printf("get data\n");*/ - taos_free_result(tmqmessage); - /*} else {*/ - /*break;*/ - /*tmq_commit_sync(tmq, NULL);*/ - } - } - - code = tmq_consumer_close(tmq); - if (code) - fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); - else - fprintf(stderr, "%% Consumer closed\n"); -} - -void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) { - static const int MIN_COMMIT_COUNT = 1; - - int msg_count = 0; - int32_t code; - - if ((code = tmq_subscribe(tmq, topics))) { - fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code)); - return; - } - - tmq_list_t* subList = NULL; - tmq_subscription(tmq, &subList); - char** subTopics = tmq_list_to_c_array(subList); - int32_t sz = tmq_list_get_size(subList); - printf("subscribed topics: "); - for (int32_t i = 0; i < sz; i++) { - printf("%s, ", subTopics[i]); - } - printf("\n"); - tmq_list_destroy(subList); - - while (running) { - TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000); - if (tmqmessage) { - msg_process(tmqmessage); - taos_free_result(tmqmessage); - - /*tmq_commit_sync(tmq, NULL);*/ - /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/ - } - } - - code = tmq_consumer_close(tmq); - if (code) - fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); - else - fprintf(stderr, "%% Consumer closed\n"); -} - -int main(int argc, char* argv[]) { - if (argc > 1) { - printf("env init\n"); - if (init_env() < 0) { - return -1; - } - create_topic(); - } - tmq_t* tmq = build_consumer(); - tmq_list_t* topic_list = build_topic_list(); - basic_consume_loop(tmq, topic_list); - /*sync_consume_loop(tmq, topic_list);*/ -} +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include "taos.h"
+
+static int  running = 1;
+static char dbName[64] = "tmqdb";
+static char stbName[64] = "stb";
+static char topicName[64] = "topicname";
+
+static int32_t msg_process(TAOS_RES* msg) {
+  char    buf[1024];
+  int32_t rows = 0;
+
+  const char* topicName = tmq_get_topic_name(msg);
+  const char* dbName = tmq_get_db_name(msg);
+  int32_t     vgroupId = tmq_get_vgroup_id(msg);
+
+  printf("topic: %s\n", topicName);
+  printf("db: %s\n", dbName);
+  printf("vgroup id: %d\n", vgroupId);
+
+  while (1) {
+    TAOS_ROW row = taos_fetch_row(msg);
+    if (row == NULL) break;
+
+    TAOS_FIELD* fields = taos_fetch_fields(msg);
+    int32_t     numOfFields = taos_field_count(msg);
+    int32_t*    length = taos_fetch_lengths(msg);
+    int32_t     precision = taos_result_precision(msg);
+    const char* tbName = tmq_get_table_name(msg);
+    rows++;
+    taos_print_row(buf, row, fields, numOfFields);
+    printf("row content from %s: %s\n", (tbName != NULL ? tbName : "null table"), buf);
+  }
+
+  return rows;
+}
+
+static int32_t init_env() {
+  TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+  if (pConn == NULL) {
+    return -1;
+  }
+
+  TAOS_RES* pRes;
+  // drop database if exists
+  printf("create database\n");
+  pRes = taos_query(pConn, "drop database if exists tmqdb");
+  if (taos_errno(pRes) != 0) {
+    printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  // create database
+  pRes = taos_query(pConn, "create database tmqdb");
+  if (taos_errno(pRes) != 0) {
+    printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  // create super table
+  printf("create super table\n");
+  pRes = taos_query(pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  // create sub tables
+  printf("create sub tables\n");
+  pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb0, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb1, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb2, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb3, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  // insert data
+  printf("insert data into sub tables\n");
+  pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to insert into ctb1, 
reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to insert into ctb2, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to insert into ctb3, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  taos_close(pConn);
+  return 0;
+}
+
+int32_t create_topic() {
+  printf("create topic\n");
+  TAOS_RES* pRes;
+  TAOS*     pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+  if (pConn == NULL) {
+    return -1;
+  }
+
+  pRes = taos_query(pConn, "use tmqdb");
+  if (taos_errno(pRes) != 0) {
+    printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  // pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
+  pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
+  if (taos_errno(pRes) != 0) {
+    printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
+  taos_close(pConn);
+  return 0;
+}
+
+void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
+  printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
+}
+
+tmq_t* build_consumer() {
+  tmq_conf_res_t code;
+  tmq_conf_t*    conf = tmq_conf_new();
+  code = tmq_conf_set(conf, "enable.auto.commit", "true");
+  if (TMQ_CONF_OK != code) return NULL;
+  code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
+  if (TMQ_CONF_OK != code) return NULL;
+  code = tmq_conf_set(conf, "group.id", "cgrpName");
+  if (TMQ_CONF_OK != code) return NULL;
+  code = tmq_conf_set(conf, "td.connect.user", "root");
+  if (TMQ_CONF_OK != code) return NULL;
+  code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
+  if (TMQ_CONF_OK != code) return NULL;
+  code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
+  if (TMQ_CONF_OK != code) return NULL;
+  code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+  if (TMQ_CONF_OK != code) return NULL;
+  code = tmq_conf_set(conf, "msg.with.table.name", "true");
+  if (TMQ_CONF_OK != code) return NULL;
+
+  tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+
+  tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+  tmq_conf_destroy(conf);
+  return tmq;
+}
+
+tmq_list_t* build_topic_list() {
+  tmq_list_t* topicList = tmq_list_new();
+  int32_t     code = tmq_list_append(topicList, "topicname");
+  if (code) {
+    tmq_list_destroy(topicList);  // avoid leaking the list when append fails
+    return NULL;
+  }
+  return topicList;
+}
+
+void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
+  int32_t code;
+
+  if ((code = tmq_subscribe(tmq, topicList))) {
+    fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
+    return;
+  }
+
+  int32_t totalRows = 0;
+  int32_t msgCnt = 0;
+  int32_t consumeDelay = 5000;
+  while (running) {
+    TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, consumeDelay);
+    if (tmqmsg) {
+      msgCnt++;
+      totalRows += msg_process(tmqmsg);
+      taos_free_result(tmqmsg);
+    } else {
+      break;
+    }
+  }
+
+  fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
+}
+
+int main(int argc, char* argv[]) {
+  int32_t code;
+
+  if (init_env() < 0) {
+    return -1;
+  }
+
+  if (create_topic() < 0) {
+    return -1;
+  }
+
+  tmq_t* tmq = build_consumer();
+  if (NULL == tmq) {
+    
+    fprintf(stderr, "%% build_consumer() failed!\n");
+    return -1;
+  }
+
+  tmq_list_t* topic_list = build_topic_list();
+  if (NULL == topic_list) {
+    return -1;
+  }
+
+  basic_consume_loop(tmq, topic_list);
+  tmq_list_destroy(topic_list);  // the list is only needed for subscribing
+
+  code = tmq_unsubscribe(tmq);
+  if (code) {
+    fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
+  } else {
+    fprintf(stderr, "%% Unsubscribed\n");
+  }
+
+  code = tmq_consumer_close(tmq);
+  if (code) {
+    fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
+  } else {
+    fprintf(stderr, "%% Consumer closed\n");
+  }
+
+  return 0;
+}
diff --git a/tests/pytest/cluster/TD-3693/insert1Data.json b/tests/pytest/cluster/TD-3693/insert1Data.json
index 6900ce0366..ad83a35160 100644
--- a/tests/pytest/cluster/TD-3693/insert1Data.json
+++ b/tests/pytest/cluster/TD-3693/insert1Data.json
@@ -6,7 +6,7 @@
     "user": "root",
     "password": "taosdata",
     "thread_count": 4,
-    "thread_count_create_tbl": 4,
+    "create_table_thread_count": 4,
     "result_file": "./insert_res.txt",
     "confirm_parameter_prompt": "no",
     "insert_interval": 0,
diff --git a/tests/pytest/cluster/TD-3693/insert2Data.json b/tests/pytest/cluster/TD-3693/insert2Data.json
index e55fa996fb..86495f0ce9 100644
--- a/tests/pytest/cluster/TD-3693/insert2Data.json
+++ b/tests/pytest/cluster/TD-3693/insert2Data.json
@@ -6,7 +6,7 @@
     "user": "root",
     "password": "taosdata",
     "thread_count": 4,
-    "thread_count_create_tbl": 4,
+    "create_table_thread_count": 4,
    "result_file": "./insert_res.txt",
     "confirm_parameter_prompt": "no",
     "insert_interval": 0,
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index 7ab09383bf..2fa99230bc 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -1339,8 +1339,9 @@ class Task():
             0x03A1, # STable [does] not exist
             0x03AA, # Tag already exists
             0x0603, # Table already exists
-            0x2603, # Table does not exist
+            0x2603, # Table does not exist, replaced by 2662 below
             0x260d, # Tags number not matched
+            0x2662, # Table does not exist #TODO: what about 2603 above?
diff --git a/tests/pytest/dockerCluster/insert.json b/tests/pytest/dockerCluster/insert.json index 32e1043c4e..ce8d7978fa 100644 --- a/tests/pytest/dockerCluster/insert.json +++ b/tests/pytest/dockerCluster/insert.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 1, + "create_table_thread_count": 1, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "databases": [{ diff --git a/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json index dc9de1626a..4b622c3f28 100644 --- a/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json +++ b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/perfbenchmark/bug3433.py b/tests/pytest/perfbenchmark/bug3433.py index 7f2dfad403..3e7de39bed 100644 --- a/tests/pytest/perfbenchmark/bug3433.py +++ b/tests/pytest/perfbenchmark/bug3433.py @@ -185,7 +185,7 @@ class TDTestCase: "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "/tmp/insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/perfbenchmark/joinPerformance.py b/tests/pytest/perfbenchmark/joinPerformance.py index b85c09926a..d30bec6664 100644 --- a/tests/pytest/perfbenchmark/joinPerformance.py +++ b/tests/pytest/perfbenchmark/joinPerformance.py @@ -168,7 +168,7 @@ class JoinPerf: "user": self.user, "password": self.password, "thread_count": cpu_count(), - "thread_count_create_tbl": cpu_count(), + "create_table_thread_count": cpu_count(), "result_file": "/tmp/insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/perfbenchmark/taosdemoInsert.py b/tests/pytest/perfbenchmark/taosdemoInsert.py index 774103aa85..a23797a62b 100644 --- a/tests/pytest/perfbenchmark/taosdemoInsert.py +++ b/tests/pytest/perfbenchmark/taosdemoInsert.py @@ -172,7 +172,7 @@ class Taosdemo: "user": self.user, "password": self.password, "thread_count": cpu_count(), - "thread_count_create_tbl": cpu_count(), + "create_table_thread_count": cpu_count(), "result_file": "/tmp/insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/query/nestedQuery/insertData.json b/tests/pytest/query/nestedQuery/insertData.json index 1aad170bb0..18a843015c 100644 --- a/tests/pytest/query/nestedQuery/insertData.json +++ b/tests/pytest/query/nestedQuery/insertData.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 6a5c0796ed..e7e9fa5329 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -133,7 +133,7 @@ class TDTestCase: "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "/tmp/insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/insert-interlace.json 
b/tests/pytest/tools/insert-interlace.json index 0e17edf8fd..8d96c20fe7 100644 --- a/tests/pytest/tools/insert-interlace.json +++ b/tests/pytest/tools/insert-interlace.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 5000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json index bbac60872e..e50e67943e 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json index 8f795338d2..fe4945483c 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/insert-tblimit-tboffset.json b/tests/pytest/tools/insert-tblimit-tboffset.json index 2c2d86c481..92b28241a6 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset.json +++ b/tests/pytest/tools/insert-tblimit-tboffset.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/insert-tblimit-tboffset0.json b/tests/pytest/tools/insert-tblimit-tboffset0.json index ce83ea3e60..0c1e00976b 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset0.json +++ b/tests/pytest/tools/insert-tblimit-tboffset0.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/insert-tblimit1-tboffset.json b/tests/pytest/tools/insert-tblimit1-tboffset.json index b15aaf4eed..ff002e9528 100644 --- a/tests/pytest/tools/insert-tblimit1-tboffset.json +++ b/tests/pytest/tools/insert-tblimit1-tboffset.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/insert.json b/tests/pytest/tools/insert.json index 523561dc6d..4489730722 100644 --- a/tests/pytest/tools/insert.json +++ b/tests/pytest/tools/insert.json @@ -7,7 +7,7 @@ "password": "taosdata", "thread_count": 2, "num_of_records_per_req": 10, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "databases": [{ "dbinfo": { "name": "db01", diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json index a11261681a..3c876c61c7 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json index 080231551e..b9162242d4 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json index fe0ecbe2de..3fbaeceeba 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json index 1af2952a69..6b0631da39 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json index 39c5e49909..bf9b015154 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json index f4dbf1ee41..346fe31be9 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json 
b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json index 84b511a446..65a2836a49 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json index 75dbcb4432..b7b6c186e6 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json index 0c2e9cf34a..edb9ed7cb8 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json index e90474e872..b1d7dc4935 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json index 21603b1902..c1c27cf6d7 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json index c944c26915..360ec07370 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json index 4908d3999c..930496a877 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json +++ 
b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json index 03f531f52b..12dadf8006 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json index ce2a34627b..759a3f074d 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json index 6e438b33df..321495782d 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-illegal.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json index 54e646a5a0..5dd37ee8b0 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json index 9a47a873dd..7fbee6fee0 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 100, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json index 2eb17b1aab..16e1f94481 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git 
a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json index abe277bf5b..86c9359ffb 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json index 2dae7eb1d7..7eee9ce55b 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-offset.json b/tests/pytest/tools/taosdemoAllTest/insert-offset.json index 642d01db3e..d3946cee3c 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-offset.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-offset.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json index 3ef4360aef..c812b4971e 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-sample.json b/tests/pytest/tools/taosdemoAllTest/insert-sample.json index 5b25281e78..e24e20067c 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-sample.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-sample.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json index 6432fde4ba..ceadfc677a 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json index 4e59d86679..69ebe45e50 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json +++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - 
"thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json index 80d6817b5d..8b7086530e 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json index a35c28f0ac..1e052ff2a4 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json index 05d47c3611..c67b1dba14 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json index e63b3613ba..25e43aefa7 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json index 137e608386..af04d9c1a3 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json index 63a4a2ab58..84a5fe9452 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json +++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json 
b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json index f3212bc30d..d092a41483 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json +++ b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json index 9711ead80e..45523618f0 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json index 24c61cfa8c..a95c40f9eb 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertRestful.json b/tests/pytest/tools/taosdemoAllTest/insertRestful.json index ab7ee9a73b..26770c3d09 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertRestful.json +++ b/tests/pytest/tools/taosdemoAllTest/insertRestful.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json index d835822e8f..74737b4dec 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json index 4c7cdfe39d..e0e9f72a56 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json +++ b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json b/tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json index b563dcc94b..fdc1994782 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json +++ b/tests/pytest/tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json @@ -6,7 +6,7 
@@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json index 0f1a874cc3..91d6c1a837 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json +++ b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json index bdab459987..45a718705a 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_block2.json b/tests/pytest/tools/taosdemoAllTest/manual_block2.json index 763421c7f3..f01e55fb53 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_block2.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json index 0579aedf69..f097f15ee1 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json index d541cb6567..2df1fc42aa 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json index c134391a5f..be1df2030f 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json @@ -7,7 +7,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json 
b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json index e9f759f8f7..a8552404d5 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json @@ -7,7 +7,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json index 9b46ff105b..316fbba4a0 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json @@ -7,7 +7,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json index fdcaa131e6..d03b29d90f 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json index 01028f68ad..1b276cb2b0 100644 --- a/tests/pytest/tools/taosdemoAllTest/query-interrupt.json +++ b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json index 0fc789c7e3..8565e4a711 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json index 940adfb61c..0f9be9bdc3 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json +++ b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-stmt.json index a4baf73689..443da39fa1 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 1, + 
"create_table_thread_count": 1, "result_file": "1174.out", "confirm_parameter_prompt": "no", "num_of_records_per_req": 51, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-taosc.json b/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-taosc.json index a7a514e9dc..bd5709ca5e 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-taosc.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/1174-large-taosc.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 1, + "create_table_thread_count": 1, "result_file": "1174.out", "confirm_parameter_prompt": "no", "num_of_records_per_req": 51, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt-random.json b/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt-random.json index 3c38f92680..209f414c1b 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt-random.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt-random.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 1, + "create_table_thread_count": 1, "result_file": "1174.out", "confirm_parameter_prompt": "no", "num_of_records_per_req": 51, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt.json index 2ee489c7a3..903c8a9c93 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 1, + "create_table_thread_count": 1, "result_file": "1174.out", "confirm_parameter_prompt": "no", "num_of_records_per_req": 51, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-taosc.json b/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-taosc.json index 44da22aa3f..dcbec40034 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-taosc.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/1174-small-taosc.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 1, + "create_table_thread_count": 1, "result_file": "1174.out", "confirm_parameter_prompt": "no", "num_of_records_per_req": 51, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json index b2805a38e5..1ea4de5cfe 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json index ac540befb6..86f2fa6c4d 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json 
b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json index 9a7ad93636..d634ab8369 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json index 919b918395..4b69118ef5 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json index dcf52931ad..32043996b6 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json index d2304ed537..a1a0b89e48 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json index d297240613..f5cea2ccc3 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 100, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json index d117c5b345..c3bdea61c6 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json index 
1b36b3cbe9..e92644d33e 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json index ea95736a00..0618c04b30 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json index 8318de6672..356ac38d14 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json index b6cb47f2c5..2f8f693166 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json index 348e93ff8b..c1da95ba8c 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json index edbaae60a1..9522f0e7b5 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file":"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json index 1c72b4f402..bcbda0a301 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json +++ 
b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json index 4626babd95..2b30aa3e9e 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json index f140883de1..f3c577b30c 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json index d1d2db2df3..a0ff887250 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json index d79d4cace5..5ff9ec63a2 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json index eb0ab0f04a..79ce66097b 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json index 489632c645..4b21f0a184 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json +++ 
b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json index 19eb92bf4c..9fb85aef23 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json index dbda4f74a1..80944de3f5 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json index 966c285d2f..834ffb56d3 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json index c1fc02553f..f39aa94830 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json index ed3eb280f6..6345227788 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json index 1d7ad8a90e..75a365bbff 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json index 1ca302a320..f5e7ac3018 100644 --- a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json index ef63546278..896a72598d 100644 --- a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json index b6e5847b54..8211a92a2d 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json index ed97fea33e..304ff99c26 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json index db34bfc6b8..444e6564be 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json index d029ddea21..67003a1fb5 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": 
"./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json index f8a181d352..7454af6521 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json index b06ec55ef6..602a39ca24 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json index 6a6a6da297..79d3bc5ed8 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "create_table_thread_count": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 82c57a656d..9a4b564319 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -94,7 +94,7 @@ class taosdemoPerformace: "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "databases": [db] } diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py index bc31b9fbcc..d23e2f79af 100644 --- a/tests/pytest/tools/taosdumpTest.py +++ b/tests/pytest/tools/taosdumpTest.py @@ -35,7 +35,7 @@ class TDTestCase: else: return True - def getBuildPath(self): + def getPath(self, tool="taosdump"): selfPath = os.path.dirname(os.path.realpath(__file__)) if ("community" in selfPath): @@ -43,25 +43,33 @@ class TDTestCase: else: projPath = selfPath[:selfPath.find("tests")] + paths = [] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ((tool) in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] + paths.append(os.path.join(root, tool)) break - return buildPath + if (len(paths) == 0): + return "" + return paths[0] def run(self): if not os.path.exists("./taosdumptest/tmp1"): os.makedirs("./taosdumptest/tmp1") else: - print("目录存在") + os.system("rm -rf ./taosdumptest/tmp1") + os.makedirs("./taosdumptest/tmp1") if not os.path.exists("./taosdumptest/tmp2"): os.makedirs("./taosdumptest/tmp2") + else: + os.system("rm -rf ./taosdumptest/tmp2") + os.makedirs("./taosdumptest/tmp2") + tdSql.execute("drop database if exists 
db") - tdSql.execute("create database db duration 11 keep 3649 blocks 8 ") - tdSql.execute("create database db1 duration 12 keep 3640 blocks 7 ") + tdSql.execute("create database db duration 11 keep 3649") + tdSql.execute("create database db1 duration 12 keep 3640") tdSql.execute("use db") tdSql.execute( "create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") @@ -78,31 +86,30 @@ class TDTestCase: sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100) tdSql.execute(sql) - buildPath = self.getBuildPath() - if (buildPath == ""): + binPath = self.getPath() + if (binPath == ""): tdLog.exit("taosdump not found!") else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" + tdLog.info("taosdump found: %s" % binPath) - os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath) + os.system("%s -y --databases db -o ./taosdumptest/tmp1" % binPath) os.system( - "%staosdump --databases db1 -o ./taosdumptest/tmp2" % + "%s -y --databases db1 -o ./taosdumptest/tmp2" % binPath) tdSql.execute("drop database db") tdSql.execute("drop database db1") tdSql.query("show databases") - tdSql.checkRows(0) + tdSql.checkRows(2) - os.system("%staosdump -i ./taosdumptest/tmp1" % binPath) - os.system("%staosdump -i ./taosdumptest/tmp2" % binPath) + os.system("%s -i ./taosdumptest/tmp1" % binPath) + os.system("%s -i ./taosdumptest/tmp2" % binPath) tdSql.execute("use db") tdSql.query("show databases") - tdSql.checkRows(2) + tdSql.checkRows(4) dbresult = tdSql.queryResult - # 6--duration,7--keep0,keep1,keep, 12--block, + # 6--duration,7--keep0,keep1,keep isCommunity = self.checkCommunity() print("iscommunity: %d" % isCommunity) @@ -111,20 +118,15 @@ class TDTestCase: print(dbresult[i]) print(type(dbresult[i][6])) print(type(dbresult[i][7])) - print(type(dbresult[i][9])) - assert dbresult[i][6] == 11 - if isCommunity: - assert dbresult[i][7] == "3649" - else: - assert dbresult[i][7] == "3649,3649,3649" - assert dbresult[i][9] == 8 + print((dbresult[i][6])) + assert dbresult[i][6] == "15840m" + print((dbresult[i][7])) + assert dbresult[i][7] == "5254560m,5254560m,5254560m" if dbresult[i][0] == 'db1': - assert dbresult[i][6] == 12 - if isCommunity: - assert dbresult[i][7] == "3640" - else: - assert dbresult[i][7] == "3640,3640,3640" - assert dbresult[i][9] == 7 + print((dbresult[i][6])) + assert dbresult[i][6] == "17280m" + print((dbresult[i][7])) + assert dbresult[i][7] == "5241600m,5241600m,5241600m" tdSql.query("show stables") tdSql.checkRows(1) @@ -132,8 +134,10 @@ class TDTestCase: tdSql.query("show tables") tdSql.checkRows(2) - tdSql.checkData(0, 0, 't2') - tdSql.checkData(1, 0, 't1') + dbresult = tdSql.queryResult + print(dbresult) + for i in range(len(dbresult)): + assert ((dbresult[i][0] == "t1") or (dbresult[i][0] == "t2")) tdSql.query("select * from t1") tdSql.checkRows(100) @@ -155,7 +159,7 @@ class TDTestCase: os.system("rm -rf ./taosdumptest/tmp2") os.makedirs("./taosdumptest/tmp1") tdSql.execute("create database db12312313231231321312312312_323") - tdSql.error("create database db12312313231231321312312312_3231") + tdSql.error("create database db012345678911234567892234567893323456789423456789523456789bcdefe") tdSql.execute("use db12312313231231321312312312_323") tdSql.execute("create stable st12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 
binary(10))") tdSql.error("create stable st_12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") @@ -168,9 +172,10 @@ class TDTestCase: tdSql.query("show stables") tdSql.checkRows(2) os.system( - "%staosdump --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" % binPath) + "%s -y --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" % + binPath) tdSql.execute("drop database db12312313231231321312312312_323") - os.system("%staosdump -i ./taosdumptest/tmp1" % binPath) + os.system("%s -i ./taosdumptest/tmp1" % binPath) tdSql.execute("use db12312313231231321312312312_323") tdSql.query("show stables") tdSql.checkRows(2) diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py index 839988375b..f611623241 100644 --- a/tests/pytest/tools/taosdumpTest2.py +++ b/tests/pytest/tools/taosdumpTest2.py @@ -26,9 +26,9 @@ class TDTestCase: self.ts = 1601481600000 self.numberOfTables = 1 - self.numberOfRecords = 15000 + self.numberOfRecords = 150 - def getBuildPath(self): + def getPath(self, tool="taosdump"): selfPath = os.path.dirname(os.path.realpath(__file__)) if ("community" in selfPath): @@ -36,15 +36,24 @@ class TDTestCase: else: projPath = selfPath[:selfPath.find("tests")] + paths = [] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ((tool) in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] + paths.append(os.path.join(root, tool)) break - return buildPath + if (len(paths) == 0): + return "" + return paths[0] def run(self): + if not os.path.exists("./taosdumptest/tmp"): + os.makedirs("./taosdumptest/tmp") + else: + os.system("rm -rf ./taosdumptest/tmp") + os.makedirs("./taosdumptest/tmp") + tdSql.prepare() tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)") @@ -60,27 +69,26 @@ class TDTestCase: break tdSql.execute(sql) - buildPath = self.getBuildPath() - if (buildPath == ""): + binPath = self.getPath() + if (binPath == ""): tdLog.exit("taosdump not found!") else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" + tdLog.info("taosdump found in %s" % binPath) - os.system("rm /tmp/*.sql") + os.system("rm ./taosdumptest/tmp/*.sql") os.system( - "%staosdump --databases db -o /tmp -B 32766 -L 1048576" % + "%s --databases db -o ./taosdumptest/tmp -B 32766 -L 1048576" % binPath) tdSql.execute("drop database db") tdSql.query("show databases") - tdSql.checkRows(0) + tdSql.checkRows(2) - os.system("%staosdump -i /tmp" % binPath) + os.system("%s -i ./taosdumptest/tmp" % binPath) tdSql.query("show databases") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'db') + tdSql.checkRows(3) + tdSql.checkData(2, 0, 'db') tdSql.execute("use db") tdSql.query("show stables") @@ -90,6 +98,38 @@ class TDTestCase: tdSql.query("select count(*) from t1") tdSql.checkData(0, 0, self.numberOfRecords) + # test case for TS-1225 + tdSql.execute("create database test") + tdSql.execute("use test") + tdSql.execute( + "create table stb(ts timestamp, c1 binary(16374), c2 binary(16374), c3 binary(16374)) tags(t1 nchar(256))") + tdSql.execute( + "insert into t1 using stb tags('t1') values(now, '%s', 
'%s', '%s')" % + ("16374", + "16374", + "16374")) + +# sys.exit(0) + os.system("rm ./taosdumptest/tmp/*.sql") + os.system("rm ./taosdumptest/tmp/*.avro*") + os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath) + + tdSql.execute("drop database test") + tdSql.query("show databases") + tdSql.checkRows(3) + + os.system("%s -i ./taosdumptest/tmp -y" % binPath) + + tdSql.execute("use test") + tdSql.error("show vnodes '' ") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'stb') + + tdSql.query("select * from stb") + tdSql.checkRows(1) + os.system("rm -rf dump_result.txt") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py index e96de674d7..c40462b8db 100644 --- a/tests/pytest/tools/taosdumpTestNanoSupport.py +++ b/tests/pytest/tools/taosdumpTestNanoSupport.py @@ -35,7 +35,7 @@ class TDTestCase: else: return True - def getBuildPath(self): + def getPath(self, tool="taosdump"): selfPath = os.path.dirname(os.path.realpath(__file__)) if ("community" in selfPath): @@ -43,15 +43,16 @@ class TDTestCase: else: projPath = selfPath[:selfPath.find("tests")] + paths = [] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ((tool) in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] + paths.append(os.path.join(root, tool)) break - return buildPath - - + if (len(paths) == 0): + return "" + return paths[0] def createdb(self, precision="ns"): tb_nums = self.numberOfTables @@ -60,13 +61,16 @@ class TDTestCase: def build_db(precision, start_time): tdSql.execute("drop database if exists timedb1") tdSql.execute( - "create database timedb1 duration 10 keep 36500 blocks 8 precision "+"\""+precision+"\"") + "create database timedb1 duration 10 keep 36500 precision " + + "\"" + + precision + + "\"") tdSql.execute("use timedb1") tdSql.execute( "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") for tb in range(tb_nums): - tbname = "t"+str(tb) + tbname = "t" + str(tb) tdSql.execute("create table " + tbname + " using st tags(1, 'beijing')") sql = "insert into " + tbname + " values" @@ -79,8 +83,8 @@ class TDTestCase: ts_seed = 1000 for i in range(per_tb_rows): - sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % - 100, i % 100, currts + i*100) # currts +1000ms (1000000000ns) + sql += "(%d, %d, 'nchar%d',%d)" % (currts + i * ts_seed, i % + 100, i % 100, currts + i * 100) # currts +1000ms (1000000000ns) tdSql.execute(sql) if precision == "ns": @@ -97,7 +101,6 @@ class TDTestCase: else: print("other time precision not valid , please check! 
") - def run(self): @@ -118,12 +121,11 @@ class TDTestCase: if not os.path.exists("./taosdumptest/dumptmp3"): os.makedirs("./taosdumptest/dumptmp3") - buildPath = self.getBuildPath() - if (buildPath == ""): + binPath = self.getPath("taosdump") + if (binPath == ""): tdLog.exit("taosdump not found!") else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" + tdLog.info("taosdump found: %s" % binPath) # create nano second database @@ -132,67 +134,51 @@ class TDTestCase: # dump all data os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" % + binPath) # dump part data with -S -E os.system( - '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' % + '%s -y -g --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' % binPath) os.system( - '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % + '%s -y -g --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % binPath) - # replace strings to dump in databases - os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") - os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) - + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp2" % binPath) # dump data and check for taosdump - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 510) - tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp3" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 900) - tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 1000) # check data origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + dump_res = tdSql.getResult("select * from timedb1.st") if origin_res == dump_res: - tdLog.info("test nano second : dump check data pass for all data!" ) + tdLog.info("test nano second : dump check data pass for all data!") else: - tdLog.info("test nano second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! " ) - else: - tdLog.info(" test nano second : dump check data failed for data !" 
) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") - if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! " ) - else: - tdLog.info(" test nano second : dump check data failed for data !" ) - + tdLog.info( + "test nano second : dump check data failed for all data!") # us second support test case os.system("rm -rf ./taosdumptest/") - tdSql.execute("drop database if exists dumptmp1") - tdSql.execute("drop database if exists dumptmp2") - tdSql.execute("drop database if exists dumptmp3") + tdSql.execute("drop database if exists timedb1") if not os.path.exists("./taosdumptest/tmp1"): os.makedirs("./taosdumptest/dumptmp1") @@ -205,75 +191,63 @@ class TDTestCase: if not os.path.exists("./taosdumptest/dumptmp3"): os.makedirs("./taosdumptest/dumptmp3") - buildPath = self.getBuildPath() - if (buildPath == ""): + binPath = self.getPath() + if (binPath == ""): tdLog.exit("taosdump not found!") else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" + tdLog.info("taosdump found: %s" % binPath) self.createdb(precision="us") os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) - - os.system( - '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' % - binPath) - os.system( - '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % + "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + '%s -y -g --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' % + binPath) os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + '%s -y -g --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % + binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + os.system("%s -i ./taosdumptest/dumptmp2" % binPath) + os.system("%s -i ./taosdumptest/dumptmp3" % binPath) - - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp2" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 510) - tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp3" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 900) - tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 1000) - + # check data origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for 
taosdump + dump_res = tdSql.getResult("select * from timedb1.st") if origin_res == dump_res: - tdLog.info("test us second : dump check data pass for all data!" ) + tdLog.info("test micro second : dump check data pass for all data!") else: - tdLog.info("test us second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! " ) - else: - tdLog.info(" test us second : dump check data failed for data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") - if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! " ) - else: - tdLog.info(" test us second : dump check data failed for data! " ) + tdLog.info( + "test micro second : dump check data failed for all data!") - # ms second support test case os.system("rm -rf ./taosdumptest/") - tdSql.execute("drop database if exists dumptmp1") - tdSql.execute("drop database if exists dumptmp2") - tdSql.execute("drop database if exists dumptmp3") + tdSql.execute("drop database if exists timedb1") if not os.path.exists("./taosdumptest/tmp1"): os.makedirs("./taosdumptest/dumptmp1") @@ -286,69 +260,60 @@ class TDTestCase: if not os.path.exists("./taosdumptest/dumptmp3"): os.makedirs("./taosdumptest/dumptmp3") - buildPath = self.getBuildPath() - if (buildPath == ""): + binPath = self.getPath() + if (binPath == ""): tdLog.exit("taosdump not found!") else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" + tdLog.info("taosdump found: %s" % binPath) self.createdb(precision="ms") os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) - - os.system( - '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' % - binPath) - os.system( - '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % + "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + '%s -y -g --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' % + binPath) os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + '%s -y -g --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % + binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + os.system("%s -i ./taosdumptest/dumptmp2" % binPath) + os.system("%s -i ./taosdumptest/dumptmp3" % binPath) - - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp2" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 510) - tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp3" % binPath) + # dump data and check for taosdump 
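
# (editorial sketch) The -S/-E bounds used in the ns, us and ms passes of this
# test encode the same instant at three resolutions: 1625068810000000000,
# 1625068810000000 and 1625068810000 are all 2021-06-30 16:00:10 UTC.
# A minimal illustration of that scaling; epoch_for_precision is a
# hypothetical helper, not part of the test suite:
from datetime import datetime, timezone

def epoch_for_precision(dt, precision):
    # scale a UTC wall-clock time to the integer epoch taosdump expects for -S/-E
    scale = {"ms": 10**3, "us": 10**6, "ns": 10**9}[precision]
    return int(dt.replace(tzinfo=timezone.utc).timestamp()) * scale

# epoch_for_precision(datetime(2021, 6, 30, 16, 0, 10), "ns") == 1625068810000000000
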
+ tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 900) - tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + tdSql.query("select count(*) from timedb1.st") + tdSql.checkData(0, 0, 1000) - + # check data origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") + tdSql.execute("drop database timedb1") + os.system("%s -i ./taosdumptest/dumptmp1" % binPath) + # dump data and check for taosdump + dump_res = tdSql.getResult("select * from timedb1.st") if origin_res == dump_res: - tdLog.info("test ms second : dump check data pass for all data!" ) + tdLog.info( + "test million second : dump check data pass for all data!") else: - tdLog.info("test ms second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! " ) - else: - tdLog.info(" test ms second : dump check data failed for data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") - if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! " ) - else: - tdLog.info(" test ms second : dump check data failed for data! " ) + tdLog.info( + "test million second : dump check data failed for all data!") - os.system("rm -rf ./taosdumptest/") os.system("rm -rf ./dump_result.txt") os.system("rm -rf *.py.sql") diff --git a/tests/pytest/tsdb/insertDataDb1.json b/tests/pytest/tsdb/insertDataDb1.json index 92735dad69..f771551b26 100644 --- a/tests/pytest/tsdb/insertDataDb1.json +++ b/tests/pytest/tsdb/insertDataDb1.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tsdb/insertDataDb1Replica2.json b/tests/pytest/tsdb/insertDataDb1Replica2.json index a5fc525157..ec84d71d88 100644 --- a/tests/pytest/tsdb/insertDataDb1Replica2.json +++ b/tests/pytest/tsdb/insertDataDb1Replica2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tsdb/insertDataDb2.json b/tests/pytest/tsdb/insertDataDb2.json index 02301e0242..494465d23c 100644 --- a/tests/pytest/tsdb/insertDataDb2.json +++ b/tests/pytest/tsdb/insertDataDb2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tsdb/insertDataDb2Newstab.json b/tests/pytest/tsdb/insertDataDb2Newstab.json index 2f5f2367b4..647a587cad 100644 --- a/tests/pytest/tsdb/insertDataDb2Newstab.json +++ b/tests/pytest/tsdb/insertDataDb2Newstab.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", 
"confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json index 67f3b2cd4f..13cf2e561c 100644 --- a/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json +++ b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/tsdb/insertDataDb2Replica2.json b/tests/pytest/tsdb/insertDataDb2Replica2.json index 3d033f13cc..c651657a6d 100644 --- a/tests/pytest/tsdb/insertDataDb2Replica2.json +++ b/tests/pytest/tsdb/insertDataDb2Replica2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/util/taosdemoCfg.py b/tests/pytest/util/taosdemoCfg.py index 7523a80898..f708d303de 100644 --- a/tests/pytest/util/taosdemoCfg.py +++ b/tests/pytest/util/taosdemoCfg.py @@ -50,7 +50,7 @@ class TDTaosdemoCfg: "user": "root", "password": "taosdata", "thread_count": cpu_count(), - "thread_count_create_tbl": cpu_count(), + "create_table_thread_count": cpu_count(), "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/wal/insertDataDb1.json b/tests/pytest/wal/insertDataDb1.json index a14fe58141..2dc0cf2b7f 100644 --- a/tests/pytest/wal/insertDataDb1.json +++ b/tests/pytest/wal/insertDataDb1.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/wal/insertDataDb1Replica2.json b/tests/pytest/wal/insertDataDb1Replica2.json index a5fc525157..ec84d71d88 100644 --- a/tests/pytest/wal/insertDataDb1Replica2.json +++ b/tests/pytest/wal/insertDataDb1Replica2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/wal/insertDataDb2.json b/tests/pytest/wal/insertDataDb2.json index 891a21f73e..35232a6333 100644 --- a/tests/pytest/wal/insertDataDb2.json +++ b/tests/pytest/wal/insertDataDb2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/wal/insertDataDb2Newstab.json b/tests/pytest/wal/insertDataDb2Newstab.json index 2f5f2367b4..647a587cad 100644 --- a/tests/pytest/wal/insertDataDb2Newstab.json +++ b/tests/pytest/wal/insertDataDb2Newstab.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/wal/insertDataDb2NewstabReplica2.json b/tests/pytest/wal/insertDataDb2NewstabReplica2.json index 67f3b2cd4f..13cf2e561c 100644 --- a/tests/pytest/wal/insertDataDb2NewstabReplica2.json +++ 
b/tests/pytest/wal/insertDataDb2NewstabReplica2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/pytest/wal/insertDataDb2Replica2.json b/tests/pytest/wal/insertDataDb2Replica2.json index 3d033f13cc..c651657a6d 100644 --- a/tests/pytest/wal/insertDataDb2Replica2.json +++ b/tests/pytest/wal/insertDataDb2Replica2.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_count_create_tbl": 4, + "create_table_thread_count": 4, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 910b99ace3..356eaed57e 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -131,7 +131,7 @@ ./test.sh -f tsim/parser/insert_tb.sim # TD-17038 ./test.sh -f tsim/parser/interp.sim ./test.sh -f tsim/parser/join_manyblocks.sim -# TD-18018 ./test.sh -f tsim/parser/join_multitables.sim +./test.sh -f tsim/parser/join_multitables.sim ./test.sh -f tsim/parser/join_multivnode.sim ./test.sh -f tsim/parser/join.sim ./test.sh -f tsim/parser/last_cache.sim @@ -183,7 +183,7 @@ # ---- qnode ./test.sh -f tsim/qnode/basic1.sim -# ---- snode +# ---- snode ---- # unsupport ./test.sh -f tsim/snode/basic1.sim # ---- bnode @@ -199,7 +199,7 @@ # ---- show ---- ./test.sh -f tsim/show/basic.sim -# ---- table +# ---- table ---- ./test.sh -f tsim/table/autocreate.sim ./test.sh -f tsim/table/basic1.sim ./test.sh -f tsim/table/basic2.sim @@ -248,7 +248,7 @@ ./test.sh -f tsim/stream/ignoreExpiredData.sim ./test.sh -f tsim/stream/sliding.sim -# ---- transaction +# ---- transaction ---- ./test.sh -f tsim/trans/lossdata1.sim ./test.sh -f tsim/trans/create_db.sim @@ -307,7 +307,7 @@ ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim -# --- valgrind +# --- valgrind ---- ./test.sh -f tsim/valgrind/checkError1.sim ./test.sh -f tsim/valgrind/checkError2.sim ./test.sh -f tsim/valgrind/checkError3.sim @@ -331,7 +331,7 @@ ./test.sh -f tsim/vnode/stable_replica3_vnode3.sim # --- sync -./test.sh -f tsim/sync/3Replica1VgElect.sim +#./test.sh -f tsim/sync/3Replica1VgElect.sim #./test.sh -f tsim/sync/3Replica5VgElect.sim ./test.sh -f tsim/sync/oneReplica1VgElect.sim ./test.sh -f tsim/sync/oneReplica5VgElect.sim diff --git a/tests/script/sh/bit_and.c b/tests/script/sh/bit_and.c new file mode 100644 index 0000000000..2f2e48fdb0 --- /dev/null +++ b/tests/script/sh/bit_and.c @@ -0,0 +1,61 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "taosudf.h" + + +DLL_EXPORT int32_t bit_and_init() { + return 0; +} + +DLL_EXPORT int32_t bit_and_destroy() { + return 0; +} + +DLL_EXPORT int32_t bit_and(SUdfDataBlock* block, SUdfColumn *resultCol) { + + if (block->numOfCols < 2) { + return TSDB_CODE_UDF_INVALID_INPUT; + } + + for (int32_t i = 0; i < block->numOfCols; ++i) { + SUdfColumn* col = block->udfCols[i]; + if (!(col->colMeta.type == TSDB_DATA_TYPE_INT)) { + return TSDB_CODE_UDF_INVALID_INPUT; + } + } + + SUdfColumnMeta *meta = &resultCol->colMeta; + meta->bytes = 4; + meta->type = TSDB_DATA_TYPE_INT; + meta->scale = 0; + meta->precision = 0; + + + SUdfColumnData *resultData = &resultCol->colData; + + resultData->numOfRows = block->numOfRows; + + for (int32_t i = 0; i < resultData->numOfRows; ++i) { + if (udfColDataIsNull(block->udfCols[0], i)) {
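+ /* NULL handling: a NULL in the first input column nulls the output row here; NULLs in the later columns are caught by the inner loop below, and the j == numOfCols check only stores a result when no NULL was seen. */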
udfColDataSetNull(resultCol, i); + continue; + } + int32_t result = *(int32_t*)udfColDataGetData(block->udfCols[0], i); + int j = 1; + for (; j < block->numOfCols; ++j) { + if (udfColDataIsNull(block->udfCols[j], i)) { + udfColDataSetNull(resultCol, i); + break; + } + + char* colData = udfColDataGetData(block->udfCols[j], i); + result &= *(int32_t*)colData; + } + if (j == block->numOfCols) { + udfColDataSet(resultCol, i, (char*)&result, false); + } + + } + return TSDB_CODE_SUCCESS; +} diff --git a/tests/script/sh/compile_udf.sh b/tests/script/sh/compile_udf.sh new file mode 100755 index 0000000000..12e922b2df --- /dev/null +++ b/tests/script/sh/compile_udf.sh @@ -0,0 +1,10 @@ +set +e + +rm -rf /tmp/udf/libbitand.so /tmp/udf/libsqrsum.so +mkdir -p /tmp/udf +echo "compile udf bit_and and sqr_sum" +gcc -fPIC -shared sh/bit_and.c -o /tmp/udf/libbitand.so +gcc -fPIC -shared sh/sqr_sum.c -o /tmp/udf/libsqrsum.so +echo "debug show /tmp/udf/*.so" +ls /tmp/udf/*.so + diff --git a/tests/script/sh/sqr_sum.c b/tests/script/sh/sqr_sum.c new file mode 100644 index 0000000000..af57f377ab --- /dev/null +++ b/tests/script/sh/sqr_sum.c @@ -0,0 +1,80 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <math.h> + +#include "taosudf.h" + +DLL_EXPORT int32_t sqr_sum_init() { + return 0; +} + +DLL_EXPORT int32_t sqr_sum_destroy() { + return 0; +} + +DLL_EXPORT int32_t sqr_sum_start(SUdfInterBuf *buf) { + *(double*)(buf->buf) = 0; + buf->bufLen = sizeof(double); + buf->numOfResult = 0; + return 0; +} + +DLL_EXPORT int32_t sqr_sum(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { + double sumSquares = *(double*)interBuf->buf; + int32_t numNotNull = 0; + for (int32_t i = 0; i < block->numOfCols; ++i) { + SUdfColumn* col = block->udfCols[i]; + if (!(col->colMeta.type == TSDB_DATA_TYPE_INT || + col->colMeta.type == TSDB_DATA_TYPE_DOUBLE)) { + return TSDB_CODE_UDF_INVALID_INPUT; + } + } + for (int32_t i = 0; i < block->numOfCols; ++i) { + for (int32_t j = 0; j < block->numOfRows; ++j) { + SUdfColumn* col = block->udfCols[i]; + if (udfColDataIsNull(col, j)) { + continue; + } + switch (col->colMeta.type) { + case TSDB_DATA_TYPE_INT: { + char* cell = udfColDataGetData(col, j); + int32_t num = *(int32_t*)cell; + sumSquares += (double)num * num; + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + char* cell = udfColDataGetData(col, j); + double num = *(double*)cell; + sumSquares += num * num; + break; + } + default: + break; + } + ++numNotNull; + } + } + + *(double*)(newInterBuf->buf) = sumSquares; + newInterBuf->bufLen = sizeof(double); + + if (interBuf->numOfResult == 0 && numNotNull == 0) { + newInterBuf->numOfResult = 0; + } else { + newInterBuf->numOfResult = 1; + } + return 0; +} + +DLL_EXPORT int32_t sqr_sum_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) { + if (buf->numOfResult == 0) { + resultData->numOfResult = 0; + return 0; + } + double sumSquares = *(double*)(buf->buf); + *(double*)(resultData->buf) = sqrt(sumSquares); + resultData->bufLen = sizeof(double); + resultData->numOfResult = 1; + return 0; +} diff --git a/tests/script/tmp/r1.sim b/tests/script/tmp/r1.sim new file mode 100644 index 0000000000..3fc875ad23 --- /dev/null +++ b/tests/script/tmp/r1.sim @@ -0,0 +1,47 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +sql connect + +print =============== step1: create dnodes +sql create dnode $hostname port 7200 + +$x = 0
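+# step1 below polls "show dnodes" once per second and gives up after 10 tries if either dnode never reports ready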
+step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> rows: $rows +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 2 then + return -1 +endi +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi + +print =============== step2: create database +sql create database db vgroups 1 replica 1 +sql show databases +if $rows != 3 then + return -1 +endi + +sql use db; +sql create table stb (ts timestamp, c int) tags (t int); +sql create table t0 using stb tags (0); +sql insert into t0 values(now, 1); +sql insert into t0 values(now+1s, 1); + +return diff --git a/tests/script/tsim/parser/join_multitables.sim b/tests/script/tsim/parser/join_multitables.sim index 6c5138f8cb..4278be52f3 100644 --- a/tests/script/tsim/parser/join_multitables.sim +++ b/tests/script/tsim/parser/join_multitables.sim @@ -682,7 +682,7 @@ if $data08 != 3 then return -1 endi -sql select st0.f1,st1.f1 from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; +sql select st0.f1,st1.f1 from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 order by st0.f1; if $rows != 25 then return -1 endi @@ -721,22 +721,10 @@ endi if $data01 != @21-03-01 01:00:00.000@ then return -1 endi -if $data10 != @21-03-02 01:00:00.000@ then +if $data50 != @21-03-02 01:00:00.000@ then return -1 endi -if $data11 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data20 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data21 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data30 != @21-03-04 01:00:00.000@ then - return -1 -endi -if $data31 != @21-03-04 01:00:00.000@ then +if $data51 != @21-03-02 01:00:00.000@ then return -1 endi @@ -782,23 +770,23 @@ endi if $data04 != 01 then return -1 endi -if $data10 != @21-03-02 01:00:00.000@ then +if $data50 != @21-03-02 01:00:00.000@ then return -1 endi -if $data11 != 9901.000000000 then +if $data51 != 9901.000000000 then return -1 endi -if $data12 != 11 then +if $data52 != 11 then return -1 endi -if $data13 != 9911.000000000 then +if $data53 != 9911.000000000 then return -1 endi -if $data14 != 01 then +if $data54 != 01 then return -1 endi -sql select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(10a); +sql select _wstart, last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(10a); if $rows != 25 then return -1 endi @@ -830,7 +818,7 @@ if $data08 != 11 then return -1 endi -sql select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(1d) sliding(1d); +sql select _wstart, last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(1d) sliding(1d); if $rows != 5 then return -1 endi @@ -937,7 +925,7 @@ sql select st0.*,st1.* from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and if $rows != 5 then return -1 endi -if $data00 != @21-03-01 01:00:00.000@ then +if $data00 != @21-03-02 01:00:00.000@ then print $data00 return -1 endi @@ -965,7 +953,7 @@ endi if $data08 != 3 then return -1 endi -if $data09 != @21-03-01 01:00:00.000@ then +if $data09 != @21-03-02 01:00:00.000@ then return -1 endi @@ -973,38 +961,23 @@ sql select top(st1.f1, 5) from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts if $rows != 5 then return -1 endi -if $data00 != @21-03-01 05:00:00.000@ then - return -1 -endi -if $data01 != 9915 then +if $data00 != 9915 then return -1 endi -if $data10 != @21-03-02 05:00:00.000@ then - return -1 -endi -if $data11 != 9915 then 
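# editorial note: in 3.0, top() no longer implicitly returns the window timestamp, so the (ts, value) pair checks here collapse to value-only checks at column 0; the variant query below selects st0.ts explicitly when the timestamp is still wanted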
+if $data10 != 9915 then return -1 endi -if $data20 != @21-03-03 05:00:00.000@ then - return -1 -endi -if $data21 != 9915 then +if $data20 != 9915 then return -1 endi -if $data30 != @21-03-04 05:00:00.000@ then - return -1 -endi -if $data31 != 9915 then +if $data30 != 9915 then return -1 endi -if $data40 != @21-03-05 05:00:00.000@ then - return -1 -endi -if $data41 != 9915 then +if $data40 != 9915 then return -1 endi -sql select top(st0.f1,5) from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1; +sql select st0.ts, top(st0.f1,5) from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1 order by st0.ts; if $rows != 5 then return -1 endi @@ -1329,25 +1302,25 @@ if $data09 != 9925 then endi sql_error select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.f1=tb1_1.f1; -sql_error select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts and tb0_1.id1=tb1_1.id2; -sql_error select tb0_5.*, tb1_5.*,tb2_5.*,tb3_5.*,tb4_5.*,tb5_5.*, tb6_5.*,tb7_5.*,tb8_5.*,tb9_5.*,tba_5.* from tb0_5, tb1_5, tb2_5, tb3_5, tb4_5,tb5_5, tb6_5, tb7_5, tb8_5, tb9_5, tba_5 where tb9_5.ts=tb8_5.ts and tb8_5.ts=tb7_5.ts and tb7_5.ts=tb6_5.ts and tb6_5.ts=tb5_5.ts and tb5_5.ts=tb4_5.ts and tb4_5.ts=tb3_5.ts and tb3_5.ts=tb2_5.ts and tb2_5.ts=tb1_5.ts and tb1_5.ts=tb0_5.ts and tb0_5.ts=tba_5.ts; +sql select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts and tb0_1.id1=tb1_1.id2; +sql select tb0_5.*, tb1_5.*,tb2_5.*,tb3_5.*,tb4_5.*,tb5_5.*, tb6_5.*,tb7_5.*,tb8_5.*,tb9_5.*,tba_5.* from tb0_5, tb1_5, tb2_5, tb3_5, tb4_5,tb5_5, tb6_5, tb7_5, tb8_5, tb9_5, tba_5 where tb9_5.ts=tb8_5.ts and tb8_5.ts=tb7_5.ts and tb7_5.ts=tb6_5.ts and tb6_5.ts=tb5_5.ts and tb5_5.ts=tb4_5.ts and tb4_5.ts=tb3_5.ts and tb3_5.ts=tb2_5.ts and tb2_5.ts=tb1_5.ts and tb1_5.ts=tb0_5.ts and tb0_5.ts=tba_5.ts; -sql_error select * from st0, st1 where st0.ts=st1.ts; +sql select * from st0, st1 where st0.ts=st1.ts; sql_error select * from st0, st1 where st0.id1=st1.id1; sql_error select * from st0, st1 where st0.f1=st1.f1 and st0.id1=st1.id1; -sql_error select * from st0, st1, st2, st3 where st0.id1=st1.id1 and st2.id1=st3.id1 and st0.ts=st1.ts and st1.ts=st2.ts and st2.ts=st3.ts; +sql select * from st0, st1, st2, st3 where st0.id1=st1.id1 and st2.id1=st3.id1 and st0.ts=st1.ts and st1.ts=st2.ts and st2.ts=st3.ts; sql_error select * from st0, st1, st2 where st0.id1=st1.id1; sql_error select * from st0, st1 where st0.id1=st1.id1 and st0.id2=st1.id3; sql_error select * from st0, st1 where st0.id1=st1.id1 or st0.ts=st1.ts; sql_error select * from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 or st0.id2=st1.id2; sql_error select * from st0, st1, st2 where st0.ts=st1.ts and st0.id1=st1.id1; sql_error select * from st0, st1 where st0.id1=st1.ts and st0.ts=st1.id1; -sql_error select * from st0, st1 where st0.id1=st1.id2 and st0.ts=st1.ts; -sql_error select * from st0, st1 where st1.id4=st0.id4 and st1.ts=st0.ts; -sql_error select * from st0, st1 where st0.id1=st1.id2 and st1.ts=st0.ts; +sql select * from st0, st1 where st0.id1=st1.id2 and st0.ts=st1.ts; +sql select * from st0, st1 where st1.id4=st0.id4 and st1.ts=st0.ts; +sql select * from st0, st1 where st0.id1=st1.id2 and st1.ts=st0.ts; sql_error select * from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval 10a; sql_error select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 group by f1; sql_error select st0.*,st1.*,st2.*,st3.*,st4.*,st5.*,st6.*,st7.*,st8.*,st9.* from 
st0,st1,st2,st3,st4,st5,st6,st7,st8,st9 where st0.ts=st2.ts and st0.ts=st4.ts and st0.ts=st6.ts and st0.ts=st8.ts and st1.ts=st3.ts and st3.ts=st5.ts and st5.ts=st7.ts and st7.ts=st9.ts and st0.id1=st2.id1 and st0.id1=st4.id1 and st0.id1=st6.id1 and st0.id1=st8.id1 and st1.id1=st3.id1 and st3.id1=st5.id1 and st5.id1=st7.id1 and st7.id1=st9.id1; -sql_error select st0.*,st1.*,st2.*,st3.*,st4.*,st5.*,st6.*,st7.*,st8.*,st9.* from st0,st1,st2,st3,st4,st5,st6,st7,st8,st9,sta where st0.ts=st2.ts and st0.ts=st4.ts and st0.ts=st6.ts and st0.ts=st8.ts and st1.ts=st3.ts and st3.ts=st5.ts and st5.ts=st7.ts and st7.ts=st9.ts and st0.ts=st1.ts and st0.id1=st2.id1 and st0.id1=st4.id1 and st0.id1=st6.id1 and st0.id1=st8.id1 and st1.id1=st3.id1 and st3.id1=st5.id1 and st5.id1=st7.id1 and st7.id1=st9.id1 and st0.id1=st1.id1 and st0.id1=sta.id1 and st0.ts=sta.ts; +sql select st0.*,st1.*,st2.*,st3.*,st4.*,st5.*,st6.*,st7.*,st8.*,st9.* from st0,st1,st2,st3,st4,st5,st6,st7,st8,st9,sta where st0.ts=st2.ts and st0.ts=st4.ts and st0.ts=st6.ts and st0.ts=st8.ts and st1.ts=st3.ts and st3.ts=st5.ts and st5.ts=st7.ts and st7.ts=st9.ts and st0.ts=st1.ts and st0.id1=st2.id1 and st0.id1=st4.id1 and st0.id1=st6.id1 and st0.id1=st8.id1 and st1.id1=st3.id1 and st3.id1=st5.id1 and st5.id1=st7.id1 and st7.id1=st9.id1 and st0.id1=st1.id1 and st0.id1=sta.id1 and st0.ts=sta.ts; system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/nestquery.sim b/tests/script/tsim/parser/nestquery.sim index 101205db74..8cb7a3790b 100644 --- a/tests/script/tsim/parser/nestquery.sim +++ b/tests/script/tsim/parser/nestquery.sim @@ -160,12 +160,12 @@ endi sql select stddev(c1) from (select c1 from nest_tb0); sql_error select percentile(c1, 20) from (select * from nest_tb0); -sql select interp(c1) from (select * from nest_tb0); +#sql select interp(c1) from (select * from nest_tb0); sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0); sql_error select twa(c1) from (select c1 from nest_tb0); sql_error select irate(c1) from (select c1 from nest_tb0); sql_error select diff(c1), twa(c1) from (select * from nest_tb0); -sql_error select irate(c1), interp(c1), twa(c1) from (select * from nest_tb0); +#sql_error select irate(c1), interp(c1), twa(c1) from (select * from nest_tb0); sql select _wstart, apercentile(c1, 50) from (select * from nest_tb0) interval(1d) if $rows != 7 then diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim index fcf91c45da..7cc1458657 100644 --- a/tests/script/tsim/query/explain.sim +++ b/tests/script/tsim/query/explain.sim @@ -35,7 +35,7 @@ sql explain select * from st1 where -2; sql explain select ts from tb1; sql explain select * from st1; sql explain select * from st1 order by ts; -sql explain select * from information_schema.user_stables; +sql explain select * from information_schema.ins_stables; sql explain select count(*),sum(f1) from tb1; sql explain select count(*),sum(f1) from st1; sql explain select count(*),sum(f1) from st1 group by f1; @@ -46,7 +46,7 @@ sql explain verbose true select * from st1 where -2; sql explain verbose true select ts from tb1 where f1 > 0; sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00'; sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1; -sql explain verbose true select * from information_schema.user_stables where db_name='db2'; +sql explain verbose true select * from 
information_schema.ins_stables where db_name='db2'; print ======== step4 sql explain analyze select ts from st1 where -2; @@ -54,7 +54,7 @@ sql explain analyze select ts from tb1; sql explain analyze select ts from st1; sql explain analyze select ts from st1; sql explain analyze select ts from st1 order by ts; -sql explain analyze select * from information_schema.user_stables; +sql explain analyze select * from information_schema.ins_stables; sql explain analyze select count(*),sum(f1) from tb1; sql explain analyze select count(*),sum(f1) from st1; sql explain analyze select count(*),sum(f1) from st1 group by f1; @@ -65,14 +65,14 @@ sql explain analyze verbose true select ts from tb1; sql explain analyze verbose true select ts from st1; sql explain analyze verbose true select ts from st1; sql explain analyze verbose true select ts from st1 order by ts; -sql explain analyze verbose true select * from information_schema.user_stables; +sql explain analyze verbose true select * from information_schema.ins_stables; sql explain analyze verbose true select count(*),sum(f1) from tb1; sql explain analyze verbose true select count(*),sum(f1) from st1; sql explain analyze verbose true select count(*),sum(f1) from st1 group by f1; #sql explain analyze verbose true select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev); sql explain analyze verbose true select ts from tb1 where f1 > 0; sql explain analyze verbose true select f1 from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00'; -sql explain analyze verbose true select * from information_schema.user_stables where db_name='db2'; +sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2'; sql explain analyze verbose true select * from (select min(f1),count(*) a from st1 where f1 > 0) where a < 0; #not pass case diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim new file mode 100644 index 0000000000..7259b1e779 --- /dev/null +++ b/tests/script/tsim/query/udf.sim @@ -0,0 +1,161 @@ +system_content printf %OS% +if $system_content == Windows_NT then + return 0; +endi + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c udf -v 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======== step1 udf +system sh/compile_udf.sh +sql create database udf vgroups 3; +sql use udf; +sql show databases; + +sql create table t (ts timestamp, f int); +sql insert into t values(now, 1)(now+1s, 2); + +system_content printf %OS% +if $system_content == Windows_NT then + return 0; +endi +if $system_content == Windows_NT then + sql create function bit_and as 'C:\\Windows\\Temp\\bitand.dll' outputtype int bufSize 8; + sql create aggregate function sqr_sum as 'C:\\Windows\\Temp\\sqrsum.dll' outputtype double bufSize 8; +else + sql create function bit_and as '/tmp/udf/libbitand.so' outputtype int bufSize 8; + sql create aggregate function sqr_sum as '/tmp/udf/libsqrsum.so' outputtype double bufSize 8; +endi +sql show functions; +if $rows != 2 then + return -1 +endi +sql select bit_and(f, f) from t; +if $rows != 2 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi + +sql select sqr_sum(f) from t; +if $rows != 1 then + print expect 1, actual $rows + return -1 +endi +if $data00 != 2.236067977 then + return -1 +endi + +sql create table t2 (ts timestamp, f1 int, f2 int); +sql insert into t2 values(now, 0, 0)(now+1s, 1, 1); +sql select bit_and(f1, f2) from t2; +if $rows != 2 
then + return -1 +endi +if $data00 != 0 then + return -1 +endi +if $data10 != 1 then + return -1 +endi + +sql select sqr_sum(f1, f2) from t2; +if $rows != 1 then + return -1 +endi +if $data00 != 1.414213562 then + return -1 +endi + +sql insert into t2 values(now+2s, 1, null)(now+3s, null, 2); +sql select bit_and(f1, f2) from t2; +print $rows , $data00 , $data10 , $data20 , $data30 +if $rows != 4 then + return -1 +endi +if $data00 != 0 then + return -1 +endi +if $data10 != 1 then + return -1 +endi + +if $data20 != NULL then + return -1 +endi + +if $data30 != NULL then + return -1 +endi + +sql select sqr_sum(f1, f2) from t2; +print $rows, $data00 +if $rows != 1 then + return -1 +endi +if $data00 != 2.645751311 then + return -1 +endi + +sql insert into t2 values(now+4s, 4, 8)(now+5s, 5, 9); +sql select sqr_sum(f1-f2), sqr_sum(f1+f2) from t2; +print $rows , $data00 , $data01 +if $rows != 1 then + return -1; +endi +if $data00 != 5.656854249 then + return -1 +endi +if $data01 != 18.547236991 then + return -1 +endi + +sql select sqr_sum(bit_and(f2, f1)), sqr_sum(bit_and(f1, f2)) from t2; +print $rows , $data00 , $data01 +if $rows != 1 then + return -1 +endi +if $data00 != 1.414213562 then + return -1 +endi +if $data01 != 1.414213562 then + return -1 +endi + +sql select sqr_sum(f2) from udf.t2 group by 1-bit_and(f1, f2) order by 1-bit_and(f1,f2); +print $rows , $data00 , $data10 , $data20 +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data10 != 9.055385138 then + return -1 +endi +if $data20 != 8.000000000 then + return -1 +endi + +sql drop function bit_and; +sql show functions; +if $rows != 1 then + return -1 +endi +if $data00 != @sqr_sum@ then + return -1 + endi +sql drop function sqr_sum; +sql show functions; +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim index c4af7f3f3c..cc2b847d1a 100644 --- a/tests/script/tsim/show/basic.sim +++ b/tests/script/tsim/show/basic.sim @@ -77,37 +77,37 @@ if $rows != 3 then endi print =============== run select * from information_schema.xxxx -sql select * from information_schema.`dnodes` +sql select * from information_schema.ins_dnodes if $rows != 2 then return -1 endi -sql select * from information_schema.`mnodes` +sql select * from information_schema.ins_mnodes if $rows != 1 then return -1 endi -#sql select * from information_schema.`modules` -#sql select * from information_schema.`qnodes` -sql select * from information_schema.user_databases +#sql select * from information_schema.ins_modules +#sql select * from information_schema.ins_qnodes +sql select * from information_schema.ins_databases if $rows != 3 then return -1 endi -#sql select * from information_schema.user_functions -#sql select * from information_schema.user_indexes -sql select * from information_schema.user_stables +#sql select * from information_schema.ins_functions +#sql select * from information_schema.ins_indexes +sql select * from information_schema.ins_stables if $rows != 1 then return -1 endi -#sql select * from information_schema.`streams` -sql select * from information_schema.user_tables +#sql select * from information_schema.ins_streams +sql select * from information_schema.ins_tables if $rows <= 0 then return -1 endi -#sql select * from information_schema.user_table_distributed -sql select * from information_schema.user_users +#sql select * from information_schema.ins_table_distributed +sql select * from 
information_schema.ins_users if $rows != 1 then return -1 endi -sql select * from information_schema.`vgroups` +sql select * from information_schema.ins_vgroups if $rows != 3 then return -1 endi @@ -175,37 +175,37 @@ if $rows != 3 then endi print =============== run select * from information_schema.xxxx -sql select * from information_schema.`dnodes` +sql select * from information_schema.ins_dnodes if $rows != 2 then return -1 endi -sql select * from information_schema.`mnodes` +sql select * from information_schema.ins_mnodes if $rows != 1 then return -1 endi -#sql select * from information_schema.`modules` -#sql select * from information_schema.`qnodes` -sql select * from information_schema.user_databases +#sql select * from information_schema.ins_modules +#sql select * from information_schema.ins_qnodes +sql select * from information_schema.ins_databases if $rows != 3 then return -1 endi -#sql select * from information_schema.user_functions -#sql select * from information_schema.user_indexes -sql select * from information_schema.user_stables +#sql select * from information_schema.ins_functions +#sql select * from information_schema.ins_indexes +sql select * from information_schema.ins_stables if $rows != 1 then return -1 endi -#sql select * from performance_schema.`streams` -sql select * from information_schema.user_tables +#sql select * from performance_schema.perf_streams +sql select * from information_schema.ins_tables if $rows <= 0 then return -1 endi -#sql select * from information_schema.user_table_distributed -sql select * from information_schema.user_users +#sql select * from information_schema.ins_table_distributed +sql select * from information_schema.ins_users if $rows != 1 then return -1 endi -sql select * from information_schema.`vgroups` +sql select * from information_schema.ins_vgroups if $rows != 3 then return -1 endi diff --git a/tests/script/tsim/sync/3Replica1VgElect.sim b/tests/script/tsim/sync/3Replica1VgElect.sim index 1cadf7c81d..4e127654ee 100644 --- a/tests/script/tsim/sync/3Replica1VgElect.sim +++ b/tests/script/tsim/sync/3Replica1VgElect.sim @@ -140,6 +140,8 @@ if $rows != $totalTblNum then return -1 endi +print ====> start_switch_leader: + start_switch_leader: $switch_loop_cnt = 0 @@ -299,7 +301,7 @@ print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $da print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] -print ===> $rows $data[4][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +print ===> $rows $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] if $rows != 5 then return -1 @@ -341,6 +343,8 @@ print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $da print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +print ===> $rows $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] + if $data[0][0] != 1 then return -1 endi @@ -358,6 +362,8 @@ if $data[3][4] != ready then goto check_dnode_ready_2 endi +print ====> final test: create child 
+print ====> final test: create child table ctb2* and table ntb2*
+
 sql use db;
 $ctbPrefix = ctb2
 $ntbPrefix = ntb2
diff --git a/tests/script/tsim/sync/3Replica5VgElect.sim b/tests/script/tsim/sync/3Replica5VgElect.sim
index 2a9f8c30a2..c4ab9bd4bc 100644
--- a/tests/script/tsim/sync/3Replica5VgElect.sim
+++ b/tests/script/tsim/sync/3Replica5VgElect.sim
@@ -542,6 +542,8 @@ if $data[3][4] != ready then
   goto check_dnode_ready_2
 endi
 
+print ====> final test: create child table ctb2* and table ntb2*
+
 sql use db;
 $ctbPrefix = ctb2
 $ntbPrefix = ntb2
diff --git a/tests/script/tsim/sync/start3replica.sim b/tests/script/tsim/sync/start3replica.sim
new file mode 100644
index 0000000000..f66021f88a
--- /dev/null
+++ b/tests/script/tsim/sync/start3replica.sim
@@ -0,0 +1,19 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+
diff --git a/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim b/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
index 241781eed1..c4e0503aa9 100644
--- a/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
+++ b/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
@@ -167,12 +167,26 @@ system sh/exec.sh -n dnode4 -s start
 
 sleep 3000
 
-print =============== query data
+
+
+
+print =============== query data of level 1
 sql connect
 sql use db
+
 sql select * from ct1 where ts > now - 1d
-print rows: $rows
+print rows of level 1: $rows
 print $data00 $data01 $data02
 if $rows != 100 then
   return -1
-endi
\ No newline at end of file
+endi
+
+print =============== query data of level 2
+sql select * from ct1 where ts > now - 10d
+print rows of level 2: $rows
+print $data00 $data01 $data02
+
+print =============== query data of level 3
+sql select * from ct1
+print rows of level 3: $rows
+print $data00 $data01 $data02
diff --git a/tests/script/tsim/table/delete_writing.sim b/tests/script/tsim/table/delete_writing.sim
index df7c7f47e8..2f11424e8a 100644
--- a/tests/script/tsim/table/delete_writing.sim
+++ b/tests/script/tsim/table/delete_writing.sim
@@ -3,10 +3,6 @@ system sh/deploy.sh -n dnode1 -i 1
 system sh/exec.sh -n dnode1 -s start
 sql connect
 
-print ========= start dnodes
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
 sql create database db
 sql create table db.tb (ts timestamp, i int)
 sql insert into db.tb values(now, 1)
diff --git a/tests/script/tsim/table/smallint.sim b/tests/script/tsim/table/smallint.sim
index 87140c561a..a7547c7282 100644
--- a/tests/script/tsim/table/smallint.sim
+++ b/tests/script/tsim/table/smallint.sim
@@ -90,7 +90,6 @@ endi
 if $data01 != 2 then
   return -1
 endi
-return
 sql drop database $db
 sql show databases
 if $rows != 2 then
diff --git a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim
index d9c6b195d2..dda5e0059e 100644
--- a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim
+++ b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim
@@ -342,12 +342,12 @@ endi
   return -1
 check_ok_3:
 
-sql select * from performance_schema.`consumers`
+sql select * from performance_schema.perf_consumers
 if $rows != 0 then
   return -1
 endi
 
-#sql select * from performance_schema.`subscriptions`
+#sql select * from performance_schema.perf_subscriptions
 #if $rows != 0 then
 #  return -1
 #endi
diff --git a/tests/script/tsim/tmq/snapshot1.sim b/tests/script/tsim/tmq/snapshot1.sim
index 58541b725d..e586719db2 100644
--- a/tests/script/tsim/tmq/snapshot1.sim
+++ b/tests/script/tsim/tmq/snapshot1.sim
@@ -294,12 +294,12 @@ endi
   return -1
 check_ok_3:
 
-sql select * from performance_schema.`consumers`
+sql select * from performance_schema.perf_consumers
 if $rows != 0 then
   return -1
 endi
 
-#sql select * from performance_schema.`subscriptions`
+#sql select * from performance_schema.perf_subscriptions
 #if $rows != 0 then
 #  return -1
 #endi
diff --git a/tests/script/tsim/valgrind/basic3.sim b/tests/script/tsim/valgrind/basic3.sim
index b9ed1641c8..0913691a11 100644
--- a/tests/script/tsim/valgrind/basic3.sim
+++ b/tests/script/tsim/valgrind/basic3.sim
@@ -29,7 +29,7 @@ $rowNum = 10
 print =============== step2: prepare data
 sql create database db vgroups 2
 sql use db
-sql create table if not exists stb (ts timestamp, tbcol int, tbcol2 float, tbcol3 double) tags (tgcol int unsigned)
+sql create table if not exists stb (ts timestamp, tbcol int, tbcol2 float, tbcol3 double, tbcol4 binary(30), tbcol5 binary(30)) tags (tgcol int unsigned)
 
 $i = 0
 while $i < $tbNum
@@ -39,19 +39,22 @@ while $i < $tbNum
   while $x < $rowNum
     $cc = $x * 60000
     $ms = 1601481600000 + $cc
-    sql insert into $tb values ($ms , $x , $x , $x )
+    sql insert into $tb values ($ms , $x , $x , $x , "abcd1234=-+*" , "123456 0" )
     $x = $x + 1
   endw
+
+  $cc = $x * 60000
+  $ms = 1601481600000 + $cc
+  sql insert into $tb values ($ms , NULL , NULL , NULL , NULL , NULL )
   $i = $i + 1
 endw
 
 print =============== step3: tb
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select tbcol5 - tbcol3 from tb1
 
 print =============== step4: stb
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select tbcol5 - tbcol3 from stb
 
-_OVER:
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
 print =============== check
 $null=
diff --git a/tests/script/tsim/valgrind/checkError1.sim b/tests/script/tsim/valgrind/checkError1.sim
index 059808e4be..f06a26aec6 100644
--- a/tests/script/tsim/valgrind/checkError1.sim
+++ b/tests/script/tsim/valgrind/checkError1.sim
@@ -84,37 +84,37 @@ if $rows != 1 then
 endi
 
 print =============== run select * from information_schema.xxxx
-sql select * from information_schema.`dnodes`
+sql select * from information_schema.ins_dnodes
 if $rows != 2 then
   return -1
 endi
-sql select * from information_schema.`mnodes`
+sql select * from information_schema.ins_mnodes
 if $rows != 1 then
   return -1
 endi
-sql select * from information_schema.user_databases
+sql select * from information_schema.ins_databases
 if $rows != 3 then
   return -1
 endi
-sql select * from information_schema.user_stables
+sql select * from information_schema.ins_stables
 if $rows != 1 then
   return -1
 endi
-sql select * from information_schema.user_tables
+sql select * from information_schema.ins_tables
 if $rows <= 0 then
   return -1
 endi
-sql select * from information_schema.user_users
+sql select * from information_schema.ins_users
 if $rows != 1 then
   return -1
 endi
-sql select * from information_schema.`vgroups`
+sql select * from information_schema.ins_vgroups
 if $rows != 3 then
   return -1
 endi
diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim
index 7a16f5668a..6e456148bf 100644
--- a/tests/script/tsim/valgrind/checkError6.sim
+++ b/tests/script/tsim/valgrind/checkError6.sim
@@ -77,6 +77,8 @@ sql select length("abcd1234"), char_length("abcd1234=-+*") from tb1
 sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from tb1
 sql select * from tb1 where tbcol not in (1,2,3,null);
 sql select * from tb1 where tbcol + 3 <> null;
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select tbcol5 - tbcol3 from tb1
 
 print =============== step4: stb
 sql select avg(tbcol) as c from stb
@@ -105,17 +107,20 @@ sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4),
 sql select * from stb where tbcol not in (1,2,3,null);
 sql select * from stb where tbcol + 3 <> null;
 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d)
+sql select _wstart, count(*) from tb1 session(ts, 1m)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select tbcol5 - tbcol3 from stb
 
 print =============== step5: explain
 sql explain analyze select ts from stb where -2;
 sql explain analyze select ts from tb1;
 sql explain analyze select ts from stb order by ts;
-sql explain analyze select * from information_schema.user_stables;
+sql explain analyze select * from information_schema.ins_stables;
 sql explain analyze select count(*),sum(tbcol) from tb1;
 sql explain analyze select count(*),sum(tbcol) from stb;
 sql explain analyze select count(*),sum(tbcol) from stb group by tbcol;
-sql explain analyze select * from information_schema.user_stables;
-sql explain analyze verbose true select * from information_schema.user_stables where db_name='db2';
+sql explain analyze select * from information_schema.ins_stables;
+sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2';
 sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
 sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
diff --git a/tests/system-test/0-others/user_control.py b/tests/system-test/0-others/user_control.py
index ce8ac6941b..3be59f0adf 100644
--- a/tests/system-test/0-others/user_control.py
+++ b/tests/system-test/0-others/user_control.py
@@ -1,15 +1,16 @@
-from tabnanny import check
 import taos
 import time
 import inspect
 import traceback
 import socket
 from dataclasses import dataclass
+from datetime import datetime
 
 from util.log import *
 from util.sql import *
 from util.cases import *
 from util.dnodes import *
+from util.common import *
 
 PRIVILEGES_ALL = "ALL"
 PRIVILEGES_READ = "READ"
@@ -21,17 +22,40 @@ WEIGHT_WRITE = 3
 
 PRIMARY_COL = "ts"
 
-INT_COL = "c1"
-BINT_COL = "c2"
-SINT_COL = "c3"
-TINT_COL = "c4"
-FLOAT_COL = "c5"
-DOUBLE_COL = "c6"
-BOOL_COL = "c7"
+INT_COL = "c_int"
+BINT_COL = "c_bint"
"c_sint" +TINT_COL = "c_tint" +FLOAT_COL = "c_float" +DOUBLE_COL = "c_double" +BOOL_COL = "c_bool" +TINT_UN_COL = "c_utint" +SINT_UN_COL = "c_usint" +BINT_UN_COL = "c_ubint" +INT_UN_COL = "c_uint" +BINARY_COL = "c_binary" +NCHAR_COL = "c_nchar" +TS_COL = "c_ts" -BINARY_COL = "c8" -NCHAR_COL = "c9" -TS_COL = "c10" +NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [BOOL_COL, ] +TS_TYPE_COL = [TS_COL, ] + +INT_TAG = "t_int" + +ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL] +TAG_COL = [INT_TAG] + +# insert data args: +TIME_STEP = 10000 +NOW = int(datetime.timestamp(datetime.now()) * 1000) + +# init db/table +DBNAME = "db" +STBNAME = "stb1" +CTBNAME = "ct1" +NTBNAME = "nt1" class TDconnect: def __init__(self, @@ -247,25 +271,26 @@ class TDTestCase: with taos_connect(user=user.name, passwd=user.passwd) as use: time.sleep(2) if check_priv == PRIVILEGES_ALL: - use.query("use db") - use.query("show tables") - use.query("select * from ct1") - use.query("insert into t1 (ts) values (now())") + use.query(f"use {DBNAME}") + use.query(f"show {DBNAME}.tables") + use.query(f"select * from {DBNAME}.{CTBNAME}") + use.query(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())") elif check_priv == PRIVILEGES_READ: - use.query("use db") - use.query("show tables") - use.query("select * from ct1") - use.error("insert into t1 (ts) values (now())") + use.query(f"use {DBNAME}") + use.query(f"show {DBNAME}.tables") + use.query(f"select * from {DBNAME}.{CTBNAME}") + use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())") elif check_priv == PRIVILEGES_WRITE: - use.query("use db") - use.query("show tables") - use.error("select * from ct1") - use.query("insert into t1 (ts) values (now())") + use.query(f"use {DBNAME}") + use.query(f"show {DBNAME}.tables") + use.error(f"select * from {DBNAME}.{CTBNAME}") + use.query(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())") elif check_priv is None: - use.error("use db") - use.error("show tables") - use.error("select * from db.ct1") - use.error("insert into db.t1 (ts) values (now())") + use.error(f"use {DBNAME}") + # use.error(f"show {DBNAME}.tables") + use.error(f"show tables") + use.error(f"select * from {DBNAME}.{CTBNAME}") + use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())") def __change_user_priv(self, user: User, pre_priv, invoke=False): if user.priv == pre_priv and invoke : @@ -418,7 +443,7 @@ class TDTestCase: self.__grant_user_privileges(privilege="", dbname="db", user_name=self.__user_list[0]) , self.__grant_user_privileges(privilege=" ".join(self.__privilege), user_name=self.__user_list[0]) , f"GRANT {self.__privilege[0]} ON * TO {self.__user_list[0]}" , - f"GRANT {self.__privilege[0]} ON db.t1 TO {self.__user_list[0]}" , + f"GRANT {self.__privilege[0]} ON {DBNAME}.{NTBNAME} TO {self.__user_list[0]}" , ] def __revoke_err(self): @@ -430,7 +455,7 @@ class TDTestCase: self.__revoke_user_privileges(privilege="", dbname="db", user_name=self.__user_list[0]) , self.__revoke_user_privileges(privilege=" ".join(self.__privilege), user_name=self.__user_list[0]) , f"REVOKE {self.__privilege[0]} ON * FROM {self.__user_list[0]}" , - f"REVOKE {self.__privilege[0]} ON db.t1 FROM {self.__user_list[0]}" , + f"REVOKE {self.__privilege[0]} ON {DBNAME}.{NTBNAME} FROM {self.__user_list[0]}" , ] def test_grant_err(self): @@ -505,101 +530,48 @@ class TDTestCase: self.drop_user_error() self.drop_user_current() - 
-    def __create_tb(self):
-
-        tdLog.printNoPrefix("==========step1:create table")
-        create_stb_sql = f'''create table stb1(
-            ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
-            {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
-            {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
-            ) tags (t1 int)
-            '''
-        create_ntb_sql = f'''create table t1(
-            ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
-            {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
-            {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
-            )
+    def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, dbname=DBNAME):
+        tdLog.printNoPrefix("==========step: create table")
+        create_stb_sql = f'''create table {dbname}.{stb}(
+            {PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+            {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+            {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+            {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+            {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
+            ) tags ({INT_TAG} int)
             '''
         tdSql.execute(create_stb_sql)
-        tdSql.execute(create_ntb_sql)
-        for i in range(4):
-            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
-            { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
-
-    def __insert_data(self, rows):
-        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
-        for i in range(rows):
-            tdSql.execute(
-                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
-            )
-            tdSql.execute(
-                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
-            )
-            tdSql.execute(
-                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
-            )
-        tdSql.execute(
-            f'''insert into ct1 values
-            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
-            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
-            '''
-        )
-
-        tdSql.execute(
-            f'''insert into ct4 values
-            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            (
-                { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
-                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
+        for i in range(ntbnum):
+            create_ntb_sql = f'''create table {dbname}.nt{i+1}(
+                {PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+                {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+                {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
             )
-            (
-                { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
-                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
-            )
-            '''
-        )
-
-        tdSql.execute(
-            f'''insert into ct2 values
-            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            (
-                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
-                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
-            )
-            (
-                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
-                { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
-            )
-            '''
-        )
-
-        for i in range(rows):
-            insert_data = f'''insert into t1 values
-                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
-                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
-            '''
-            tdSql.execute(insert_data)
-        tdSql.execute(
-            f'''insert into t1 values
-            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
-                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
-                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
-            )
-            (
-                { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
-                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
-                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
-            )
+            '''
+            tdSql.execute(create_ntb_sql)
+
+        for i in range(ctb_num):
+            tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')
+
+    def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, star_time=NOW):
+        tdLog.printNoPrefix("==========step: start insert data into tables now.....")
+        # from ...pytest.util.common import DataSet
+        data = DataSet()
+        data.get_order_set(rows)
+
+        for i in range(rows):
+            row_data = f'''
+                {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
+                {data.bool_data[i]}, '{data.vchar_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.utint_data[i]},
+                {data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]}
+            '''
+            tdSql.execute( f"insert into {dbname}.{NTBNAME} values ( {star_time - i * int(TIME_STEP * 1.2)}, {row_data} )" )
+
+            for j in range(ctb_num):
+                tdSql.execute( f"insert into {dbname}.ct{j+1} values ( {star_time - j * i * TIME_STEP}, {row_data} )" )
 
     def run(self):
         tdSql.prepare()
@@ -656,27 +628,81 @@ class TDTestCase:
         with taos_connect(user=self.__user_list[0], passwd=f"new{self.__passwd_list[0]}") as user:
             # user = conn
             # 不能创建用户
-            tdLog.printNoPrefix("==========step5: normal user can not create user")
+            tdLog.printNoPrefix("==========step4.1: normal user can not create user")
             user.error("create use utest1 pass 'utest1pass'")
             # 可以查看用户
user can show user") + tdLog.printNoPrefix("==========step4.2: normal user can show user") user.query("show users") assert user.queryRows == self.users_count + 1 # 不可以修改其他用户的密码 - tdLog.printNoPrefix("==========step7: normal user can not alter other user pass") + tdLog.printNoPrefix("==========step4.3: normal user can not alter other user pass") user.error(self.__alter_pass_sql(self.__user_list[1], self.__passwd_list[1] )) user.error(self.__alter_pass_sql("root", "taosdata_root" )) # 可以修改自己的密码 - tdLog.printNoPrefix("==========step8: normal user can alter owner pass") + tdLog.printNoPrefix("==========step4.4: normal user can alter owner pass") user.query(self.__alter_pass_sql(self.__user_list[0], self.__passwd_list[0])) # 不可以删除用户,包括自己 - tdLog.printNoPrefix("==========step9: normal user can not drop any user ") + tdLog.printNoPrefix("==========step4.5: normal user can not drop any user ") user.error(f"drop user {self.__user_list[0]}") user.error(f"drop user {self.__user_list[1]}") user.error("drop user root") + tdLog.printNoPrefix("==========step5: enable info") + taos1_conn = taos.connect(user=self.__user_list[1], password=f"new{self.__passwd_list[1]}") + taos1_conn.query(f"show databases") + tdSql.execute(f"alter user {self.__user_list[1]} enable 0") + tdSql.execute(f"alter user {self.__user_list[2]} enable 0") + taos1_except = True + try: + taos1_conn.query("show databases") + except BaseException: + taos1_except = False + if taos1_except: + tdLog.exit("taos 1 connect except error not occured, when enable == 0, should not r/w ") + else: + tdLog.info("taos 1 connect except error occured, enable == 0") + + taos2_except = True + try: + taos.connect(user=self.__user_list[2], password=f"new{self.__passwd_list[2]}") + except BaseException: + taos2_except = False + if taos2_except: + tdLog.exit("taos 2 connect except error not occured, when enable == 0, should not connect") + else: + tdLog.info("taos 2 connect except error occured, enable == 0, can not login") + + tdLog.printNoPrefix("==========step6: sysinfo info") + taos3_conn = taos.connect(user=self.__user_list[3], password=f"new{self.__passwd_list[3]}") + taos3_conn.query(f"show dnodes") + taos3_conn.query(f"show {DBNAME}.vgroups") + tdSql.execute(f"alter user {self.__user_list[3]} sysinfo 0") + tdSql.execute(f"alter user {self.__user_list[4]} sysinfo 0") + taos3_except = True + try: + taos3_conn.query(f"show dnodes") + taos3_conn.query(f"show {DBNAME}.vgroups") + except BaseException: + taos3_except = False + if taos3_except: + tdLog.exit("taos 3 query except error not occured, when sysinfo == 0, should not show info:dnode/monde/qnode ") + else: + tdLog.info("taos 3 query except error occured, sysinfo == 0, can not show dnode/vgroups") + + taos4_conn = taos.connect(user=self.__user_list[4], password=f"new{self.__passwd_list[4]}") + taos4_except = True + try: + taos4_conn.query(f"show mnodes") + taos4_conn.query(f"show {DBNAME}.vgroups") + except BaseException: + taos4_except = False + if taos4_except: + tdLog.exit("taos 4 query except error not occured, when sysinfo == 0, when enable == 0, should not show info:dnode/monde/qnode") + else: + tdLog.info("taos 4 query except error occured, sysinfo == 0, can not show dnode/vgroups") + # root删除用户测试 - tdLog.printNoPrefix("==========step10: super user drop normal user") + tdLog.printNoPrefix("==========step7: super user drop normal user") self.test_drop_user() tdSql.query("show users") diff --git a/tests/system-test/1-insert/db_tb_name_check.py b/tests/system-test/1-insert/db_tb_name_check.py new 
new file mode 100644
index 0000000000..e4b164e01f
--- /dev/null
+++ b/tests/system-test/1-insert/db_tb_name_check.py
@@ -0,0 +1,75 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+
+class TDTestCase:
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+        self.special_name = ['!','@','#','$','%','^','&','*','(',')','[',']','{','}',\
+            ':',';','\'','\"',',','<','>','/','?','-','_','+','=','~']
+    def db_name_check(self):
+        dbname = tdCom.getLongName(10)
+        for j in self.special_name:
+            for i in range(len(list(dbname))+1):
+                new_dbname = list(dbname)
+                new_dbname.insert(i,j)
+                dbname_1 = ''.join(new_dbname)
+                tdSql.execute(f'create database if not exists `{dbname_1}`')
+                tdSql.query('show databases')
+                tdSql.checkEqual(tdSql.queryResult[2][0],str(dbname_1))
+                tdSql.execute(f'drop database `{dbname_1}`')
+        for i in range(len(list(dbname))+1):
+            new_dbname = list(dbname)
+            new_dbname.insert(i,'.')
+            dbname_1 = ''.join(new_dbname)
+            tdSql.error(f'create database if not exists `{dbname_1}`')
+
+    def tb_name_check(self):
+        dbname = tdCom.getLongName(10)
+        tdSql.execute(f'create database if not exists `{dbname}`')
+        tdSql.execute(f'use `{dbname}`')
+        tbname = tdCom.getLongName(5)
+        for i in self.special_name:
+            for j in range(len(list(tbname))+1):
+                tbname1 = list(tbname)
+                tbname1.insert(j,i)
+                new_tbname = ''.join(tbname1)
+                for sql in [f'`{dbname}`.`{new_tbname}`',f'`{new_tbname}`']:
+                    tdSql.execute(f'create table {sql} (ts timestamp,c0 int)')
+                    tdSql.execute(f'insert into {sql} values(now,1)')
+                    tdSql.query(f'select * from {sql}')
+                    tdSql.checkRows(1)
+                    tdSql.execute(f'drop table {sql}')
+        for i in range(len(list(tbname))+1):
+            tbname1 = list(tbname)
+            tbname1.insert(i,'.')
+            new_tbname = ''.join(tbname1)
+            for sql in [f'`{dbname}`.`{new_tbname}`',f'`{new_tbname}`']:
+                tdSql.error(f'create table {sql} (ts timestamp,c0 int)')
+        tdSql.execute(f'drop database `{dbname}`')
+    def run(self):
+        self.db_name_check()
+        self.tb_name_check()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/manyVgroups.json b/tests/system-test/1-insert/manyVgroups.json
index 20ac320552..3b0fa96b08 100644
--- a/tests/system-test/1-insert/manyVgroups.json
+++ b/tests/system-test/1-insert/manyVgroups.json
@@ -6,7 +6,7 @@
     "user": "root",
     "password": "taosdata",
"thread_count": 8, - "thread_count_create_tbl": 8, + "create_table_thread_count": 8, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/system-test/1-insert/performanceInsert.json b/tests/system-test/1-insert/performanceInsert.json index de410c30f2..7278a6f735 100644 --- a/tests/system-test/1-insert/performanceInsert.json +++ b/tests/system-test/1-insert/performanceInsert.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 8, - "thread_count_create_tbl": 8, + "create_table_thread_count": 8, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py index e65dded601..e66f5560df 100644 --- a/tests/system-test/1-insert/time_range_wise.py +++ b/tests/system-test/1-insert/time_range_wise.py @@ -1,4 +1,4 @@ -import datetime +from datetime import datetime import time from dataclasses import dataclass @@ -8,6 +8,7 @@ from util.sql import * from util.cases import * from util.dnodes import * from util.constant import * +from util.common import * PRIMARY_COL = "ts" @@ -38,7 +39,7 @@ TAG_COL = [INT_TAG] # insert data args: TIME_STEP = 10000 -NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) +NOW = int(datetime.timestamp(datetime.now()) * 1000) # init db/table DBNAME = "db" @@ -47,40 +48,6 @@ CTBNAME = "ct1" NTBNAME = "nt1" -@dataclass -class DataSet: - ts_data : List[int] = None - int_data : List[int] = None - bint_data : List[int] = None - sint_data : List[int] = None - tint_data : List[int] = None - int_un_data : List[int] = None - bint_un_data: List[int] = None - sint_un_data: List[int] = None - tint_un_data: List[int] = None - float_data : List[float] = None - double_data : List[float] = None - bool_data : List[int] = None - binary_data : List[str] = None - nchar_data : List[str] = None - - def __post_init__(self): - self.ts_data = [] - self.int_data = [] - self.bint_data = [] - self.sint_data = [] - self.tint_data = [] - self.int_un_data = [] - self.bint_un_data = [] - self.sint_un_data = [] - self.tint_un_data = [] - self.float_data = [] - self.double_data = [] - self.bool_data = [] - self.binary_data = [] - self.nchar_data = [] - - @dataclass class SMAschema: creation : str = "CREATE" @@ -164,10 +131,6 @@ class SMAschema: del self.other[k] - -# from ...pytest.util.sql import * -# from ...pytest.util.constant import * - class TDTestCase: updatecfgDict = {"querySmaOptimize": 1} @@ -469,14 +432,12 @@ class TDTestCase: err_sqls.append( SMAschema(index_flag="SMA INDEX ,", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) ) err_sqls.append( SMAschema(index_name="tbname", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) ) - # current_set cur_sqls.append( SMAschema(max_delay="",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) ) cur_sqls.append( SMAschema(watermark="",index_name="sma_index_2",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) ) cur_sqls.append( SMAschema(sliding="",index_name='sma_index_3',tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) ) - return err_sqls, cur_sqls def test_create_sma(self): @@ -512,102 +473,48 @@ class TDTestCase: self.test_create_sma() self.test_drop_sma() - pass - - def __create_tb(self): + def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, dbname=DBNAME): tdLog.printNoPrefix("==========step: create table") - create_stb_sql = f'''create table {STBNAME}( - ts 
-            ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+        create_stb_sql = f'''create table {dbname}.{stb}(
+            {PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
             {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
             {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
             {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
             {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
             ) tags ({INT_TAG} int)
             '''
-        create_ntb_sql = f'''create table {NTBNAME}(
-            ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
-            {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
-            {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
-            {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
-            {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
-            )
-            '''
         tdSql.execute(create_stb_sql)
-        tdSql.execute(create_ntb_sql)
-        for i in range(4):
-            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+        for i in range(ntbnum):
+            create_ntb_sql = f'''create table {dbname}.nt{i+1}(
+                {PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+                {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+                {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
+                )
+            '''
+            tdSql.execute(create_ntb_sql)
 
-    def __data_set(self, rows):
-        data_set = DataSet()
+        for i in range(ctb_num):
+            tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')
+
+    def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, star_time=NOW):
+        tdLog.printNoPrefix("==========step: start insert data into tables now.....")
+        # from ...pytest.util.common import DataSet
+        data = DataSet()
+        data.get_order_set(rows, bint_step=2)
 
         for i in range(rows):
-            data_set.ts_data.append(NOW + 1 * (rows - i))
-            data_set.int_data.append(rows - i)
-            data_set.bint_data.append(11111 * (rows - i))
-            data_set.sint_data.append(111 * (rows - i) % 32767)
-            data_set.tint_data.append(11 * (rows - i) % 127)
-            data_set.int_un_data.append(rows - i)
-            data_set.bint_un_data.append(11111 * (rows - i))
-            data_set.sint_un_data.append(111 * (rows - i) % 32767)
-            data_set.tint_un_data.append(11 * (rows - i) % 127)
-            data_set.float_data.append(1.11 * (rows - i))
-            data_set.double_data.append(1100.0011 * (rows - i))
-            data_set.bool_data.append((rows - i) % 2)
-            data_set.binary_data.append(f'binary{(rows - i)}')
-            data_set.nchar_data.append(f'nchar_测试_{(rows - i)}')
-
-        return data_set
-
-    def __insert_data(self):
-        tdLog.printNoPrefix("==========step: start inser data into tables now.....")
-        data = self.__data_set(rows=self.rows)
-
-        # now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
-        null_data = '''null, null, null, null, null, null, null, null, null, null, null, null, null, null'''
-        zero_data = "0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0"
-
-        for i in range(self.rows):
             row_data = f'''
                 {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
-                {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.tint_un_data[i]},
-                {data.sint_un_data[i]}, {data.int_un_data[i]}, {data.bint_un_data[i]}
-            '''
-            neg_row_data = f'''
-                {-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]},
-                {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.tint_un_data[i]},
-                {1 * data.sint_un_data[i]}, {1 * data.int_un_data[i]}, {1 * data.bint_un_data[i]}
+                {data.bool_data[i]}, '{data.vchar_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.utint_data[i]},
+                {data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]}
             '''
+            tdSql.execute( f"insert into {dbname}.{NTBNAME} values ( {star_time - i * int(TIME_STEP * 1.2)}, {row_data} )" )
 
-            tdSql.execute(
-                f"insert into ct1 values ( {NOW - i * TIME_STEP}, {row_data} )")
-            tdSql.execute(
-                f"insert into ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )")
-            tdSql.execute(
-                f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )")
-            tdSql.execute(
-                f"insert into {NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )")
-
-        tdSql.execute(
-            f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )")
-        tdSql.execute(
-            f"insert into ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )")
-        tdSql.execute(
-            f"insert into ct2 values ( {NOW - self.rows * int(TIME_STEP * 0.29) }, {null_data} )")
-
-        tdSql.execute(
-            f"insert into ct4 values ( {NOW + int(TIME_STEP * 0.8)}, {null_data} )")
-        tdSql.execute(
-            f"insert into ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )")
-        tdSql.execute(
-            f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )")
-
-        tdSql.execute(
-            f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )")
-        tdSql.execute(
-            f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )")
-        tdSql.execute(
-            f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )")
+            for j in range(ctb_num):
+                tdSql.execute( f"insert into {dbname}.ct{j+1} values ( {star_time - j * i * TIME_STEP}, {row_data} )" )
 
     def run(self):
         self.rows = 10
@@ -616,14 +523,60 @@ class TDTestCase:
 
         tdLog.printNoPrefix("==========step1:create table in normal database")
         tdSql.prepare()
-        self.__create_tb()
-        self.__insert_data()
+        self.__create_tb(dbname=DBNAME)
+        self.__insert_data(rows=self.rows)
         self.all_test()
 
+        # # from ...pytest.util.sql import *
+
         # drop databases, create same name db, stb and sma index
         tdSql.prepare()
-        self.__create_tb()
-        self.__insert_data()
+        self.__create_tb(dbname=DBNAME)
+        self.__insert_data(rows=self.rows,star_time=NOW + self.rows * 2 * TIME_STEP)
+        tdLog.printNoPrefix("==========step1.1 : create a tsma index and check data")
+        tdSql.execute(f"create sma index {DBNAME}.sma_index_name1 on {DBNAME}.{STBNAME} function(max({INT_COL}),max({BINT_COL}),min({INT_COL})) interval(6m,10s) sliding(6m)")
+        self.__insert_data(rows=self.rows)
+        tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
+        tdSql.checkData(0, 0, self.rows - 1)
+        tdSql.checkData(0, 1, (self.rows - 1) * 2 )
+        tdSql.checkData(tdSql.queryRows - 1, 2, 0)
+        # tdSql.checkData(0, 2, 0)
+
+        tdLog.printNoPrefix("==========step1.2 : alter table schema, drop col without index")
+        tdSql.execute(f"alter stable {DBNAME}.{STBNAME} drop column {BINARY_COL}")
+        tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
+        tdSql.checkData(0, 0, self.rows - 1)
+        tdSql.checkData(0, 1, (self.rows - 1) * 2 )
+        tdSql.checkData(tdSql.queryRows - 1, 2, 0)
+
+        tdLog.printNoPrefix("==========step1.3 : alter table schema, drop col with index")
+        # TODO: TD-18047, can not drop col, when col in tsma-index and tsma-index is not dropped.
+        tdSql.error(f"alter stable {DBNAME}.stb1 drop column {BINT_COL}")
+
+        tdLog.printNoPrefix("==========step1.4 : alter table schema, add col")
+        tdSql.execute(f"alter stable {DBNAME}.{STBNAME} add column {BINT_COL}_1 bigint")
+        tdSql.execute(f"insert into {DBNAME}.{CTBNAME} ({PRIMARY_COL}, {BINT_COL}_1) values(now(), 111)")
+        tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
+        tdSql.checkData(0, 0, self.rows - 1)
+        tdSql.checkData(0, 1, (self.rows - 1) * 2 )
+        tdSql.checkData(tdSql.queryRows - 1, 2, 0)
+        # tdSql.checkData(0, 2, 0)
+        tdSql.query(f"select max({BINT_COL}_1) from {DBNAME}.{STBNAME} ")
+        tdSql.checkData(0, 0 , 111)
+
+        tdSql.execute(f"flush database {DBNAME}")
+
+        tdLog.printNoPrefix("==========step1.5 : drop child table")
+        tdSql.execute(f"drop table {CTBNAME}")
+        tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
+        tdSql.checkData(0, 0, self.rows - 1)
+        tdSql.checkData(0, 1, (self.rows - 1) * 2 )
+        tdSql.checkData(tdSql.queryRows - 1, 2, 0)
+
+        tdLog.printNoPrefix("==========step1.6 : drop stable")
+        tdSql.execute(f"drop table {STBNAME}")
+        tdSql.error(f"select * from {DBNAME}.{STBNAME}")
+
         self.all_test()
 
         tdLog.printNoPrefix("==========step2:create table in rollup database")
@@ -640,7 +593,6 @@ class TDTestCase:
 
         tdSql.execute("flush database db ")
 
-
         tdLog.printNoPrefix("==========step4:after wal, all check again ")
         self.all_test()
 
diff --git a/tests/system-test/2-query/check_tsdb.py b/tests/system-test/2-query/check_tsdb.py
index 746906776d..8d5bcc370d 100644
--- a/tests/system-test/2-query/check_tsdb.py
+++ b/tests/system-test/2-query/check_tsdb.py
@@ -49,29 +49,11 @@ class TDTestCase:
         tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
         tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
 
-        # tdSql.execute(
-        #     f'''insert into t1 values
-        #     ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-        #     ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
-        #     ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
-        #     ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
-        #     ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
-        #     ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-        #     ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
-        #     ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
-        #     ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
-        #     ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
-        #     ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
-        #     ( '2023-02-21 01:01:01.000',
 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
-        #     '''
-        # )
-
-
     def restart_taosd_query_sum(self, dbname="db"):
 
         for i in range(5):
-            tdLog.info(" this is %d_th restart taosd " %i)
-            os.system(f"taos -s ' use db ;select c6 from {dbname}.stb1 ; '")
+            tdLog.notice(" this is %d_th restart taosd " %i)
+            # os.system(f"taos -s ' use db ;select c6 from {dbname}.stb1 ; '")
             tdSql.execute(f"use {dbname} ")
             tdSql.query(f"select count(*) from {dbname}.stb1")
             tdSql.checkRows(1)
@@ -85,6 +67,25 @@ class TDTestCase:
             tdDnodes.stop(1)
             tdDnodes.start(1)
             time.sleep(2)
+            tdSql.query("show databases")
+
+            status = False
+            while status==False:
+                tdSql.query("show databases")
+                for db_info in tdSql.queryResult:
+                    if db_info[0]==dbname :
+                        if db_info[15]=="ready":
+                            status = True
+                            tdLog.notice(" ==== database {} status is ready ==== ".format(dbname))
+                            break
+                        else:
+                            status = False
+                    else:
+                        continue
+
+
+
+
@@ -96,7 +97,7 @@ class TDTestCase:
 
         self.prepare_datas()
 
-        os.system(f"taos -s ' select c6 from {dbname}.stb1 ; '")
+        # os.system(f"taos -s ' select c6 from {dbname}.stb1 ; '")
         self.restart_taosd_query_sum()
 
     def stop(self):
diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py
index f38a99d809..953dd1e491 100644
--- a/tests/system-test/2-query/csum.py
+++ b/tests/system-test/2-query/csum.py
@@ -162,9 +162,9 @@ class TDTestCase:
         self.checkcsum(**case6)
 
         # case7~8: nested query
-        case7 = {"table_expr": "(select c1 from db.stb1 order by tbname ,ts )"}
+        case7 = {"table_expr": "(select c1 from db.stb1 order by ts, tbname )"}
         self.checkcsum(**case7)
-        case8 = {"table_expr": "(select csum(c1) c1 from db.t1 partition by tbname)"}
+        case8 = {"table_expr": "(select csum(c1) c1 from db.t1)"}
         self.checkcsum(**case8)
 
         # case9~10: mix with tbname/ts/tag/col not support , must partition by alias ,such as select tbname ,csum(c1) partition by tbname
diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py
index 856006aaf1..f70f30f55a 100644
--- a/tests/system-test/2-query/irate.py
+++ b/tests/system-test/2-query/irate.py
@@ -6,7 +6,7 @@ import inspect
 from util.log import *
 from util.sql import *
 from util.cases import *
-import random
+import random ,math
 
 class TDTestCase:
 
@@ -41,8 +41,8 @@ class TDTestCase:
             c2 = random.randint(0,100000)
             c3 = random.randint(0,125)
             c4 = random.randint(0,125)
-            c5 = random.random()/1.0
-            c6 = random.random()/1.0
+            c5 = random.randint(0,10000)/1000
+            c6 = random.randint(0,10000)/1000
             c7 = "'true'"
             c8 = "'binary_val'"
             c9 = "'nchar_val'"
@@ -72,7 +72,7 @@ class TDTestCase:
             comput_irate_value = origin_result[1][0]*1000/( origin_result[1][-1] - origin_result[0][-1])
         else:
             comput_irate_value = (origin_result[1][0] - origin_result[0][0])*1000/( origin_result[1][-1] - origin_result[0][-1])
-        if comput_irate_value ==irate_value:
+        if abs(comput_irate_value - irate_value) <= 0.0000001:
             tdLog.info(" irate work as expected , sql is %s "% irate_sql)
         else:
             tdLog.exit(" irate work not as expected , sql is %s "% irate_sql)
diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py
index b817b1afb4..d9d7ef2300 100644
--- a/tests/system-test/2-query/json_tag.py
+++ b/tests/system-test/2-query/json_tag.py
@@ -506,7 +506,7 @@ class TDTestCase:
 
         #show create table
         tdSql.query("show create table jsons1")
-        tdSql.checkData(0, 1, 'CREATE STABLE `jsons1` (`ts` TIMESTAMP, `dataint` INT, `databool` BOOL, `datastr` NCHAR(50), `datastrbin` VARCHAR(150)) TAGS (`jtag` JSON) WATERMARK 5000a, 5000a')
+        tdSql.checkData(0, 1, 'CREATE STABLE `jsons1` (`ts` TIMESTAMP, `dataint` INT, `databool` BOOL, `datastr` NCHAR(50), `datastrbin` VARCHAR(150)) TAGS (`jtag` JSON)')
 
         #test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares
         tdSql.query("select count(*) from jsons1 where jtag is not null")
diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py
new file mode 100644
index 0000000000..23adfb768d
--- /dev/null
+++ b/tests/system-test/2-query/qnodeCluster.py
@@ -0,0 +1,290 @@
+# from asyncio.windows_events import NULL
+import taos
+import sys
+import datetime
+import inspect
+import random
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.cluster import *
+from util.common import *
+
+sys.path.append("./6-cluster/")
+from clusterCommonCreate import *
+from clusterCommonCheck import clusterComCheck
+
+import threading
+
+class TDTestCase:
+
+    clientCfgDict = {'queryproxy': '1','debugFlag': 135}
+    clientCfgDict["debugFlag"] = 131
+    updatecfgDict = {'clientCfg': {}}
+    updatecfgDict = {'debugFlag': 131}
+    updatecfgDict = {'keepColumnName': 1}
+    updatecfgDict["clientCfg"] = clientCfgDict
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
+        tsql.execute("use %s" %dbName)
+        pre_create = "create table"
+        sql = pre_create
+        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
+        for i in range(ctbNum):
+            tagValue = 'beijing'
+            if (i % 10 == 0):
+                sql += " %s%d using %s (name,fleet,driver,device_version) tags('truck_%d', 'South%d','Trish%d','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,i)
+            else:
+                model = 'H-%d'%i
+                sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,model,i)
+            if (i > 0) and (i%1000 == 0):
+                tsql.execute(sql)
+                sql = pre_create
+        if sql != pre_create:
+            tsql.execute(sql)
+
+        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def prepareData(self):
+        dbname="db_tsbs"
+        stabname1="readings"
+        stabname2="diagnostics"
+        ctbnamePre1="rct"
+        ctbnamePre2="dct"
+        ctbNums=40
+        self.ctbNums=ctbNums
+        rowNUms=100
+        ts=1451606400000
+        tdSql.execute(f"create database {dbname};")
+        tdSql.execute(f"use {dbname} ")
+        tdSql.execute(f'''
+        create table {stabname1} (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30));
+        ''')
+        tdSql.execute(f'''
+        create table {stabname2} (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ;
+        ''')
+        self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums)
+        self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums)
+
+
+        for j in range(ctbNums):
+            for i in range(rowNUms):
+                tdSql.execute(
+                    f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
+                )
+                status= random.randint(0,1)
+                tdSql.execute(
+                    f"insert into dct{j} values ( {ts+i*60000}, {1+i*0.1},{1400+i*15}, {status},{1500+i*20}, {150+i*2},{5+i} )"
+                )
+        tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;")
+    # def check_avg(self ,origin_query , check_query):
+    #     avg_result = tdSql.getResult(origin_query)
+    #     origin_result = tdSql.getResult(check_query)
+
+    #     check_status = True
+    #     for row_index , row in enumerate(avg_result):
+    #         for col_index , elem in enumerate(row):
+    #             if avg_result[row_index][col_index] != origin_result[row_index][col_index]:
+    #                 check_status = False
+    #     if not check_status:
+    #         tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query )
+    #         sys.exit(1)
+    #     else:
+    #         tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
+
+
+    def createCluster(self):
+        tdSql.execute("create mnode on dnode 2")
+        tdSql.execute("create mnode on dnode 3")
+        tdSql.execute("create qnode on dnode 1")
+        tdSql.execute("create qnode on dnode 2")
+        tdSql.execute("create qnode on dnode 3")
+        time.sleep(10)
+
+    def tsbsIotQuery(self,tdSql):
+        tdSql.execute("use db_tsbs")
+
+        # test interval and partition
+        tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
+        # print(tdSql.queryResult)
+        parRows=tdSql.queryRows
+        tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
+        tdSql.checkRows(parRows)
+
+
+        # # test insert into
+        # tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
+        # tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+
+        # tdSql.query("insert into testsnode(ts,c1,c2,c3,c4)  SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+
+
+        # test partition interval fill
+        tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
+
+
+        # test partition interval limit (PRcore-TD-17410)
+        tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
+        tdSql.checkRows(self.ctbNums)
+
+        # test partition interval Pseudo time-column
+        tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+
+        # 1 high-load:
+        tdSql.query("SELECT ts,name,driver,current_load,load_capacity  FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity  FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;") + + # 2 stationary-trucks + tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)") + tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name") + + # 3 long-driving-sessions + tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;") + + + #4 long-daily-sessions + tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60") + + # 5. avg-daily-driving-duration + tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;") + + + # # 6. avg-daily-driving-session + # #taosc core dumped + # tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))") + # tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;") + # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;") + # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)") + + # 7. avg-load + tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;") + + # 8. 
+        # 8. daily-activity
+        tdSql.query(" SELECT model,ms1  FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+
+        tdSql.query(" SELECT model,ms1  FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+
+        tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+
+        tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+
+
+        # 9. breakdown-frequency
+        # NULL ---count(NULL)=0 expect count(NULL)= 100
+        tdSql.query("SELECT model,state_changed,count(state_changed)  FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
+        parRows=tdSql.queryRows
+        assert parRows != 0 , "query result is wrong"
+
+
+        tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
+
+        # it's already supported:
+        # last-loc
+        tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
+
+
+        # 2. low-fuel
+        tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
+
+        # 3. avg-vs-projected-fuel-consumption
+        tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
+
+    def restartFunc(self,func_name,threadNumbers,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
+        tdLog.printNoPrefix("======== test case 1: ")
+        paraDict = {'dbName': 'db',
+                    'dbNumbers': 8,
+                    'dropFlag': 1,
+                    'event': '',
+                    'vgroups': 2,
+                    'replica': 1,
+                    'stbName': 'stb',
+                    'stbNumbers': 100,
+                    'colPrefix': 'c',
+                    'tagPrefix': 't',
+                    'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
+                    'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
+                    'ctbPrefix': 'ctb',
+                    'ctbNum': 1,
+                    }
+
+        dnodeNumbers=int(dnodeNumbers)
+        mnodeNums=int(mnodeNums)
+        vnodeNumbers = int(dnodeNumbers-mnodeNums)
+
+        tdSql.query("show dnodes;")
+        tdLog.debug(tdSql.queryResult)
+        clusterComCheck.checkDnodes(dnodeNumbers)
+
+        tdLog.info("create database and stable")
+        tdDnodes=cluster.dnodes
+        stopcount = 0
+        threads=[]
+        for i in range(threadNumbers):
+            newTdSql=tdCom.newTdSql()
+            threads.append(threading.Thread(target=func_name,args=(newTdSql,)))
+        for tr in threads:
+            tr.start()
+
+        tdLog.info("Take turns stopping %s" % stopRole)
+        while stopcount < restartNumbers:
+            tdLog.info("restart loop: %d" % stopcount)
+            if stopRole == "mnode":
+                for i in range(mnodeNums):
+                    tdDnodes[i].stoptaosd()
+                    # sleep(10)
+                    tdDnodes[i].starttaosd()
+                    # sleep(10)
+            elif stopRole == "vnode":
+                for i in range(vnodeNumbers):
+                    tdDnodes[i+mnodeNums].stoptaosd()
+                    # sleep(10)
+                    tdDnodes[i+mnodeNums].starttaosd()
+                    # sleep(10)
+            elif stopRole == "dnode":
+                for i in range(dnodeNumbers):
+                    tdDnodes[i].stoptaosd()
+                    # sleep(10)
+                    tdDnodes[i].starttaosd()
+                    # sleep(10)
+
+            # dnodeNumbers doesn't include the schema database
+            if clusterComCheck.checkDnodes(dnodeNumbers):
+                tdLog.info("check dnodes status is ready")
+            else:
+                tdLog.info("check dnodes status is not ready")
+                self.stopThread(threads)
+                tdLog.exit("one or more dnodes failed to start")
+            # self.check3mnode()
+            stopcount+=1
+
+        for tr in threads:
+            tr.join()
+
+
+    def run(self):
+        tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
+        self.createCluster()
+        self.prepareData()
+        queryPolicy=2
+        simClientCfg="%s/taos.cfg"%tdDnodes.getSimCfgPath()
+        cmd='sed -i "s/^queryPolicy.*/queryPolicy 2/g" %s'%simClientCfg
+        os.system(cmd)
+        # self.tsbsIotQuery()
+        self.restartFunc(func_name=self.tsbsIotQuery,threadNumbers=3,dnodeNumbers=5,mnodeNums=3,restartNumbers=3,stopRole='mnode')
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/queryQnode.py b/tests/system-test/2-query/queryQnode.py
index b5e2bd3328..176c7ccf3e 100644
--- a/tests/system-test/2-query/queryQnode.py
+++ b/tests/system-test/2-query/queryQnode.py
@@ -318,7 +318,6 @@ class TDTestCase:
         os.system(cmd)
         # tdDnodes.stop(1)
         # tdDnodes.start(1)
-        tdSql.execute("reset query cache")
         tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
         tdSql.query("show local variables;")
         for i in range(tdSql.queryRows):
diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py
index ca270932b1..75b33f1a5e 100644
--- a/tests/system-test/2-query/tsbsQuery.py
+++ b/tests/system-test/2-query/tsbsQuery.py
@@ -1,3 +1,4
@@ +# from asyncio.windows_events import NULL import taos import sys import datetime @@ -21,38 +22,99 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), True) - def prepareData(self): - database="db_tsbs" - ts=1451606400000 - tdSql.execute(f"create database {database};") - tdSql.execute(f"use {database} ") - tdSql.execute(''' - create table readings (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)); - ''') - tdSql.execute(''' - create table diagnostics (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ; - ''') + def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + tagValue = 'beijing' + if (i % 10 == 0): + sql += " %s%d using %s (name,fleet,driver,device_version) tags('truck_%d', 'South%d','Trish%d','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,i) + else: + model = 'H-%d'%i + sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,model,i) + if (i > 0) and (i%1000 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return - for i in range(10): - if i == 1 or i == 2 : - tdLog.debug(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')") - tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')") - else : - tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')") - if i == 1 or i == 2 : - tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')") - else: - tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')") - for j in range(10): - for i in range(100): - tdSql.execute( - f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )" - ) - status= random.randint(0,1) - tdSql.execute( - f"insert into dct{j} values ( {ts+i*60000}, {1+i*0.1},{1400+i*15}, {status},{1500+i*20}, {150+i*2},{5+i} )" - ) - tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;") + def insertData(self,startTs,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1,rowsPerTbl=100,batchNum=1000): + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + if startTs is None: + t = time.time() + startTs = int(round(t * 1000)) + + for i in range(ctbNum): + sql += " %s%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + if(ctbPrefix=="rct"): + sql += 
f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}, {1500+j*20}, {150+j*2},{5+j}) " + elif ( ctbPrefix=="dct"): + status= random.randint(0,1) + sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status},{1500+j*20}, {150+j*2},{5+j} ) " + # tdLog.debug("1insert sql:%s"%sql) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + # tdLog.debug("2insert sql:%s"%sql) + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s%d values " %(ctbPrefix,i) + else: + sql = "insert into " + if sql != pre_insert: + # tdLog.debug("3insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareData(self): + dbname="db_tsbs" + stabname1="readings" + stabname2="diagnostics" + ctbnamePre1="rct" + ctbnamePre2="dct" + ctbNums=40 + self.ctbNums=ctbNums + rowNUms=200 + ts=1451606400000 + tdSql.execute(f"create database {dbname};") + tdSql.execute(f"use {dbname} ") + tdSql.execute(f''' + create table {stabname1} (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)); + ''') + tdSql.execute(f''' + create table {stabname2} (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ; + ''') + self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums) + self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums) + self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=1000) + self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=1000) + # for i in range(ctbNum): + # if i %10 == 0 : + # # tdLog.debug(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')") + # tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')") + # else : + # tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')") + # if i %10 == 0 : + # tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')") + # else: + # tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')") + # for j in range(ctbNums): + # for i in range(rowNUms): + # tdSql.execute( + # f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )" + # ) + # status= random.randint(0,1) + # tdSql.execute( + # f"insert into dct{j} values ( {ts+i*60000}, {1+i*0.1},{1400+i*15}, {status},{1500+i*20}, {150+i*2},{5+i} )" + # ) + # tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;") # def check_avg(self ,origin_query , check_query): # avg_result = 
tdSql.getResult(origin_query) # origin_result = tdSql.getResult(check_query) @@ -75,7 +137,6 @@ class TDTestCase: # test interval and partition tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ") - print(tdSql.queryResult) parRows=tdSql.queryRows tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ") tdSql.checkRows(parRows) @@ -94,7 +155,7 @@ class TDTestCase: # test partition interval limit (PRcore-TD-17410) tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);") - tdSql.checkRows(10) + tdSql.checkRows(self.ctbNums) # test partition interval Pseudo time-column tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") @@ -136,17 +197,27 @@ class TDTestCase: tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") - tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") + tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") # 9. 
breakdown-frequency # NULL ---count(NULL)=0 expect count(NULL)= 100 tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ") parRows=tdSql.queryRows - assert parRows != 0 , "query result is wrong" + assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;") + sql="select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;" + + # for i in range(2): + # tdSql.query("%s"%sql) + # quertR1=tdSql.queryResult + # for j in range(50): + # tdSql.query("%s"%sql) + # quertR2=tdSql.queryResult + # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2) + #it's already supported: # last-loc diff --git a/tests/system-test/5-taos-tools/TD-12478.py b/tests/system-test/5-taos-tools/TD-12478.py new file mode 100644 index 0000000000..69849d3c7a --- /dev/null +++ b/tests/system-test/5-taos-tools/TD-12478.py @@ -0,0 +1,151 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +import os +import sys +import time +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +from util.dnodes import * +import itertools +from itertools import product +from itertools import combinations +from faker import Faker +import subprocess + +class TDTestCase: + def caseDescription(self): + ''' + case1[TD-12434]:taosdump null nchar/binary length can cause core:taos-tools/src/taosdump.c + case2[TD-12478]:taos_stmt_execute() failed! 
reason: WAL size exceeds limit + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + os.system("rm -rf 5-taos-tools/TD-12478.py.sql") + os.system("rm db*") + os.system("rm dump_result.txt*") + + def restartDnodes(self): + tdDnodes.stop(1) + tdDnodes.start(1) + + def dropandcreateDB_random(self,n): + self.ts = 1630000000000 + + fake = Faker('zh_CN') + self.num_random = fake.random_int(min=1000, max=5000, step=1) + print(self.num_random) + for i in range(n): + tdSql.execute('''drop database if exists db ;''') + tdSql.execute('''create database db keep 36500;''') + tdSql.execute('''use db;''') + + tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + + tdSql.execute('''create table table_1 using stable_1 tags('table_1', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_2 using stable_1 tags('table_2', '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , 'binary2' , 'nchar2' , '2' , '22' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 tags('table_3', '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , 'binary3' , 'nchar3nchar3' , '-3.3' , '-33.33' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_21 using stable_2 tags('table_21' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + + #regular table + tdSql.execute('''create table regular_table_1 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + tdSql.execute('''create table regular_table_2 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint 
, q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + tdSql.execute('''create table regular_table_3 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + + + for i in range(self.num_random): + tdSql.execute('''insert into table_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts) values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d)''' + % (self.ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1), + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , self.ts + i)) + tdSql.execute('''insert into regular_table_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts) values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d)''' + % (self.ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) , + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) , + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , self.ts + i)) + + tdSql.execute('''insert into table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts) values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d)''' + % (self.ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , self.ts + i)) + tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts) values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d)''' + % (self.ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , self.ts + i)) + + tdSql.execute('''insert into table_3 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts) values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d)''' + % (self.ts + i*1000, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , self.ts + i)) + tdSql.execute('''insert into regular_table_3 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts) values(%d, %d, %d, %d, %d, %f, %f, 1, 
'binary.%s', 'nchar.%s', %d)''' + % (self.ts + i*1000, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , self.ts + i)) + + tdSql.query("select count(*) from stable_1;") + tdSql.checkData(0,0,3*self.num_random) + tdSql.query("select count(*) from regular_table_1;") + tdSql.checkData(0,0,self.num_random) + + def run(self): + tdSql.prepare() + + dcDB = self.dropandcreateDB_random(1) + + assert os.system("taosdump -D db") == 0 + + assert os.system("taosdump -i . -g") == 0 + + tdSql.query("select count(*) from stable_1;") + tdSql.checkData(0,0,3*self.num_random) + tdSql.query("select count(*) from regular_table_1;") + tdSql.checkData(0,0,self.num_random) + tdSql.query("select count(*) from regular_table_2;") + tdSql.checkData(0,0,self.num_random) + tdSql.query("select count(*) from regular_table_3;") + tdSql.checkData(0,0,self.num_random) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py b/tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py new file mode 100644 index 0000000000..eb4de435ec --- /dev/null +++ b/tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py @@ -0,0 +1,1291 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +import time +import os +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def caseDescription(self): + ''' + case1:[TD-10540]The escape char "`" can be used for both tag name and column name + case2:[TD-12435]create table as cause column error; + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.tmpdir = "tmp" + now = time.time() + self.ts = int(round(now * 1000)) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def table1_checkall(self, sql): + tdLog.info(sql) + tdSql.query(sql) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 2) + tdSql.checkData(0, 3, 3) + tdSql.checkData(0, 4, 4) + tdSql.checkData(0, 5, 'True') + tdSql.checkData(0, 6, 6) + tdSql.checkData(0, 7, 7) + tdSql.checkData(0, 8, 8) + tdSql.checkData(0, 9, 9) + tdSql.checkData(0, 10, '1970-01-01 08:00:00.010') + + def table1_checkall_1(self, sql): + tdSql.query(sql) + 
tdSql.checkData(0, 1, 1) + + def table1_checkall_2(self, sql): + self.table1_checkall_1(sql) + tdSql.checkData(0, 2, 2) + + def table1_checkall_3(self, sql): + self.table1_checkall_2(sql) + tdSql.checkData(0, 3, 3) + + def table1_checkall_4(self, sql): + self.table1_checkall_3(sql) + tdSql.checkData(0, 4, 4) + + def table1_checkall_5(self, sql): + self.table1_checkall_4(sql) + tdSql.checkData(0, 5, 'True') + + def table1_checkall_6(self, sql): + self.table1_checkall_5(sql) + tdSql.checkData(0, 6, 6) + + def table1_checkall_7(self, sql): + self.table1_checkall_6(sql) + tdSql.checkData(0, 7, 7) + + def table1_checkall_8(self, sql): + self.table1_checkall_7(sql) + tdSql.checkData(0, 8, 8) + + def table1_checkall_9(self, sql): + self.table1_checkall_8(sql) + tdSql.checkData(0, 9, 9) + + def table1_checkall_10(self, sql): + self.table1_checkall_9(sql) + tdSql.checkData(0, 10, '1970-01-01 08:00:00.010') + + def run(self): + + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf 5-taos-tools/%s.sql" % testcaseFilename) + os.system("rm %s/db*" % self.tmpdir) + os.system("rm dump_result.txt*") + tdSql.prepare() + + print("==============step1") + print("prepare data") + + tdSql.execute("create database db2") + tdSql.execute("use db2") + + print( + "==============new version [escape character] for stable==============") + print("==============step1,#create db.stable,db.table; insert db.table; show db.table; select db.table; drop db.table;") + print("prepare data") + + self.stb1 = "stable_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + self.tb1 = "table_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + + self.col_base = "123~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + + self.col_int = "stable_col_int%s" % self.col_base + print(self.col_int) + self.col_bigint = "stable_col_bigint%s" % self.col_base + self.col_smallint = "stable_col_smallint%s" % self.col_base + self.col_tinyint = "stable_col_tinyint%s" % self.col_base + self.col_bool = "stable_col_bool%s" % self.col_base + self.col_binary = "stable_col_binary%s" % self.col_base + self.col_nchar = "stable_col_nchar%s" % self.col_base + self.col_float = "stable_col_float%s" % self.col_base + self.col_double = "stable_col_double%s" % self.col_base + self.col_ts = "stable_col_ts%s" % self.col_base + + self.tag_base = "abc~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + self.tag_int = "stable_tag_int%s" % self.tag_base + self.tag_bigint = "stable_tag_bigint%s" % self.tag_base + self.tag_smallint = "stable_tag_smallint%s" % self.tag_base + self.tag_tinyint = "stable_tag_tinyint%s" % self.tag_base + self.tag_bool = "stable_tag_bool%s" % self.tag_base + self.tag_binary = "stable_tag_binary%s" % self.tag_base + self.tag_nchar = "stable_tag_nchar%s" % self.tag_base + self.tag_float = "stable_tag_float%s" % self.tag_base + self.tag_double = "stable_tag_double%s" % self.tag_base + self.tag_ts = "stable_tag_ts%s" % self.tag_base + + tdSql.execute('''create stable db.`%s` (ts timestamp, `%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , + `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp) + tags(loc nchar(20), `%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , + `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp);''' + % (self.stb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, + self.tag_binary, 
self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts)) + tdSql.query("describe db.`%s` ; " % self.stb1) + tdSql.checkRows(22) + + tdSql.query("select count(*) from db.`%s` ; " % self.stb1) + tdSql.checkRows(0) + + tdSql.query("show create stable db.`%s` ; " % self.stb1) + tdSql.checkData(0, 0, self.stb1) + tdSql.checkData(0, 1, "CREATE TABLE `%s` (`ts` TIMESTAMP,`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)\ + TAGS (`loc` NCHAR(20),`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)" + % (self.stb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, + self.tag_binary, self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts)) + + tdSql.execute( + "create table db.`table!1` using db.`%s` tags('table_1' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')" % + self.stb1) + tdSql.query("describe db.`table!1` ; ") + tdSql.checkRows(22) + + time.sleep(10) + tdSql.query("show create table db.`table!1` ; ") + tdSql.checkData(0, 0, "table!1") + tdSql.checkData( + 0, + 1, + "CREATE TABLE `table!1` USING `%s` TAGS (\"table_1\",0,0,0,0,false,\"0\",\"0\",0.000000,0.000000,\"0\")" % + self.stb1) + + tdSql.execute( + "insert into db.`table!1` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)") + sql = " select * from db.`table!1`; " + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`table!1`; '''\ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + + time.sleep(1) + tdSql.execute('''insert into db.`table!1`(ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)''' + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + sql = " select * from db.`table!1`; " + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query("select count(*) from db.`table!1`; ") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from db.`%s` ; " % self.stb1) + tdSql.checkRows(1) + + tdSql.execute( + "create table db.`%s` using db.`%s` TAGS (\"table_2\",2,2,2,2,true,\"2\",\"2\",2.000000,2.000000,\"2\")" % + (self.tb1, self.stb1)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.query("show create table db.`%s` ; " % self.tb1) + tdSql.checkData(0, 0, self.tb1) + tdSql.checkData( + 0, + 1, + "CREATE TABLE `%s` USING `%s` TAGS (\"table_2\",2,2,2,2,true,\"2\",\"2\",2.000000,2.000000,\"2\")" % + (self.tb1, + self.stb1)) + + tdSql.execute( + "insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" % + self.tb1) + sql = "select * from db.`%s` ; " % self.tb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s` ; '''\ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, 
self.col_double, self.col_ts, + self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, self.tag_binary, self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts, self.tb1) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + + time.sleep(1) + tdSql.execute('''insert into db.`%s`(ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)''' + % (self.tb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + sql = " select * from db.`%s` ; " % self.tb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + sql = " select * from db.`%s` where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10; " \ + % (self.tb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query("select count(*) from db.`%s`; " % self.tb1) + tdSql.checkData(0, 0, 2) + sql = "select * from db.`%s` ; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + tdSql.query("select count(*) from db.`%s`; " % self.stb1) + tdSql.checkData(0, 0, 4) + + sql = "select * from (select * from db.`%s`) ; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + tdSql.query( + "select count(*) from (select * from db.`%s`) ; " % + self.stb1) + tdSql.checkData(0, 0, 4) + + sql = "select * from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`) ; " \ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + + sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`) ; " \ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + + sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`\ + where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10 ) ; " \ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1, + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(4) + + tdSql.query("show db.stables like 'stable_1%' ") + tdSql.checkRows(1) + tdSql.query("show db.tables like 'table%' ") + tdSql.checkRows(2) + + self.cr_tb1 = 
"create_table_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" + tdSql.execute( + "create table db.`%s` as select avg(`%s`) from db.`%s` where ts > now interval(1m) sliding(30s);" % + (self.cr_tb1, self.col_bigint, self.stb1)) + tdSql.query("show db.tables like 'create_table_%' ") + tdSql.checkRows(1) + + print(r"==============drop\ add\ change\ modify column or tag") + print("==============drop==============") + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_ts)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(21) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_double)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(20) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_float)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(19) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_nchar)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(18) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_binary)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(17) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_bool)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(16) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_tinyint)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(15) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_smallint)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(14) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_bigint)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(13) + tdSql.execute( + "ALTER TABLE db.`%s` DROP TAG `%s`; " % + (self.stb1, self.tag_int)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(12) + + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_ts)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_9(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(11) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_double)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_8(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(10) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_float)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = 
self.table1_checkall_7(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(9) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_nchar)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_6(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(8) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_binary)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_5(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(7) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_bool)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_4(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(6) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_tinyint)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_3(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(5) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_smallint)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_2(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(4) + tdSql.execute( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_bigint)) + sql = " select * from db.`%s`; " % self.stb1 + datacheck = self.table1_checkall_1(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(3) + tdSql.error( + "ALTER TABLE db.`%s` DROP COLUMN `%s`; " % + (self.stb1, self.col_int)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(3) + + print("==============add==============") + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` bigint; " % + (self.stb1, self.col_bigint)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(4) + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` smallint; " % + (self.stb1, self.col_smallint)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(5) + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` tinyint; " % + (self.stb1, self.col_tinyint)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(6) + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` bool; " % + (self.stb1, self.col_bool)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(7) + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` binary(20); " % + (self.stb1, self.col_binary)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(8) + + tdSql.execute( + "insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6)" % + self.tb1) + sql = "select * from db.`%s` order by ts desc; " % self.tb1 + datacheck = self.table1_checkall_5(sql) + + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` nchar(20); " % + (self.stb1, self.col_nchar)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(9) + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` float; " % + (self.stb1, self.col_float)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(10) + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` double; " % + (self.stb1, self.col_double)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(11) + tdSql.execute( + "ALTER TABLE db.`%s` ADD COLUMN `%s` timestamp; " % + (self.stb1, self.col_ts)) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(12) + + tdSql.execute( 
+ "insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" % + self.tb1) + sql = "select * from db.`%s` order by ts desc; " % self.tb1 + datacheck = self.table1_checkall(sql) + + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` int; " % + (self.stb1, self.tag_int)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(13) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` bigint; " % + (self.stb1, self.tag_bigint)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(14) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` smallint; " % + (self.stb1, self.tag_smallint)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(15) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` tinyint; " % + (self.stb1, self.tag_tinyint)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(16) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` bool; " % + (self.stb1, self.tag_bool)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(17) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` binary(20); " % + (self.stb1, self.tag_binary)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(18) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` nchar(20); " % + (self.stb1, self.tag_nchar)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(19) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` float; " % + (self.stb1, self.tag_float)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(20) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` double; " % + (self.stb1, self.tag_double)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(21) + tdSql.execute( + "ALTER TABLE db.`%s` ADD TAG `%s` timestamp; " % + (self.stb1, self.tag_ts)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + + print("==============change==============") + self.tag_base_change = "abcdas" + self.tag_int_change = "stable_tag_int%s" % self.tag_base_change + self.tag_bigint_change = "stable_tag_bigint%s" % self.tag_base_change + self.tag_smallint_change = "stable_tag_smallint%s" % self.tag_base_change + self.tag_tinyint_change = "stable_tag_tinyint%s" % self.tag_base_change + self.tag_bool_change = "stable_tag_bool%s" % self.tag_base_change + self.tag_binary_change = "stable_tag_binary%s" % self.tag_base_change + self.tag_nchar_change = "stable_tag_nchar%s" % self.tag_base_change + self.tag_float_change = "stable_tag_float%s" % self.tag_base_change + 
self.tag_double_change = "stable_tag_double%s" % self.tag_base_change + self.tag_ts_change = "stable_tag_ts%s" % self.tag_base_change + + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_int, self.tag_int_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_bigint, self.tag_bigint_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_smallint, self.tag_smallint_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_tinyint, self.tag_tinyint_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_bool, self.tag_bool_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_binary, self.tag_binary_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_nchar, self.tag_nchar_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_float, self.tag_float_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_double, self.tag_double_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " % + (self.stb1, self.tag_ts, self.tag_ts_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + + print("==============modify==============") + # TD-10810 + tdSql.execute( + "ALTER STABLE db.`%s` MODIFY TAG `%s` binary(30); ; " % + (self.stb1, self.tag_binary_change)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER STABLE db.`%s` MODIFY TAG `%s` nchar(30); ; " % + (self.stb1, self.tag_nchar_change)) + sql = " 
select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + + tdSql.execute( + "ALTER STABLE db.`%s` MODIFY COLUMN `%s` binary(30); ; " % + (self.stb1, self.col_binary)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + tdSql.execute( + "ALTER STABLE db.`%s` MODIFY COLUMN `%s` nchar(30); ; " % + (self.stb1, self.col_nchar)) + sql = " select * from db.`%s` order by ts desc; " % self.stb1 + datacheck = self.table1_checkall(sql) + tdSql.query("describe db.`%s` ; " % self.tb1) + tdSql.checkRows(22) + + print(r"==============drop table\stable") + try: + tdSql.execute("drop table db.`%s` " % self.tb1) + except Exception as e: + tdLog.exit(e) + + tdSql.error("select * from db.`%s`" % self.tb1) + tdSql.query("show db.stables like 'stable_1%' ") + tdSql.checkRows(1) + + try: + tdSql.execute("drop table db.`%s` " % self.stb1) + except Exception as e: + tdLog.exit(e) + + tdSql.error("select * from db.`%s`" % self.tb1) + tdSql.error("select * from db.`%s`" % self.stb1) + + print("==============step2,#create stable,table; insert table; show table; select table; drop table") + + self.stb2 = "stable_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + self.tb2 = "table_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + + tdSql.execute( + "create stable `%s` (ts timestamp, i int) tags(j int);" % + self.stb2) + tdSql.query("describe `%s` ; " % self.stb2) + tdSql.checkRows(3) + + tdSql.query("select count(*) from `%s` ; " % self.stb2) + tdSql.checkRows(0) + + tdSql.query("show create stable `%s` ; " % self.stb2) + tdSql.checkData(0, 0, self.stb2) + tdSql.checkData( + 0, + 1, + "CREATE TABLE `%s` (`ts` TIMESTAMP,`i` INT) TAGS (`j` INT)" % + self.stb2) + + tdSql.execute("create table `table!2` using `%s` tags(1)" % self.stb2) + tdSql.query("describe `table!2` ; ") + tdSql.checkRows(3) + + time.sleep(10) + + tdSql.query("show create table `table!2` ; ") + tdSql.checkData(0, 0, "table!2") + tdSql.checkData( + 0, + 1, + "CREATE TABLE `table!2` USING `%s` TAGS (1)" % + self.stb2) + tdSql.execute("insert into `table!2` values(now, 1)") + tdSql.query("select * from `table!2`; ") + tdSql.checkRows(1) + tdSql.query("select count(*) from `table!2`; ") + tdSql.checkData(0, 0, 1) + tdSql.query("select count(*) from `%s` ; " % self.stb2) + tdSql.checkRows(1) + + tdSql.execute( + "create table `%s` using `%s` tags(1)" % + (self.tb2, self.stb2)) + tdSql.query("describe `%s` ; " % self.tb2) + tdSql.checkRows(3) + tdSql.query("show create table `%s` ; " % self.tb2) + tdSql.checkData(0, 0, self.tb2) + tdSql.checkData( + 0, 1, "CREATE TABLE `%s` USING `%s` TAGS (1)" % + (self.tb2, self.stb2)) + tdSql.execute("insert into `%s` values(now, 1)" % self.tb2) + tdSql.query("select * from `%s` ; " % self.tb2) + tdSql.checkRows(1) + tdSql.query("select count(*) from `%s`; " % self.tb2) + tdSql.checkData(0, 0, 1) + tdSql.query("select * from `%s` ; " % self.stb2) + tdSql.checkRows(2) + tdSql.query("select count(*) from `%s`; " % self.stb2) + tdSql.checkData(0, 0, 2) + + tdSql.query("select * from (select * from `%s`) ; " % self.stb2) + tdSql.checkRows(2) + tdSql.query("select count(*) from (select * from `%s` ); " % self.stb2) + tdSql.checkData(0, 0, 2) + + tdSql.query("show stables like 'stable_2%' ") + tdSql.checkRows(1) + tdSql.query("show tables like 'table%' ") + tdSql.checkRows(2) + + 
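+        # --- Illustrative sketch (added for clarity in review; NOT part of the original case) ---
+        # Every check in this file relies on one convention: an identifier containing special
+        # characters is legal as long as every reference wraps it in backticks, while the name
+        # itself is stored and echoed verbatim. A minimal, self-contained form of the pattern
+        # (the table and column names below are hypothetical, chosen not to collide with the
+        # 'table%' / 'create_table_%' wildcards checked above):
+        odd_col = "demo_col~!@#$%^&*()-_+=[]{}"
+        tdSql.execute("create table `demo_escape_tb` (ts timestamp, `%s` int)" % odd_col)
+        tdSql.execute("insert into `demo_escape_tb` values(now, 1)")
+        tdSql.query("select `%s` from `demo_escape_tb`" % odd_col)
+        tdSql.checkData(0, 0, 1)
+        tdSql.execute("drop table `demo_escape_tb`")
+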
# TD-10536 + self.cr_tb2 = "create_table_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + tdSql.execute( + "create table `%s` as select * from `%s` ;" % + (self.cr_tb2, self.stb2)) + tdSql.query("show db.tables like 'create_table_%' ") + tdSql.checkRows(1) + + print("==============step3,#create regular_table; insert regular_table; show regular_table; select regular_table; drop regular_table") + self.regular_table = "regular_table~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + + self.regular_col_base = "123@#$%^&*()-_+=[]{};:,<.>/?~!$%^" + + self.col_int = "regular_table_col_int%s" % self.regular_col_base + print(self.col_int) + self.col_bigint = "regular_table_col_bigint%s" % self.regular_col_base + self.col_smallint = "regular_table_col_smallint%s" % self.regular_col_base + self.col_tinyint = "regular_table_col_tinyint%s" % self.regular_col_base + self.col_bool = "regular_table_col_bool%s" % self.regular_col_base + self.col_binary = "regular_table_col_binary%s" % self.regular_col_base + self.col_nchar = "regular_table_col_nchar%s" % self.regular_col_base + self.col_float = "regular_table_col_float%s" % self.regular_col_base + self.col_double = "regular_table_col_double%s" % self.regular_col_base + self.col_ts = "regular_table_col_ts%s" % self.regular_col_base + + tdSql.execute("create table `%s` (ts timestamp,`%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , \ + `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp) ;" + % (self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(11) + + tdSql.query("select count(*) from `%s` ; " % self.regular_table) + tdSql.checkRows(1) + + tdSql.query("show create table `%s` ; " % self.regular_table) + tdSql.checkData(0, 0, self.regular_table) + tdSql.checkData(0, 1, "CREATE TABLE `%s` (`ts` TIMESTAMP,`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)" + % (self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, + self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + + tdSql.execute( + "insert into `%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" % + self.regular_table) + sql = "select * from `%s` ; " % self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s`; '''\ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(1) + + time.sleep(1) + tdSql.execute('''insert into db2.`%s` (ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)''' + % (self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + sql = " select * from db2.`%s`; " % self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + sql = " select * from db2.`%s` where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10; " \ + % 
(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query("select count(*) from `%s`; " % self.regular_table) + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from `%s` ; " % self.regular_table) + tdSql.checkRows(1) + + sql = "select * from (select * from `%s`) ; " % self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s`\ + where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10 ) ; " \ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table, + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) + tdSql.checkRows(2) + + tdSql.query( + "select count(*) from (select * from `%s` ); " % + self.regular_table) + tdSql.checkData(0, 0, 2) + + tdSql.query("show tables like 'regular_table%' ") + tdSql.checkRows(1) + + self.crr_tb = "create_r_table~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + tdSql.execute( + "create table `%s` as select * from `%s` ;" % + (self.crr_tb, self.regular_table)) + tdSql.query("show db2.tables like 'create_r_table%' ") + tdSql.checkRows(1) + + print(r"==============drop\ add\ change\ modify column ") + print("==============drop==============") + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_ts)) + sql = " select * from db2.`%s`; " % self.regular_table + datacheck = self.table1_checkall_9(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(10) + tdSql.execute( + "ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_double)) + sql = " select * from `%s`; " % self.regular_table + datacheck = self.table1_checkall_8(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(9) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_float)) + sql = " select * from db2.`%s`; " % self.regular_table + datacheck = self.table1_checkall_7(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(8) + tdSql.execute( + "ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_nchar)) + sql = " select * from `%s`; " % self.regular_table + datacheck = self.table1_checkall_6(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(7) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_binary)) + sql = " select * from db2.`%s`; " % self.regular_table + datacheck = self.table1_checkall_5(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(6) + tdSql.execute( + "ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_bool)) + sql = " select * from `%s`; " % self.regular_table + datacheck = 
self.table1_checkall_4(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(5) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_tinyint)) + sql = " select * from db2.`%s`; " % self.regular_table + datacheck = self.table1_checkall_3(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(4) + tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_smallint)) + sql = " select * from `%s`; " % self.regular_table + datacheck = self.table1_checkall_2(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(3) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_bigint)) + sql = " select * from db2.`%s`; " % self.regular_table + datacheck = self.table1_checkall_1(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(2) + tdSql.error( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_int)) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(2) + + print("==============add==============") + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` bigint; " % + (self.regular_table, self.col_bigint)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(3) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` smallint; " % + (self.regular_table, self.col_smallint)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(4) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` tinyint; " % + (self.regular_table, self.col_tinyint)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(5) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` bool; " % + (self.regular_table, self.col_bool)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(6) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` binary(20); " % + (self.regular_table, self.col_binary)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(7) + + tdSql.execute( + "insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6)" % + self.regular_table) + sql = "select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_5(sql) + + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` nchar(20); " % + (self.regular_table, self.col_nchar)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(8) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` float; " % + (self.regular_table, self.col_float)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(9) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` double; " % + (self.regular_table, self.col_double)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(10) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` timestamp; " % + (self.regular_table, self.col_ts)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(11) + + tdSql.execute( + "insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" % + self.regular_table) + sql = "select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall(sql) + + print("==============change, regular not support==============") + + print("==============modify==============") + # TD-10810 + tdSql.execute( + "ALTER TABLE db2.`%s` MODIFY COLUMN `%s` binary(30); ; " % + (self.regular_table, 
self.col_binary)) +        sql = " select * from db2.`%s` order by ts desc; " % self.regular_table +        datacheck = self.table1_checkall(sql) +        tdSql.query("describe db2.`%s` ; " % self.regular_table) +        tdSql.checkRows(11) +        tdSql.execute( +            "ALTER TABLE `%s` MODIFY COLUMN `%s` nchar(30); ; " % +            (self.regular_table, self.col_nchar)) +        sql = " select * from `%s` order by ts desc; " % self.regular_table +        datacheck = self.table1_checkall(sql) +        tdSql.query("describe `%s` ; " % self.regular_table) +        tdSql.checkRows(11) + +        buildPath = self.getBuildPath() +        if (buildPath == ""): +            tdLog.exit("taosdump not found!") +        else: +            tdLog.info("taosdump found in %s" % buildPath) +        binPath = buildPath + "/build/bin/" + +        if not os.path.exists(self.tmpdir): +            os.makedirs(self.tmpdir) +        else: +            print("directory exists") +            os.system("rm -rf %s" % self.tmpdir) +            os.makedirs(self.tmpdir) + +        print("==============step4,#taosdump out ; drop db ; taosdump in") +        assert os.system( +            "%staosdump -D db2 -o %s" % +            (binPath, self.tmpdir)) == 0 + +        tdSql.execute('''drop database if exists db2 ;''') + +        assert os.system("%staosdump -i %s -g" % (binPath, self.tmpdir)) == 0 + +        print("==============step5,#create regular_table; insert regular_table; show regular_table; select regular_table; drop regular_table") +        self.regular_table = "regular_table~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}" + +        self.regular_col_base = "123@#$%^&*()-_+=[]{};:,<.>/?~!$%^" + +        self.col_int = "regular_table_col_int%s" % self.regular_col_base +        print(self.col_int) +        self.col_bigint = "regular_table_col_bigint%s" % self.regular_col_base +        self.col_smallint = "regular_table_col_smallint%s" % self.regular_col_base +        self.col_tinyint = "regular_table_col_tinyint%s" % self.regular_col_base +        self.col_bool = "regular_table_col_bool%s" % self.regular_col_base +        self.col_binary = "regular_table_col_binary%s" % self.regular_col_base +        self.col_nchar = "regular_table_col_nchar%s" % self.regular_col_base +        self.col_float = "regular_table_col_float%s" % self.regular_col_base +        self.col_double = "regular_table_col_double%s" % self.regular_col_base +        self.col_ts = "regular_table_col_ts%s" % self.regular_col_base + +        tdSql.query("describe `%s` ; " % self.regular_table) +        tdSql.checkRows(11) + +        tdSql.query("select count(*) from `%s` ; " % self.regular_table) +        tdSql.checkRows(1) + +        tdSql.query("show create table `%s` ; " % self.regular_table) +        tdSql.checkData(0, 0, self.regular_table) +        tdSql.checkData(0, 1, "CREATE TABLE `%s` (`ts` TIMESTAMP,`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(30),`%s` NCHAR(30),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)" +                        % (self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, +                           self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + +        tdSql.execute( +            "insert into `%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" % +            self.regular_table) +        sql = "select * from `%s` order by ts desc; " % self.regular_table +        datacheck = self.table1_checkall(sql) +        tdSql.checkRows(5) +        sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s` order by ts desc; '''\ +            % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table) +        datacheck = self.table1_checkall(sql) +        tdSql.checkRows(5) + +        time.sleep(1) +        tdSql.execute('''insert into db2.`%s` (ts 
,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)''' + % (self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(6) + + sql = " select * from db2.`%s` where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10; " \ + % (self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) +# CBD tdSql.checkRows(3) + + tdSql.query( + "select count(*) from `%s` order by ts desc; " % + self.regular_table) + tdSql.checkData(0, 0, 6) + tdSql.query("select count(*) from `%s` ; " % self.regular_table) + tdSql.checkRows(1) + + sql = "select * from (select * from `%s` order by ts desc) ; " % self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.checkRows(6) + + sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s`\ + where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10 ) ; " \ + % (self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table, + self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) + datacheck = self.table1_checkall(sql) +# CBD tdSql.checkRows(3) + + tdSql.query( + "select count(*) from (select * from `%s` ); " % + self.regular_table) + tdSql.checkData(0, 0, 6) + + tdSql.query("show tables like 'regular_table%' ") + tdSql.checkRows(1) + + tdSql.query("show db2.tables like 'create_r_table%' ") + tdSql.checkRows(1) + + print(r"==============drop\ add\ change\ modify column ") + print("==============drop==============") + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_ts)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_9(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(10) + tdSql.execute( + "ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_double)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_8(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(9) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_float)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_7(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(8) + tdSql.execute( + "ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_nchar)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_6(sql) + tdSql.query("describe 
`%s` ; " % self.regular_table) + tdSql.checkRows(7) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_binary)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_5(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(6) + tdSql.execute( + "ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_bool)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_4(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(5) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_tinyint)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_3(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(4) + tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_smallint)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_2(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(3) + tdSql.execute( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_bigint)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_1(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(2) + tdSql.error( + "ALTER TABLE db2.`%s` DROP COLUMN `%s`; " % + (self.regular_table, self.col_int)) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(2) + + print("==============add==============") + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` bigint; " % + (self.regular_table, self.col_bigint)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(3) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` smallint; " % + (self.regular_table, self.col_smallint)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(4) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` tinyint; " % + (self.regular_table, self.col_tinyint)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(5) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` bool; " % + (self.regular_table, self.col_bool)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(6) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` binary(20); " % + (self.regular_table, self.col_binary)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(7) + + tdSql.execute( + "insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6)" % + self.regular_table) + sql = "select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall_5(sql) + + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` nchar(20); " % + (self.regular_table, self.col_nchar)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(8) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` float; " % + (self.regular_table, self.col_float)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(9) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` double; " % + (self.regular_table, self.col_double)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(10) + tdSql.execute( + "ALTER TABLE db2.`%s` ADD COLUMN `%s` 
timestamp; " % + (self.regular_table, self.col_ts)) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(11) + + tdSql.execute( + "insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" % + self.regular_table) + sql = "select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall(sql) + + print("==============change, regular not support==============") + + print("==============modify==============") + # TD-10810 + tdSql.execute( + "ALTER TABLE db2.`%s` MODIFY COLUMN `%s` binary(40); ; " % + (self.regular_table, self.col_binary)) + sql = " select * from db2.`%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.query("describe db2.`%s` ; " % self.regular_table) + tdSql.checkRows(11) + tdSql.execute( + "ALTER TABLE `%s` MODIFY COLUMN `%s` nchar(40); ; " % + (self.regular_table, self.col_nchar)) + sql = " select * from `%s` order by ts desc; " % self.regular_table + datacheck = self.table1_checkall(sql) + tdSql.query("describe `%s` ; " % self.regular_table) + tdSql.checkRows(11) + + os.system("rm %s/db*" % self.tmpdir) + os.system("rm dump_result.txt*") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py index e9b032c003..3736166a83 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py @@ -68,7 +68,7 @@ class TDTestCase: def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'db', - 'dbNumbers': 8, + 'dbNumbers': 6, 'dropFlag': 1, 'event': '', 'vgroups': 2, @@ -98,10 +98,10 @@ class TDTestCase: # fisr add three mnodes; tdLog.info("fisr add three mnodes and check mnode status") - tdSql.info("create mnode on dnode 2") + tdLog.info("create mnode on dnode 2") tdSql.execute("create mnode on dnode 2") clusterComCheck.checkMnodeStatus(2) - tdSql.info("create mnode on dnode 3") + tdLog.info("create mnode on dnode 3") tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) @@ -161,7 +161,7 @@ class TDTestCase: tdLog.info("check dnode number:") clusterComCheck.checkDnodes(dnodeNumbers) tdSql.query("show databases") - tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers-2)) + tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers)) # tdLog.info("check DB Rows:") # clusterComCheck.checkDbRows(allDbNumbers) @@ -172,7 +172,7 @@ class TDTestCase: def run(self): # print(self.master_dnode.cfgDict) - self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='mnode') + self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=5,stopRole='mnode') def stop(self): tdSql.close() diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py index 99efabd8ea..ea8e9612a2 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py @@ -159,7 +159,7 @@ class TDTestCase: tdLog.info("check 
dnode number:") clusterComCheck.checkDnodes(dnodeNumbers) tdSql.query("show databases") - tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers-2)) + tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers)) # tdLog.info("check DB Rows:") # clusterComCheck.checkDbRows(allDbNumbers) @@ -170,7 +170,7 @@ class TDTestCase: def run(self): # print(self.master_dnode.cfgDict) - self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode') + self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=3,stopRole='mnode') def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 3fc38ac898..1f6e8ce1f5 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -34,7 +34,7 @@ python3 ./test.py -f 1-insert/table_param_ttl.py python3 ./test.py -f 1-insert/mutil_stage.py python3 ./test.py -f 1-insert/update_data_muti_rows.py - +python3 ./test.py -f 1-insert/db_tb_name_check.py python3 ./test.py -f 2-query/abs.py python3 ./test.py -f 2-query/abs.py -R @@ -60,8 +60,8 @@ python3 ./test.py -f 2-query/ceil.py python3 ./test.py -f 2-query/ceil.py -R python3 ./test.py -f 2-query/char_length.py python3 ./test.py -f 2-query/char_length.py -R -# python3 ./test.py -f 2-query/check_tsdb.py -# python3 ./test.py -f 2-query/check_tsdb.py -R +python3 ./test.py -f 2-query/check_tsdb.py +python3 ./test.py -f 2-query/check_tsdb.py -R python3 ./test.py -f 2-query/concat.py python3 ./test.py -f 2-query/concat.py -R python3 ./test.py -f 2-query/concat_ws.py @@ -173,8 +173,9 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3 -# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3 @@ -195,15 +196,15 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 # vnode case -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1 -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 # python3 test.py -f 
6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 @@ -215,7 +216,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 -# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 +python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 From a74c830125f19553d438dfe5e853074938cd8d1e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 6 Aug 2022 19:34:15 +0800 Subject: [PATCH 05/11] fix(query): support scalar function in fill operator. --- source/libs/executor/inc/executorimpl.h | 5 ++++- source/libs/executor/inc/tfill.h | 1 + source/libs/executor/src/executorimpl.c | 19 +++++++++++-------- source/libs/executor/src/tfill.c | 24 +++++++++++++++--------- tests/script/tsim/parser/fill_us.sim | 1 + 5 files changed, 32 insertions(+), 18 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index b62ff2bef1..1207ea83cc 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -627,12 +627,15 @@ typedef struct SFillOperatorInfo { int64_t totalInputRows; void** p; SSDataBlock* existNewGroupBlock; - bool multigroupResult; STimeWindow win; SNode* pCondition; SArray* pColMatchColInfo; int32_t primaryTsCol; uint64_t curGroupId; // current handled group id + SExprInfo* pExprInfo; + int32_t numOfExpr; + SExprInfo* pNotFillExprInfo; + int32_t numOfNotFillExpr; } SFillOperatorInfo; typedef struct SGroupbyOperatorInfo { diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index 2d39cd8eb1..9f3a95aca8 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -44,6 +44,7 @@ typedef struct SFillInfo { TSKEY end; // endKey for fill TSKEY currentKey; // current active timestamp, the value may be changed during the fill procedure. int32_t tsSlotId; // primary time stamp slot id + int32_t srcTsSlotId; // timestamp column id in the source data block. 
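+                                  // Editor's note -- a hedged aside, not part of the original
+                                  // patch: srcTsSlotId locates the primary timestamp in the
+                                  // *source* block handed up by the downstream operator, while
+                                  // tsSlotId above locates it in the fill result block. The two
+                                  // can differ once scalar expressions are allowed in the fill
+                                  // list, which is why taosCreateFillInfo() later in this patch
+                                  // resolves tsSlotId by scanning the not-fill columns for the
+                                  // slot that carries the primary timestamp.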
int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC] int32_t type; // fill type int32_t numOfRows; // number of rows in the input data block diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 213638c73f..2fc113c7d6 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3244,7 +3244,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { SSDataBlock* pBlock = pDownstream->fpSet.getNextFn(pDownstream); if (pBlock == NULL) { if (pInfo->totalInputRows == 0) { - pOperator->status = OP_EXEC_DONE; + doSetOperatorCompleted(pOperator); return NULL; } @@ -3252,6 +3252,9 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { } else { blockDataUpdateTsWindow(pBlock, pInfo->primaryTsCol); + SExprSupp* pSup = &pOperator->exprSupp; + projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, NULL); + if (pInfo->curGroupId == 0 || pInfo->curGroupId == pBlock->info.groupId) { pInfo->curGroupId = pBlock->info.groupId; // the first data block @@ -3629,10 +3632,9 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* goto _error; } - int32_t num = 0, num1 = 0; SSDataBlock* pResBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc); - SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &num); - SExprInfo* pCopyColumnExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &num1); + SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pInfo->numOfExpr); + SExprInfo* pCopyColumnExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pInfo->numOfNotFillExpr); SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType @@ -3645,6 +3647,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* SResultInfo* pResultInfo = &pOperator->resultInfo; initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pResBlock, pOperator->resultInfo.capacity); + initExprSupp(&pOperator->exprSupp, pExprInfo, pInfo->numOfExpr); pInfo->primaryTsCol = ((SColumnNode*)pPhyFillNode->pWStartTs)->slotId; @@ -3652,9 +3655,9 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID); - int32_t code = initFillInfo(pInfo, pExprInfo, num, pCopyColumnExprInfo, num1, - (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, - pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order); + int32_t code = + initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pCopyColumnExprInfo, pInfo->numOfNotFillExpr, (SNodeListNode*)pPhyFillNode->pValues, + pPhyFillNode->timeRange, pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -3667,7 +3670,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pOperator->status = OP_NOT_OPENED; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_FILL; pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->exprSupp.numOfExprs = num; + pOperator->exprSupp.numOfExprs = pInfo->numOfExpr; pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index c1bcf12cb2..4a885fb2ce 100644 --- a/source/libs/executor/src/tfill.c +++ 
b/source/libs/executor/src/tfill.c @@ -258,7 +258,7 @@ static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, int32_t rowIndex, SArray static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t outputRows) { pFillInfo->numOfCurrent = 0; - SColumnInfoData* pTsCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->tsSlotId); + SColumnInfoData* pTsCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId); int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); bool ascFill = FILL_IS_ASC_FILL(pFillInfo); @@ -349,10 +349,6 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t } if (pFillInfo->index >= pFillInfo->numOfRows || pFillInfo->numOfCurrent >= outputRows) { - /* the raw data block is exhausted, next value does not exists */ - // if (pFillInfo->index >= pFillInfo->numOfRows) { - // taosMemoryFreeClear(*next); - // } pFillInfo->numOfTotal += pFillInfo->numOfCurrent; return pFillInfo->numOfCurrent; } @@ -413,7 +409,17 @@ struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t } pFillInfo->order = order; - pFillInfo->tsSlotId = primaryTsSlotId; + pFillInfo->srcTsSlotId = primaryTsSlotId; + + for(int32_t i = 0; i < numOfNotFillCols; ++i) { + SFillColInfo* p = &pCol[i + numOfFillCols]; + int32_t srcSlotId = GET_SRC_SLOT_ID(p); + if (srcSlotId == primaryTsSlotId) { + pFillInfo->tsSlotId = i + numOfFillCols; + break; + } + } + taosResetFillInfo(pFillInfo, skey); switch (fillType) { @@ -531,7 +537,7 @@ bool taosFillHasMoreResults(SFillInfo* pFillInfo) { } int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) { - SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, 0); + SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId); int64_t* tsList = (int64_t*)pCol->pData; int32_t numOfRows = taosNumOfRemainRows(pFillInfo); @@ -619,9 +625,9 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn nodesValueNodeToVariant(pv, &pFillCol[i].fillVal); } - if (pExprInfo->base.numOfParams > 0) { +// if (pExprInfo->base.numOfParams > 0) { // pFillCol[i].flag = pExprInfo->base.pParam[0].pCol->flag; // always be the normal column for table query - } +// } } for(int32_t i = 0; i < numOfNotFillExpr; ++i) { diff --git a/tests/script/tsim/parser/fill_us.sim b/tests/script/tsim/parser/fill_us.sim index 82d282642e..d42d604ad3 100644 --- a/tests/script/tsim/parser/fill_us.sim +++ b/tests/script/tsim/parser/fill_us.sim @@ -1010,6 +1010,7 @@ if $data31 != 9.000000000 then return -1 endi if $data41 != 12.500000000 then + print expect 12.500000000, actual: $data41 return -1 endi if $data51 != 16.000000000 then From 3fb91c6a1825c358e5c6bfe8da9e46c44ecf5af1 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Mon, 8 Aug 2022 17:29:11 +0800 Subject: [PATCH 06/11] enh: set _wstart to the output datablock of fill --- source/libs/planner/src/planPhysiCreater.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 6075a57ab3..476820020a 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1390,6 +1390,9 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren if (TSDB_CODE_SUCCESS == code) { code = setNodeSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pWStartTs, &pFill->pWStartTs); } + if (TSDB_CODE_SUCCESS == code) { + code 
= addDataBlockSlot(pCxt, &pFill->pWStartTs, pFill->node.pOutputDataBlockDesc); + } if (TSDB_CODE_SUCCESS == code && NULL != pFillNode->pValues) { pFill->pValues = nodesCloneNode(pFillNode->pValues); From 166cdcec7c8b960e51cbffde0e18a63132ec536f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 12 Aug 2022 14:04:43 +0800 Subject: [PATCH 07/11] fix(query): fix bug in fill. --- source/libs/executor/inc/executorimpl.h | 2 + source/libs/executor/inc/tfill.h | 12 ++-- source/libs/executor/src/executorimpl.c | 34 +++++++---- source/libs/executor/src/tfill.c | 80 +++++++++++++------------ tests/script/tsim/parser/fill.sim | 20 +++---- tests/script/tsim/parser/fill_stb.sim | 12 ++-- 6 files changed, 93 insertions(+), 67 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 1207ea83cc..bbfddba4fb 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -624,6 +624,7 @@ typedef struct SIndefOperatorInfo { typedef struct SFillOperatorInfo { struct SFillInfo* pFillInfo; SSDataBlock* pRes; + SSDataBlock* pFinalRes; int64_t totalInputRows; void** p; SSDataBlock* existNewGroupBlock; @@ -631,6 +632,7 @@ typedef struct SFillOperatorInfo { SNode* pCondition; SArray* pColMatchColInfo; int32_t primaryTsCol; + int32_t primarySrcSlotId; uint64_t curGroupId; // current handled group id SExprInfo* pExprInfo; int32_t numOfExpr; diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index 9f3a95aca8..ae3c010ac3 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -28,9 +28,7 @@ struct SSDataBlock; typedef struct SFillColInfo { SExprInfo *pExpr; -// int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN bool notFillCol; // denote if this column needs fill operation -// int16_t tagIndex; // index of current tag in SFillTagColInfo array list SVariant fillVal; } SFillColInfo; @@ -38,6 +36,11 @@ typedef struct { SSchema col; char* tagVal; } SFillTagColInfo; + +typedef struct { + int64_t key; + SArray* pRowVal; +} SRowVal; typedef struct SFillInfo { TSKEY start; // start timestamp @@ -53,9 +56,8 @@ typedef struct SFillInfo { int32_t numOfCurrent; // number of filled rows in current results int32_t numOfCols; // number of columns, including the tags columns SInterval interval; - - SArray *prev; - SArray *next; + SRowVal prev; + SRowVal next; SSDataBlock *pSrcBlock; int32_t alloc; // data buffer size in rows diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 2fc113c7d6..fff0600fe4 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3212,8 +3212,8 @@ static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo* pInfo, SResult static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) { if (taosFillHasMoreResults(pInfo->pFillInfo)) { - int32_t numOfResultRows = pResultInfo->capacity - pInfo->pRes->info.rows; - taosFillResultDataBlock(pInfo->pFillInfo, pInfo->pRes, numOfResultRows); + int32_t numOfResultRows = pResultInfo->capacity - pInfo->pFinalRes->info.rows; + taosFillResultDataBlock(pInfo->pFillInfo, pInfo->pFinalRes, numOfResultRows); pInfo->pRes->info.groupId = pInfo->curGroupId; return; } @@ -3229,9 +3229,13 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SResultInfo* pResultInfo = &pOperator->resultInfo; - SSDataBlock* 
pResBlock = pInfo->pRes; + SSDataBlock* pResBlock = pInfo->pFinalRes; blockDataCleanup(pResBlock); + blockDataCleanup(pInfo->pRes); + + int32_t order = TSDB_ORDER_ASC; + int32_t scanFlag = MAIN_SCAN; doHandleRemainBlockFromNewGroup(pInfo, pResultInfo, pTaskInfo); if (pResBlock->info.rows > 0) { @@ -3251,17 +3255,23 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey); } else { blockDataUpdateTsWindow(pBlock, pInfo->primaryTsCol); - SExprSupp* pSup = &pOperator->exprSupp; + + getTableScanInfo(pOperator, &order, &scanFlag); + setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false); projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, NULL); + pInfo->pRes->info.groupId = pBlock->info.groupId; - if (pInfo->curGroupId == 0 || pInfo->curGroupId == pBlock->info.groupId) { - pInfo->curGroupId = pBlock->info.groupId; // the first data block + SColumnInfoData* pDst = taosArrayGet(pInfo->pRes->pDataBlock, pInfo->primaryTsCol); + SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, pInfo->primarySrcSlotId); + colDataAssign(pDst, pSrc, pInfo->pRes->info.rows, &pResBlock->info); - pInfo->totalInputRows += pBlock->info.rows; + if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.groupId) { + pInfo->curGroupId = pInfo->pRes->info.groupId; // the first data block + pInfo->totalInputRows += pInfo->pRes->info.rows; - taosFillSetStartInfo(pInfo->pFillInfo, pBlock->info.rows, pBlock->info.window.ekey); - taosFillSetInputDataBlock(pInfo->pFillInfo, pBlock); + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.ekey); + taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->pRes); } else if (pInfo->curGroupId != pBlock->info.groupId) { // the new group data block pInfo->existNewGroupBlock = pBlock; @@ -3649,7 +3659,8 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* blockDataEnsureCapacity(pResBlock, pOperator->resultInfo.capacity); initExprSupp(&pOperator->exprSupp, pExprInfo, pInfo->numOfExpr); - pInfo->primaryTsCol = ((SColumnNode*)pPhyFillNode->pWStartTs)->slotId; + pInfo->primaryTsCol = ((STargetNode*)pPhyFillNode->pWStartTs)->slotId; + pInfo->primarySrcSlotId = ((SColumnNode*)((STargetNode*)pPhyFillNode->pWStartTs)->pExpr)->slotId; int32_t numOfOutputCols = 0; SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, @@ -3663,6 +3674,9 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* } pInfo->pRes = pResBlock; + pInfo->pFinalRes = createOneDataBlock(pResBlock, false); + blockDataEnsureCapacity(pInfo->pFinalRes, pOperator->resultInfo.capacity); + pInfo->pCondition = pPhyFillNode->node.pConditions; pInfo->pColMatchColInfo = pColMatchColInfo; pOperator->name = "FillOperator"; diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 4a885fb2ce..f9897c4253 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -65,7 +65,7 @@ static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowInd if (pDstColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { colDataAppend(pDstColInfo, rowIndex, (const char*)&pFillInfo->currentKey, false); } else { - SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev : pFillInfo->next; + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfo, rowIndex, pKey); } @@ -105,7 +105,7 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* // set the other values if (pFillInfo->type == TSDB_FILL_PREV) { - SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev : pFillInfo->next; + SArray* p = FILL_IS_ASC_FILL(pFillInfo)? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; @@ -120,7 +120,7 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* } } } else if (pFillInfo->type == TSDB_FILL_NEXT) { - SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->next : pFillInfo->prev; + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->next.pRowVal : pFillInfo->prev.pRowVal; // todo refactor: start from 0 not 1 for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; @@ -149,21 +149,21 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* if (type == TSDB_DATA_TYPE_TIMESTAMP) { colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false); } else { - SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev : pFillInfo->next; + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstCol, index, pKey); } } else { - SGroupKeys* pKey = taosArrayGet(pFillInfo->prev, i); + SGroupKeys* pKey = taosArrayGet(pFillInfo->prev.pRowVal, i); if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pKey->isNull) { colDataAppendNULL(pDstCol, index); continue; } - SGroupKeys* pKey1 = taosArrayGet(pFillInfo->prev, pFillInfo->tsSlotId); + SGroupKeys* pKey1 = taosArrayGet(pFillInfo->prev.pRowVal, pFillInfo->tsSlotId); int64_t prevTs = *(int64_t*)pKey1->pData; - int32_t srcSlotId = GET_SRC_SLOT_ID(pCol); + int32_t srcSlotId = GET_DEST_SLOT_ID(pCol); SColumnInfoData* pSrcCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId); char* data = colDataGetData(pSrcCol, pFillInfo->index); @@ -192,7 +192,7 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false); } else { - SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev : pFillInfo->next; + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? 
pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDst, index, pKey); } @@ -220,7 +220,7 @@ void doSetVal(SColumnInfoData* pDstCol, int32_t rowIndex, const SGroupKeys* pKey } static void initBeforeAfterDataBuf(SFillInfo* pFillInfo) { - if (taosArrayGetSize(pFillInfo->next) > 0) { + if (taosArrayGetSize(pFillInfo->next.pRowVal) > 0) { return; } @@ -234,10 +234,10 @@ static void initBeforeAfterDataBuf(SFillInfo* pFillInfo) { key.bytes = pSchema->bytes; key.type = pSchema->type; - taosArrayPush(pFillInfo->next, &key); + taosArrayPush(pFillInfo->next.pRowVal, &key); key.pData = taosMemoryMalloc(pSchema->bytes); - taosArrayPush(pFillInfo->prev, &key); + taosArrayPush(pFillInfo->prev.pRowVal, &key); } } @@ -245,13 +245,24 @@ static void saveColData(SArray* rowBuf, int32_t columnIndex, const char* src, bo static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, int32_t rowIndex, SArray* pRow) { for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { - int32_t srcSlotId = GET_SRC_SLOT_ID(&pFillInfo->pFillCol[i]); + int32_t type = pFillInfo->pFillCol[i].pExpr->pExpr->nodeType; + if (type == QUERY_NODE_COLUMN) { + int32_t srcSlotId = GET_DEST_SLOT_ID(&pFillInfo->pFillCol[i]); - SColumnInfoData* pSrcCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, srcSlotId); + SColumnInfoData* pSrcCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, srcSlotId); - bool isNull = colDataIsNull_s(pSrcCol, rowIndex); - char* p = colDataGetData(pSrcCol, rowIndex); - saveColData(pRow, i, p, isNull); + bool isNull = colDataIsNull_s(pSrcCol, rowIndex); + char* p = colDataGetData(pSrcCol, rowIndex); + saveColData(pRow, i, p, isNull); + } else if (type == QUERY_NODE_OPERATOR) { + SColumnInfoData* pSrcCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, i); + + bool isNull = colDataIsNull_s(pSrcCol, rowIndex); + char* p = colDataGetData(pSrcCol, rowIndex); + saveColData(pRow, i, p, isNull); + } else { + ASSERT(0); + } } } @@ -272,7 +283,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t // set the next value for interpolation if ((pFillInfo->currentKey < ts && ascFill) || (pFillInfo->currentKey > ts && !ascFill)) { - copyCurrentRowIntoBuf(pFillInfo, pFillInfo->index, pFillInfo->next); + copyCurrentRowIntoBuf(pFillInfo, pFillInfo->index, pFillInfo->next.pRowVal); } if (((pFillInfo->currentKey < ts && ascFill) || (pFillInfo->currentKey > ts && !ascFill)) && @@ -294,39 +305,38 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t if (pFillInfo->type == TSDB_FILL_NEXT && (pFillInfo->index + 1) < pFillInfo->numOfRows) { int32_t nextRowIndex = pFillInfo->index + 1; - copyCurrentRowIntoBuf(pFillInfo, nextRowIndex, pFillInfo->next); + copyCurrentRowIntoBuf(pFillInfo, nextRowIndex, pFillInfo->next.pRowVal); } // copy rows to dst buffer for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - int32_t srcSlotId = GET_SRC_SLOT_ID(pCol); int32_t dstSlotId = GET_DEST_SLOT_ID(pCol); SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, dstSlotId); - SColumnInfoData* pSrc = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, srcSlotId); + SColumnInfoData* pSrc = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, dstSlotId); char* src = colDataGetData(pSrc, pFillInfo->index); if (!colDataIsNull_s(pSrc, pFillInfo->index)) { colDataAppend(pDst, index, src, false); - saveColData(pFillInfo->prev, i, src, false); + saveColData(pFillInfo->prev.pRowVal, i, src, false); } else { // the value is null if 
(pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false); } else { // i > 0 and data is null , do interpolation if (pFillInfo->type == TSDB_FILL_PREV) { - SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev : pFillInfo->next; + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDst, index, pKey); } else if (pFillInfo->type == TSDB_FILL_LINEAR) { bool isNull = colDataIsNull_s(pSrc, pFillInfo->index); colDataAppend(pDst, index, src, isNull); - saveColData(pFillInfo->prev, i, src, isNull); // todo: + saveColData(pFillInfo->prev.pRowVal, i, src, isNull); // todo: } else if (pFillInfo->type == TSDB_FILL_NULL) { colDataAppendNULL(pDst, index); } else if (pFillInfo->type == TSDB_FILL_NEXT) { - SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->next : pFillInfo->prev; + SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->next.pRowVal : pFillInfo->prev.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDst, index, pKey); } else { @@ -413,7 +423,7 @@ struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t for(int32_t i = 0; i < numOfNotFillCols; ++i) { SFillColInfo* p = &pCol[i + numOfFillCols]; - int32_t srcSlotId = GET_SRC_SLOT_ID(p); + int32_t srcSlotId = GET_DEST_SLOT_ID(p); if (srcSlotId == primaryTsSlotId) { pFillInfo->tsSlotId = i + numOfFillCols; break; @@ -453,8 +463,8 @@ struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t pFillInfo->id = id; pFillInfo->interval = *pInterval; - pFillInfo->next = taosArrayInit(pFillInfo->numOfCols, sizeof(SGroupKeys)); - pFillInfo->prev = taosArrayInit(pFillInfo->numOfCols, sizeof(SGroupKeys)); + pFillInfo->next.pRowVal = taosArrayInit(pFillInfo->numOfCols, sizeof(SGroupKeys)); + pFillInfo->prev.pRowVal = taosArrayInit(pFillInfo->numOfCols, sizeof(SGroupKeys)); initBeforeAfterDataBuf(pFillInfo); return pFillInfo; @@ -474,16 +484,16 @@ void* taosDestroyFillInfo(SFillInfo* pFillInfo) { if (pFillInfo == NULL) { return NULL; } - for (int32_t i = 0; i < taosArrayGetSize(pFillInfo->prev); ++i) { - SGroupKeys* pKey = taosArrayGet(pFillInfo->prev, i); + for (int32_t i = 0; i < taosArrayGetSize(pFillInfo->prev.pRowVal); ++i) { + SGroupKeys* pKey = taosArrayGet(pFillInfo->prev.pRowVal, i); taosMemoryFree(pKey->pData); } - taosArrayDestroy(pFillInfo->prev); - for (int32_t i = 0; i < taosArrayGetSize(pFillInfo->next); ++i) { - SGroupKeys* pKey = taosArrayGet(pFillInfo->next, i); + taosArrayDestroy(pFillInfo->prev.pRowVal); + for (int32_t i = 0; i < taosArrayGetSize(pFillInfo->next.pRowVal); ++i) { + SGroupKeys* pKey = taosArrayGet(pFillInfo->next.pRowVal, i); taosMemoryFree(pKey->pData); } - taosArrayDestroy(pFillInfo->next); + taosArrayDestroy(pFillInfo->next.pRowVal); // for (int32_t i = 0; i < pFillInfo->numOfTags; ++i) { // taosMemoryFreeClear(pFillInfo->pTags[i].tagVal); @@ -624,10 +634,6 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn SValueNode* pv = (SValueNode*)nodesListGetNode(pValNode->pNodeList, index); nodesValueNodeToVariant(pv, &pFillCol[i].fillVal); } - -// if (pExprInfo->base.numOfParams > 0) { -// pFillCol[i].flag = pExprInfo->base.pParam[0].pCol->flag; // always be the normal column for table query -// } } for(int32_t i = 0; i < numOfNotFillExpr; ++i) { diff --git a/tests/script/tsim/parser/fill.sim b/tests/script/tsim/parser/fill.sim index 396bdd1e56..24196450a5 100644 --- 
a/tests/script/tsim/parser/fill.sim +++ b/tests/script/tsim/parser/fill.sim @@ -885,15 +885,15 @@ if $data10 != @20-01-01 01:01:10.000@ then return -1 endi -if $data11 != 1.000000000 then +if $data11 != 99.000000000 then return -1 endi -if $data12 != 1.000000000 then +if $data12 != 91.000000000 then return -1 endi -if $data13 != -87.000000000 then +if $data13 != 90.000000000 then return -1 endi @@ -917,15 +917,15 @@ if $data70 != @20-01-01 01:02:10.000@ then return -1 endi -if $data71 != 1.000000000 then +if $data71 != 99.000000000 then return -1 endi -if $data72 != 1.000000000 then +if $data72 != 91.000000000 then return -1 endi -if $data73 != -87.000000000 then +if $data73 != 90.000000000 then return -1 endi @@ -994,19 +994,19 @@ if $data10 != @20-01-01 01:01:10.000@ then return -1 endi -if $data11 != 1.000000000 then +if $data11 != 99.000000000 then return -1 endi -if $data12 != 1.000000000 then +if $data12 != 91.000000000 then return -1 endi -if $data13 != -87.000000000 then +if $data13 != 90.000000000 then return -1 endi -if $data14 != 86 then +if $data14 != 89 then return -1 endi diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim index 107bac7089..c1d568594a 100644 --- a/tests/script/tsim/parser/fill_stb.sim +++ b/tests/script/tsim/parser/fill_stb.sim @@ -111,13 +111,15 @@ endi if $data12 != -2 then return -1 endi -if $data13 != -3.00000 then +if $data13 != -3 then return -1 endi -if $data14 != -4.000000000 then +if $data14 != -4.00000 then + print expect -4.00000, actual: $data14 return -1 endi -if $data15 != -5 then +if $data15 != -5.000000000 then + print expect -5.000000000, actual: $data15 return -1 endi if $data31 != -1 then @@ -126,10 +128,10 @@ endi if $data52 != -2 then return -1 endi -if $data73 != -3.00000 then +if $data73 != -3 then return -1 endi -if $data74 != -4.000000000 then +if $data74 != -4.00000 then return -1 endi From 7084c765d04a2f1a1e9fdbced02db61a77f47120 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 12 Aug 2022 14:45:06 +0800 Subject: [PATCH 08/11] fix(query): set correct fill column index. --- source/libs/executor/src/executorimpl.c | 71 ++++++++++++++++++------- source/libs/executor/src/tfill.c | 19 ------- 2 files changed, 51 insertions(+), 39 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index fff0600fe4..e1732cb2aa 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3191,25 +3191,33 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa } } -static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, +static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag); +static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo *pOperator, SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) { pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; + SSDataBlock* pResBlock = pInfo->pFinalRes; + + int32_t order = TSDB_ORDER_ASC; + int32_t scanFlag = MAIN_SCAN; + getTableScanInfo(pOperator, &order, &scanFlag); int64_t ekey = Q_STATUS_EQUAL(pTaskInfo->status, TASK_COMPLETED) ? 
pInfo->win.ekey : pInfo->existNewGroupBlock->info.window.ekey; taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo)); - taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); - taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); + doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag); - int32_t numOfResultRows = pResultInfo->capacity - pInfo->pRes->info.rows; - taosFillResultDataBlock(pInfo->pFillInfo, pInfo->pRes, numOfResultRows); + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ekey); + taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->pRes); + + int32_t numOfResultRows = pResultInfo->capacity - pResBlock->info.rows; + taosFillResultDataBlock(pInfo->pFillInfo, pResBlock, numOfResultRows); pInfo->curGroupId = pInfo->existNewGroupBlock->info.groupId; pInfo->existNewGroupBlock = NULL; } -static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, +static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) { if (taosFillHasMoreResults(pInfo->pFillInfo)) { int32_t numOfResultRows = pResultInfo->capacity - pInfo->pFinalRes->info.rows; @@ -3220,7 +3228,34 @@ static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo* pInfo, SResultInf // handle the cached new group data block if (pInfo->existNewGroupBlock) { - doHandleRemainBlockForNewGroupImpl(pInfo, pResultInfo, pTaskInfo); + doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo); + } +} + +static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag) { + SFillOperatorInfo* pInfo = pOperator->info; + SExprSupp* pSup = &pOperator->exprSupp; + SSDataBlock* pResBlock = pInfo->pFinalRes; + + setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false); + projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, NULL); + pInfo->pRes->info.groupId = pBlock->info.groupId; + + SColumnInfoData* pDst = taosArrayGet(pInfo->pRes->pDataBlock, pInfo->primaryTsCol); + SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, pInfo->primarySrcSlotId); + colDataAssign(pDst, pSrc, pInfo->pRes->info.rows, &pResBlock->info); + + for(int32_t i = 0; i < pInfo->numOfNotFillExpr; ++i) { + SFillColInfo* pCol = &pInfo->pFillInfo->pFillCol[i + pInfo->numOfExpr]; + ASSERT(pCol->notFillCol); + + SExprInfo* pExpr = pCol->pExpr; + int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId; + int32_t dstSlotId = pExpr->base.resSchema.slotId; + + SColumnInfoData* pDst1 = taosArrayGet(pInfo->pRes->pDataBlock, dstSlotId); + SColumnInfoData* pSrc1 = taosArrayGet(pBlock->pDataBlock, srcSlotId); + colDataAssign(pDst1, pSrc1, pInfo->pRes->info.rows, &pResBlock->info); } } @@ -3236,8 +3271,9 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { int32_t order = TSDB_ORDER_ASC; int32_t scanFlag = MAIN_SCAN; + getTableScanInfo(pOperator, &order, &scanFlag); - doHandleRemainBlockFromNewGroup(pInfo, pResultInfo, pTaskInfo); + doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, pTaskInfo); if (pResBlock->info.rows > 0) { pResBlock->info.groupId = pInfo->curGroupId; return pResBlock; @@ -3255,16 +3291,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey); } else { blockDataUpdateTsWindow(pBlock, pInfo->primaryTsCol); - SExprSupp* pSup = 
&pOperator->exprSupp; - - getTableScanInfo(pOperator, &order, &scanFlag); - setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false); - projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, NULL); - pInfo->pRes->info.groupId = pBlock->info.groupId; - - SColumnInfoData* pDst = taosArrayGet(pInfo->pRes->pDataBlock, pInfo->primaryTsCol); - SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, pInfo->primarySrcSlotId); - colDataAssign(pDst, pSrc, pInfo->pRes->info.rows, &pResBlock->info); + doApplyScalarCalculation(pOperator, pBlock, order, scanFlag); if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.groupId) { pInfo->curGroupId = pInfo->pRes->info.groupId; // the first data block @@ -3293,14 +3320,18 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { return pResBlock; } - doHandleRemainBlockFromNewGroup(pInfo, pResultInfo, pTaskInfo); + doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, pTaskInfo); if (pResBlock->info.rows >= pOperator->resultInfo.threshold || pBlock == NULL) { pResBlock->info.groupId = pInfo->curGroupId; return pResBlock; } } else if (pInfo->existNewGroupBlock) { // try next group assert(pBlock != NULL); - doHandleRemainBlockForNewGroupImpl(pInfo, pResultInfo, pTaskInfo); + + blockDataCleanup(pResBlock); + blockDataCleanup(pInfo->pRes); + + doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo); if (pResBlock->info.rows > pResultInfo->threshold) { pResBlock->info.groupId = pInfo->curGroupId; return pResBlock; diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index f9897c4253..44340a5b5e 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -34,28 +34,9 @@ ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1)))) #define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId) -#define GET_SRC_SLOT_ID(_p) ((_p)->pExpr->base.pParam[0].pCol->slotId) static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); -static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) { -#if 0 - for (int32_t j = 0; j < pFillInfo->numOfCols; ++j) { - SFillColInfo* pCol = &pFillInfo->pFillCol[j]; - if (TSDB_COL_IS_NORMAL_COL(pCol->flag) || TSDB_COL_IS_UD_COL(pCol->flag)) { - continue; - } - - SResSchema* pSchema = &pCol->pExpr->base.resSchema; - char* val1 = elePtrAt(data[j], pSchema->bytes, genRows); - - assert(pCol->tagIndex >= 0 && pCol->tagIndex < pFillInfo->numOfTags); - SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; - assignVal(val1, pTag->tagVal, pSchema->bytes, pSchema->type); - } -#endif -} - static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowIndex) { for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; From 2ad70dff1e9487297dcb6d15f8bf46f7a8019aba Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Fri, 12 Aug 2022 17:08:32 +0800 Subject: [PATCH 09/11] fix: output datablock desc error --- source/libs/planner/src/planPhysiCreater.c | 2 +- source/libs/planner/test/planIntervalTest.cpp | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 5381db3ba7..c7eb6f7b5e 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -195,7 +195,7 @@ static int32_t
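/* The hunk below derives nextSlotId from LIST_LENGTH(pDataBlockDesc->pSlots) instead of taosHashGetSize(pHash). When a projection repeats an expression, as with the duplicated COUNT(c1) in the test added further down, the location hash presumably deduplicates entries, so its size can undercount the slots already allocated in the descriptor and a colliding slot id would be handed out; the slot list length is the authoritative count. */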
addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList, int32_t code = TSDB_CODE_SUCCESS; SHashObj* pHash = taosArrayGetP(pCxt->pLocationHelper, pDataBlockDesc->dataBlockId); - int16_t nextSlotId = taosHashGetSize(pHash), slotId = 0; + int16_t nextSlotId = LIST_LENGTH(pDataBlockDesc->pSlots), slotId = 0; SNode* pNode = NULL; FOREACH(pNode, pList) { SNode* pExpr = QUERY_NODE_ORDER_BY_EXPR == nodeType(pNode) ? ((SOrderByExprNode*)pNode)->pExpr : pNode; diff --git a/source/libs/planner/test/planIntervalTest.cpp b/source/libs/planner/test/planIntervalTest.cpp index 674603310a..8346889734 100644 --- a/source/libs/planner/test/planIntervalTest.cpp +++ b/source/libs/planner/test/planIntervalTest.cpp @@ -48,6 +48,9 @@ TEST_F(PlanIntervalTest, fill) { run("SELECT _WSTART, TBNAME, COUNT(*) FROM st1 " "WHERE ts > '2022-04-01 00:00:00' and ts < '2022-04-30 23:59:59' " "PARTITION BY TBNAME INTERVAL(10s) FILL(PREV)"); + + run("SELECT COUNT(c1), MAX(c3), COUNT(c1) FROM t1 " + "WHERE ts > '2022-04-01 00:00:00' and ts < '2022-04-30 23:59:59' INTERVAL(10s) FILL(PREV)"); } TEST_F(PlanIntervalTest, selectFunc) { From 79f005552e476b48273bb91bce8ff8c7b51bc9c8 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Fri, 12 Aug 2022 18:18:16 +0800 Subject: [PATCH 10/11] fix: fill clause error --- source/libs/planner/src/planLogicCreater.c | 24 +++++++++++++++---- source/libs/planner/test/planIntervalTest.cpp | 3 +++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 1e6f1edc4c..c843dd0a67 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -798,13 +798,13 @@ static bool needFillValue(SNode* pNode) { return hasFillCol; } -static int32_t partFillExprs(SNodeList* pProjectionList, SNodeList** pFillExprs, SNodeList** pNotFillExprs) { +static int32_t partFillExprs(SSelectStmt* pSelect, SNodeList** pFillExprs, SNodeList** pNotFillExprs) { int32_t code = TSDB_CODE_SUCCESS; SNode* pProject = NULL; - FOREACH(pProject, pProjectionList) { + FOREACH(pProject, pSelect->pProjectionList) { if (needFillValue(pProject)) { code = nodesListMakeStrictAppend(pFillExprs, nodesCloneNode(pProject)); - } else { + } else if (QUERY_NODE_VALUE != nodeType(pProject)) { code = nodesListMakeStrictAppend(pNotFillExprs, nodesCloneNode(pProject)); } if (TSDB_CODE_SUCCESS != code) { @@ -813,6 +813,22 @@ static int32_t partFillExprs(SNodeList* pProjectionList, SNodeList** pFillExprs, break; } } + if (!pSelect->isDistinct) { + SNode* pOrderExpr = NULL; + FOREACH(pOrderExpr, pSelect->pOrderByList) { + SNode* pExpr = ((SOrderByExprNode*)pOrderExpr)->pExpr; + if (needFillValue(pExpr)) { + code = nodesListMakeStrictAppend(pFillExprs, nodesCloneNode(pExpr)); + } else if (QUERY_NODE_VALUE != nodeType(pExpr)) { + code = nodesListMakeStrictAppend(pNotFillExprs, nodesCloneNode(pExpr)); + } + if (TSDB_CODE_SUCCESS != code) { + NODES_DESTORY_LIST(*pFillExprs); + NODES_DESTORY_LIST(*pNotFillExprs); + break; + } + } + } return code; } @@ -837,7 +853,7 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pFill->node.resultDataOrder = pFill->node.requireDataOrder; pFill->inputTsOrder = ORDER_ASC; - int32_t code = partFillExprs(pSelect->pProjectionList, &pFill->pFillExprs, &pFill->pNotFillExprs); + int32_t code = partFillExprs(pSelect, &pFill->pFillExprs, &pFill->pNotFillExprs); if (TSDB_CODE_SUCCESS == code) { code = rewriteExprsForSelect(pFill->pFillExprs, pSelect, 
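/* partFillExprs() now partitions ORDER BY expressions as well, skipping that pass for DISTINCT queries (whose ordering expressions are presumably required to appear in the projection already); this way an ordering column such as c2 in the FILL(PREV) ... ORDER BY c2 test added below gets a slot of its own as a not-fill expression. Plain VALUE nodes are excluded from both lists, since literals need no fill handling, and rewriteExprsForSelect() then registers the partitioned expressions under the SQL_CLAUSE_FILL clause. */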
SQL_CLAUSE_FILL); } diff --git a/source/libs/planner/test/planIntervalTest.cpp b/source/libs/planner/test/planIntervalTest.cpp index 8346889734..dddd38fc95 100644 --- a/source/libs/planner/test/planIntervalTest.cpp +++ b/source/libs/planner/test/planIntervalTest.cpp @@ -51,6 +51,9 @@ TEST_F(PlanIntervalTest, fill) { run("SELECT COUNT(c1), MAX(c3), COUNT(c1) FROM t1 " "WHERE ts > '2022-04-01 00:00:00' and ts < '2022-04-30 23:59:59' INTERVAL(10s) FILL(PREV)"); + + run("SELECT COUNT(c1) FROM t1 WHERE ts > '2022-04-01 00:00:00' and ts < '2022-04-30 23:59:59' " + "PARTITION BY c2 INTERVAL(10s) FILL(PREV) ORDER BY c2"); } TEST_F(PlanIntervalTest, selectFunc) { From f96aa3e2ff5a06a31a1062e1477e26e65998aefc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 12 Aug 2022 19:11:10 +0800 Subject: [PATCH 11/11] fix(query): fix memory leak. --- source/libs/executor/src/executorimpl.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 5821982330..871cc6d86d 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -3647,6 +3647,13 @@ void destroyFillOperatorInfo(void* param, int32_t numOfOutput) { SFillOperatorInfo* pInfo = (SFillOperatorInfo*)param; pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo); pInfo->pRes = blockDataDestroy(pInfo->pRes); + pInfo->pFinalRes = blockDataDestroy(pInfo->pFinalRes); + + if (pInfo->pNotFillExprInfo != NULL) { + destroyExprInfo(pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr); + taosMemoryFree(pInfo->pNotFillExprInfo); + } + taosMemoryFreeClear(pInfo->p); taosArrayDestroy(pInfo->pColMatchColInfo); taosMemoryFreeClear(param); @@ -3712,7 +3719,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* SSDataBlock* pResBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc); SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pInfo->numOfExpr); - SExprInfo* pCopyColumnExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pInfo->numOfNotFillExpr); + pInfo->pNotFillExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pInfo->numOfNotFillExpr); SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType @@ -3735,7 +3742,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* &numOfOutputCols, COL_MATCH_FROM_SLOT_ID); int32_t code = - initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pCopyColumnExprInfo, pInfo->numOfNotFillExpr, (SNodeListNode*)pPhyFillNode->pValues, + initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order); if (code != TSDB_CODE_SUCCESS) { goto _error;
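/* With this patch, pFinalRes and pNotFillExprInfo are owned by SFillOperatorInfo, and destroyFillOperatorInfo() releases them together with pRes, the fill info, and the column-match array; initFillInfo() is assumed to borrow the not-fill expression array rather than take ownership of it, which is presumably how the previous local pCopyColumnExprInfo leaked, and the _error path is assumed to reach the same destructor during operator teardown. */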