From f3acf49e229c8015b8be50363a276f4adbe4f3ca Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 19 Nov 2024 16:23:46 +0800 Subject: [PATCH 01/76] enh: include --- source/libs/command/src/explain.c | 2 +- source/libs/executor/inc/dynqueryctrl.h | 1 + source/libs/executor/inc/groupcache.h | 1 + source/libs/executor/inc/hashjoin.h | 3 +++ source/libs/executor/inc/operator.h | 1 + source/libs/executor/inc/querytask.h | 2 ++ source/util/src/tcompare.c | 2 +- 7 files changed, 10 insertions(+), 2 deletions(-) diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 24b43ac95b..13b75f8233 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -748,7 +748,7 @@ static int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx } case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode; - EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT, (pAggNode->pGroupKeys ? "GroupAggragate" : "Aggragate")); + EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT, (pAggNode->pGroupKeys ? "GroupAggregate" : "Aggregate")); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); diff --git a/source/libs/executor/inc/dynqueryctrl.h b/source/libs/executor/inc/dynqueryctrl.h index 793fbc0e61..3df0f6644c 100755 --- a/source/libs/executor/inc/dynqueryctrl.h +++ b/source/libs/executor/inc/dynqueryctrl.h @@ -19,6 +19,7 @@ extern "C" { #endif +#include "executorInt.h" typedef struct SDynQueryCtrlExecInfo { int64_t prevBlkNum; int64_t prevBlkRows; diff --git a/source/libs/executor/inc/groupcache.h b/source/libs/executor/inc/groupcache.h index 0244823e29..f50947ead7 100755 --- a/source/libs/executor/inc/groupcache.h +++ b/source/libs/executor/inc/groupcache.h @@ -18,6 +18,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "executorInt.h" #define GROUP_CACHE_DEFAULT_MAX_FILE_SIZE 104857600 #define GROUP_CACHE_MAX_FILE_FDS 10 diff --git a/source/libs/executor/inc/hashjoin.h b/source/libs/executor/inc/hashjoin.h index 1085f2236c..542763ffd3 100755 --- a/source/libs/executor/inc/hashjoin.h +++ b/source/libs/executor/inc/hashjoin.h @@ -19,6 +19,9 @@ extern "C" { #endif +#include "executorInt.h" +#include "operator.h" + #define HASH_JOIN_DEFAULT_PAGE_SIZE 10485760 #define HJOIN_DEFAULT_BLK_ROWS_NUM 4096 #define HJOIN_BLK_SIZE_LIMIT 10485760 diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index 91aef93452..f2e542e7cd 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -20,6 +20,7 @@ extern "C" { #endif +#include "executorInt.h" typedef struct SOperatorCostInfo { double openCost; double totalCost; diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index e3bb9a1361..f726e4300f 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -20,6 +20,8 @@ extern "C" { #endif +#include "executorInt.h" + #define GET_TASKID(_t) (((SExecTaskInfo*)(_t))->id.str) enum { diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index b1f4ed0ed3..ebc379897f 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -1294,7 +1294,7 @@ void DestroyRegexCache(){ uInfo("[regex cache] destory regex cache"); bool ret = taosTmrStopA(&sRegexCache.timer); if (!ret) { - uError("failed to stop regex cache timer"); + uInfo("stop regex cache timer may be failed"); } 
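  // Note: taosTmrStopA() returning false is not necessarily fatal here (the timer
  // may already have fired or been stopped), which is presumably why the message
  // is downgraded from an error to an info-level log in this patch.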
taosWLockLatch(&sRegexCache.mutex); sRegexCache.exit = true; From 04af5f4b944e9580b0bdfa73fdd7800d90682859 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 19 Nov 2024 19:38:40 +0800 Subject: [PATCH 02/76] enh: check param --- source/libs/executor/inc/operator.h | 8 ++++++++ source/libs/executor/src/aggregateoperator.c | 8 ++++++++ source/libs/executor/src/anomalywindowoperator.c | 5 +++++ source/libs/executor/src/executorInt.c | 1 + 4 files changed, 22 insertions(+) diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index f2e542e7cd..5ceedbe542 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -202,6 +202,14 @@ void * getOperatorParam(int32_t opType, SOperatorParam* param, int32_t i void doKeepTuple(SWindowRowsSup* pRowSup, int64_t ts, uint64_t groupId); void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsList, int32_t rowIndex, uint64_t groupId); +#define CHECK_CONDITION_FAILED(c) \ + do { \ + if (!(c)) { \ + qError("function:%s condition failed, Line:%d", __FUNCTION__, __LINE__); \ + return TSDB_CODE_APP_ERROR; \ + } \ + } while (0) + #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 829ca6da50..b71ed5ee26 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -184,6 +184,10 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SAggOperatorInfo* pAggInfo = pOperator->info; + if(!pAggInfo) { + qError("function:%s, pAggInfo is NULL", __func__); + return false; + } if (pOperator->blocking && pAggInfo->hasValidBlock) { return false; } @@ -333,6 +337,10 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) { int32_t code = TSDB_CODE_SUCCESS; + if (pOperator || (pOperator->exprSupp.numOfExprs > 0 && pCtx == NULL)) { + qError("%s failed at line %d since pCtx is NULL.", __func__, __LINE__); + return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + } for (int32_t k = 0; k < pOperator->exprSupp.numOfExprs; ++k) { if (functionNeedToExecute(&pCtx[k])) { // todo add a dummy function to avoid process check diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index 94cc5d9129..b678030a1c 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -171,6 +171,10 @@ _error: } static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + CHECK_CONDITION_FAILED(pOperator != NULL); + CHECK_CONDITION_FAILED(ppRes != NULL); + CHECK_CONDITION_FAILED(pOperator->info != NULL); + CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SAnomalyWindowOperatorInfo* pInfo = pOperator->info; @@ -181,6 +185,7 @@ static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRe int64_t st = taosGetTimestampUs(); int32_t numOfBlocks = taosArrayGetSize(pSupp->blocks); + CHECK_CONDITION_FAILED(pRes != NULL); blockDataCleanup(pRes); while (1) { diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index 1b823bf69d..4a1d26d875 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -255,6 +255,7 @@ static int32_t 
doSetInputDataBlockInfo(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SqlFunctionCtx* pCtx = pExprSup->pCtx; + CHECK_CONDITION_FAILED(pExprSup->numOfExprs <= 0 || pCtx != NULL); for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) { pCtx[i].order = order; pCtx[i].input.numOfRows = pBlock->info.rows; From 3027e377fa2c0c155026eb606a1ab6d9cd95eb18 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 23 Nov 2024 22:18:30 +0800 Subject: [PATCH 03/76] support update multi tag --- include/common/tmsg.h | 40 +- include/libs/nodes/cmdnodes.h | 34 +- source/common/src/tmsg.c | 55 +- source/dnode/vnode/src/meta/metaTable.c | 4 + source/libs/nodes/src/nodesUtilFuncs.c | 675 +++++++++++++++--------- source/libs/parser/inc/parAst.h | 2 + source/libs/parser/inc/sql.y | 11 +- source/libs/parser/src/parAstCreater.c | 34 +- source/libs/parser/src/parAstParser.c | 18 +- source/libs/parser/src/parTranslater.c | 206 ++++++-- 10 files changed, 758 insertions(+), 321 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 70a32cd266..27169b0a4e 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -178,6 +178,7 @@ typedef enum _mgmt_table { #define TSDB_ALTER_TABLE_DROP_TAG_INDEX 12 #define TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS 13 #define TSDB_ALTER_TABLE_ADD_COLUMN_WITH_COMPRESS_OPTION 14 +#define TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL 15 #define TSDB_FILL_NONE 0 #define TSDB_FILL_NULL 1 @@ -351,6 +352,7 @@ typedef enum ENodeType { QUERY_NODE_CREATE_ANODE_STMT, QUERY_NODE_DROP_ANODE_STMT, QUERY_NODE_UPDATE_ANODE_STMT, + QUERY_NODE_ALTER_TABLE_MULTI_STMT, // show statement nodes // see 'sysTableShowAdapter', 'SYSTABLE_SHOW_TYPE_OFFSET' @@ -421,7 +423,7 @@ typedef enum ENodeType { // physical plan node QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100, QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN, - QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, // INACTIVE + QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, // INACTIVE QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, @@ -435,7 +437,7 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_SORT, QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT, QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, - QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL, // INACTIVE + QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL, // INACTIVE QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL, @@ -985,7 +987,6 @@ typedef struct SEpSet { SEp eps[TSDB_MAX_REPLICA]; } SEpSet; - int32_t tEncodeSEpSet(SEncoder* pEncoder, const SEpSet* pEp); int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp); int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp); @@ -3259,6 +3260,16 @@ int32_t tEncodeSVDropTbBatchRsp(SEncoder* pCoder, const SVDropTbBatchRsp* pRsp); int32_t tDecodeSVDropTbBatchRsp(SDecoder* pCoder, SVDropTbBatchRsp* pRsp); // TDMT_VND_ALTER_TABLE ===================== +typedef struct SMultiTagUpateVal { + char* tagName; + int32_t colId; + int8_t tagType; + int8_t tagFree; + uint32_t nTagVal; + uint8_t* pTagVal; + int8_t isNull; + SArray* pTagArray; +} SMultiTagUpateVal; typedef struct { char* tbName; int8_t action; @@ -3285,9 +3296,10 @@ typedef struct { int32_t newTTL; int32_t newCommentLen; char* newComment; - int64_t ctimeMs; // fill by vnode - int8_t source; // TD_REQ_FROM_TAOX-taosX or TD_REQ_FROM_APP-taosClient - uint32_t compress; // TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS + int64_t ctimeMs; // fill by vnode + int8_t 
source; // TD_REQ_FROM_TAOX-taosX or TD_REQ_FROM_APP-taosClient + uint32_t compress; // TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS + SArray* pMultiTag; // TSDB_ALTER_TABLE_ADD_MULTI_TAGS } SVAlterTbReq; int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq); @@ -4143,20 +4155,20 @@ typedef struct { SArray* blockTbName; SArray* blockSchema; - union{ - struct{ - int64_t sleepTime; + union { + struct { + int64_t sleepTime; }; - struct{ - int32_t createTableNum; - SArray* createTableLen; - SArray* createTableReq; + struct { + int32_t createTableNum; + SArray* createTableLen; + SArray* createTableReq; }; }; } SMqDataRsp; -int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pObj); +int32_t tEncodeMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pObj); int32_t tDecodeMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp); void tDeleteMqDataRsp(SMqDataRsp* pRsp); diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 867f8c8efc..6623811712 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -264,8 +264,30 @@ typedef struct SAlterTableStmt { SDataType dataType; SValueNode* pVal; SColumnOptions* pColOptions; + SNodeList* pNodeListTagValue; } SAlterTableStmt; + +typedef struct SAlterTableStmt2 { + ENodeType type; + int8_t alterType; + char colName[TSDB_COL_NAME_LEN]; + STableOptions* pOptions; + SDataType dataType; + SValueNode* pVal; + SColumnOptions* pColOptions; + +} SAlterTableStmt2; + +typedef struct SAlterTableMultiStmt { + ENodeType type; + char dbName[TSDB_DB_NAME_LEN]; + char tableName[TSDB_TABLE_NAME_LEN]; + int8_t alterType; + + SNodeList* pNodeListTagValue; +} SAlterTableMultiStmt; + typedef struct SCreateUserStmt { ENodeType type; char userName[TSDB_USER_LEN]; @@ -341,7 +363,7 @@ typedef struct SShowStmt { SNode* pTbName; // SValueNode EOperatorType tableCondType; EShowKind showKind; // show databases: user/system, show tables: normal/child, others NULL - bool withFull; // for show users full; + bool withFull; // for show users full; } SShowStmt; typedef struct SShowCreateDatabaseStmt { @@ -651,7 +673,7 @@ typedef struct SCreateTSMAStmt { bool ignoreExists; char tsmaName[TSDB_TABLE_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN]; - char tableName[TSDB_TABLE_NAME_LEN]; // base tb name or base tsma name + char tableName[TSDB_TABLE_NAME_LEN]; // base tb name or base tsma name char originalTbName[TSDB_TABLE_NAME_LEN]; STSMAOptions* pOptions; SNode* pPrevQuery; @@ -660,10 +682,10 @@ typedef struct SCreateTSMAStmt { } SCreateTSMAStmt; typedef struct SDropTSMAStmt { - ENodeType type; - bool ignoreNotExists; - char dbName[TSDB_DB_NAME_LEN]; - char tsmaName[TSDB_TABLE_NAME_LEN]; + ENodeType type; + bool ignoreNotExists; + char dbName[TSDB_DB_NAME_LEN]; + char tsmaName[TSDB_TABLE_NAME_LEN]; } SDropTSMAStmt; #ifdef __cplusplus diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 814ec4a626..134a5cf8c5 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -2166,9 +2166,9 @@ int32_t tSerializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnalAl int32_t numOfAlgos = 0; void *pIter = taosHashIterate(pRsp->hash, NULL); while (pIter != NULL) { - SAnalyticsUrl *pUrl = pIter; - size_t nameLen = 0; - const char *name = taosHashGetKey(pIter, &nameLen); + SAnalyticsUrl *pUrl = pIter; + size_t nameLen = 0; + const char *name = taosHashGetKey(pIter, &nameLen); if (nameLen > 0 && nameLen <= TSDB_ANALYTIC_ALGO_KEY_LEN && pUrl->urlLen > 0) { numOfAlgos++; } @@ -2181,9 +2181,9 @@ int32_t 
tSerializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnalAl pIter = taosHashIterate(pRsp->hash, NULL); while (pIter != NULL) { - SAnalyticsUrl *pUrl = pIter; - size_t nameLen = 0; - const char *name = taosHashGetKey(pIter, &nameLen); + SAnalyticsUrl *pUrl = pIter; + size_t nameLen = 0; + const char *name = taosHashGetKey(pIter, &nameLen); if (nameLen > 0 && pUrl->urlLen > 0) { TAOS_CHECK_EXIT(tEncodeI32(&encoder, nameLen)); TAOS_CHECK_EXIT(tEncodeBinary(&encoder, (const uint8_t *)name, nameLen)); @@ -2221,10 +2221,10 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal int32_t lino; tDecoderInit(&decoder, buf, bufLen); - int32_t numOfAlgos = 0; - int32_t nameLen; - int32_t type; - char name[TSDB_ANALYTIC_ALGO_KEY_LEN]; + int32_t numOfAlgos = 0; + int32_t nameLen; + int32_t type; + char name[TSDB_ANALYTIC_ALGO_KEY_LEN]; SAnalyticsUrl url = {0}; TAOS_CHECK_EXIT(tStartDecode(&decoder)); @@ -10511,6 +10511,20 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) { TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, pReq->pTagVal, pReq->nTagVal)); } break; + case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: + int32_t nTags = taosArrayGetSize(pReq->pMultiTag); + TAOS_CHECK_EXIT(tEncodeI32v(pEncoder, nTags)); + for (int32_t i = 0; i < nTags; i++) { + SMultiTagUpateVal *pTag = taosArrayGet(pReq->pMultiTag, i); + TAOS_CHECK_EXIT(tEncodeI32v(pEncoder, pTag->colId)); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTag->tagName)); + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTag->isNull)); + TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTag->tagType)); + if (!pTag->isNull) { + TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, pTag->pTagVal, pReq->nTagVal)); + } + } + break; case TSDB_ALTER_TABLE_UPDATE_OPTIONS: TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pReq->updateTTL)); if (pReq->updateTTL) { @@ -10577,6 +10591,27 @@ static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq) TAOS_CHECK_EXIT(tDecodeBinary(pDecoder, &pReq->pTagVal, &pReq->nTagVal)); } break; + case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: + int32_t nTags; + TAOS_CHECK_EXIT(tDecodeI32v(pDecoder, &nTags)); + pReq->pMultiTag = taosArrayInit(nTags, sizeof(SMultiTagUpateVal)); + if (pReq->pMultiTag == NULL) { + TAOS_CHECK_EXIT(terrno); + } + for (int32_t i = 0; i < nTags; i++) { + SMultiTagUpateVal tag; + TAOS_CHECK_EXIT(tDecodeI32v(pDecoder, &tag.colId)); + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &tag.tagName)); + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &tag.isNull)); + TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &tag.tagType)); + if (!tag.isNull) { + TAOS_CHECK_EXIT(tDecodeBinary(pDecoder, &tag.pTagVal, &tag.nTagVal)); + } + if (taosArrayPush(pReq->pMultiTag, &tag) == NULL) { + TAOS_CHECK_EXIT(terrno); + } + } + break; case TSDB_ALTER_TABLE_UPDATE_OPTIONS: TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pReq->updateTTL)); if (pReq->updateTTL) { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 5c3516a962..03b8bdd93f 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2011,6 +2011,7 @@ _err: return terrno != 0 ? 
terrno : TSDB_CODE_FAILED; } +static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { return 0; } static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { SMetaEntry ctbEntry = {0}; SMetaEntry stbEntry = {0}; @@ -2736,6 +2737,9 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq, STableMeta return metaAlterTableColumn(pMeta, version, pReq, pMetaRsp); case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: return metaUpdateTableTagVal(pMeta, version, pReq); + case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: + return metaUpdateTableMultiTagVal(pMeta, version, pReq); + return terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION; case TSDB_ALTER_TABLE_UPDATE_OPTIONS: return metaUpdateTableOptions(pMeta, version, pReq); case TSDB_ALTER_TABLE_ADD_TAG_INDEX: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 30cc552761..25d1ecd6f9 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -14,6 +14,7 @@ */ #include "cmdnodes.h" +#include "functionMgt.h" #include "nodesUtil.h" #include "plannodes.h" #include "querynodes.h" @@ -22,7 +23,6 @@ #include "tdatablock.h" #include "thash.h" #include "tref.h" -#include "functionMgt.h" typedef struct SNodeMemChunk { int32_t availableSize; @@ -56,15 +56,13 @@ char* getJoinSTypeString(EJoinSubType type) { char* getFullJoinTypeString(EJoinType type, EJoinSubType stype) { static char* joinFullType[][8] = { - {"INNER", "INNER", "INNER", "INNER", "INNER", "INNER ANY", "INNER", "INNER"}, - {"LEFT", "LEFT", "LEFT OUTER", "LEFT SEMI", "LEFT ANTI", "LEFT ANY", "LEFT ASOF", "LEFT WINDOW"}, - {"RIGHT", "RIGHT", "RIGHT OUTER", "RIGHT SEMI", "RIGHT ANTI", "RIGHT ANY", "RIGHT ASOF", "RIGHT WINDOW"}, - {"FULL", "FULL", "FULL OUTER", "FULL", "FULL", "FULL ANY", "FULL", "FULL"} - }; + {"INNER", "INNER", "INNER", "INNER", "INNER", "INNER ANY", "INNER", "INNER"}, + {"LEFT", "LEFT", "LEFT OUTER", "LEFT SEMI", "LEFT ANTI", "LEFT ANY", "LEFT ASOF", "LEFT WINDOW"}, + {"RIGHT", "RIGHT", "RIGHT OUTER", "RIGHT SEMI", "RIGHT ANTI", "RIGHT ANY", "RIGHT ASOF", "RIGHT WINDOW"}, + {"FULL", "FULL", "FULL OUTER", "FULL", "FULL", "FULL ANY", "FULL", "FULL"}}; return joinFullType[type][stype]; } - int32_t mergeJoinConds(SNode** ppDst, SNode** ppSrc) { if (NULL == *ppSrc) { return TSDB_CODE_SUCCESS; @@ -74,14 +72,16 @@ int32_t mergeJoinConds(SNode** ppDst, SNode** ppSrc) { *ppSrc = NULL; return TSDB_CODE_SUCCESS; } - if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && ((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && + ((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) { TSWAP(*ppDst, *ppSrc); } int32_t code = 0; if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppDst)) { SLogicConditionNode* pDst = (SLogicConditionNode*)*ppDst; if (pDst->condType == LOGIC_COND_TYPE_AND) { - if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && ((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && + ((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) { code = nodesListStrictAppendList(pDst->pParameterList, ((SLogicConditionNode*)(*ppSrc))->pParameterList); ((SLogicConditionNode*)(*ppSrc))->pParameterList = NULL; } else { @@ -109,13 +109,12 @@ int32_t mergeJoinConds(SNode** ppDst, SNode** ppSrc) { *ppSrc = NULL; code = nodesListMakeStrictAppend(&pLogicCond->pParameterList, *ppDst); } - if 
(TSDB_CODE_SUCCESS == code) { + if (TSDB_CODE_SUCCESS == code) { *ppDst = (SNode*)pLogicCond; } return code; } - static int32_t callocNodeChunk(SNodeAllocator* pAllocator, SNodeMemChunk** pOutChunk) { SNodeMemChunk* pNewChunk = taosMemoryCalloc(1, sizeof(SNodeMemChunk) + pAllocator->chunkSize); if (NULL == pNewChunk) { @@ -155,11 +154,12 @@ static int32_t nodesCallocImpl(int32_t size, void** pOut) { void* p = g_pNodeAllocator->pCurrChunk->pBuf + g_pNodeAllocator->pCurrChunk->usedSize; g_pNodeAllocator->pCurrChunk->usedSize += size; *pOut = p; - return TSDB_CODE_SUCCESS;; + return TSDB_CODE_SUCCESS; + ; } static int32_t nodesCalloc(int32_t num, int32_t size, void** pOut) { - void* p = NULL; + void* p = NULL; int32_t code = nodesCallocImpl(num * size + 1, &p); if (TSDB_CODE_SUCCESS != code) { return code; @@ -237,7 +237,8 @@ void nodesDestroyAllocatorSet() { refId = pAllocator->self; int32_t code = taosRemoveRef(g_allocatorReqRefPool, refId); if (TSDB_CODE_SUCCESS != code) { - nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%"PRId64, __func__, __LINE__, g_allocatorReqRefPool, refId); + nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__, + g_allocatorReqRefPool, refId); } pAllocator = taosIterateRef(g_allocatorReqRefPool, refId); } @@ -333,7 +334,8 @@ void nodesDestroyAllocator(int64_t allocatorId) { int32_t code = taosRemoveRef(g_allocatorReqRefPool, allocatorId); if (TSDB_CODE_SUCCESS != code) { - nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%"PRId64, __func__, __LINE__, g_allocatorReqRefPool, allocatorId); + nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__, g_allocatorReqRefPool, + allocatorId); } } @@ -348,202 +350,297 @@ static int32_t makeNode(ENodeType type, int32_t size, SNode** ppNode) { } int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { - SNode* pNode = NULL; + SNode* pNode = NULL; int32_t code = 0; switch (type) { case QUERY_NODE_COLUMN: - code = makeNode(type, sizeof(SColumnNode), &pNode); break; + code = makeNode(type, sizeof(SColumnNode), &pNode); + break; case QUERY_NODE_VALUE: - code = makeNode(type, sizeof(SValueNode), &pNode); break; + code = makeNode(type, sizeof(SValueNode), &pNode); + break; case QUERY_NODE_OPERATOR: - code = makeNode(type, sizeof(SOperatorNode), &pNode); break; + code = makeNode(type, sizeof(SOperatorNode), &pNode); + break; case QUERY_NODE_LOGIC_CONDITION: - code = makeNode(type, sizeof(SLogicConditionNode), &pNode); break; + code = makeNode(type, sizeof(SLogicConditionNode), &pNode); + break; case QUERY_NODE_FUNCTION: - code = makeNode(type, sizeof(SFunctionNode), &pNode); break; + code = makeNode(type, sizeof(SFunctionNode), &pNode); + break; case QUERY_NODE_REAL_TABLE: - code = makeNode(type, sizeof(SRealTableNode), &pNode); break; + code = makeNode(type, sizeof(SRealTableNode), &pNode); + break; case QUERY_NODE_TEMP_TABLE: - code = makeNode(type, sizeof(STempTableNode), &pNode); break; + code = makeNode(type, sizeof(STempTableNode), &pNode); + break; case QUERY_NODE_JOIN_TABLE: - code = makeNode(type, sizeof(SJoinTableNode), &pNode); break; + code = makeNode(type, sizeof(SJoinTableNode), &pNode); + break; case QUERY_NODE_GROUPING_SET: - code = makeNode(type, sizeof(SGroupingSetNode), &pNode); break; + code = makeNode(type, sizeof(SGroupingSetNode), &pNode); + break; case QUERY_NODE_ORDER_BY_EXPR: - code = makeNode(type, sizeof(SOrderByExprNode), &pNode); break; + code = makeNode(type, sizeof(SOrderByExprNode), &pNode); + break; 
case QUERY_NODE_LIMIT: - code = makeNode(type, sizeof(SLimitNode), &pNode); break; + code = makeNode(type, sizeof(SLimitNode), &pNode); + break; case QUERY_NODE_STATE_WINDOW: - code = makeNode(type, sizeof(SStateWindowNode), &pNode); break; + code = makeNode(type, sizeof(SStateWindowNode), &pNode); + break; case QUERY_NODE_SESSION_WINDOW: - code = makeNode(type, sizeof(SSessionWindowNode), &pNode); break; + code = makeNode(type, sizeof(SSessionWindowNode), &pNode); + break; case QUERY_NODE_INTERVAL_WINDOW: - code = makeNode(type, sizeof(SIntervalWindowNode), &pNode); break; + code = makeNode(type, sizeof(SIntervalWindowNode), &pNode); + break; case QUERY_NODE_NODE_LIST: - code = makeNode(type, sizeof(SNodeListNode), &pNode); break; + code = makeNode(type, sizeof(SNodeListNode), &pNode); + break; case QUERY_NODE_FILL: - code = makeNode(type, sizeof(SFillNode), &pNode); break; + code = makeNode(type, sizeof(SFillNode), &pNode); + break; case QUERY_NODE_RAW_EXPR: - code = makeNode(type, sizeof(SRawExprNode), &pNode); break; + code = makeNode(type, sizeof(SRawExprNode), &pNode); + break; case QUERY_NODE_TARGET: - code = makeNode(type, sizeof(STargetNode), &pNode); break; + code = makeNode(type, sizeof(STargetNode), &pNode); + break; case QUERY_NODE_DATABLOCK_DESC: - code = makeNode(type, sizeof(SDataBlockDescNode), &pNode); break; + code = makeNode(type, sizeof(SDataBlockDescNode), &pNode); + break; case QUERY_NODE_SLOT_DESC: - code = makeNode(type, sizeof(SSlotDescNode), &pNode); break; + code = makeNode(type, sizeof(SSlotDescNode), &pNode); + break; case QUERY_NODE_COLUMN_DEF: - code = makeNode(type, sizeof(SColumnDefNode), &pNode); break; + code = makeNode(type, sizeof(SColumnDefNode), &pNode); + break; case QUERY_NODE_DOWNSTREAM_SOURCE: - code = makeNode(type, sizeof(SDownstreamSourceNode), &pNode); break; + code = makeNode(type, sizeof(SDownstreamSourceNode), &pNode); + break; case QUERY_NODE_DATABASE_OPTIONS: - code = makeNode(type, sizeof(SDatabaseOptions), &pNode); break; + code = makeNode(type, sizeof(SDatabaseOptions), &pNode); + break; case QUERY_NODE_TABLE_OPTIONS: - code = makeNode(type, sizeof(STableOptions), &pNode); break; + code = makeNode(type, sizeof(STableOptions), &pNode); + break; case QUERY_NODE_COLUMN_OPTIONS: - code = makeNode(type, sizeof(SColumnOptions), &pNode); break; + code = makeNode(type, sizeof(SColumnOptions), &pNode); + break; case QUERY_NODE_INDEX_OPTIONS: - code = makeNode(type, sizeof(SIndexOptions), &pNode); break; + code = makeNode(type, sizeof(SIndexOptions), &pNode); + break; case QUERY_NODE_EXPLAIN_OPTIONS: - code = makeNode(type, sizeof(SExplainOptions), &pNode); break; + code = makeNode(type, sizeof(SExplainOptions), &pNode); + break; case QUERY_NODE_STREAM_OPTIONS: - code = makeNode(type, sizeof(SStreamOptions), &pNode); break; + code = makeNode(type, sizeof(SStreamOptions), &pNode); + break; case QUERY_NODE_LEFT_VALUE: - code = makeNode(type, sizeof(SLeftValueNode), &pNode); break; + code = makeNode(type, sizeof(SLeftValueNode), &pNode); + break; case QUERY_NODE_COLUMN_REF: - code = makeNode(type, sizeof(SColumnRefNode), &pNode); break; + code = makeNode(type, sizeof(SColumnRefNode), &pNode); + break; case QUERY_NODE_WHEN_THEN: - code = makeNode(type, sizeof(SWhenThenNode), &pNode); break; + code = makeNode(type, sizeof(SWhenThenNode), &pNode); + break; case QUERY_NODE_CASE_WHEN: - code = makeNode(type, sizeof(SCaseWhenNode), &pNode); break; + code = makeNode(type, sizeof(SCaseWhenNode), &pNode); + break; case QUERY_NODE_EVENT_WINDOW: - code = 
makeNode(type, sizeof(SEventWindowNode), &pNode); break; + code = makeNode(type, sizeof(SEventWindowNode), &pNode); + break; case QUERY_NODE_COUNT_WINDOW: - code = makeNode(type, sizeof(SCountWindowNode), &pNode); break; + code = makeNode(type, sizeof(SCountWindowNode), &pNode); + break; case QUERY_NODE_ANOMALY_WINDOW: - code = makeNode(type, sizeof(SAnomalyWindowNode), &pNode); break; + code = makeNode(type, sizeof(SAnomalyWindowNode), &pNode); + break; case QUERY_NODE_HINT: - code = makeNode(type, sizeof(SHintNode), &pNode); break; + code = makeNode(type, sizeof(SHintNode), &pNode); + break; case QUERY_NODE_VIEW: - code = makeNode(type, sizeof(SViewNode), &pNode); break; + code = makeNode(type, sizeof(SViewNode), &pNode); + break; case QUERY_NODE_WINDOW_OFFSET: - code = makeNode(type, sizeof(SWindowOffsetNode), &pNode); break; + code = makeNode(type, sizeof(SWindowOffsetNode), &pNode); + break; case QUERY_NODE_SET_OPERATOR: - code = makeNode(type, sizeof(SSetOperator), &pNode); break; + code = makeNode(type, sizeof(SSetOperator), &pNode); + break; case QUERY_NODE_SELECT_STMT: - code = makeNode(type, sizeof(SSelectStmt), &pNode); break; + code = makeNode(type, sizeof(SSelectStmt), &pNode); + break; case QUERY_NODE_VNODE_MODIFY_STMT: - code = makeNode(type, sizeof(SVnodeModifyOpStmt), &pNode); break; + code = makeNode(type, sizeof(SVnodeModifyOpStmt), &pNode); + break; case QUERY_NODE_CREATE_DATABASE_STMT: - code = makeNode(type, sizeof(SCreateDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateDatabaseStmt), &pNode); + break; case QUERY_NODE_DROP_DATABASE_STMT: - code = makeNode(type, sizeof(SDropDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SDropDatabaseStmt), &pNode); + break; case QUERY_NODE_ALTER_DATABASE_STMT: - code = makeNode(type, sizeof(SAlterDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SAlterDatabaseStmt), &pNode); + break; case QUERY_NODE_FLUSH_DATABASE_STMT: - code = makeNode(type, sizeof(SFlushDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SFlushDatabaseStmt), &pNode); + break; case QUERY_NODE_TRIM_DATABASE_STMT: - code = makeNode(type, sizeof(STrimDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(STrimDatabaseStmt), &pNode); + break; case QUERY_NODE_S3MIGRATE_DATABASE_STMT: - code = makeNode(type, sizeof(SS3MigrateDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SS3MigrateDatabaseStmt), &pNode); + break; case QUERY_NODE_CREATE_TABLE_STMT: - code = makeNode(type, sizeof(SCreateTableStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateTableStmt), &pNode); + break; case QUERY_NODE_CREATE_SUBTABLE_CLAUSE: - code = makeNode(type, sizeof(SCreateSubTableClause), &pNode); break; + code = makeNode(type, sizeof(SCreateSubTableClause), &pNode); + break; case QUERY_NODE_CREATE_SUBTABLE_FROM_FILE_CLAUSE: - code = makeNode(type, sizeof(SCreateSubTableFromFileClause), &pNode); break; + code = makeNode(type, sizeof(SCreateSubTableFromFileClause), &pNode); + break; case QUERY_NODE_CREATE_MULTI_TABLES_STMT: - code = makeNode(type, sizeof(SCreateMultiTablesStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateMultiTablesStmt), &pNode); + break; case QUERY_NODE_DROP_TABLE_CLAUSE: - code = makeNode(type, sizeof(SDropTableClause), &pNode); break; + code = makeNode(type, sizeof(SDropTableClause), &pNode); + break; case QUERY_NODE_DROP_TABLE_STMT: - code = makeNode(type, sizeof(SDropTableStmt), &pNode); break; + code = makeNode(type, sizeof(SDropTableStmt), &pNode); + break; case 
QUERY_NODE_DROP_SUPER_TABLE_STMT: - code = makeNode(type, sizeof(SDropSuperTableStmt), &pNode); break; + code = makeNode(type, sizeof(SDropSuperTableStmt), &pNode); + break; case QUERY_NODE_ALTER_TABLE_STMT: case QUERY_NODE_ALTER_SUPER_TABLE_STMT: - code = makeNode(type, sizeof(SAlterTableStmt), &pNode); break; + code = makeNode(type, sizeof(SAlterTableStmt), &pNode); + break; + case QUERY_NODE_ALTER_TABLE_MULTI_STMT: + code = makeNode(type, sizeof(SAlterTableMultiStmt), &pNode); + break; case QUERY_NODE_CREATE_USER_STMT: - code = makeNode(type, sizeof(SCreateUserStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateUserStmt), &pNode); + break; case QUERY_NODE_ALTER_USER_STMT: - code = makeNode(type, sizeof(SAlterUserStmt), &pNode); break; + code = makeNode(type, sizeof(SAlterUserStmt), &pNode); + break; case QUERY_NODE_DROP_USER_STMT: - code = makeNode(type, sizeof(SDropUserStmt), &pNode); break; + code = makeNode(type, sizeof(SDropUserStmt), &pNode); + break; case QUERY_NODE_USE_DATABASE_STMT: - code = makeNode(type, sizeof(SUseDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SUseDatabaseStmt), &pNode); + break; case QUERY_NODE_CREATE_DNODE_STMT: - code = makeNode(type, sizeof(SCreateDnodeStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateDnodeStmt), &pNode); + break; case QUERY_NODE_DROP_DNODE_STMT: - code = makeNode(type, sizeof(SDropDnodeStmt), &pNode); break; + code = makeNode(type, sizeof(SDropDnodeStmt), &pNode); + break; case QUERY_NODE_ALTER_DNODE_STMT: - code = makeNode(type, sizeof(SAlterDnodeStmt), &pNode); break; + code = makeNode(type, sizeof(SAlterDnodeStmt), &pNode); + break; case QUERY_NODE_CREATE_ANODE_STMT: - code = makeNode(type, sizeof(SCreateAnodeStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateAnodeStmt), &pNode); + break; case QUERY_NODE_DROP_ANODE_STMT: - code = makeNode(type, sizeof(SDropAnodeStmt), &pNode); break; + code = makeNode(type, sizeof(SDropAnodeStmt), &pNode); + break; case QUERY_NODE_UPDATE_ANODE_STMT: - code = makeNode(type, sizeof(SUpdateAnodeStmt), &pNode); break; + code = makeNode(type, sizeof(SUpdateAnodeStmt), &pNode); + break; case QUERY_NODE_CREATE_INDEX_STMT: - code = makeNode(type, sizeof(SCreateIndexStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateIndexStmt), &pNode); + break; case QUERY_NODE_DROP_INDEX_STMT: - code = makeNode(type, sizeof(SDropIndexStmt), &pNode); break; + code = makeNode(type, sizeof(SDropIndexStmt), &pNode); + break; case QUERY_NODE_CREATE_QNODE_STMT: case QUERY_NODE_CREATE_BNODE_STMT: case QUERY_NODE_CREATE_SNODE_STMT: case QUERY_NODE_CREATE_MNODE_STMT: - code = makeNode(type, sizeof(SCreateComponentNodeStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateComponentNodeStmt), &pNode); + break; case QUERY_NODE_DROP_QNODE_STMT: case QUERY_NODE_DROP_BNODE_STMT: case QUERY_NODE_DROP_SNODE_STMT: case QUERY_NODE_DROP_MNODE_STMT: - code = makeNode(type, sizeof(SDropComponentNodeStmt), &pNode); break; + code = makeNode(type, sizeof(SDropComponentNodeStmt), &pNode); + break; case QUERY_NODE_CREATE_TOPIC_STMT: - code = makeNode(type, sizeof(SCreateTopicStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateTopicStmt), &pNode); + break; case QUERY_NODE_DROP_TOPIC_STMT: - code = makeNode(type, sizeof(SDropTopicStmt), &pNode); break; + code = makeNode(type, sizeof(SDropTopicStmt), &pNode); + break; case QUERY_NODE_DROP_CGROUP_STMT: - code = makeNode(type, sizeof(SDropCGroupStmt), &pNode); break; + code = makeNode(type, sizeof(SDropCGroupStmt), &pNode); + break; case 
QUERY_NODE_ALTER_LOCAL_STMT: - code = makeNode(type, sizeof(SAlterLocalStmt), &pNode); break; + code = makeNode(type, sizeof(SAlterLocalStmt), &pNode); + break; case QUERY_NODE_EXPLAIN_STMT: - code = makeNode(type, sizeof(SExplainStmt), &pNode); break; + code = makeNode(type, sizeof(SExplainStmt), &pNode); + break; case QUERY_NODE_DESCRIBE_STMT: - code = makeNode(type, sizeof(SDescribeStmt), &pNode); break; + code = makeNode(type, sizeof(SDescribeStmt), &pNode); + break; case QUERY_NODE_RESET_QUERY_CACHE_STMT: - code = makeNode(type, sizeof(SNode), &pNode); break; + code = makeNode(type, sizeof(SNode), &pNode); + break; case QUERY_NODE_COMPACT_DATABASE_STMT: - code = makeNode(type, sizeof(SCompactDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SCompactDatabaseStmt), &pNode); + break; case QUERY_NODE_CREATE_FUNCTION_STMT: - code = makeNode(type, sizeof(SCreateFunctionStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateFunctionStmt), &pNode); + break; case QUERY_NODE_DROP_FUNCTION_STMT: - code = makeNode(type, sizeof(SDropFunctionStmt), &pNode); break; + code = makeNode(type, sizeof(SDropFunctionStmt), &pNode); + break; case QUERY_NODE_CREATE_STREAM_STMT: - code = makeNode(type, sizeof(SCreateStreamStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateStreamStmt), &pNode); + break; case QUERY_NODE_DROP_STREAM_STMT: - code = makeNode(type, sizeof(SDropStreamStmt), &pNode); break; + code = makeNode(type, sizeof(SDropStreamStmt), &pNode); + break; case QUERY_NODE_PAUSE_STREAM_STMT: - code = makeNode(type, sizeof(SPauseStreamStmt), &pNode); break; + code = makeNode(type, sizeof(SPauseStreamStmt), &pNode); + break; case QUERY_NODE_RESUME_STREAM_STMT: - code = makeNode(type, sizeof(SResumeStreamStmt), &pNode); break; + code = makeNode(type, sizeof(SResumeStreamStmt), &pNode); + break; case QUERY_NODE_BALANCE_VGROUP_STMT: - code = makeNode(type, sizeof(SBalanceVgroupStmt), &pNode); break; + code = makeNode(type, sizeof(SBalanceVgroupStmt), &pNode); + break; case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT: - code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode); break; + code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode); + break; case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT: - code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode); break; + code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode); + break; case QUERY_NODE_MERGE_VGROUP_STMT: - code = makeNode(type, sizeof(SMergeVgroupStmt), &pNode); break; + code = makeNode(type, sizeof(SMergeVgroupStmt), &pNode); + break; case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT: - code = makeNode(type, sizeof(SRedistributeVgroupStmt), &pNode); break; + code = makeNode(type, sizeof(SRedistributeVgroupStmt), &pNode); + break; case QUERY_NODE_SPLIT_VGROUP_STMT: - code = makeNode(type, sizeof(SSplitVgroupStmt), &pNode); break; + code = makeNode(type, sizeof(SSplitVgroupStmt), &pNode); + break; case QUERY_NODE_SYNCDB_STMT: break; case QUERY_NODE_GRANT_STMT: - code = makeNode(type, sizeof(SGrantStmt), &pNode); break; + code = makeNode(type, sizeof(SGrantStmt), &pNode); + break; case QUERY_NODE_REVOKE_STMT: - code = makeNode(type, sizeof(SRevokeStmt), &pNode); break; + code = makeNode(type, sizeof(SRevokeStmt), &pNode); + break; case QUERY_NODE_ALTER_CLUSTER_STMT: - code = makeNode(type, sizeof(SAlterClusterStmt), &pNode); break; + code = makeNode(type, sizeof(SAlterClusterStmt), &pNode); + break; case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: case QUERY_NODE_SHOW_MODULES_STMT: @@ 
-583,191 +680,280 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { case QUERY_NODE_SHOW_CLUSTER_MACHINES_STMT: case QUERY_NODE_SHOW_ENCRYPTIONS_STMT: case QUERY_NODE_SHOW_TSMAS_STMT: - code = makeNode(type, sizeof(SShowStmt), &pNode); break; + code = makeNode(type, sizeof(SShowStmt), &pNode); + break; case QUERY_NODE_SHOW_TABLE_TAGS_STMT: - code = makeNode(type, sizeof(SShowTableTagsStmt), &pNode); break; + code = makeNode(type, sizeof(SShowTableTagsStmt), &pNode); + break; case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT: - code = makeNode(type, sizeof(SShowDnodeVariablesStmt), &pNode); break; + code = makeNode(type, sizeof(SShowDnodeVariablesStmt), &pNode); + break; case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: - code = makeNode(type, sizeof(SShowCreateDatabaseStmt), &pNode); break; + code = makeNode(type, sizeof(SShowCreateDatabaseStmt), &pNode); + break; case QUERY_NODE_SHOW_DB_ALIVE_STMT: case QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT: - code = makeNode(type, sizeof(SShowAliveStmt), &pNode); break; + code = makeNode(type, sizeof(SShowAliveStmt), &pNode); + break; case QUERY_NODE_SHOW_CREATE_TABLE_STMT: case QUERY_NODE_SHOW_CREATE_STABLE_STMT: - code = makeNode(type, sizeof(SShowCreateTableStmt), &pNode); break; + code = makeNode(type, sizeof(SShowCreateTableStmt), &pNode); + break; case QUERY_NODE_SHOW_CREATE_VIEW_STMT: - code = makeNode(type, sizeof(SShowCreateViewStmt), &pNode); break; + code = makeNode(type, sizeof(SShowCreateViewStmt), &pNode); + break; case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT: - code = makeNode(type, sizeof(SShowTableDistributedStmt), &pNode); break; + code = makeNode(type, sizeof(SShowTableDistributedStmt), &pNode); + break; case QUERY_NODE_SHOW_COMPACTS_STMT: - code = makeNode(type, sizeof(SShowCompactsStmt), &pNode); break; + code = makeNode(type, sizeof(SShowCompactsStmt), &pNode); + break; case QUERY_NODE_SHOW_COMPACT_DETAILS_STMT: - code = makeNode(type, sizeof(SShowCompactDetailsStmt), &pNode); break; + code = makeNode(type, sizeof(SShowCompactDetailsStmt), &pNode); + break; case QUERY_NODE_KILL_QUERY_STMT: - code = makeNode(type, sizeof(SKillQueryStmt), &pNode); break; + code = makeNode(type, sizeof(SKillQueryStmt), &pNode); + break; case QUERY_NODE_KILL_TRANSACTION_STMT: case QUERY_NODE_KILL_CONNECTION_STMT: case QUERY_NODE_KILL_COMPACT_STMT: - code = makeNode(type, sizeof(SKillStmt), &pNode); break; + code = makeNode(type, sizeof(SKillStmt), &pNode); + break; case QUERY_NODE_DELETE_STMT: - code = makeNode(type, sizeof(SDeleteStmt), &pNode); break; + code = makeNode(type, sizeof(SDeleteStmt), &pNode); + break; case QUERY_NODE_INSERT_STMT: - code = makeNode(type, sizeof(SInsertStmt), &pNode); break; + code = makeNode(type, sizeof(SInsertStmt), &pNode); + break; case QUERY_NODE_QUERY: - code = makeNode(type, sizeof(SQuery), &pNode); break; + code = makeNode(type, sizeof(SQuery), &pNode); + break; case QUERY_NODE_RESTORE_DNODE_STMT: case QUERY_NODE_RESTORE_QNODE_STMT: case QUERY_NODE_RESTORE_MNODE_STMT: case QUERY_NODE_RESTORE_VNODE_STMT: - code = makeNode(type, sizeof(SRestoreComponentNodeStmt), &pNode); break; + code = makeNode(type, sizeof(SRestoreComponentNodeStmt), &pNode); + break; case QUERY_NODE_CREATE_VIEW_STMT: - code = makeNode(type, sizeof(SCreateViewStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateViewStmt), &pNode); + break; case QUERY_NODE_DROP_VIEW_STMT: - code = makeNode(type, sizeof(SDropViewStmt), &pNode); break; + code = makeNode(type, sizeof(SDropViewStmt), &pNode); + break; case QUERY_NODE_CREATE_TSMA_STMT: - code = 
makeNode(type, sizeof(SCreateTSMAStmt), &pNode); break; + code = makeNode(type, sizeof(SCreateTSMAStmt), &pNode); + break; case QUERY_NODE_DROP_TSMA_STMT: - code = makeNode(type, sizeof(SDropTSMAStmt), &pNode); break; + code = makeNode(type, sizeof(SDropTSMAStmt), &pNode); + break; case QUERY_NODE_TSMA_OPTIONS: - code = makeNode(type, sizeof(STSMAOptions), &pNode); break; + code = makeNode(type, sizeof(STSMAOptions), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_SCAN: - code = makeNode(type, sizeof(SScanLogicNode), &pNode); break; + code = makeNode(type, sizeof(SScanLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_JOIN: - code = makeNode(type, sizeof(SJoinLogicNode), &pNode); break; + code = makeNode(type, sizeof(SJoinLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_AGG: - code = makeNode(type, sizeof(SAggLogicNode), &pNode); break; + code = makeNode(type, sizeof(SAggLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_PROJECT: - code = makeNode(type, sizeof(SProjectLogicNode), &pNode); break; + code = makeNode(type, sizeof(SProjectLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY: - code = makeNode(type, sizeof(SVnodeModifyLogicNode), &pNode); break; + code = makeNode(type, sizeof(SVnodeModifyLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_EXCHANGE: - code = makeNode(type, sizeof(SExchangeLogicNode), &pNode); break; + code = makeNode(type, sizeof(SExchangeLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_MERGE: - code = makeNode(type, sizeof(SMergeLogicNode), &pNode); break; + code = makeNode(type, sizeof(SMergeLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_WINDOW: - code = makeNode(type, sizeof(SWindowLogicNode), &pNode); break; + code = makeNode(type, sizeof(SWindowLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_FILL: - code = makeNode(type, sizeof(SFillLogicNode), &pNode); break; + code = makeNode(type, sizeof(SFillLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_SORT: - code = makeNode(type, sizeof(SSortLogicNode), &pNode); break; + code = makeNode(type, sizeof(SSortLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_PARTITION: - code = makeNode(type, sizeof(SPartitionLogicNode), &pNode); break; + code = makeNode(type, sizeof(SPartitionLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC: - code = makeNode(type, sizeof(SIndefRowsFuncLogicNode), &pNode); break; + code = makeNode(type, sizeof(SIndefRowsFuncLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_INTERP_FUNC: - code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode); break; + code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_FORECAST_FUNC: - code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode); break; + code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_GROUP_CACHE: - code = makeNode(type, sizeof(SGroupCacheLogicNode), &pNode); break; + code = makeNode(type, sizeof(SGroupCacheLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_PLAN_DYN_QUERY_CTRL: - code = makeNode(type, sizeof(SDynQueryCtrlLogicNode), &pNode); break; + code = makeNode(type, sizeof(SDynQueryCtrlLogicNode), &pNode); + break; case QUERY_NODE_LOGIC_SUBPLAN: - code = makeNode(type, sizeof(SLogicSubplan), &pNode); break; + code = makeNode(type, sizeof(SLogicSubplan), &pNode); + break; case QUERY_NODE_LOGIC_PLAN: - code = makeNode(type, sizeof(SQueryLogicPlan), &pNode); break; + code = makeNode(type, sizeof(SQueryLogicPlan), &pNode); + break; case 
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: - code = makeNode(type, sizeof(STagScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(STagScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: - code = makeNode(type, sizeof(STableScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(STableScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN: - code = makeNode(type, sizeof(STableSeqScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(STableSeqScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN: - code = makeNode(type, sizeof(STableMergeScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(STableMergeScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN: - code = makeNode(type, sizeof(SStreamScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN: - code = makeNode(type, sizeof(SSystemTableScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SSystemTableScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: - code = makeNode(type, sizeof(SBlockDistScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SBlockDistScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: - code = makeNode(type, sizeof(SLastRowScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SLastRowScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: - code = makeNode(type, sizeof(STableCountScanPhysiNode), &pNode); break; + code = makeNode(type, sizeof(STableCountScanPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_PROJECT: - code = makeNode(type, sizeof(SProjectPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SProjectPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: - code = makeNode(type, sizeof(SSortMergeJoinPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SSortMergeJoinPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN: - code = makeNode(type, sizeof(SHashJoinPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SHashJoinPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: - code = makeNode(type, sizeof(SAggPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SAggPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: - code = makeNode(type, sizeof(SExchangePhysiNode), &pNode); break; + code = makeNode(type, sizeof(SExchangePhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE: - code = makeNode(type, sizeof(SMergePhysiNode), &pNode); break; + code = makeNode(type, sizeof(SMergePhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_SORT: - code = makeNode(type, sizeof(SSortPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SSortPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: - code = makeNode(type, sizeof(SGroupSortPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SGroupSortPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL: - code = makeNode(type, sizeof(SIntervalPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SIntervalPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: - code = makeNode(type, sizeof(SMergeIntervalPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SMergeIntervalPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: - code = 
makeNode(type, sizeof(SMergeAlignedIntervalPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SMergeAlignedIntervalPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: - code = makeNode(type, sizeof(SStreamIntervalPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamIntervalPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: - code = makeNode(type, sizeof(SStreamFinalIntervalPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamFinalIntervalPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL: - code = makeNode(type, sizeof(SStreamSemiIntervalPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamSemiIntervalPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL: - code = makeNode(type, sizeof(SStreamMidIntervalPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamMidIntervalPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_FILL: case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL: - code = makeNode(type, sizeof(SFillPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SFillPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: - code = makeNode(type, sizeof(SSessionWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SSessionWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: - code = makeNode(type, sizeof(SStreamSessionWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamSessionWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: - code = makeNode(type, sizeof(SStreamSemiSessionWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamSemiSessionWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: - code = makeNode(type, sizeof(SStreamFinalSessionWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamFinalSessionWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: - code = makeNode(type, sizeof(SStateWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStateWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: - code = makeNode(type, sizeof(SStreamStateWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamStateWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT: - code = makeNode(type, sizeof(SEventWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SEventWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT: - code = makeNode(type, sizeof(SStreamEventWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamEventWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT: - code = makeNode(type, sizeof(SCountWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SCountWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY: - code = makeNode(type, sizeof(SAnomalyWindowPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SAnomalyWindowPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT: - code = makeNode(type, sizeof(SStreamCountWinodwPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamCountWinodwPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_PARTITION: - code = makeNode(type, sizeof(SPartitionPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SPartitionPhysiNode), &pNode); 
+ break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: - code = makeNode(type, sizeof(SStreamPartitionPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamPartitionPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC: - code = makeNode(type, sizeof(SIndefRowsFuncPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SIndefRowsFuncPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: - code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode); break; + code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC: - code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode); break; + code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: - code = makeNode(type, sizeof(SDataDispatcherNode), &pNode); break; + code = makeNode(type, sizeof(SDataDispatcherNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_INSERT: - code = makeNode(type, sizeof(SDataInserterNode), &pNode); break; + code = makeNode(type, sizeof(SDataInserterNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT: - code = makeNode(type, sizeof(SQueryInserterNode), &pNode); break; + code = makeNode(type, sizeof(SQueryInserterNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_DELETE: - code = makeNode(type, sizeof(SDataDeleterNode), &pNode); break; + code = makeNode(type, sizeof(SDataDeleterNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE: - code = makeNode(type, sizeof(SGroupCachePhysiNode), &pNode); break; + code = makeNode(type, sizeof(SGroupCachePhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL: - code = makeNode(type, sizeof(SDynQueryCtrlPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SDynQueryCtrlPhysiNode), &pNode); + break; case QUERY_NODE_PHYSICAL_SUBPLAN: - code = makeNode(type, sizeof(SSubplan), &pNode); break; + code = makeNode(type, sizeof(SSubplan), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN: - code = makeNode(type, sizeof(SQueryPlan), &pNode); break; + code = makeNode(type, sizeof(SQueryPlan), &pNode); + break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: - code = makeNode(type, sizeof(SStreamInterpFuncPhysiNode), &pNode); break; + code = makeNode(type, sizeof(SStreamInterpFuncPhysiNode), &pNode); + break; default: break; } @@ -957,7 +1143,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_DATABLOCK_DESC: nodesDestroyList(((SDataBlockDescNode*)pNode)->pSlots); break; - case QUERY_NODE_SLOT_DESC: // no pointer field + case QUERY_NODE_SLOT_DESC: // no pointer field break; case QUERY_NODE_COLUMN_DEF: nodesDestroyNode(((SColumnDefNode*)pNode)->pOptions); @@ -1138,7 +1324,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_FLUSH_DATABASE_STMT: // no pointer field case QUERY_NODE_TRIM_DATABASE_STMT: // no pointer field break; - case QUERY_NODE_S3MIGRATE_DATABASE_STMT: // no pointer field + case QUERY_NODE_S3MIGRATE_DATABASE_STMT: // no pointer field break; case QUERY_NODE_CREATE_TABLE_STMT: { SCreateTableStmt* pStmt = (SCreateTableStmt*)pNode; @@ -1176,6 +1362,13 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode((SNode*)pStmt->pVal); break; } + case QUERY_NODE_ALTER_TABLE_MULTI_STMT: { + SAlterTableMultiStmt* pStmt = (SAlterTableMultiStmt*)pNode; + // nodesDestroyList(pStmt->pTables); + // nodesDestroyNode((SNode*)pStmt->pOptions); + // nodesDestroyNode((SNode*)pStmt->pVal); + break; + } case QUERY_NODE_CREATE_USER_STMT: { SCreateUserStmt* pStmt = 
(SCreateUserStmt*)pNode; taosMemoryFree(pStmt->pIpRanges); @@ -1254,13 +1447,13 @@ void nodesDestroyNode(SNode* pNode) { taosMemoryFreeClear(pStmt->pReq); break; } - case QUERY_NODE_DROP_STREAM_STMT: // no pointer field - case QUERY_NODE_PAUSE_STREAM_STMT: // no pointer field - case QUERY_NODE_RESUME_STREAM_STMT: // no pointer field - case QUERY_NODE_BALANCE_VGROUP_STMT: // no pointer field - case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT: // no pointer field + case QUERY_NODE_DROP_STREAM_STMT: // no pointer field + case QUERY_NODE_PAUSE_STREAM_STMT: // no pointer field + case QUERY_NODE_RESUME_STREAM_STMT: // no pointer field + case QUERY_NODE_BALANCE_VGROUP_STMT: // no pointer field + case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT: // no pointer field case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT: // no pointer field - case QUERY_NODE_MERGE_VGROUP_STMT: // no pointer field + case QUERY_NODE_MERGE_VGROUP_STMT: // no pointer field break; case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT: nodesDestroyList(((SRedistributeVgroupStmt*)pNode)->pDnodes); @@ -1274,7 +1467,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_REVOKE_STMT: nodesDestroyNode(((SRevokeStmt*)pNode)->pTagCond); break; - case QUERY_NODE_ALTER_CLUSTER_STMT: // no pointer field + case QUERY_NODE_ALTER_CLUSTER_STMT: // no pointer field break; case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: @@ -1387,12 +1580,12 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pQuery->pPrepareRoot); break; } - case QUERY_NODE_RESTORE_DNODE_STMT: // no pointer field - case QUERY_NODE_RESTORE_QNODE_STMT: // no pointer field - case QUERY_NODE_RESTORE_MNODE_STMT: // no pointer field - case QUERY_NODE_RESTORE_VNODE_STMT: // no pointer field + case QUERY_NODE_RESTORE_DNODE_STMT: // no pointer field + case QUERY_NODE_RESTORE_QNODE_STMT: // no pointer field + case QUERY_NODE_RESTORE_MNODE_STMT: // no pointer field + case QUERY_NODE_RESTORE_VNODE_STMT: // no pointer field break; - case QUERY_NODE_CREATE_VIEW_STMT: { + case QUERY_NODE_CREATE_VIEW_STMT: { SCreateViewStmt* pStmt = (SCreateViewStmt*)pNode; taosMemoryFree(pStmt->pQuerySql); tFreeSCMCreateViewReq(&pStmt->createReq); @@ -1409,7 +1602,7 @@ void nodesDestroyNode(SNode* pNode) { taosMemoryFreeClear(pStmt->pReq); } break; - } + } case QUERY_NODE_LOGIC_PLAN_SCAN: { SScanLogicNode* pLogicNode = (SScanLogicNode*)pNode; destroyLogicNode((SLogicNode*)pLogicNode); @@ -1800,7 +1993,7 @@ void nodesDestroyNode(SNode* pNode) { int32_t nodesMakeList(SNodeList** ppListOut) { SNodeList* p = NULL; - int32_t code = nodesCalloc(1, sizeof(SNodeList), (void**)&p); + int32_t code = nodesCalloc(1, sizeof(SNodeList), (void**)&p); if (TSDB_CODE_SUCCESS == code) { *ppListOut = p; } @@ -1812,7 +2005,7 @@ int32_t nodesListAppend(SNodeList* pList, SNode* pNode) { return TSDB_CODE_FAILED; } SListCell* p = NULL; - int32_t code = nodesCalloc(1, sizeof(SListCell), (void**)&p); + int32_t code = nodesCalloc(1, sizeof(SListCell), (void**)&p); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -1891,7 +2084,6 @@ int32_t nodesListStrictAppendList(SNodeList* pTarget, SNodeList* pSrc) { return code; } - int32_t nodesListMakeStrictAppendList(SNodeList** pTarget, SNodeList* pSrc) { if (NULL == *pTarget) { int32_t code = nodesMakeList(pTarget); @@ -1902,7 +2094,7 @@ int32_t nodesListMakeStrictAppendList(SNodeList** pTarget, SNodeList* pSrc) { return nodesListStrictAppendList(*pTarget, pSrc); } -int32_t nodesListMakePushFront(SNodeList** pList, SNode* pNode) { +int32_t nodesListMakePushFront(SNodeList** pList, 
SNode* pNode) { if (*pList == NULL) { int32_t code = nodesMakeList(pList); if (*pList == NULL) { @@ -1917,7 +2109,7 @@ int32_t nodesListPushFront(SNodeList* pList, SNode* pNode) { return TSDB_CODE_FAILED; } SListCell* p = NULL; - int32_t code = nodesCalloc(1, sizeof(SListCell), (void**)&p); + int32_t code = nodesCalloc(1, sizeof(SListCell), (void**)&p); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -2350,7 +2542,8 @@ static EDealRes doCollect(SCollectColumnsCxt* pCxt, SColumnNode* pCol, SNode* pN static bool isCollectType(ECollectColType collectType, EColumnType colType) { return COLLECT_COL_TYPE_ALL == collectType ? true - : (COLLECT_COL_TYPE_TAG == collectType ? COLUMN_TYPE_TAG == colType : (COLUMN_TYPE_TAG != colType && COLUMN_TYPE_TBNAME != colType)); + : (COLLECT_COL_TYPE_TAG == collectType ? COLUMN_TYPE_TAG == colType + : (COLUMN_TYPE_TAG != colType && COLUMN_TYPE_TBNAME != colType)); } static EDealRes collectColumns(SNode* pNode, void* pContext) { @@ -2370,7 +2563,9 @@ static EDealRes collectColumnsExt(SNode* pNode, void* pContext) { if (QUERY_NODE_COLUMN == nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; if (isCollectType(pCxt->collectType, pCol->colType) && 0 != strcmp(pCol->colName, "*") && - (NULL == pCxt->pMultiTableAlias || NULL != (pCxt->pTableAlias = tSimpleHashGet(pCxt->pMultiTableAlias, pCol->tableAlias, strlen(pCol->tableAlias))))) { + (NULL == pCxt->pMultiTableAlias || + NULL != (pCxt->pTableAlias = + tSimpleHashGet(pCxt->pMultiTableAlias, pCol->tableAlias, strlen(pCol->tableAlias))))) { return doCollect(pCxt, pCol, pNode); } } @@ -2382,7 +2577,7 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* if (NULL == pSelect || NULL == pCols) { return TSDB_CODE_FAILED; } - SNodeList * pList = NULL; + SNodeList* pList = NULL; if (!*pCols) { int32_t code = nodesMakeList(&pList); if (TSDB_CODE_SUCCESS != code) { @@ -2414,13 +2609,13 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* return TSDB_CODE_SUCCESS; } -int32_t nodesCollectColumnsExt(SSelectStmt* pSelect, ESqlClause clause, SSHashObj* pMultiTableAlias, ECollectColType type, - SNodeList** pCols) { +int32_t nodesCollectColumnsExt(SSelectStmt* pSelect, ESqlClause clause, SSHashObj* pMultiTableAlias, + ECollectColType type, SNodeList** pCols) { if (NULL == pSelect || NULL == pCols) { return TSDB_CODE_FAILED; } - SNodeList * pList = NULL; + SNodeList* pList = NULL; if (!*pCols) { int32_t code = nodesMakeList(&pList); if (TSDB_CODE_SUCCESS != code) { @@ -2458,7 +2653,7 @@ int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, EColle if (NULL == pCols) { return TSDB_CODE_FAILED; } - SNodeList * pList = NULL; + SNodeList* pList = NULL; if (!*pCols) { int32_t code = nodesMakeList(&pList); if (TSDB_CODE_SUCCESS != code) { @@ -2511,7 +2706,7 @@ static EDealRes collectFuncs(SNode* pNode, void* pContext) { } } - bool bFound = false; + bool bFound = false; SNode* pn = NULL; FOREACH(pn, pCxt->pFuncs) { if (nodesEqualNode(pn, pNode)) { @@ -2543,21 +2738,21 @@ static int32_t funcNodeEqual(const void* pLeft, const void* pRight, size_t len) return nodesEqualNode(*(const SNode**)pLeft, *(const SNode**)pRight) ? 
0 : 1; } -int32_t nodesCollectSelectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList* pFuncs) { +int32_t nodesCollectSelectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, + SNodeList* pFuncs) { if (NULL == pSelect || NULL == pFuncs) { return TSDB_CODE_FAILED; } - SCollectFuncsCxt cxt = {.errCode = TSDB_CODE_SUCCESS, - .classifier = classifier, - .tableAlias = tableAlias, - .pFuncs = pFuncs}; + SCollectFuncsCxt cxt = { + .errCode = TSDB_CODE_SUCCESS, .classifier = classifier, .tableAlias = tableAlias, .pFuncs = pFuncs}; nodesWalkSelectStmt(pSelect, clause, collectFuncs, &cxt); return cxt.errCode; } -int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs) { +int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, + SNodeList** pFuncs) { if (NULL == pSelect || NULL == pFuncs) { return TSDB_CODE_FAILED; } @@ -2783,7 +2978,7 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc) { nodesClearList(*pSrc); } else { SLogicConditionNode* pLogicCond = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION, (SNode**)&pLogicCond); + int32_t code = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION, (SNode**)&pLogicCond); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -2815,13 +3010,13 @@ const char* dataOrderStr(EDataOrderLevel order) { } int32_t nodesMakeValueNodeFromString(char* literal, SValueNode** ppValNode) { - int32_t lenStr = strlen(literal); + int32_t lenStr = strlen(literal); SValueNode* pValNode = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValNode); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValNode); if (pValNode) { pValNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR; pValNode->node.resType.bytes = lenStr + VARSTR_HEADER_SIZE; - char* p = taosMemoryMalloc(lenStr + 1 + VARSTR_HEADER_SIZE); + char* p = taosMemoryMalloc(lenStr + 1 + VARSTR_HEADER_SIZE); if (p == NULL) { return terrno; } @@ -2838,7 +3033,7 @@ int32_t nodesMakeValueNodeFromString(char* literal, SValueNode** ppValNode) { int32_t nodesMakeValueNodeFromBool(bool b, SValueNode** ppValNode) { SValueNode* pValNode = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValNode); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValNode); if (TSDB_CODE_SUCCESS == code) { pValNode->node.resType.type = TSDB_DATA_TYPE_BOOL; pValNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes; @@ -2856,7 +3051,7 @@ int32_t nodesMakeValueNodeFromBool(bool b, SValueNode** ppValNode) { int32_t nodesMakeValueNodeFromInt32(int32_t value, SNode** ppNode) { SValueNode* pValNode = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValNode); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValNode); if (TSDB_CODE_SUCCESS == code) { pValNode->node.resType.type = TSDB_DATA_TYPE_INT; pValNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes; @@ -2885,7 +3080,7 @@ bool nodesIsTableStar(SNode* pNode) { void nodesSortList(SNodeList** pList, int32_t (*comp)(SNode* pNode1, SNode* pNode2)) { if ((*pList)->length == 1) return; - uint32_t inSize = 1; + uint32_t inSize = 1; SListCell* pHead = (*pList)->pHead; while (1) { SListCell* p = pHead; @@ -2896,7 +3091,7 @@ void nodesSortList(SNodeList** pList, int32_t (*comp)(SNode* pNode1, SNode* pNod while (p) { ++nMerges; SListCell* q = p; - uint32_t pSize = 0; + uint32_t pSize = 0; 
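/*
 * Aside: the nodesSortList hunks around this point are re-indentation only.
 * For readers new to the pattern, here is a minimal standalone sketch of the
 * same bottom-up (iterative) merge sort over a singly linked list -- inSize
 * doubles each pass and nMerges counts how many runs were merged. Cell and
 * cmpVal are hypothetical stand-ins, not TDengine types.
 */
#include <stdio.h>

typedef struct Cell { int val; struct Cell *next; } Cell;

static Cell *sortList(Cell *head, int (*cmp)(const Cell *, const Cell *)) {
  if (head == NULL || head->next == NULL) return head;
  unsigned inSize = 1;
  for (;;) {
    Cell    *p = head, *tail = NULL;
    unsigned nMerges = 0;
    head = NULL;
    while (p) {
      ++nMerges;
      Cell    *q = p;
      unsigned pSize = 0;
      for (unsigned i = 0; i < inSize && q; ++i) {  /* split off the left run */
        ++pSize;
        q = q->next;
      }
      unsigned qSize = inSize;
      while (pSize > 0 || (qSize > 0 && q)) {       /* merge the two runs */
        Cell *e;
        if (pSize == 0) {
          e = q; q = q->next; --qSize;
        } else if (qSize == 0 || q == NULL) {
          e = p; p = p->next; --pSize;
        } else if (cmp(p, q) <= 0) {
          e = p; p = p->next; --pSize;
        } else {
          e = q; q = q->next; --qSize;
        }
        if (tail) tail->next = e; else head = e;
        tail = e;
      }
      p = q;                                        /* continue with the next pair of runs */
    }
    tail->next = NULL;
    if (nMerges <= 1) return head;                  /* a single run left: list is sorted */
    inSize *= 2;
  }
}

static int cmpVal(const Cell *a, const Cell *b) { return a->val - b->val; }

int main(void) {
  Cell c[5] = {{3, &c[1]}, {1, &c[2]}, {4, &c[3]}, {1, &c[4]}, {5, NULL}};
  for (Cell *p = sortList(&c[0], cmpVal); p != NULL; p = p->next) printf("%d ", p->val);
  printf("\n");
  return 0;
}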
for (uint32_t i = 0; i < inSize; ++i) { ++pSize; q = q->pNext; @@ -2971,7 +3166,7 @@ int32_t nodesListDeduplicate(SNodeList** ppList) { return TSDB_CODE_SUCCESS; } SNodeList* pTmp = NULL; - int32_t code = nodesMakeList(&pTmp); + int32_t code = nodesMakeList(&pTmp); if (TSDB_CODE_SUCCESS == code) { SNode* pNode = NULL; FOREACH(pNode, *ppList) { diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 3caa8da80f..fb0529e6d1 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -165,6 +165,7 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint); SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen); SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse); +SNode* createAlterSingleTagColumnNode(SAstCreateContext* pCtx, SToken* token, SNode* pVal); SNode* addWhereClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWhere); SNode* addPartitionByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pPartitionByList); @@ -228,6 +229,7 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_ SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName, SToken* pNewColName); SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal); +SNode* createAlterTableSetMultiTagValue(SAstCreateContext* pCxt, SNode* pRealTable, SNodeList* singleNode); SNode* setAlterSuperTableType(SNode* pStmt); SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName); SNode* setShowKind(SAstCreateContext* pCxt, SNode* pStmt, EShowKind showKind); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index e1c3456e3f..9c76aa7f7a 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -370,6 +370,7 @@ cmd ::= DROP STABLE with_opt(A) exists_opt(B) full_table_name(C). cmd ::= ALTER TABLE alter_table_clause(A). { pCxt->pRootNode = A; } cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode = setAlterSuperTableType(A); } + alter_table_clause(A) ::= full_table_name(B) alter_table_options(C). { A = createAlterTableModifyOptions(pCxt, B, C); } alter_table_clause(A) ::= full_table_name(B) ADD COLUMN column_name(C) type_name(D) column_options(E). { A = createAlterTableAddModifyColOptions2(pCxt, B, TSDB_ALTER_TABLE_ADD_COLUMN, &C, D, E); } @@ -387,8 +388,16 @@ alter_table_clause(A) ::= full_table_name(B) MODIFY TAG column_name(C) type_name(D). { A = createAlterTableAddModifyCol(pCxt, B, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &C, D); } alter_table_clause(A) ::= full_table_name(B) RENAME TAG column_name(C) column_name(D). { A = createAlterTableRenameCol(pCxt, B, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &C, &D); } + + +%type column_eq_value_list { SNodeList* } +%destructor column_eq_value_list { nodesDestroyList($$); } +column_eq_value(A) ::= column_name(C) NK_EQ tags_literal(D). { A = createAlterSingleTagColumnNode(pCxt, &C, D); } +column_eq_value_list(A) ::= column_eq_value(B). { A = createNodeList(pCxt, B); } +column_eq_value_list(A) ::= column_eq_value_list(B) NK_COMMA column_eq_value(C). { A = addNodeToList(pCxt, B, C);} + alter_table_clause(A) ::= - full_table_name(B) SET TAG column_name(C) NK_EQ tags_literal(D). { A = createAlterTableSetTag(pCxt, B, &C, D); } + full_table_name(B) SET TAG column_eq_value_list(C). 
{ A = createAlterTableSetMultiTagValue(pCxt, B, C); } %type multi_create_clause { SNodeList* } %destructor multi_create_clause { nodesDestroyList($$); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 1a5e3444c0..b41dad0b18 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1976,7 +1976,7 @@ static SNode* setDatabaseOptionImpl(SAstCreateContext* pCxt, SNode* pOptions, ED case DB_OPTION_S3_COMPACT: pDbOptions->s3Compact = taosStr2Int8(((SToken*)pVal)->z, NULL, 10); break; - case DB_OPTION_KEEP_TIME_OFFSET: + case DB_OPTION_KEEP_TIME_OFFSET: pDbOptions->keepTimeOffset = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); break; case DB_OPTION_ENCRYPT_ALGORITHM: @@ -2427,6 +2427,12 @@ static SNode* createAlterTableStmtFinalize(SNode* pRealTable, SAlterTableStmt* p nodesDestroyNode(pRealTable); return (SNode*)pStmt; } +static SNode* createAlterTableMultiStmtFinalize(SNode* pRealTable, SAlterTableMultiStmt* pStmt) { + strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName); + strcpy(pStmt->tableName, ((SRealTableNode*)pRealTable)->table.tableName); + nodesDestroyNode(pRealTable); + return (SNode*)pStmt; +} SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable, SNode* pOptions) { CHECK_PARSER_STATUS(pCxt); @@ -2541,6 +2547,19 @@ _err: return NULL; } +SNode* createAlterSingleTagColumnNode(SAstCreateContext* pCtx, SToken* pTagName, SNode* pVal) { + CHECK_PARSER_STATUS(pCtx); + SAlterTableStmt* pStmt = NULL; + pCtx->errCode = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT, (SNode**)&pStmt); + CHECK_MAKE_NODE(pStmt); + pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_TAG_VAL; + COPY_STRING_FORM_ID_TOKEN(pStmt->colName, pTagName); + pStmt->pVal = (SValueNode*)pVal; + return (SNode*)pStmt; +_err: + return NULL; +} + SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal) { CHECK_PARSER_STATUS(pCxt); CHECK_NAME(checkColumnName(pCxt, pTagName)); @@ -2557,6 +2576,19 @@ _err: return NULL; } +SNode* createAlterTableSetMultiTagValue(SAstCreateContext* pCxt, SNode* pRealTable, SNodeList* pList) { + CHECK_PARSER_STATUS(pCxt); + SAlterTableStmt* pStmt = NULL; + pCxt->errCode = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT, (SNode**)&pStmt); + + CHECK_MAKE_NODE(pStmt); + pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL; + pStmt->pNodeListTagValue = pList; + return createAlterTableStmtFinalize(pRealTable, pStmt); +_err: + return NULL; +} + SNode* setAlterSuperTableType(SNode* pStmt) { if (!pStmt) return NULL; setNodeType(pStmt, QUERY_NODE_ALTER_SUPER_TABLE_STMT); diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index b78e10768f..39b142881f 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -46,7 +46,7 @@ int32_t buildQueryAfterParse(SQuery** pQuery, SNode* pRootNode, int16_t placehol int32_t parse(SParseContext* pParseCxt, SQuery** pQuery) { SAstCreateContext cxt; initAstCreateContext(pParseCxt, &cxt); - void* pParser = ParseAlloc((FMalloc)taosMemoryMalloc); + void* pParser = ParseAlloc((FMalloc)taosMemoryMalloc); if (!pParser) return terrno; int32_t i = 0; while (1) { @@ -210,15 +210,15 @@ static int32_t isTbnameEqCondOperator(SOperatorNode* pOperator, char** ppTableNa if (pOperator->opType != OP_TYPE_EQUAL) { return TSDB_CODE_SUCCESS; } - - SValueNode* pValueNode = NULL; + + SValueNode* pValueNode = NULL; if (nodeType(pOperator->pLeft) == 
QUERY_NODE_FUNCTION && 0 == strcasecmp(((SFunctionNode*)(pOperator->pLeft))->functionName, "tbname") && nodeType(pOperator->pRight) == QUERY_NODE_VALUE) { pValueNode = (SValueNode*)pOperator->pRight; } else if (nodeType(pOperator->pRight) == QUERY_NODE_FUNCTION && - 0 == strcasecmp(((SFunctionNode*)(pOperator->pRight))->functionName, "tbname") && - nodeType(pOperator->pLeft) == QUERY_NODE_VALUE) { + 0 == strcasecmp(((SFunctionNode*)(pOperator->pRight))->functionName, "tbname") && + nodeType(pOperator->pLeft) == QUERY_NODE_VALUE) { pValueNode = (SValueNode*)pOperator->pLeft; } else { return TSDB_CODE_SUCCESS; @@ -233,13 +233,14 @@ static EDealRes collectMetaKeyFromOperator(SCollectMetaKeyFromExprCxt* pCxt, SOp if (!pCxt->tbnameCollect) { return DEAL_RES_CONTINUE; } - + char* pTableName = NULL; int32_t code = isTbnameEqCondOperator((SOperatorNode*)pOpNode, &pTableName); if (TSDB_CODE_SUCCESS != code) return DEAL_RES_CONTINUE; if (pTableName) { SSelectStmt* pSelect = (SSelectStmt*)pCxt->pComCxt->pStmt; - pCxt->errCode = collectMetaKeyFromRealTableImpl(pCxt->pComCxt, ((SRealTableNode*)pSelect->pFromTable)->table.dbName, pTableName, AUTH_TYPE_READ); + pCxt->errCode = collectMetaKeyFromRealTableImpl(pCxt->pComCxt, ((SRealTableNode*)pSelect->pFromTable)->table.dbName, + pTableName, AUTH_TYPE_READ); } return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; @@ -500,7 +501,7 @@ static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateS reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->targetDbName, pStmt->targetTabName, pCxt->pMetaCache); if (TSDB_CODE_SUCCESS == code && NULL != pStmt->pSubtable && NULL != pStmt->pQuery) { SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; - int32_t code = nodesCloneNode(pStmt->pSubtable, &pSelect->pSubtable); + int32_t code = nodesCloneNode(pStmt->pSubtable, &pSelect->pSubtable); if (NULL == pSelect->pSubtable) { return code; } @@ -969,6 +970,7 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { case QUERY_NODE_DROP_SUPER_TABLE_STMT: return collectMetaKeyFromDropStable(pCxt, (SDropSuperTableStmt*)pStmt); case QUERY_NODE_ALTER_TABLE_STMT: + case QUERY_NODE_ALTER_TABLE_MULTI_STMT: return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt); case QUERY_NODE_ALTER_SUPER_TABLE_STMT: return collectMetaKeyFromAlterStable(pCxt, (SAlterTableStmt*)pStmt); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index fcb6361a6b..3d0c3d7c00 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -97,6 +97,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .showType = QUERY_NODE_SHOW_DNODES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, .pTableName = TSDB_INS_TABLE_DNODES, + .numOfShowCols = 1, .pShowCols = {"*"} }, @@ -1853,7 +1854,7 @@ static bool clauseSupportAlias(ESqlClause clause) { return SQL_CLAUSE_GROUP_BY == clause || SQL_CLAUSE_PARTITION_BY == clause || SQL_CLAUSE_ORDER_BY == clause; } -static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnNode** pCol, bool *translateAsAlias) { +static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnNode** pCol, bool* translateAsAlias) { *translateAsAlias = false; // count(*)/first(*)/last(*) and so on if (0 == strcmp((*pCol)->colName, "*")) { @@ -1862,7 +1863,7 @@ static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnN if (pCxt->pParseCxt->biMode) { SNode** ppNode = 
(SNode**)pCol; - bool ret; + bool ret; pCxt->errCode = biRewriteToTbnameFunc(pCxt, ppNode, &ret); if (TSDB_CODE_SUCCESS != pCxt->errCode) return DEAL_RES_ERROR; if (ret) { @@ -1876,9 +1877,8 @@ static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnN } else { bool found = false; res = translateColumnWithoutPrefix(pCxt, pCol); - if (!(*pCol)->node.asParam && - res != DEAL_RES_CONTINUE && - res != DEAL_RES_END && pCxt->errCode != TSDB_CODE_PAR_AMBIGUOUS_COLUMN) { + if (!(*pCol)->node.asParam && res != DEAL_RES_CONTINUE && res != DEAL_RES_END && + pCxt->errCode != TSDB_CODE_PAR_AMBIGUOUS_COLUMN) { res = translateColumnUseAlias(pCxt, pCol, &found); *translateAsAlias = true; } @@ -3321,9 +3321,11 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType) return TSDB_CODE_SUCCESS; } - if ((resultType == TSDB_DATA_TYPE_VARCHAR) && (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) { + if ((resultType == TSDB_DATA_TYPE_VARCHAR) && + (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) { commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), QUERY_NUMBER_MAX_DISPLAY_LEN); - } else if ((resultType == TSDB_DATA_TYPE_NCHAR) && (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) { + } else if ((resultType == TSDB_DATA_TYPE_NCHAR) && + (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) { commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), QUERY_NUMBER_MAX_DISPLAY_LEN * TSDB_NCHAR_SIZE); } else { commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), TYPE_BYTES[resultType]); @@ -5480,7 +5482,7 @@ static EDealRes translateGroupPartitionByImpl(SNode** pNode, void* pContext) { int32_t code = TSDB_CODE_SUCCESS; STranslateContext* pTransCxt = pCxt->pTranslateCxt; if (QUERY_NODE_VALUE == nodeType(*pNode)) { - SValueNode* pVal = (SValueNode*) *pNode; + SValueNode* pVal = (SValueNode*)*pNode; if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) { return DEAL_RES_CONTINUE; } @@ -5528,8 +5530,7 @@ static int32_t translateGroupByList(STranslateContext* pCxt, SSelectStmt* pSelec if (NULL == pSelect->pGroupByList) { return TSDB_CODE_SUCCESS; } - SReplaceGroupByAliasCxt cxt = { - .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; + SReplaceGroupByAliasCxt cxt = {.pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; nodesRewriteExprsPostOrder(pSelect->pGroupByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; @@ -5540,8 +5541,7 @@ static int32_t translatePartitionByList(STranslateContext* pCxt, SSelectStmt* pS return TSDB_CODE_SUCCESS; } - SReplaceGroupByAliasCxt cxt = { - .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; + SReplaceGroupByAliasCxt cxt = {.pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; nodesRewriteExprsPostOrder(pSelect->pPartitionByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; @@ -9495,7 +9495,8 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable } static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) { - if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType) { + if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || + TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL == pStmt->alterType) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Set tag value only available for child table"); } @@ -10521,7 +10522,8 @@ static void 
getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) { (void)tNameGetFullDbName(&name, pDbFName); } -static void getStreamQueryFirstProjectAliasName(SHashObj* pUserAliasSet, char* aliasName, int32_t len, char* defaultName[]) { +static void getStreamQueryFirstProjectAliasName(SHashObj* pUserAliasSet, char* aliasName, int32_t len, + char* defaultName[]) { for (int32_t i = 0; defaultName[i] != NULL; i++) { if (NULL == taosHashGet(pUserAliasSet, defaultName[i], strlen(defaultName[i]))) { snprintf(aliasName, len, "%s", defaultName[i]); @@ -10547,8 +10549,8 @@ static int32_t setColumnDefNodePrimaryKey(SColumnDefNode* pNode, bool isPk) { return code; } -static int32_t addIrowTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSelectStmt* pSelect, - SHashObj* pUserAliasSet, SNodeList* pCols, SCMCreateStreamReq* pReq) { +static int32_t addIrowTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSelectStmt* pSelect, SHashObj* pUserAliasSet, + SNodeList* pCols, SCMCreateStreamReq* pReq) { SNode* pProj = nodesListGetNode(pSelect->pProjectionList, 0); if (!pSelect->hasInterpFunc || (QUERY_NODE_FUNCTION == nodeType(pProj) && 0 == strcmp("_irowts", ((SFunctionNode*)pProj)->functionName))) { @@ -10595,7 +10597,7 @@ static int32_t addWstartTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSele return TSDB_CODE_SUCCESS; } SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -10627,7 +10629,7 @@ static int32_t addWstartTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSele } static int32_t addTsKeyToCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SNodeList* pCols, - SCMCreateStreamReq* pReq) { + SCMCreateStreamReq* pReq) { SSelectStmt* pSelect = (SSelectStmt*)pStmt; SHashObj* pUserAliasSet = NULL; int32_t code = checkProjectAlias(pCxt, pSelect->pProjectionList, &pUserAliasSet); @@ -10990,21 +10992,18 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm if (pStmt->pOptions->triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { if (pStmt->pOptions->fillHistory) { - return generateSyntaxErrMsgExt( - &pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window close, Stream unsupported Fill history"); + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream unsupported Fill history"); } if (pStmt->pOptions->ignoreExpired != 1) { - return generateSyntaxErrMsgExt( - &pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window close, Stream must not set ignore expired 0"); + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream must not set ignore expired 0"); } if (pStmt->pOptions->ignoreUpdate != 1) { - return generateSyntaxErrMsgExt( - &pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window close, Stream must not set ignore update 0"); + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream must not set ignore update 0"); } if (pSelect->pWindow != NULL && QUERY_NODE_INTERVAL_WINDOW == nodeType(pSelect->pWindow)) { @@ -13127,7 +13126,7 @@ static int32_t extractShowCreateViewResultSchema(int32_t* numOfCols, SSchema** p } static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) { - 
*numOfCols = SHOW_LOCAL_VARIABLES_RESULT_COLS; // SHOW_VARIABLES_RESULT_COLS + *numOfCols = SHOW_LOCAL_VARIABLES_RESULT_COLS; // SHOW_VARIABLES_RESULT_COLS *pSchema = taosMemoryCalloc((*numOfCols), sizeof(SSchema)); if (NULL == (*pSchema)) { return terrno; @@ -15183,24 +15182,90 @@ static int32_t rewriteDropSuperTable(STranslateContext* pCxt, SQuery* pQuery) { TAOS_RETURN(0); } -static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, - SVAlterTbReq* pReq) { - SName tbName = {0}; - SArray* pTsmas = NULL; +static int32_t buildUpdateTagValReqImpl2(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, + char* colName, SMultiTagUpateVal* pReq) { int32_t code = TSDB_CODE_SUCCESS; - if (pCxt->pMetaCache) { - toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tbName); - code = getTableTsmasFromCache(pCxt->pMetaCache, &tbName, &pTsmas); - if (code != TSDB_CODE_SUCCESS) return code; - if (pTsmas && pTsmas->size > 0) return TSDB_CODE_TSMA_MUST_BE_DROPPED; + if (NULL == pReq->tagName) { + return terrno; } - SSchema* pSchema = getTagSchema(pTableMeta, pStmt->colName); + SSchema* pSchema = getTagSchema(pTableMeta, colName); if (NULL == pSchema) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid tag name: %s", - pStmt->colName); + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid tag name: %s", colName); } - pReq->tagName = taosStrdup(pStmt->colName); + + pReq->tagName = taosStrdup(colName); + if (NULL == pReq->tagName) { + return terrno; + } + pReq->pTagArray = taosArrayInit(1, sizeof(STagVal)); + if (NULL == pReq->pTagArray) { + return terrno; + } + pReq->colId = pSchema->colId; + pReq->tagType = pSchema->type; + + STag* pTag = NULL; + SToken token; + char tokenBuf[TSDB_MAX_TAGS_LEN]; + const char* tagStr = pStmt->pVal->literal; + NEXT_TOKEN_WITH_PREV(tagStr, token); + if (TSDB_CODE_SUCCESS == code) { + code = checkAndTrimValue(&token, tokenBuf, &pCxt->msgBuf, pSchema->type); + if (TSDB_CODE_SUCCESS == code && TK_NK_VARIABLE == token.type) { + code = buildSyntaxErrMsg(&pCxt->msgBuf, "not expected tags values", token.z); + } + } + + if (TSDB_CODE_SUCCESS == code) { + code = parseTagValue(&pCxt->msgBuf, &tagStr, pTableMeta->tableInfo.precision, pSchema, &token, NULL, + pReq->pTagArray, &pTag); + if (pSchema->type == TSDB_DATA_TYPE_JSON && token.type == TK_NULL && code == TSDB_CODE_SUCCESS) { + pReq->tagFree = true; + } + } + if (TSDB_CODE_SUCCESS == code && tagStr) { + NEXT_VALID_TOKEN(tagStr, token); + if (token.n != 0) { + code = buildSyntaxErrMsg(&pCxt->msgBuf, "not expected tags values", token.z); + } + } + + if (TSDB_CODE_SUCCESS == code) { + if (pSchema->type == TSDB_DATA_TYPE_JSON) { + code = buildSyntaxErrMsg(&pCxt->msgBuf, "not expected tags values ", token.z); + } else { + STagVal* pTagVal = taosArrayGet(pReq->pTagArray, 0); + if (pTagVal) { + pReq->isNull = false; + if (IS_VAR_DATA_TYPE(pSchema->type)) { + pReq->nTagVal = pTagVal->nData; + pReq->pTagVal = pTagVal->pData; + } else { + pReq->nTagVal = pSchema->bytes; + pReq->pTagVal = (uint8_t*)&pTagVal->i64; + } + } else { + pReq->isNull = true; + } + } + } + + return code; +} +static int32_t buildUpdateTagValReqImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, + char* colName, SVAlterTbReq* pReq) { + int32_t code = TSDB_CODE_SUCCESS; + // if (NULL == pReq->tagName) { + // return terrno; + // } + + SSchema* pSchema = getTagSchema(pTableMeta, 
colName); + if (NULL == pSchema) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid tag name: %s", colName); + } + + pReq->tagName = taosStrdup(colName); if (NULL == pReq->tagName) { return terrno; } @@ -15261,6 +15326,62 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS return code; } +static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, + SVAlterTbReq* pReq) { + SName tbName = {0}; + SArray* pTsmas = NULL; + int32_t code = TSDB_CODE_SUCCESS; + if (pCxt->pMetaCache) { + toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tbName); + code = getTableTsmasFromCache(pCxt->pMetaCache, &tbName, &pTsmas); + if (code != TSDB_CODE_SUCCESS) return code; + if (pTsmas && pTsmas->size > 0) return TSDB_CODE_TSMA_MUST_BE_DROPPED; + } + return buildUpdateTagValReqImpl(pCxt, pStmt, pTableMeta, pStmt->colName, pReq); +} + +static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, + SVAlterTbReq* pReq) { + SName tbName = {0}; + SArray* pTsmas = NULL; + int32_t code = TSDB_CODE_SUCCESS; + if (pCxt->pMetaCache) { + toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tbName); + code = getTableTsmasFromCache(pCxt->pMetaCache, &tbName, &pTsmas); + if (code != TSDB_CODE_SUCCESS) return code; + if (pTsmas && pTsmas->size > 0) return TSDB_CODE_TSMA_MUST_BE_DROPPED; + } + SNodeList* pNodeList = pStmt->pNodeListTagValue; + if (pNodeList == NULL) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE); + } + + int32_t nTagValues = pNodeList->length; + if (nTagValues == 1) { + SAlterTableStmt* head = (SAlterTableStmt*)pNodeList->pHead->pNode; + pReq->action = TSDB_ALTER_TABLE_UPDATE_TAG_VAL; + return buildUpdateTagValReqImpl(pCxt, head, pTableMeta, head->colName, pReq); + } else { + pReq->pMultiTag = taosArrayInit(nTagValues, sizeof(SMultiTagUpateVal)); + if (NULL == pReq->pTagArray) { + return terrno; + } + + SAlterTableStmt* pTagStmt = NULL; + SNode* pNode = NULL; + FOREACH(pNode, pNodeList) { + SMultiTagUpateVal val; + pTagStmt = (SAlterTableStmt*)pNode; + code = buildUpdateTagValReqImpl2(pCxt, pTagStmt, pTableMeta, pTagStmt->colName, &val); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + TAOS_UNUSED(taosArrayPush(pReq->pMultiTag, &val)); + } + } + + return code; +} static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, SVAlterTbReq* pReq) { @@ -15449,6 +15570,8 @@ static int32_t buildAlterTbReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, case TSDB_ALTER_TABLE_UPDATE_TAG_NAME: case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES: return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE); + case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: + return buildUpdateMultiTagValReq(pCxt, pStmt, pTableMeta, pReq); case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: return buildUpdateTagValReq(pCxt, pStmt, pTableMeta, pReq); case TSDB_ALTER_TABLE_ADD_COLUMN: @@ -16254,6 +16377,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_DROP_SUPER_TABLE_STMT: code = rewriteDropSuperTable(pCxt, pQuery); break; + case QUERY_NODE_ALTER_TABLE_MULTI_STMT: case QUERY_NODE_ALTER_TABLE_STMT: code = rewriteAlterTable(pCxt, pQuery); break; From 97e08abe8ca6cd09293cdba12a0432da5a23c89d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 25 Nov 2024 10:55:28 +0800 Subject: [PATCH 04/76] support update multi tag --- 
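Note: this patch adds metaUpdateTableMultiTagVal so that a single ALTER TABLE request can update several tag values at once; the grammar changes earlier in this series appear to allow statements of the form ALTER TABLE ctb SET TAG t1 = 1, t2 = 'abc'. On the vnode side the requested tag names are de-duplicated in a hash table, names missing from the super table's tag schema are rejected, and the child table's tag blob is rebuilt by keeping every existing value unless the request overrides it. The sketch below is a minimal standalone illustration of that merge-by-name step only; TagKV and mergeTags are hypothetical stand-ins, not TDengine APIs.

#include <stdio.h>
#include <string.h>

typedef struct {
  const char *name;   /* tag column name */
  const char *value;  /* rendered value; NULL means the tag is NULL */
} TagKV;

/* Build the new tag set: keep old values, overridden by updates matched by name. */
static void mergeTags(const TagKV *oldTags, int nOld,
                      const TagKV *updates, int nUpd, TagKV *out) {
  for (int i = 0; i < nOld; i++) {
    out[i] = oldTags[i];                  /* keep the existing value ...            */
    for (int j = 0; j < nUpd; j++) {
      if (strcmp(oldTags[i].name, updates[j].name) == 0) {
        out[i].value = updates[j].value;  /* ... unless the request overrides it    */
        break;
      }
    }
  }
}

int main(void) {
  const TagKV oldTags[] = {{"t1", "1"}, {"t2", "beijing"}, {"t3", NULL}};
  const TagKV updates[] = {{"t2", "shanghai"}, {"t3", "7"}};
  TagKV merged[3];
  mergeTags(oldTags, 3, updates, 2, merged);
  for (int i = 0; i < 3; i++) {
    printf("%s = %s\n", merged[i].name, merged[i].value ? merged[i].value : "NULL");
  }
  return 0;
}

In the hunk below the merged values are assembled into STagVal entries, packed with tTagNew, and persisted through the table, uid, tag, and child-table indexes.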
source/dnode/vnode/src/meta/metaTable.c | 253 +++++++++++++++++++++++- source/libs/parser/src/parTranslater.c | 3 - 2 files changed, 252 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 03b8bdd93f..f51b641640 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2011,7 +2011,257 @@ _err: return terrno != 0 ? terrno : TSDB_CODE_FAILED; } -static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { return 0; } +static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { + SMetaEntry ctbEntry = {0}; + SMetaEntry stbEntry = {0}; + void *pVal = NULL; + int nVal = 0; + int ret; + int c; + tb_uid_t uid; + int64_t oversion; + const void *pData = NULL; + int nData = 0; + SHashObj *pTagTable = NULL; + SArray *updateTagColumnIds = NULL; + + // search name index + ret = tdbTbGet(pMeta->pNameIdx, pAlterTbReq->tbName, strlen(pAlterTbReq->tbName) + 1, &pVal, &nVal); + if (ret < 0) { + return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; + } + + uid = *(tb_uid_t *)pVal; + tdbFree(pVal); + pVal = NULL; + + // search uid index + TBC *pUidIdxc = NULL; + + TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL)); + if (tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c) < 0) { + metaTrace("meta/table: failed to move to uid index, uid:%" PRId64, uid); + } + if (c != 0) { + tdbTbcClose(pUidIdxc); + metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c); + return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; + } + + if (tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData) != 0) { + metaError("meta/table: failed to get uid index, uid:%" PRId64, uid); + } + oversion = ((SUidIdxVal *)pData)[0].version; + + // search table.db + TBC *pTbDbc = NULL; + SDecoder dc1 = {0}; + SDecoder dc2 = {0}; + + /* get ctbEntry */ + TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL)); + if (tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c) != 0) { + metaError("meta/table: failed to move to tb db, uid:%" PRId64, uid); + } + if (c != 0) { + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); + metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c); + return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; + } + + if (tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData) != 0) { + metaError("meta/table: failed to get tb db, uid:%" PRId64, uid); + } + + if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) { + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); + return terrno; + } + memcpy(ctbEntry.pBuf, pData, nData); + tDecoderInit(&dc1, ctbEntry.pBuf, nData); + ret = metaDecodeEntry(&dc1, &ctbEntry); + if (ret < 0) { + terrno = ret; + goto _err; + } + + /* get stbEntry*/ + if (tdbTbGet(pMeta->pUidIdx, &ctbEntry.ctbEntry.suid, sizeof(tb_uid_t), &pVal, &nVal) != 0) { + metaError("meta/table: failed to get uid index, uid:%" PRId64, ctbEntry.ctbEntry.suid); + } + if (!pVal) { + terrno = TSDB_CODE_INVALID_MSG; + goto _err; + } + + if (tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = ((SUidIdxVal *)pVal)[0].version}), + sizeof(STbDbKey), (void **)&stbEntry.pBuf, &nVal) != 0) { + metaError("meta/table: failed to get tb db, uid:%" PRId64, ctbEntry.ctbEntry.suid); + } + tdbFree(pVal); + tDecoderInit(&dc2, stbEntry.pBuf, nVal); + ret = metaDecodeEntry(&dc2, &stbEntry); + if (ret < 0) { + terrno = ret; + goto _err; + } + + int32_t nTagVals = 
taosArrayGetSize(pAlterTbReq->pMultiTag); + pTagTable = taosHashInit(nTagVals, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + if (pTagTable == NULL) { + ret = terrno; + goto _err; + } + + // remove duplicate tag name + for (int i = 0; i < nTagVals; i++) { + SMultiTagUpateVal *pTagVal = taosArrayGet(pAlterTbReq->pMultiTag, i); + ret = taosHashPut(pTagTable, pTagVal->tagName, strlen(pTagVal->tagName), pTagVal, sizeof(*pTagVal)); + if (ret != 0) { + goto _err; + } + } + int32_t nUpdateTagVal = taosHashGetSize(pTagTable); + updateTagColumnIds = taosArrayInit(nUpdateTagVal, sizeof(int32_t)); + + SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; + SSchema *pColumn = NULL; + int32_t iCol = 0; + + for (;;) { + pColumn = NULL; + + if (iCol >= pTagSchema->nCols) break; + pColumn = &pTagSchema->pSchema[iCol]; + if (taosHashGet(pTagTable, pColumn->name, strlen(pColumn->name)) != NULL) { + if (taosArrayPush(updateTagColumnIds, &iCol) == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + } + iCol++; + } + if (taosArrayGetSize(updateTagColumnIds) == nUpdateTagVal) { + terrno = TSDB_CODE_VND_COL_NOT_EXISTS; + goto _err; + } + + ctbEntry.version = version; + if (pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON) { + terrno = TSDB_CODE_VND_COL_NOT_EXISTS; + goto _err; + } else { + const STag *pOldTag = (const STag *)ctbEntry.ctbEntry.pTags; + STag *pNewTag = NULL; + SArray *pTagArray = taosArrayInit(pTagSchema->nCols, sizeof(STagVal)); + if (!pTagArray) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + for (int32_t i = 0; i < pTagSchema->nCols; i++) { + SSchema *pCol = &pTagSchema->pSchema[i]; + SMultiTagUpateVal *pTagVal = taosHashGet(pTagTable, pCol->name, strlen(pCol->name)); + if (pTagVal == NULL) { + STagVal val = {.cid = pCol->colId}; + if (tTagGet(pOldTag, &val)) { + if (taosArrayPush(pTagArray, &val) == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + taosArrayDestroy(pTagArray); + goto _err; + } + } + } else { + STagVal val = {0}; + val.type = pCol->type; + val.cid = pCol->colId; + if (pTagVal->isNull) continue; + + if (IS_VAR_DATA_TYPE(pCol->type)) { + val.pData = pTagVal->pTagVal; + val.nData = pTagVal->nTagVal; + } else { + memcpy(&val.i64, pTagVal->pTagVal, pTagVal->nTagVal); + } + if (taosArrayPush(pTagArray, &val) == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + taosArrayDestroy(pTagArray); + goto _err; + } + } + } + if ((terrno = tTagNew(pTagArray, pTagSchema->version, false, &pNewTag)) < 0) { + taosArrayDestroy(pTagArray); + goto _err; + } + ctbEntry.ctbEntry.pTags = (uint8_t *)pNewTag; + taosArrayDestroy(pTagArray); + } + + metaWLock(pMeta); + + // save to table.db + if (metaSaveToTbDb(pMeta, &ctbEntry) < 0) { + metaError("meta/table: failed to save to tb db:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + // save to uid.idx + if (metaUpdateUidIdx(pMeta, &ctbEntry) < 0) { + metaError("meta/table: failed to update uid idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + if (metaUpdateTagIdx(pMeta, &ctbEntry) < 0) { + metaError("meta/table: failed to update tag idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + if (NULL == ctbEntry.ctbEntry.pTags) { + metaError("meta/table: null tags, update tag val failed."); + goto _err; + } + + SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid}; + if (tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags, + ((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn) < 0) { + metaError("meta/table: failed to upsert 
ctb idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + if (metaUidCacheClear(pMeta, ctbEntry.ctbEntry.suid) < 0) { + metaError("meta/table: failed to clear uid cache:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + if (metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid) < 0) { + metaError("meta/table: failed to clear group cache:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + if (metaUpdateChangeTime(pMeta, ctbEntry.uid, pAlterTbReq->ctimeMs) < 0) { + metaError("meta/table: failed to update change time:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); + } + + metaULock(pMeta); + + tDecoderClear(&dc1); + tDecoderClear(&dc2); + taosMemoryFree((void *)ctbEntry.ctbEntry.pTags); + if (ctbEntry.pBuf) taosMemoryFree(ctbEntry.pBuf); + if (stbEntry.pBuf) tdbFree(stbEntry.pBuf); + tdbTbcClose(pTbDbc); + tdbTbcClose(pUidIdxc); + taosHashCleanup(pTagTable); + taosArrayDestroy(updateTagColumnIds); + return 0; + +_err: + tDecoderClear(&dc1); + tDecoderClear(&dc2); + if (ctbEntry.pBuf) taosMemoryFree(ctbEntry.pBuf); + if (stbEntry.pBuf) tdbFree(stbEntry.pBuf); + tdbTbcClose(pTbDbc); + tdbTbcClose(pUidIdxc); + taosHashCleanup(pTagTable); + taosArrayDestroy(updateTagColumnIds); + return -1; +} static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { SMetaEntry ctbEntry = {0}; SMetaEntry stbEntry = {0}; @@ -2114,6 +2364,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; SSchema *pColumn = NULL; int32_t iCol = 0; + for (;;) { pColumn = NULL; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 3d0c3d7c00..3543ee1a53 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -15256,9 +15256,6 @@ static int32_t buildUpdateTagValReqImpl2(STranslateContext* pCxt, SAlterTableStm static int32_t buildUpdateTagValReqImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, char* colName, SVAlterTbReq* pReq) { int32_t code = TSDB_CODE_SUCCESS; - // if (NULL == pReq->tagName) { - // return terrno; - // } SSchema* pSchema = getTagSchema(pTableMeta, colName); if (NULL == pSchema) { From 22f3c2097751722555013bed84f64cc21396e3b6 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Mon, 25 Nov 2024 11:33:02 +0800 Subject: [PATCH 05/76] enh: check param --- source/libs/executor/src/aggregateoperator.c | 2 +- source/libs/executor/src/anomalywindowoperator.c | 2 -- source/libs/executor/src/countwindowoperator.c | 3 +++ source/libs/executor/src/eventwindowoperator.c | 3 +++ source/libs/executor/src/filloperator.c | 8 ++++++++ source/libs/executor/src/groupoperator.c | 7 +++++++ source/libs/executor/src/hashjoinoperator.c | 10 +++++++--- source/libs/executor/src/projectoperator.c | 5 ++++- 8 files changed, 33 insertions(+), 7 deletions(-) diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index b71ed5ee26..94bf791ef8 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -337,7 +337,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) { int32_t code = TSDB_CODE_SUCCESS; - if (pOperator || (pOperator->exprSupp.numOfExprs > 0 && pCtx == NULL)) { + if (!pOperator || (pOperator->exprSupp.numOfExprs > 0 && pCtx == NULL)) { qError("%s failed at line %d since pCtx is 
NULL.", __func__, __LINE__); return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; } diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index b678030a1c..71a38c739d 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -171,8 +171,6 @@ _error: } static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { - CHECK_CONDITION_FAILED(pOperator != NULL); - CHECK_CONDITION_FAILED(ppRes != NULL); CHECK_CONDITION_FAILED(pOperator->info != NULL); CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; diff --git a/source/libs/executor/src/countwindowoperator.c b/source/libs/executor/src/countwindowoperator.c index 542a7c89a9..cb7459744f 100644 --- a/source/libs/executor/src/countwindowoperator.c +++ b/source/libs/executor/src/countwindowoperator.c @@ -225,6 +225,8 @@ _end: } static int32_t countWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + CHECK_CONDITION_FAILED(pOperator->info != NULL); + CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SCountWindowOperatorInfo* pInfo = pOperator->info; @@ -232,6 +234,7 @@ static int32_t countWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** SExprSupp* pExprSup = &pOperator->exprSupp; int32_t order = pInfo->binfo.inputTsOrder; SSDataBlock* pRes = pInfo->binfo.pRes; + CHECK_CONDITION_FAILED(pRes != NULL); blockDataCleanup(pRes); diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c index e68a91d97d..83b202fed6 100644 --- a/source/libs/executor/src/eventwindowoperator.c +++ b/source/libs/executor/src/eventwindowoperator.c @@ -182,6 +182,8 @@ void destroyEWindowOperatorInfo(void* param) { } static int32_t eventWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + CHECK_CONDITION_FAILED(pOperator->info != NULL); + CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SEventWindowOperatorInfo* pInfo = pOperator->info; @@ -191,6 +193,7 @@ static int32_t eventWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** int32_t order = pInfo->binfo.inputTsOrder; SSDataBlock* pRes = pInfo->binfo.pRes; + CHECK_CONDITION_FAILED(pRes != NULL); blockDataCleanup(pRes); diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index 1595c90419..d6a518ccc4 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -182,9 +182,17 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { int32_t lino = 0; SFillOperatorInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + if (pInfo == NULL || pTaskInfo == NULL) { + qError("%s failed at line %d since pInfo or pTaskInfo is NULL.", __func__, __LINE__); + return NULL; + } SResultInfo* pResultInfo = &pOperator->resultInfo; SSDataBlock* pResBlock = pInfo->pFinalRes; + if (pResBlock == NULL) { + qError("%s failed at line %d since pResBlock is NULL.", __func__, __LINE__); + return NULL; + } blockDataCleanup(pResBlock); diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index fec35c3371..c832cfbb4e 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -444,6 +444,8 @@ _end: } static int32_t hashGroupbyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** 
ppRes) { + CHECK_CONDITION_FAILED(pOperator->info != NULL); + CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1003,11 +1005,14 @@ static int32_t hashPartitionNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) return TSDB_CODE_SUCCESS; } + CHECK_CONDITION_FAILED(pOperator->info != NULL); + CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SPartitionOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->binfo.pRes; + CHECK_CONDITION_FAILED(pRes != NULL); if (pOperator->status == OP_RES_TO_RETURN) { (*ppRes) = buildPartitionResult(pOperator); @@ -1459,6 +1464,8 @@ static int32_t doStreamHashPartitionNext(SOperatorInfo* pOperator, SSDataBlock** int32_t lino = 0; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamPartitionOperatorInfo* pInfo = pOperator->info; + CHECK_CONDITION_FAILED(pInfo != NULL); + CHECK_CONDITION_FAILED(pTaskInfo != NULL); if (pOperator->status == OP_EXEC_DONE) { (*ppRes) = NULL; diff --git a/source/libs/executor/src/hashjoinoperator.c b/source/libs/executor/src/hashjoinoperator.c index 1f43a429b3..12f90097c5 100644 --- a/source/libs/executor/src/hashjoinoperator.c +++ b/source/libs/executor/src/hashjoinoperator.c @@ -904,9 +904,10 @@ static int32_t hJoinAddBlockRowsToHash(SSDataBlock* pBlock, SHJoinOperatorInfo* static int32_t hJoinBuildHash(struct SOperatorInfo* pOperator, bool* queryDone) { SHJoinOperatorInfo* pJoin = pOperator->info; - SSDataBlock* pBlock = NULL; - int32_t code = TSDB_CODE_SUCCESS; - + SSDataBlock* pBlock = NULL; + int32_t code = TSDB_CODE_SUCCESS; + CHECK_CONDITION_FAILED(pJoin != NULL); + while (true) { pBlock = getNextBlockFromDownstream(pOperator, pJoin->pBuild->downStreamIdx); if (NULL == pBlock) { @@ -990,12 +991,15 @@ void hJoinSetDone(struct SOperatorInfo* pOperator) { } static int32_t hJoinMainProcess(struct SOperatorInfo* pOperator, SSDataBlock** pResBlock) { + CHECK_CONDITION_FAILED(pOperator->info != NULL); + CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); SHJoinOperatorInfo* pJoin = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SSDataBlock* pRes = pJoin->finBlk; int64_t st = 0; + CHECK_CONDITION_FAILED(pRes != NULL); QRY_PARAM_CHECK(pResBlock); if (pOperator->cost.openCost == 0) { diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 5b9e531679..5030b8a148 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -564,7 +564,7 @@ SSDataBlock* doApplyIndefinitFunction1(SOperatorInfo* pOperator) { int32_t doApplyIndefinitFunction(SOperatorInfo* pOperator, SSDataBlock** pResBlock) { QRY_PARAM_CHECK(pResBlock); - + CHECK_CONDITION_FAILED(pOperator->info != NULL); SIndefOperatorInfo* pIndefInfo = pOperator->info; SOptrBasicInfo* pInfo = &pIndefInfo->binfo; SExprSupp* pSup = &pOperator->exprSupp; @@ -1178,5 +1178,8 @@ _exit: if(processByRowFunctionCtx) { taosArrayDestroy(processByRowFunctionCtx); } + if(code) { + qError("project apply functions failed at: %s:%d", __func__, lino); + } return code; } From c46278a863d1045bad5f222c21bf2a64e179b886 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Mon, 25 Nov 2024 13:56:14 +0800 Subject: [PATCH 06/76] fix: col type --- source/libs/function/src/functionMgt.c | 8 ++++++++ 
source/libs/planner/src/planOptimizer.c | 16 ++++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index a406b23c59..5dfb94ba6e 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -412,6 +412,13 @@ int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNo return code; } +static void resetOutputChangedFunc(SFunctionNode *pFunc, const SFunctionNode* pSrcFunc) { + if (funcMgtBuiltins[pFunc->funcId].type == FUNCTION_TYPE_LAST_MERGE) { + pFunc->node.resType = pSrcFunc->node.resType; + return; + } +} + int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** ppFunc) { int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)ppFunc); if (NULL == *ppFunc) { @@ -430,6 +437,7 @@ int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFu *ppFunc = NULL; return code; } + resetOutputChangedFunc(*ppFunc, pSrcFunc); return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 39024731ed..0cc26dfce9 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -4258,7 +4258,7 @@ typedef struct SLastRowScanOptSetColDataTypeCxt { int32_t code; } SLastRowScanOptSetColDataTypeCxt; -static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) { +static EDealRes lastRowScanOptGetColAndSetDataType(SNode* pNode, void* pContext, bool setType) { if (QUERY_NODE_COLUMN == nodeType(pNode)) { SLastRowScanOptSetColDataTypeCxt* pCxt = pContext; if (pCxt->doAgg) { @@ -4266,12 +4266,12 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) { if (TSDB_CODE_SUCCESS != pCxt->code) { return DEAL_RES_ERROR; } - getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes); + if (setType) getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes); } else { SNode* pCol = NULL; FOREACH(pCol, pCxt->pLastCols) { if (nodesEqualNode(pCol, pNode)) { - getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes); + if (setType) getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes); break; } } @@ -4281,6 +4281,14 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) { return DEAL_RES_CONTINUE; } +static EDealRes lastRowScanOptGetLastCols(SNode* pNode, void* pContext) { + return lastRowScanOptGetColAndSetDataType(pNode, pContext, false); +} + +static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) { + return lastRowScanOptGetColAndSetDataType(pNode, pContext, true); +} + static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCols, SNodeList* pLastRowCols, bool erase, int32_t pkBytes) { SNode* pTarget = NULL; WHERE_EACH(pTarget, pTargets) { @@ -4393,7 +4401,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic SNode* pParamNode = NULL; if (FUNCTION_TYPE_LAST == funcType) { (void)nodesListErase(pFunc->pParameterList, nodesListGetCell(pFunc->pParameterList, 1)); - nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptSetColDataType, &cxt); + nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptGetLastCols, &cxt); if (TSDB_CODE_SUCCESS != cxt.code) break; } FOREACH(pParamNode, pFunc->pParameterList) { From 4d857b01498ca29aefa5c4aeee3c68d32db8069d 
Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 25 Nov 2024 15:22:53 +0800 Subject: [PATCH 07/76] fix mem leak --- include/common/tmsg.h | 1 - include/libs/nodes/cmdnodes.h | 12 ---------- source/common/src/tmsg.c | 2 +- source/dnode/vnode/src/meta/metaTable.c | 4 ++-- source/dnode/vnode/src/vnd/vnodeSvr.c | 14 +++++++----- source/libs/nodes/src/nodesUtilFuncs.c | 19 ++++++++-------- source/libs/parser/inc/sql.y | 12 +++++----- source/libs/parser/src/parAstCreater.c | 1 + source/libs/parser/src/parAstParser.c | 1 - source/libs/parser/src/parTranslater.c | 29 ++++++++++++++++++------- 10 files changed, 49 insertions(+), 46 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 27169b0a4e..bcbae641f2 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -352,7 +352,6 @@ typedef enum ENodeType { QUERY_NODE_CREATE_ANODE_STMT, QUERY_NODE_DROP_ANODE_STMT, QUERY_NODE_UPDATE_ANODE_STMT, - QUERY_NODE_ALTER_TABLE_MULTI_STMT, // show statement nodes // see 'sysTableShowAdapter', 'SYSTABLE_SHOW_TYPE_OFFSET' diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 6623811712..defacf4cd3 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -267,18 +267,6 @@ typedef struct SAlterTableStmt { SNodeList* pNodeListTagValue; } SAlterTableStmt; - -typedef struct SAlterTableStmt2 { - ENodeType type; - int8_t alterType; - char colName[TSDB_COL_NAME_LEN]; - STableOptions* pOptions; - SDataType dataType; - SValueNode* pVal; - SColumnOptions* pColOptions; - -} SAlterTableStmt2; - typedef struct SAlterTableMultiStmt { ENodeType type; char dbName[TSDB_DB_NAME_LEN]; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 134a5cf8c5..fd3af20ac1 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10521,7 +10521,7 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) { TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTag->isNull)); TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTag->tagType)); if (!pTag->isNull) { - TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, pTag->pTagVal, pReq->nTagVal)); + TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, pTag->pTagVal, pTag->nTagVal)); } } break; diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index f51b641640..54476d339f 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2109,7 +2109,7 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe } int32_t nTagVals = taosArrayGetSize(pAlterTbReq->pMultiTag); - pTagTable = taosHashInit(nTagVals, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pTagTable = taosHashInit(nTagVals, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (pTagTable == NULL) { ret = terrno; goto _err; @@ -2143,7 +2143,7 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe } iCol++; } - if (taosArrayGetSize(updateTagColumnIds) == nUpdateTagVal) { + if (taosArrayGetSize(updateTagColumnIds) != nUpdateTagVal) { terrno = TSDB_CODE_VND_COL_NOT_EXISTS; goto _err; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index dd13c975cf..546cd6c3ae 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -145,8 +145,12 @@ static int32_t vnodePreProcessAlterTableMsg(SVnode *pVnode, SRpcMsg *pMsg) { SVAlterTbReq vAlterTbReq = {0}; int64_t ctimeMs = taosGetTimestampMs(); if 
(tDecodeSVAlterTbReqSetCtime(&dc, &vAlterTbReq, ctimeMs) < 0) { + taosArrayDestroy(vAlterTbReq.pMultiTag); + vAlterTbReq.pMultiTag = NULL; goto _exit; } + taosArrayDestroy(vAlterTbReq.pMultiTag); + vAlterTbReq.pMultiTag = NULL; code = 0; @@ -666,10 +670,9 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg } } break; case TDMT_VND_STREAM_TASK_RESET: { - if (pVnode->restored && vnodeIsLeader(pVnode) && - (code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) { - goto _err; - } + if (pVnode->restored && vnodeIsLeader(pVnode) && (code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) { + goto _err; + } } break; case TDMT_VND_ALTER_CONFIRM: @@ -690,7 +693,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg case TDMT_VND_DROP_INDEX: vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp); break; - case TDMT_VND_STREAM_CHECK_POINT_SOURCE: // always return true + case TDMT_VND_STREAM_CHECK_POINT_SOURCE: // always return true tqProcessTaskCheckPointSourceReq(pVnode->pTq, pMsg, pRsp); break; case TDMT_VND_STREAM_TASK_UPDATE: // always return true @@ -1367,6 +1370,7 @@ static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, i } _exit: + taosArrayDestroy(vAlterTbReq.pMultiTag); tEncodeSize(tEncodeSVAlterTbRsp, &vAlterTbRsp, pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); tEncoderInit(&ec, pRsp->pCont, pRsp->contLen); diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 25d1ecd6f9..ac29021e83 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -519,9 +519,6 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { case QUERY_NODE_ALTER_SUPER_TABLE_STMT: code = makeNode(type, sizeof(SAlterTableStmt), &pNode); break; - case QUERY_NODE_ALTER_TABLE_MULTI_STMT: - code = makeNode(type, sizeof(SAlterTableMultiStmt), &pNode); - break; case QUERY_NODE_CREATE_USER_STMT: code = makeNode(type, sizeof(SCreateUserStmt), &pNode); break; @@ -1360,13 +1357,15 @@ void nodesDestroyNode(SNode* pNode) { SAlterTableStmt* pStmt = (SAlterTableStmt*)pNode; nodesDestroyNode((SNode*)pStmt->pOptions); nodesDestroyNode((SNode*)pStmt->pVal); - break; - } - case QUERY_NODE_ALTER_TABLE_MULTI_STMT: { - SAlterTableMultiStmt* pStmt = (SAlterTableMultiStmt*)pNode; - // nodesDestroyList(pStmt->pTables); - // nodesDestroyNode((SNode*)pStmt->pOptions); - // nodesDestroyNode((SNode*)pStmt->pVal); + if (pStmt->pNodeListTagValue != NULL) { + SNodeList* pNodeList = pStmt->pNodeListTagValue; + SNode* pSubNode = NULL; + FOREACH(pSubNode, pNodeList) { + SAlterTableStmt* pSubAlterTable = (SAlterTableStmt*)pSubNode; + nodesDestroyNode((SNode*)pSubAlterTable->pOptions); + nodesDestroyNode((SNode*)pSubAlterTable->pVal); + } + } break; } case QUERY_NODE_CREATE_USER_STMT: { diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 9c76aa7f7a..b8a5e6f98f 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -390,14 +390,14 @@ alter_table_clause(A) ::= full_table_name(B) RENAME TAG column_name(C) column_name(D). { A = createAlterTableRenameCol(pCxt, B, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &C, &D); } -%type column_eq_value_list { SNodeList* } -%destructor column_eq_value_list { nodesDestroyList($$); } -column_eq_value(A) ::= column_name(C) NK_EQ tags_literal(D). { A = createAlterSingleTagColumnNode(pCxt, &C, D); } -column_eq_value_list(A) ::= column_eq_value(B). 
{ A = createNodeList(pCxt, B); } -column_eq_value_list(A) ::= column_eq_value_list(B) NK_COMMA column_eq_value(C). { A = addNodeToList(pCxt, B, C);} +%type column_tag_value_list { SNodeList* } +%destructor column_tag_value_list { nodesDestroyList($$); } +column_tag_value(A) ::= column_name(C) NK_EQ tags_literal(D). { A = createAlterSingleTagColumnNode(pCxt, &C, D); } +column_tag_value_list(A) ::= column_tag_value(B). { A = createNodeList(pCxt, B); } +column_tag_value_list(A) ::= column_tag_value_list(B) NK_COMMA column_tag_value(C). { A = addNodeToList(pCxt, B, C);} alter_table_clause(A) ::= - full_table_name(B) SET TAG column_eq_value_list(C). { A = createAlterTableSetMultiTagValue(pCxt, B, C); } + full_table_name(B) SET TAG column_tag_value_list(C). { A = createAlterTableSetMultiTagValue(pCxt, B, C); } %type multi_create_clause { SNodeList* } %destructor multi_create_clause { nodesDestroyList($$); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index b41dad0b18..8b0fc19739 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -2555,6 +2555,7 @@ SNode* createAlterSingleTagColumnNode(SAstCreateContext* pCtx, SToken* pTagName, pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_TAG_VAL; COPY_STRING_FORM_ID_TOKEN(pStmt->colName, pTagName); pStmt->pVal = (SValueNode*)pVal; + pStmt->pNodeListTagValue = NULL; return (SNode*)pStmt; _err: return NULL; diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 39b142881f..657deb43d0 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -970,7 +970,6 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { case QUERY_NODE_DROP_SUPER_TABLE_STMT: return collectMetaKeyFromDropStable(pCxt, (SDropSuperTableStmt*)pStmt); case QUERY_NODE_ALTER_TABLE_STMT: - case QUERY_NODE_ALTER_TABLE_MULTI_STMT: return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt); case QUERY_NODE_ALTER_SUPER_TABLE_STMT: return collectMetaKeyFromAlterStable(pCxt, (SAlterTableStmt*)pStmt); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 3543ee1a53..ebb6693508 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -15184,11 +15184,7 @@ static int32_t rewriteDropSuperTable(STranslateContext* pCxt, SQuery* pQuery) { static int32_t buildUpdateTagValReqImpl2(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, char* colName, SMultiTagUpateVal* pReq) { - int32_t code = TSDB_CODE_SUCCESS; - if (NULL == pReq->tagName) { - return terrno; - } - + int32_t code = TSDB_CODE_SUCCESS; SSchema* pSchema = getTagSchema(pTableMeta, colName); if (NULL == pSchema) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid tag name: %s", colName); @@ -15360,7 +15356,7 @@ static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStm return buildUpdateTagValReqImpl(pCxt, head, pTableMeta, head->colName, pReq); } else { pReq->pMultiTag = taosArrayInit(nTagValues, sizeof(SMultiTagUpateVal)); - if (NULL == pReq->pTagArray) { + if (pReq->pMultiTag == NULL) { return terrno; } @@ -15373,7 +15369,9 @@ static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStm if (TSDB_CODE_SUCCESS != code) { return code; } - TAOS_UNUSED(taosArrayPush(pReq->pMultiTag, &val)); + if (taosArrayPush(pReq->pMultiTag, &val) == NULL) { + return terrno; + 
} } } @@ -15658,6 +15656,18 @@ static int32_t buildModifyVnodeArray(STranslateContext* pCxt, SAlterTableStmt* p return code; } +static void deleTagVal(void* val) { + SMultiTagUpateVal* pTag = val; + taosMemoryFree(pTag->tagName); + for (int i = 0; i < taosArrayGetSize(pTag->pTagArray); ++i) { + STagVal* p = (STagVal*)taosArrayGet(pTag->pTagArray, i); + if (IS_VAR_DATA_TYPE(p->type)) { + taosMemoryFreeClear(p->pData); + } + } + + taosArrayDestroy(pTag->pTagArray); +} static void destoryAlterTbReq(SVAlterTbReq* pReq) { taosMemoryFree(pReq->tbName); taosMemoryFree(pReq->colName); @@ -15670,6 +15680,10 @@ static void destoryAlterTbReq(SVAlterTbReq* pReq) { taosMemoryFreeClear(p->pData); } } + if (pReq->action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL) { + taosArrayDestroyEx(pReq->pMultiTag, deleTagVal); + } + taosArrayDestroy(pReq->pTagArray); if (pReq->tagFree) tTagFree((STag*)pReq->pTagVal); } @@ -16374,7 +16388,6 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_DROP_SUPER_TABLE_STMT: code = rewriteDropSuperTable(pCxt, pQuery); break; - case QUERY_NODE_ALTER_TABLE_MULTI_STMT: case QUERY_NODE_ALTER_TABLE_STMT: code = rewriteAlterTable(pCxt, pQuery); break; From ac012b83ec1fa6aa4dcaded9282c9e5240558ffa Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 25 Nov 2024 16:04:47 +0800 Subject: [PATCH 08/76] fix mem leak --- source/client/src/clientRawBlockWrite.c | 87 +++++++++++++------------ 1 file changed, 46 insertions(+), 41 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 1799f29eb4..30d4cdb573 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -762,6 +762,11 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValueNull", isNullCJson)); break; } + case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: { + + break; + } + case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: { cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); RAW_NULL_CHECK(colName); @@ -1855,19 +1860,19 @@ end: typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp); static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func, SMqRspObj* rspObj) { - int8_t dataVersion = *(int8_t*)data; - if (dataVersion >= MQ_DATA_RSP_VERSION) { - data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); - dataLen -= sizeof(int8_t) + sizeof(int32_t); + int8_t dataVersion = *(int8_t*)data; + if (dataVersion >= MQ_DATA_RSP_VERSION) { + data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); + dataLen -= sizeof(int8_t) + sizeof(int32_t); } - rspObj->resIter = -1; - tDecoderInit(decoder, data, dataLen); - int32_t code = func(decoder, &rspObj->dataRsp); - if (code != 0) { - SET_ERROR_MSG("decode mq taosx data rsp failed"); + rspObj->resIter = -1; + tDecoderInit(decoder, data, dataLen); + int32_t code = func(decoder, &rspObj->dataRsp); + if (code != 0) { + SET_ERROR_MSG("decode mq taosx data rsp failed"); } - return code; + return code; } static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash, @@ -2195,44 +2200,44 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) { typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp); static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { - int32_t len = 0; - int32_t code = 0; - SEncoder encoder = {0}; - void* buf = NULL; - 
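+  // note (descriptive comment, inferred from this patch): the buffer produced below is laid out as
+  //   [int8 MQ_DATA_RSP_VERSION][int32 offset-section length][encoded SMqDataRsp body],
+  // which matches decodeRawData() above skipping sizeof(int8_t) + sizeof(int32_t) before decoding.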
tEncodeSize(encodeFunc, rspObj, len, code); - if (code < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + int32_t len = 0; + int32_t code = 0; + SEncoder encoder = {0}; + void* buf = NULL; + tEncodeSize(encodeFunc, rspObj, len, code); + if (code < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - len += sizeof(int8_t) + sizeof(int32_t); - buf = taosMemoryCalloc(1, len); - if (buf == NULL) { - code = terrno; - goto FAILED; + len += sizeof(int8_t) + sizeof(int32_t); + buf = taosMemoryCalloc(1, len); + if (buf == NULL) { + code = terrno; + goto FAILED; } - tEncoderInit(&encoder, buf, len); - if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + tEncoderInit(&encoder, buf, len); + if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - int32_t offsetLen = getOffSetLen(rspObj); - if (offsetLen <= 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + int32_t offsetLen = getOffSetLen(rspObj); + if (offsetLen <= 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (tEncodeI32(&encoder, offsetLen) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (tEncodeI32(&encoder, offsetLen) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (encodeFunc(&encoder, rspObj) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (encodeFunc(&encoder, rspObj) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - tEncoderClear(&encoder); + tEncoderClear(&encoder); - raw->raw = buf; - raw->raw_len = len; - return code; + raw->raw = buf; + raw->raw_len = len; + return code; FAILED: tEncoderClear(&encoder); taosMemoryFree(buf); From 28aae929d4e72d2ecf096ea3ee5441726a70eb5e Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 25 Nov 2024 17:03:02 +0800 Subject: [PATCH 09/76] support subscript --- source/client/src/clientRawBlockWrite.c | 45 ++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 30d4cdb573..924b1ce202 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -763,7 +763,50 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { break; } case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: { - + int32_t nTags = taosArrayGetSize(vAlterTbReq.pMultiTag); + if (nTags <= 0) { + uError("processAlterTable parse multi tags error"); + goto end; + } + + cJSON* tags = cJSON_CreateArray(); + RAW_NULL_CHECK(tags); + for (int32_t i = 0; i < nTags; i++) { + SMultiTagUpateVal* pTagVal = taosArrayGet(vAlterTbReq.pMultiTag, i); + cJSON* tagName = cJSON_CreateString(pTagVal->tagName); + RAW_NULL_CHECK(tagName); + RAW_FALSE_CHECK(cJSON_AddItemToObject(tags, "colName", tagName)); + + if (pTagVal->tagType == TSDB_DATA_TYPE_JSON) { + uError("processAlterTable isJson false"); + goto end; + } + bool isNull = pTagVal->isNull; + if (!isNull) { + char* buf = NULL; + int64_t bufSize = 0; + if (pTagVal->tagType == TSDB_DATA_TYPE_VARBINARY) { + bufSize = pTagVal->nTagVal * 2 + 2 + 3; + } else { + bufSize = pTagVal->nTagVal + 3; + } + buf = taosMemoryCalloc(bufSize, 1); + RAW_NULL_CHECK(buf); + if (dataConverToStr(buf, bufSize, pTagVal->tagType, pTagVal->pTagVal, pTagVal->nTagVal, NULL) != + TSDB_CODE_SUCCESS) { + taosMemoryFree(buf); + goto end; + } + cJSON* colValue = cJSON_CreateString(buf); + RAW_NULL_CHECK(colValue); + RAW_FALSE_CHECK(cJSON_AddItemToObject(tags, "colValue", colValue)); + taosMemoryFree(buf); + } + cJSON* 
isNullCJson = cJSON_CreateBool(isNull); + RAW_NULL_CHECK(isNullCJson); + RAW_FALSE_CHECK(cJSON_AddItemToObject(tags, "colValueNull", isNullCJson)); + } + RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); break; } From 6e394c634eb38050c2709f28fc3423bfb5dacdb7 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 17:05:21 +0800 Subject: [PATCH 10/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 18 +++++++++++++++++- tests/parallel_test/tdgpt_cases.task | 6 ++++++ 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 tests/parallel_test/tdgpt_cases.task diff --git a/Jenkinsfile2 b/Jenkinsfile2 index f44339c8c1..b431deb1cf 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -465,6 +465,9 @@ pipeline { steps { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { timeout(time: 126, unit: 'MINUTES'){ + if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { + echo "skip windows test because of forecastoperator.c or anomalywindowoperator.c" + } else { pre_test_win() pre_test_build_win() run_win_ctest() @@ -478,6 +481,9 @@ pipeline { steps { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { timeout(time: 60, unit: 'MINUTES'){ + if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { + echo "skip mac test because of forecastoperator.c or anomalywindowoperator.c" + } else { pre_test() pre_test_build_mac() } @@ -550,12 +556,22 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - sh ''' + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { +] sh ''' + cd ${WKC}/tests/parallel_test + export DEFAULT_RETRY_TIME=2 + date + ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' + ''' + } else { + sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 date ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' ''' + } + } } } diff --git a/tests/parallel_test/tdgpt_cases.task b/tests/parallel_test/tdgpt_cases.task new file mode 100644 index 0000000000..e028d13fb8 --- /dev/null +++ b/tests/parallel_test/tdgpt_cases.task @@ -0,0 +1,6 @@ +#Column Define +#caseID,rerunTimes,Run with Sanitizer,casePath,caseCommand +#NA,NA,y or n,script,./test.sh -f tsim/user/basic.sim + +#tdgpt-test +,,y,script,./test.sh -f tsim/query/timeline.sim From 6159a06ee7f4543318235133cfe01c798f95fe23 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 17:16:58 +0800 Subject: [PATCH 11/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index b431deb1cf..4cbdfb8239 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -472,6 +472,7 @@ pipeline { pre_test_build_win() run_win_ctest() run_win_test() + } } } } @@ -486,6 +487,7 @@ pipeline { } else { pre_test() pre_test_build_mac() + } } } } @@ -556,22 +558,21 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { -] sh ''' - cd ${WKC}/tests/parallel_test - 
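+                        # tdgpt source files (forecastoperator.c / anomalywindowoperator.c) changed:
+                        # run the reduced tdgpt_cases.task suite here; the else branch below runs the full cases.task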
export DEFAULT_RETRY_TIME=2 - date - ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' - ''' + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/ ) { + sh ''' + cd ${WKC}/tests/parallel_test + export DEFAULT_RETRY_TIME=2 + date + ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' + ''' } else { sh ''' - cd ${WKC}/tests/parallel_test - export DEFAULT_RETRY_TIME=2 - date - ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' + cd ${WKC}/tests/parallel_test + export DEFAULT_RETRY_TIME=2 + date + ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' ''' } - } } } From 7688c3b7d98efb809498fce75450e2ff264b2ad5 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 17:23:55 +0800 Subject: [PATCH 12/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 14 ++++++++------ source/libs/executor/src/forecastoperator.c | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 4cbdfb8239..b151930891 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -466,12 +466,14 @@ pipeline { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { timeout(time: 126, unit: 'MINUTES'){ if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { - echo "skip windows test because of forecastoperator.c or anomalywindowoperator.c" + sh ''' + echo "skip windows test because of tdgpt codes changed" + ''' } else { - pre_test_win() - pre_test_build_win() - run_win_ctest() - run_win_test() + pre_test_win() + pre_test_build_win() + run_win_ctest() + run_win_test() } } } @@ -483,7 +485,7 @@ pipeline { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { timeout(time: 60, unit: 'MINUTES'){ if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { - echo "skip mac test because of forecastoperator.c or anomalywindowoperator.c" + echo "skip mac test because of tdgpt codes changed" } else { pre_test() pre_test_build_mac() diff --git a/source/libs/executor/src/forecastoperator.c b/source/libs/executor/src/forecastoperator.c index bf1efc54ca..a56b0dd214 100644 --- a/source/libs/executor/src/forecastoperator.c +++ b/source/libs/executor/src/forecastoperator.c @@ -169,6 +169,7 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp) { code = taosAnalBufWriteOptInt(pBuf, "start", start); if (code != 0) return code; + bool hasEvery = taosAnalGetOptInt(pSupp->algoOpt, "every", &every); if (!hasEvery) { qDebug("forecast every not found from %s, use %" PRId64, pSupp->algoOpt, every); From 1aaf254ee82e25c1b8ea28c4b4c957181b4665f1 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 17:31:38 +0800 Subject: [PATCH 13/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index b151930891..5f3acae08c 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -463,17 +463,19 @@ pipeline { WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test" } steps { - catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 126, unit: 
'MINUTES'){ - if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { - sh ''' - echo "skip windows test because of tdgpt codes changed" - ''' - } else { - pre_test_win() - pre_test_build_win() - run_win_ctest() - run_win_test() + script { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + timeout(time: 126, unit: 'MINUTES'){ + if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { + sh ''' + echo "skip windows test because of tdgpt codes changed" + ''' + } else { + pre_test_win() + pre_test_build_win() + run_win_ctest() + run_win_test() + } } } } From 0755bf85e0dee7f383a580534012689a471e8b1d Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 17:48:46 +0800 Subject: [PATCH 14/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 5f3acae08c..5a6868b907 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -467,9 +467,7 @@ pipeline { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { timeout(time: 126, unit: 'MINUTES'){ if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { - sh ''' - echo "skip windows test because of tdgpt codes changed" - ''' + echo "skip windows test because of tdgpt codes changed" } else { pre_test_win() pre_test_build_win() @@ -484,13 +482,15 @@ pipeline { stage('mac test') { agent{label " Mac_catalina "} steps { - catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 60, unit: 'MINUTES'){ - if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { - echo "skip mac test because of tdgpt codes changed" - } else { - pre_test() - pre_test_build_mac() + script { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + timeout(time: 60, unit: 'MINUTES'){ + if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { + echo "skip mac test because of tdgpt codes changed" + } else { + pre_test() + pre_test_build_mac() + } } } } From c2ca865f2c5d0b212bbfef257a765f6588c74afe Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 25 Nov 2024 18:52:34 +0800 Subject: [PATCH 15/76] support subscript --- source/common/src/tmsg.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index fd3af20ac1..b6912f457a 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10511,7 +10511,7 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) { TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, pReq->pTagVal, pReq->nTagVal)); } break; - case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: + case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: { int32_t nTags = taosArrayGetSize(pReq->pMultiTag); TAOS_CHECK_EXIT(tEncodeI32v(pEncoder, nTags)); for (int32_t i = 0; i < nTags; i++) { @@ -10525,6 +10525,7 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) { } } break; + } case TSDB_ALTER_TABLE_UPDATE_OPTIONS: TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pReq->updateTTL)); if (pReq->updateTTL) { From 81d177d52d832adfe73756cd3f77963f1159a44f Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 25 Nov 2024 19:17:56 +0800 Subject: [PATCH 16/76] support subscript --- source/libs/parser/src/parTranslater.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index ebb6693508..b4452172d8 100755 --- 
a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -15192,11 +15192,12 @@ static int32_t buildUpdateTagValReqImpl2(STranslateContext* pCxt, SAlterTableStm pReq->tagName = taosStrdup(colName); if (NULL == pReq->tagName) { - return terrno; + TAOS_CHECK_GOTO(terrno, NULL, _err); } + pReq->pTagArray = taosArrayInit(1, sizeof(STagVal)); if (NULL == pReq->pTagArray) { - return terrno; + TAOS_CHECK_GOTO(terrno, NULL, _err); } pReq->colId = pSchema->colId; pReq->tagType = pSchema->type; @@ -15246,7 +15247,11 @@ static int32_t buildUpdateTagValReqImpl2(STranslateContext* pCxt, SAlterTableStm } } } - +_err: + if (code != 0) { + taosArrayDestroy(pReq->pTagArray); + taosMemoryFree(pReq->tagName); + } return code; } static int32_t buildUpdateTagValReqImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, @@ -15363,7 +15368,7 @@ static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStm SAlterTableStmt* pTagStmt = NULL; SNode* pNode = NULL; FOREACH(pNode, pNodeList) { - SMultiTagUpateVal val; + SMultiTagUpateVal val = {0}; pTagStmt = (SAlterTableStmt*)pNode; code = buildUpdateTagValReqImpl2(pCxt, pTagStmt, pTableMeta, pTagStmt->colName, &val); if (TSDB_CODE_SUCCESS != code) { From 9656ec4c80483cd6d0277fe79834933118dee24d Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Mon, 25 Nov 2024 19:53:57 +0800 Subject: [PATCH 17/76] fix: misspelling --- tests/script/tsim/query/cache_last.sim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/script/tsim/query/cache_last.sim b/tests/script/tsim/query/cache_last.sim index 0a30bbd325..64abdaef77 100644 --- a/tests/script/tsim/query/cache_last.sim +++ b/tests/script/tsim/query/cache_last.sim @@ -76,11 +76,11 @@ if $data00 != @-> Data Exchange 2:1 (width=296)@ then return -1 endi sql explain select count(*), last_row(f1), min(f1),t1 from sta partition by t1; -if $data00 != @-> Aggragate (functions=4 width=28 input_order=desc )@ then +if $data00 != @-> Aggregate (functions=4 width=28 input_order=desc )@ then return -1 endi sql explain select count(*), last_row(f1), min(f1),t1 from sta group by t1; -if $data00 != @-> Aggragate (functions=4 width=28 input_order=desc )@ then +if $data00 != @-> Aggregate (functions=4 width=28 input_order=desc )@ then return -1 endi sql explain select distinct count(*), last_row(f1), min(f1) from sta; From 601d2f1c088a04f3cb471cd5d19d920e824869f3 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 20:04:30 +0800 Subject: [PATCH 18/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 23 +++++++++++++++++------ tests/army/tmq/a.py | 13 +++++++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) create mode 100644 tests/army/tmq/a.py diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 5a6868b907..f163767557 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -6,6 +6,7 @@ node { file_zh_changed = '' file_en_changed = '' file_no_doc_changed = '1' +file_only_tdgpt_change_except = '1' def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -73,6 +74,14 @@ def check_docs(){ ''', returnStdout: true ).trim() + + file_only_tdgpt_change_except = sh ( + script: ''' + cat ${file_no_doc_changed} |grep -v "forecastoperator.c\\|anomalywindowoperator.c" || : + ''', + returnStdout: true + ).trim() + echo "file_zh_changed: ${file_zh_changed}" echo "file_en_changed: ${file_en_changed}" echo "file_no_doc_changed: ${file_no_doc_changed}" @@ -570,12 
+579,14 @@ pipeline { ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' ''' } else { - sh ''' - cd ${WKC}/tests/parallel_test - export DEFAULT_RETRY_TIME=2 - date - ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' - ''' + if ( file_only_tdgpt_change_except != '' ) { + sh ''' + cd ${WKC}/tests/parallel_test + export DEFAULT_RETRY_TIME=2 + date + ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' + ''' + } } } } diff --git a/tests/army/tmq/a.py b/tests/army/tmq/a.py new file mode 100644 index 0000000000..9236e3a5b3 --- /dev/null +++ b/tests/army/tmq/a.py @@ -0,0 +1,13 @@ +import click + +@click.command() +@click.option('--count', default=1, help='Number of greetings.') +@click.option('--name', prompt='Your name', + help='The person to greet.') +def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for x in range(count): + click.echo('Hello %s!' % name) + +if __name__ == '__main__': + hello() \ No newline at end of file From 041bd8a8dadf6410bc7e005d38214072f908be26 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 20:07:56 +0800 Subject: [PATCH 19/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index f163767557..5679015e32 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -471,6 +471,10 @@ pipeline { WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community" WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test" } + when { + beforeAgent true + expression { file_only_tdgpt_change_except != '' } + } steps { script { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { @@ -490,6 +494,10 @@ pipeline { } stage('mac test') { agent{label " Mac_catalina "} + when { + beforeAgent true + expression { file_only_tdgpt_change_except != '' } + } steps { script { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { From 7c1b9e3b4c85a86c75bbafec308b15c3bdcbe6f8 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 20:25:33 +0800 Subject: [PATCH 20/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 5679015e32..0b127279be 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -415,6 +415,7 @@ pipeline { } stage ('pre for build docs') { + agent any when { beforeAgent true expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' } @@ -426,6 +427,7 @@ pipeline { } stage('build Docs') { + agent any when { beforeAgent true expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' } @@ -458,6 +460,7 @@ pipeline { } stage('run test') { + agent any when { expression { file_no_doc_changed != '' && env.CHANGE_TARGET != 'docs-cloud' From 8a10fd4f7ef004e47aa6c101cc4a07f958a3ef58 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 25 Nov 2024 20:27:48 +0800 Subject: [PATCH 21/76] refactor code --- include/common/tmsg.h | 1 + source/common/src/tmsg.c | 12 +++++++ source/libs/parser/src/parTranslater.c | 47 ++++++++++++++------------ 3 files changed, 39 insertions(+), 21 deletions(-) diff --git 
a/include/common/tmsg.h b/include/common/tmsg.h index bcbae641f2..26b15c2b76 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -3304,6 +3304,7 @@ typedef struct { int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq); int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq); int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs); +void tfreeMultiTagUpateVal(void* pMultiTag); typedef struct { int32_t code; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index b6912f457a..a9883e7f6c 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10678,6 +10678,18 @@ _exit: return code; } +void tfreeMultiTagUpateVal(void *val) { + SMultiTagUpateVal *pTag = val; + taosMemoryFree(pTag->tagName); + for (int i = 0; i < taosArrayGetSize(pTag->pTagArray); ++i) { + STagVal *p = (STagVal *)taosArrayGet(pTag->pTagArray, i); + if (IS_VAR_DATA_TYPE(p->type)) { + taosMemoryFreeClear(p->pData); + } + } + + taosArrayDestroy(pTag->pTagArray); +} int32_t tEncodeSVAlterTbRsp(SEncoder *pEncoder, const SVAlterTbRsp *pRsp) { int32_t code = 0; int32_t lino; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index b4452172d8..c0f6ab124a 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -15340,9 +15340,10 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, SVAlterTbReq* pReq) { - SName tbName = {0}; - SArray* pTsmas = NULL; - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; + SName tbName = {0}; + SArray* pTsmas = NULL; + SHashObj* pUnique = NULL; if (pCxt->pMetaCache) { toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tbName); code = getTableTsmasFromCache(pCxt->pMetaCache, &tbName, &pTsmas); @@ -15365,21 +15366,37 @@ static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStm return terrno; } + pUnique = taosHashInit(nTagValues, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (pUnique == NULL) { + TAOS_CHECK_GOTO(terrno, NULL, _err); + } + SAlterTableStmt* pTagStmt = NULL; SNode* pNode = NULL; + int8_t dummpy = 0; FOREACH(pNode, pNodeList) { SMultiTagUpateVal val = {0}; pTagStmt = (SAlterTableStmt*)pNode; - code = buildUpdateTagValReqImpl2(pCxt, pTagStmt, pTableMeta, pTagStmt->colName, &val); - if (TSDB_CODE_SUCCESS != code) { - return code; + + SMultiTagUpateVal* p = taosHashGet(pUnique, pTagStmt->colName, strlen(pTagStmt->colName)); + if (p) { + code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN); + TAOS_CHECK_GOTO(code, NULL, _err); } + + code = taosHashPut(pUnique, pTagStmt->colName, strlen(pTagStmt->colName), &dummpy, sizeof(dummpy)); + TAOS_CHECK_GOTO(code, NULL, _err); + + code = buildUpdateTagValReqImpl2(pCxt, pTagStmt, pTableMeta, pTagStmt->colName, &val); + TAOS_CHECK_GOTO(code, NULL, _err); + if (taosArrayPush(pReq->pMultiTag, &val) == NULL) { - return terrno; + TAOS_CHECK_GOTO(terrno, NULL, _err); } } } - +_err: + taosHashCleanup(pUnique); return code; } @@ -15661,18 +15678,6 @@ static int32_t buildModifyVnodeArray(STranslateContext* pCxt, SAlterTableStmt* p return code; } -static void deleTagVal(void* val) { - SMultiTagUpateVal* pTag = val; - taosMemoryFree(pTag->tagName); - for (int i = 0; i < taosArrayGetSize(pTag->pTagArray); ++i) { - STagVal* p 
= (STagVal*)taosArrayGet(pTag->pTagArray, i); - if (IS_VAR_DATA_TYPE(p->type)) { - taosMemoryFreeClear(p->pData); - } - } - - taosArrayDestroy(pTag->pTagArray); -} static void destoryAlterTbReq(SVAlterTbReq* pReq) { taosMemoryFree(pReq->tbName); taosMemoryFree(pReq->colName); @@ -15686,7 +15691,7 @@ static void destoryAlterTbReq(SVAlterTbReq* pReq) { } } if (pReq->action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL) { - taosArrayDestroyEx(pReq->pMultiTag, deleTagVal); + taosArrayDestroyEx(pReq->pMultiTag, tfreeMultiTagUpateVal); } taosArrayDestroy(pReq->pTagArray); From 8db01e203a666b5e69401e6009cc7aabf54e7dcd Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 20:35:03 +0800 Subject: [PATCH 22/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 0b127279be..799cee70be 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -394,7 +394,7 @@ def run_win_test() { } pipeline { - agent none + agent any options { skipDefaultCheckout() } environment{ WKDIR = '/var/lib/jenkins/workspace' @@ -415,7 +415,6 @@ pipeline { } stage ('pre for build docs') { - agent any when { beforeAgent true expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' } @@ -427,7 +426,6 @@ pipeline { } stage('build Docs') { - agent any when { beforeAgent true expression { env.CHANGE_BRANCH =~ /(?i)doc.*/ || file_zh_changed != '' || file_en_changed != '' } @@ -460,7 +458,6 @@ pipeline { } stage('run test') { - agent any when { expression { file_no_doc_changed != '' && env.CHANGE_TARGET != 'docs-cloud' From 50a20292ddd5ba2caab20fb9d0c0b9cb5bd2b7f9 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 20:43:46 +0800 Subject: [PATCH 23/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 1 + 1 file changed, 1 insertion(+) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 799cee70be..3a56695df9 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -85,6 +85,7 @@ def check_docs(){ echo "file_zh_changed: ${file_zh_changed}" echo "file_en_changed: ${file_en_changed}" echo "file_no_doc_changed: ${file_no_doc_changed}" + echo "file_only_tdgpt_change_except: ${file_only_tdgpt_change_except}" } } From 50aa05cfcc40a21cf4604665ba1e47496820b780 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 20:52:07 +0800 Subject: [PATCH 24/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 3a56695df9..f02c036563 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -77,7 +77,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' - cat ${file_no_doc_changed} |grep -v "forecastoperator.c\\|anomalywindowoperator.c" || : + echo ${file_no_doc_changed} |grep -v "forecastoperator.c\\|anomalywindowoperator.c" || : ''', returnStdout: true ).trim() From d390aec7ced8b0ac264b34433a90d8c08d95131c Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 25 Nov 2024 20:58:21 +0800 Subject: [PATCH 25/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index f02c036563..0e816a3f05 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -77,7 +77,8 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' - echo ${file_no_doc_changed} |grep -v "forecastoperator.c\\|anomalywindowoperator.c" || : + cd ${WKC} + git --no-pager diff --name-only 
FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" || : ''', returnStdout: true ).trim() From 5a65b1977ed39a54c518f5bc5ae85e1fddafc5c0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 09:03:00 +0800 Subject: [PATCH 26/76] refactor code --- source/common/src/tmsg.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index a9883e7f6c..d51c981331 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10592,7 +10592,7 @@ static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq) TAOS_CHECK_EXIT(tDecodeBinary(pDecoder, &pReq->pTagVal, &pReq->nTagVal)); } break; - case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: + case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: { int32_t nTags; TAOS_CHECK_EXIT(tDecodeI32v(pDecoder, &nTags)); pReq->pMultiTag = taosArrayInit(nTags, sizeof(SMultiTagUpateVal)); @@ -10613,6 +10613,7 @@ static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq) } } break; + } case TSDB_ALTER_TABLE_UPDATE_OPTIONS: TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pReq->updateTTL)); if (pReq->updateTTL) { From 9248276ff3c15e89a98656e614879d6b71e8a7ce Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 26 Nov 2024 09:20:06 +0800 Subject: [PATCH 27/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 0e816a3f05..c1e03b3440 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -478,18 +478,12 @@ pipeline { expression { file_only_tdgpt_change_except != '' } } steps { - script { - catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 126, unit: 'MINUTES'){ - if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { - echo "skip windows test because of tdgpt codes changed" - } else { - pre_test_win() - pre_test_build_win() - run_win_ctest() - run_win_test() - } - } + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + timeout(time: 126, unit: 'MINUTES'){ + pre_test_win() + pre_test_build_win() + run_win_ctest() + run_win_test() } } } @@ -501,16 +495,10 @@ pipeline { expression { file_only_tdgpt_change_except != '' } } steps { - script { - catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 60, unit: 'MINUTES'){ - if (file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/) { - echo "skip mac test because of tdgpt codes changed" - } else { - pre_test() - pre_test_build_mac() - } - } + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + timeout(time: 60, unit: 'MINUTES'){ + pre_test() + pre_test_build_mac() } } } From 3bae6751080befee710a4e3b48d27d1581743ef8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 09:45:04 +0800 Subject: [PATCH 28/76] refactor code --- tests/script/tsim/tag/change_multi_tag.sim | 846 +++++++++++++++++++++ 1 file changed, 846 insertions(+) create mode 100644 tests/script/tsim/tag/change_multi_tag.sim diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim new file mode 100644 index 0000000000..93ed8e633d --- /dev/null +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -0,0 +1,846 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======================== dnode1 
start + +$dbPrefix = ta_ad_db +$tbPrefix = ta_ad_tb +$mtPrefix = ta_ad_mt +$tbNum = 10 +$rowNum = 20 +$totalNum = 200 + +print =============== step1 +$i = 0 +$db = $dbPrefix . $i + +sql create database $db +sql use $db + +print =============== step2 +$i = 2 +$mt = $mtPrefix . $i +$tb = $tbPrefix . $i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tagCol2 tinyint, tagCol3 smallint, tagCol4 int, tagCol5 bigint, tagCol6 nchar(10), tagCol7 binary(8)) +sql create table $tb using $mt tags( 1, 2, 3, 5, "test", "test") +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol2 = 2 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +sql alter table +sql alter table $mt drop tag tgcol2 +sql alter table $mt add tag tgcol4 int +sql reset query cache +sql alter table $tb set tag tgcol4 =4 +sql reset query cache + +sql select * from $mt where tgcol4 = 4 +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 4 then + return -1 +endi + +sql select * from $mt where tgcol2 = 1 -x step2 + return -1 +step2: + +print =============== step3 +$i = 3 +$mt = $mtPrefix . $i +$tb = $tbPrefix . $i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint) +sql create table $tb using $mt tags( 1, 2 ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol2 = 2 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi + +sql alter table $mt drop tag tgcol2 +sql alter table $mt add tag tgcol4 tinyint +sql reset query cache +sql alter table $tb set tag tgcol4=4 +sql reset query cache + +sql select * from $mt where tgcol4 = 4 +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 4 then + return -1 +endi + +sql select * from $mt where tgcol2 = 1 -x step3 + return -1 +step3: + +print =============== step4 +$i = 4 +$mt = $mtPrefix . $i +$tb = $tbPrefix . $i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float) +sql create table $tb using $mt tags( 1, 2 ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol2 = 2 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2.00000 then + return -1 +endi + +sql describe $tb +print sql describe $tb +if $data21 != BIGINT then + return -1 +endi +if $data31 != FLOAT then + return -1 +endi +if $data23 != TAG then + return -1 +endi +if $data33 != TAG then + return -1 +endi + +sql alter table $mt drop tag tgcol2 +sql alter table $mt add tag tgcol4 float +sql reset query cache +sql alter table $tb set tag tgcol4=4 +sql reset query cache + +sql select * from $mt where tgcol4 = 4 +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 4.00000 then + return -1 +endi + +sql select * from $mt where tgcol2 = 1 -x step4 + return -1 +step4: + +print =============== step5 +$i = 5 +$mt = $mtPrefix . $i +$tb = $tbPrefix . 
$i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10)) +sql create table $tb using $mt tags( 1, '2' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol2 = '2' +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1.000000000 then + return -1 +endi +if $data03 != 2 then + return -1 +endi + +sql alter table $mt drop tag tgcol2 +sql alter table $mt add tag tgcol4 smallint +sql reset query cache +sql alter table $tb set tag tgcol4=4 +sql reset query cache + +sql select * from $mt where tgcol4 = 4 +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1.000000000 then + return -1 +endi +if $data03 != 4 then + return -1 +endi + +sql select * from $mt where tgcol3 = '1' -x step5 + return -1 +step5: + +print =============== step6 +$i = 6 +$mt = $mtPrefix . $i +$tb = $tbPrefix . $i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 tinyint) +sql create table $tb using $mt tags( 1, 2, 3 ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol2 = 2 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +if $data04 != 3 then + return -1 +endi + +sql alter table $mt rename tag tgcol1 tgcol4 +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol3 +sql alter table $mt add tag tgcol5 binary(10) +sql alter table $mt add tag tgcol6 binary(10) + +sql reset query cache +sql alter table $tb set tag tgcol4=false +sql alter table $tb set tag tgcol5='5' +sql alter table $tb set tag tgcol6='6' +sql reset query cache + +sql select * from $mt where tgcol5 = '5' +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != 5 then + return -1 +endi +if $data04 != 6 then + return -1 +endi + +sql select * from $mt where tgcol6 = '6' +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != 5 then + return -1 +endi +if $data04 != 6 then + return -1 +endi + +sql select * from $mt where tgcol4 = 1 +if $rows != 0 then + return -1 +endi +sql select * from $mt where tgcol3 = 1 -x step52 + return -1 +step52: + +print =============== step7 +$i = 7 +$mt = $mtPrefix . $i +$tb = $tbPrefix . 
$i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint, tgcol3 binary(10)) +sql create table $tb using $mt tags( 1, 2, '3' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol3 = '3' +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +if $data04 != 3 then + return -1 +endi + +sql alter table $mt rename tag tgcol1 tgcol4 +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol3 +sql alter table $mt add tag tgcol5 bigint +sql alter table $mt add tag tgcol6 tinyint + +sql reset query cache +sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol5=5 +sql alter table $tb set tag tgcol6=6 +sql reset query cache + +sql select * from $mt where tgcol6 = 6 +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data03 != 5 then + return -1 +endi +if $data04 != 6 then + return -1 +endi + +sql select * from $mt where tgcol2 = 1 -x step71 + return -1 +step71: +sql select * from $mt where tgcol3 = 1 -x step72 + return -1 +step72: + +print =============== step8 +$i = 8 +$mt = $mtPrefix . $i +$tb = $tbPrefix . $i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float, tgcol3 binary(10)) +sql create table $tb using $mt tags( 1, 2, '3' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol3 = '3' +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2.00000 then + return -1 +endi +if $data04 != 3 then + return -1 +endi + +sql alter table $mt rename tag tgcol1 tgcol4 +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol3 +sql alter table $mt add tag tgcol5 binary(17) +sql alter table $mt add tag tgcol6 bool +sql reset query cache +sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol5='5' +sql alter table $tb set tag tgcol6='1' +sql reset query cache + +sql select * from $mt where tgcol5 = '5' +print select * from $mt where tgcol5 = 5 +print $data01 $data02 $data03 $data04 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data03 != 5 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +sql select * from $mt where tgcol2 = 1 -x step81 + return -1 +step81: +sql select * from $mt where tgcol3 = 1 -x step82 + return -1 +step82: + +print =============== step9 +$i = 9 +$mt = $mtPrefix . $i +$tb = $tbPrefix . 
$i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10), tgcol3 binary(10)) +sql create table $tb using $mt tags( 1, '2', '3' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol2 = '2' +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1.000000000 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +if $data04 != 3 then + return -1 +endi + +sql alter table $mt rename tag tgcol1 tgcol4 +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol3 +sql alter table $mt add tag tgcol5 bool +sql alter table $mt add tag tgcol6 float + +sql reset query cache +sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol5=1 +sql alter table $tb set tag tgcol6=6 +sql reset query cache + +sql select * from $mt where tgcol5 = 1 +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 4.000000000 then + return -1 +endi +if $data03 != 1 then + return -1 +endi +if $data04 != 6.00000 then + return -1 +endi + +sql select * from $mt where tgcol3 = 1 -x step91 + return -1 +step91: +sql select * from $mt where tgcol2 = 1 -x step92 + return -1 +step92: + +print =============== step10 +$i = 10 +$mt = $mtPrefix . $i +$tb = $tbPrefix . $i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 binary(10), tgcol3 binary(10), tgcol4 binary(10)) +sql create table $tb using $mt tags( '1', '2', '3', '4' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol4 = '4' +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +if $data04 != 3 then + return -1 +endi +if $data05 != 4 then + return -1 +endi + +sql alter table $mt rename tag tgcol1 tgcol4 -x step103 + return -1 +step103: + +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol3 +sql alter table $mt drop tag tgcol4 +sql reset query cache +sql alter table $mt add tag tgcol4 binary(10) +sql alter table $mt add tag tgcol5 bool + +sql reset query cache +sql alter table $tb set tag tgcol4='4' +sql alter table $tb set tag tgcol5=false +sql reset query cache + +sql select * from $mt where tgcol4 = '4' +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 4 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != null then + return -1 +endi + +sql select * from $mt where tgcol2 = 1 -x step101 + return -1 +step101: +sql select * from $mt where tgcol3 = 1 -x step102 + return -1 +step102: + +print =============== step11 +$i = 11 +$mt = $mtPrefix . $i +$tb = $tbPrefix . 
$i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 smallint, tgcol4 float, tgcol5 binary(10)) +sql create table $tb using $mt tags( 1, 2, 3, 4, '5' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol1 = 1 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +if $data04 != 3 then + return -1 +endi +if $data05 != 4.00000 then + return -1 +endi +if $data06 != 5 then + return -1 +endi + +sql alter table $mt rename tag tgcol1 tgcol4 -x step114 + return -1 +step114: + +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol3 +sql alter table $mt drop tag tgcol4 +sql alter table $mt drop tag tgcol5 +sql reset query cache +sql alter table $mt add tag tgcol4 binary(10) +sql alter table $mt add tag tgcol5 int +sql alter table $mt add tag tgcol6 binary(10) +sql alter table $mt add tag tgcol7 bigint +sql alter table $mt add tag tgcol8 smallint + +sql reset query cache +sql alter table $tb set tag tgcol4='4' +sql alter table $tb set tag tgcol5=5 +sql alter table $tb set tag tgcol6='6' +sql alter table $tb set tag tgcol7=7 +sql alter table $tb set tag tgcol8=8 +sql reset query cache + +sql select * from $mt where tgcol5 =5 +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 4 then + return -1 +endi +if $data04 != 5 then + return -1 +endi +if $data05 != 6 then + return -1 +endi +if $data06 != 7 then + return -1 +endi +if $data07 != 8 then + return -1 +endi + +sql select * from $mt where tgcol2 = 1 -x step111 + return -1 +step111: +sql select * from $mt where tgcol3 = 1 -x step112 + return -1 +step112: +sql select * from $mt where tgcol9 = 1 -x step113 + return -1 +step113: + +print =============== step12 +$i = 12 +$mt = $mtPrefix . $i +$tb = $tbPrefix . 
$i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 smallint, tgcol3 float, tgcol4 double, tgcol5 binary(10), tgcol6 binary(20)) +sql create table $tb using $mt tags( 1, 2, 3, 4, '5', '6' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol1 = 1 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +if $data04 != 3.00000 then + return -1 +endi +if $data05 != 4.000000000 then + return -1 +endi +if $data06 != 5 then + return -1 +endi +if $data07 != 6 then + return -1 +endi + +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol3 +sql alter table $mt drop tag tgcol4 +sql alter table $mt drop tag tgcol5 +sql reset query cache +sql alter table $mt add tag tgcol2 binary(10) +sql alter table $mt add tag tgcol3 int +sql alter table $mt add tag tgcol4 binary(10) +sql alter table $mt add tag tgcol5 bigint + +sql reset query cache +sql alter table $tb set tag tgcol1=false +sql alter table $tb set tag tgcol2='5' +sql alter table $tb set tag tgcol3=4 +sql alter table $tb set tag tgcol4='3' +sql alter table $tb set tag tgcol5=2 +sql alter table $tb set tag tgcol6='1' +sql reset query cache + +sql select * from $mt where tgcol4 = '3' +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != 1 then + return -1 +endi +if $data04 != 5 then + return -1 +endi +if $data05 != 4 then + return -1 +endi +if $data06 != 3 then + return -1 +endi +if $data07 != 2 then + return -1 +endi + +sql select * from $mt where tgcol2 = '5' +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tgcol3 = 4 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tgcol5 = 2 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tgcol6 = '1' +if $rows != 1 then + return -1 +endi + +print =============== step13 +$i = 13 +$mt = $mtPrefix . $i +$tb = $tbPrefix . 
$i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20)) +sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) +sql insert into $tb values(now, 1) +sql select * from $mt where tgcol1 = '1' +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data03 != 2 then + return -1 +endi +if $data04 != 3 then + return -1 +endi +if $data05 != 4 then + return -1 +endi +if $data06 != 5.000000000 then + return -1 +endi +if $data07 != 6 then + return -1 +endi + +sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol4 +sql alter table $mt drop tag tgcol6 +sql reset query cache +sql alter table $mt add tag tgcol2 binary(10) +sql alter table $mt add tag tgcol4 int +sql alter table $mt add tag tgcol6 bigint + +sql reset query cache +sql alter table $tb set tag tgcol1='7' +sql alter table $tb set tag tgcol2='8' +sql alter table $tb set tag tgcol3=9 +sql alter table $tb set tag tgcol4=10 +sql alter table $tb set tag tgcol5=11 +sql alter table $tb set tag tgcol6=12 +sql reset query cache + +sql select * from $mt where tgcol2 = '8' +print $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data02 != 7 then + return -1 +endi +if $data03 != 9 then + return -1 +endi +if $data04 != 11.000000000 then + return -1 +endi +if $data05 != 8 then + return -1 +endi +if $data06 != 10 then + return -1 +endi +if $data07 != 12 then + return -1 +endi + +print =============== step14 +$i = 14 +$mt = $mtPrefix . $i +$tb = $tbPrefix . $i +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 bigint) +sql create table $tb using $mt tags( 1, 1 ) +sql insert into $tb values(now, 1) + +sql alter table $mt add tag tgcol3 binary(10) +sql alter table $mt add tag tgcol4 int +sql alter table $mt add tag tgcol5 bigint +sql alter table $mt add tag tgcol6 bigint + + sql reset query cache +sql alter table $mt drop tag tgcol6 +sql alter table $mt add tag tgcol7 bigint +sql alter table $mt add tag tgcol8 bigint + +print =============== clear +sql drop database $db +sql select * from information_schema.ins_databases +if $rows != 2 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From d50ee39daba968991d3d1b17cfa7c85e281b426a Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 26 Nov 2024 10:01:09 +0800 Subject: [PATCH 29/76] fix: basic1.sim case --- tests/script/tsim/db/basic1.sim | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/script/tsim/db/basic1.sim b/tests/script/tsim/db/basic1.sim index 8eb6dce759..9a6d3001ef 100644 --- a/tests/script/tsim/db/basic1.sim +++ b/tests/script/tsim/db/basic1.sim @@ -108,6 +108,8 @@ if $data30 != 12 then endi print =============== show vnodes on dnode 1 +print =============== Wait for the synchronization status of vnode and Mnode, heartbeat for one second +sleep 1000 sql show vnodes on dnode 1 if $rows != 9 then return -1 From 6d182355d5cbf0afeedfa835cb4998ba4a964023 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 26 Nov 2024 10:28:35 +0800 Subject: [PATCH 30/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index c1e03b3440..45da12ad76 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -576,32 +576,18 @@ pipeline { date ''' + timeout_cmd + ''' time ./run.sh -e 
-m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' ''' - } else { - if ( file_only_tdgpt_change_except != '' ) { - sh ''' - cd ${WKC}/tests/parallel_test - export DEFAULT_RETRY_TIME=2 - date - ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' - ''' - } + } + if ( file_only_tdgpt_change_except != '' ) { + sh ''' + cd ${WKC}/tests/parallel_test + export DEFAULT_RETRY_TIME=2 + date + ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' + ''' } } } } - /*catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 15, unit: 'MINUTES'){ - script { - sh ''' - echo "packaging ..." - date - rm -rf ${WKC}/release/* - cd ${WKC}/packaging - ./release.sh -v cluster -n 3.0.0.100 -s static - ''' - } - } - }*/ } } } From 34ca4ca81f4ec3454f858cdb92eea4e84023933a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 11:17:01 +0800 Subject: [PATCH 31/76] update test case --- tests/parallel_test/cases.task | 1 + tests/script/tsim/tag/change_multi_tag.sim | 824 +-------------------- 2 files changed, 40 insertions(+), 785 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 492dd11177..5dacf44ae8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1576,6 +1576,7 @@ ,,y,script,./test.sh -f tsim/tag/tinyint.sim ,,y,script,./test.sh -f tsim/tag/drop_tag.sim ,,y,script,./test.sh -f tsim/tag/tbNameIn.sim +,,y,script,./test.sh -f tsim/tag/change_multi_tag.sim ,,y,script,./test.sh -f tmp/monitor.sim ,,y,script,./test.sh -f tsim/tagindex/add_index.sim ,,n,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim index 93ed8e633d..b7d655ad1c 100644 --- a/tests/script/tsim/tag/change_multi_tag.sim +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -20,827 +20,81 @@ sql create database $db sql use $db print =============== step2 +$j = 3 $i = 2 $mt = $mtPrefix . $i $tb = $tbPrefix . $i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tagCol2 tinyint, tagCol3 smallint, tagCol4 int, tagCol5 bigint, tagCol6 nchar(10), tagCol7 binary(8)) -sql create table $tb using $mt tags( 1, 2, 3, 5, "test", "test") +$tbj = $tbPrefix . 
$j +$ntable = tb_normal_table + +sql create table $mt (ts timestamp, tbcol int) TAGS(tagCol1 bool, tagCol2 tinyint, tagCol3 smallint, tagCol4 int, tagCol5 bigint, tagCol6 nchar(10), tagCol7 binary(8)) +sql create table $tb using $mt tags( 1, 2, 3, 5,7, "test", "test") +sql create table $tbj using $mt tags( 2, 3, 4, 6,8, "testj", "testj") +sql create table $ntable (ts timestamp, f int) + sql insert into $tb values(now, 1) -sql select * from $mt where tgcol2 = 2 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -sql alter table -sql alter table $mt drop tag tgcol2 -sql alter table $mt add tag tgcol4 int -sql reset query cache -sql alter table $tb set tag tgcol4 =4 -sql reset query cache - -sql select * from $mt where tgcol4 = 4 -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 4 then - return -1 -endi - -sql select * from $mt where tgcol2 = 1 -x step2 - return -1 -step2: - -print =============== step3 -$i = 3 -$mt = $mtPrefix . $i -$tb = $tbPrefix . $i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint) -sql create table $tb using $mt tags( 1, 2 ) sql insert into $tb values(now, 1) -sql select * from $mt where tgcol2 = 2 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then +sql_error alter table $mt set tag tgcol1 = 1,tagcol2 = 2, tag3 = 4 # set tag value on supertable +sql_error alter table $ntable set tag f = 10 # set normal table value +sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2 # dumplicate tag name +sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2 # not exist tag +sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx" +sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx", tagCol7="yyyyyyyyyyyyyyyyyyyyyyyyy" +sql alter table $tbj set tag tagCol1 = 100, tagCol2 = 100 + +sql select * from $mt where tagCol2 = 100 +if $rows != 0 then return -1 endi -sql alter table $mt drop tag tgcol2 -sql alter table $mt add tag tgcol4 tinyint -sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql reset query cache - -sql select * from $mt where tgcol4 = 4 -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 4 then +sql select * from $mt where tagCol1 = 1 +if $rows != 2 then return -1 endi -sql select * from $mt where tgcol2 = 1 -x step3 - return -1 -step3: +sql alter table $tbj set tag tagCol1=true,tagCol2=-1,tagcol3=-10, tagcol4=-100,tagcol5=-1000,tagCol6="empty",tagCol7="empty" +sql alter table $tb set tag tagCol1=0 -print =============== step4 -$i = 4 -$mt = $mtPrefix . $i -$tb = $tbPrefix . 
$i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float) -sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol2 = 2 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2.00000 then +sql select * from $mt where tagCol1 = true +if $rows != 0 then return -1 endi -sql describe $tb -print sql describe $tb -if $data21 != BIGINT then - return -1 -endi -if $data31 != FLOAT then - return -1 -endi -if $data23 != TAG then - return -1 -endi -if $data33 != TAG then +sql select * from $mt where tagCol2 = -1 +if $rows != 0 then return -1 endi -sql alter table $mt drop tag tgcol2 -sql alter table $mt add tag tgcol4 float -sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql reset query cache - -sql select * from $mt where tgcol4 = 4 -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 4.00000 then +sql select * from $mt where tagCol3 = -10 +if $rows != 0 then return -1 endi -sql select * from $mt where tgcol2 = 1 -x step4 - return -1 -step4: - -print =============== step5 -$i = 5 -$mt = $mtPrefix . $i -$tb = $tbPrefix . $i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10)) -sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol2 = '2' -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1.000000000 then - return -1 -endi -if $data03 != 2 then +sql select * from $mt where tagCol4 = -100 +if $rows != 0 then return -1 endi -sql alter table $mt drop tag tgcol2 -sql alter table $mt add tag tgcol4 smallint -sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql reset query cache - -sql select * from $mt where tgcol4 = 4 -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1.000000000 then - return -1 -endi -if $data03 != 4 then +sql select * from $mt where tagCol4 = -1000 +if $rows != 0 then return -1 endi -sql select * from $mt where tgcol3 = '1' -x step5 - return -1 -step5: - -print =============== step6 -$i = 6 -$mt = $mtPrefix . $i -$tb = $tbPrefix . 
$i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 tinyint) -sql create table $tb using $mt tags( 1, 2, 3 ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol2 = 2 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -if $data04 != 3 then +sql select * from $mt where tagCol5 = -10000 +if $rows != 0 then return -1 endi -sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol3 -sql alter table $mt add tag tgcol5 binary(10) -sql alter table $mt add tag tgcol6 binary(10) - -sql reset query cache -sql alter table $tb set tag tgcol4=false -sql alter table $tb set tag tgcol5='5' -sql alter table $tb set tag tgcol6='6' -sql reset query cache - -sql select * from $mt where tgcol5 = '5' -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 0 then - return -1 -endi -if $data03 != 5 then - return -1 -endi -if $data04 != 6 then +sql select * from $mt where tagCol6 = "empty" +if $rows != 0 then return -1 endi -sql select * from $mt where tgcol6 = '6' -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 0 then - return -1 -endi -if $data03 != 5 then - return -1 -endi -if $data04 != 6 then +sql select * from $mt where tagCol6 = "empty1" +if $rows != 0 then return -1 endi -sql select * from $mt where tgcol4 = 1 -if $rows != 0 then - return -1 -endi -sql select * from $mt where tgcol3 = 1 -x step52 - return -1 -step52: -print =============== step7 -$i = 7 -$mt = $mtPrefix . $i -$tb = $tbPrefix . $i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint, tgcol3 binary(10)) -sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol3 = '3' -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -if $data04 != 3 then - return -1 -endi - -sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol3 -sql alter table $mt add tag tgcol5 bigint -sql alter table $mt add tag tgcol6 tinyint - -sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql alter table $tb set tag tgcol5=5 -sql alter table $tb set tag tgcol6=6 -sql reset query cache - -sql select * from $mt where tgcol6 = 6 -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 4 then - return -1 -endi -if $data03 != 5 then - return -1 -endi -if $data04 != 6 then - return -1 -endi - -sql select * from $mt where tgcol2 = 1 -x step71 - return -1 -step71: -sql select * from $mt where tgcol3 = 1 -x step72 - return -1 -step72: - -print =============== step8 -$i = 8 -$mt = $mtPrefix . $i -$tb = $tbPrefix . 
$i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float, tgcol3 binary(10)) -sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol3 = '3' -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2.00000 then - return -1 -endi -if $data04 != 3 then - return -1 -endi - -sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol3 -sql alter table $mt add tag tgcol5 binary(17) -sql alter table $mt add tag tgcol6 bool -sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql alter table $tb set tag tgcol5='5' -sql alter table $tb set tag tgcol6='1' -sql reset query cache - -sql select * from $mt where tgcol5 = '5' -print select * from $mt where tgcol5 = 5 -print $data01 $data02 $data03 $data04 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 4 then - return -1 -endi -if $data03 != 5 then - return -1 -endi -if $data04 != 1 then - return -1 -endi - -sql select * from $mt where tgcol2 = 1 -x step81 - return -1 -step81: -sql select * from $mt where tgcol3 = 1 -x step82 - return -1 -step82: - -print =============== step9 -$i = 9 -$mt = $mtPrefix . $i -$tb = $tbPrefix . $i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10), tgcol3 binary(10)) -sql create table $tb using $mt tags( 1, '2', '3' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol2 = '2' -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1.000000000 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -if $data04 != 3 then - return -1 -endi - -sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol3 -sql alter table $mt add tag tgcol5 bool -sql alter table $mt add tag tgcol6 float - -sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql alter table $tb set tag tgcol5=1 -sql alter table $tb set tag tgcol6=6 -sql reset query cache - -sql select * from $mt where tgcol5 = 1 -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 4.000000000 then - return -1 -endi -if $data03 != 1 then - return -1 -endi -if $data04 != 6.00000 then - return -1 -endi - -sql select * from $mt where tgcol3 = 1 -x step91 - return -1 -step91: -sql select * from $mt where tgcol2 = 1 -x step92 - return -1 -step92: - -print =============== step10 -$i = 10 -$mt = $mtPrefix . $i -$tb = $tbPrefix . 
$i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 binary(10), tgcol3 binary(10), tgcol4 binary(10)) -sql create table $tb using $mt tags( '1', '2', '3', '4' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol4 = '4' -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -if $data04 != 3 then - return -1 -endi -if $data05 != 4 then - return -1 -endi - -sql alter table $mt rename tag tgcol1 tgcol4 -x step103 - return -1 -step103: - -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol3 -sql alter table $mt drop tag tgcol4 -sql reset query cache -sql alter table $mt add tag tgcol4 binary(10) -sql alter table $mt add tag tgcol5 bool - -sql reset query cache -sql alter table $tb set tag tgcol4='4' -sql alter table $tb set tag tgcol5=false -sql reset query cache - -sql select * from $mt where tgcol4 = '4' -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 4 then - return -1 -endi -if $data04 != 0 then - return -1 -endi -if $data05 != null then - return -1 -endi - -sql select * from $mt where tgcol2 = 1 -x step101 - return -1 -step101: -sql select * from $mt where tgcol3 = 1 -x step102 - return -1 -step102: - -print =============== step11 -$i = 11 -$mt = $mtPrefix . $i -$tb = $tbPrefix . $i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 smallint, tgcol4 float, tgcol5 binary(10)) -sql create table $tb using $mt tags( 1, 2, 3, 4, '5' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol1 = 1 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -if $data04 != 3 then - return -1 -endi -if $data05 != 4.00000 then - return -1 -endi -if $data06 != 5 then - return -1 -endi - -sql alter table $mt rename tag tgcol1 tgcol4 -x step114 - return -1 -step114: - -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol3 -sql alter table $mt drop tag tgcol4 -sql alter table $mt drop tag tgcol5 -sql reset query cache -sql alter table $mt add tag tgcol4 binary(10) -sql alter table $mt add tag tgcol5 int -sql alter table $mt add tag tgcol6 binary(10) -sql alter table $mt add tag tgcol7 bigint -sql alter table $mt add tag tgcol8 smallint - -sql reset query cache -sql alter table $tb set tag tgcol4='4' -sql alter table $tb set tag tgcol5=5 -sql alter table $tb set tag tgcol6='6' -sql alter table $tb set tag tgcol7=7 -sql alter table $tb set tag tgcol8=8 -sql reset query cache - -sql select * from $mt where tgcol5 =5 -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 4 then - return -1 -endi -if $data04 != 5 then - return -1 -endi -if $data05 != 6 then - return -1 -endi -if $data06 != 7 then - return -1 -endi -if $data07 != 8 then - return -1 -endi - -sql select * from $mt where tgcol2 = 1 -x step111 - return -1 -step111: -sql select * from $mt where tgcol3 = 1 -x step112 - return -1 -step112: -sql select * from $mt where tgcol9 = 1 -x step113 - return -1 -step113: - -print =============== step12 -$i = 12 -$mt = $mtPrefix . $i -$tb = $tbPrefix . 
$i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 smallint, tgcol3 float, tgcol4 double, tgcol5 binary(10), tgcol6 binary(20)) -sql create table $tb using $mt tags( 1, 2, 3, 4, '5', '6' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol1 = 1 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -if $data04 != 3.00000 then - return -1 -endi -if $data05 != 4.000000000 then - return -1 -endi -if $data06 != 5 then - return -1 -endi -if $data07 != 6 then - return -1 -endi - -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol3 -sql alter table $mt drop tag tgcol4 -sql alter table $mt drop tag tgcol5 -sql reset query cache -sql alter table $mt add tag tgcol2 binary(10) -sql alter table $mt add tag tgcol3 int -sql alter table $mt add tag tgcol4 binary(10) -sql alter table $mt add tag tgcol5 bigint - -sql reset query cache -sql alter table $tb set tag tgcol1=false -sql alter table $tb set tag tgcol2='5' -sql alter table $tb set tag tgcol3=4 -sql alter table $tb set tag tgcol4='3' -sql alter table $tb set tag tgcol5=2 -sql alter table $tb set tag tgcol6='1' -sql reset query cache - -sql select * from $mt where tgcol4 = '3' -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 0 then - return -1 -endi -if $data03 != 1 then - return -1 -endi -if $data04 != 5 then - return -1 -endi -if $data05 != 4 then - return -1 -endi -if $data06 != 3 then - return -1 -endi -if $data07 != 2 then - return -1 -endi - -sql select * from $mt where tgcol2 = '5' -if $rows != 1 then - return -1 -endi - -sql select * from $mt where tgcol3 = 4 -if $rows != 1 then - return -1 -endi - -sql select * from $mt where tgcol5 = 2 -if $rows != 1 then - return -1 -endi - -sql select * from $mt where tgcol6 = '1' -if $rows != 1 then - return -1 -endi - -print =============== step13 -$i = 13 -$mt = $mtPrefix . $i -$tb = $tbPrefix . 
$i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20)) -sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) -sql insert into $tb values(now, 1) -sql select * from $mt where tgcol1 = '1' -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 1 then - return -1 -endi -if $data03 != 2 then - return -1 -endi -if $data04 != 3 then - return -1 -endi -if $data05 != 4 then - return -1 -endi -if $data06 != 5.000000000 then - return -1 -endi -if $data07 != 6 then - return -1 -endi - -sql alter table $mt drop tag tgcol2 -sql alter table $mt drop tag tgcol4 -sql alter table $mt drop tag tgcol6 -sql reset query cache -sql alter table $mt add tag tgcol2 binary(10) -sql alter table $mt add tag tgcol4 int -sql alter table $mt add tag tgcol6 bigint - -sql reset query cache -sql alter table $tb set tag tgcol1='7' -sql alter table $tb set tag tgcol2='8' -sql alter table $tb set tag tgcol3=9 -sql alter table $tb set tag tgcol4=10 -sql alter table $tb set tag tgcol5=11 -sql alter table $tb set tag tgcol6=12 -sql reset query cache - -sql select * from $mt where tgcol2 = '8' -print $data01 $data02 $data03 -if $rows != 1 then - return -1 -endi -if $data01 != 1 then - return -1 -endi -if $data02 != 7 then - return -1 -endi -if $data03 != 9 then - return -1 -endi -if $data04 != 11.000000000 then - return -1 -endi -if $data05 != 8 then - return -1 -endi -if $data06 != 10 then - return -1 -endi -if $data07 != 12 then - return -1 -endi - -print =============== step14 -$i = 14 -$mt = $mtPrefix . $i -$tb = $tbPrefix . $i -sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 bigint) -sql create table $tb using $mt tags( 1, 1 ) -sql insert into $tb values(now, 1) - -sql alter table $mt add tag tgcol3 binary(10) -sql alter table $mt add tag tgcol4 int -sql alter table $mt add tag tgcol5 bigint -sql alter table $mt add tag tgcol6 bigint - - sql reset query cache -sql alter table $mt drop tag tgcol6 -sql alter table $mt add tag tgcol7 bigint -sql alter table $mt add tag tgcol8 bigint - -print =============== clear -sql drop database $db -sql select * from information_schema.ins_databases -if $rows != 2 then - return -1 -endi system sh/exec.sh -n dnode1 -s stop -x SIGINT From ae885dc592c27f7517f5047551f4b1438b7e52ca Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 11:28:37 +0800 Subject: [PATCH 32/76] update test case --- tests/script/tsim/tag/change_multi_tag.sim | 46 +++++++++++++++++++--- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim index b7d655ad1c..13a4bdb1d8 100644 --- a/tests/script/tsim/tag/change_multi_tag.sim +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -40,6 +40,8 @@ sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2 # dumplicate tag name sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2 # not exist tag sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx" sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx", tagCol7="yyyyyyyyyyyyyyyyyyyyyyyyy" +sql_error alter table $tbj set tag tagCol1=1,tagCol5=10, tagCol1=3 +sql_error alter table $tbj set tag tagCol5="xxxx" sql alter table $tbj set tag tagCol1 = 100, tagCol2 = 100 sql select * from $mt where tagCol2 = 100 @@ -52,7 +54,7 @@ if $rows != 2 then return -1 endi -sql alter table $tbj set tag 
tagCol1=true,tagCol2=-1,tagcol3=-10, tagcol4=-100,tagcol5=-1000,tagCol6="empty",tagCol7="empty" +sql alter table $tbj set tag tagCol1=true,tagCol2=-1,tagcol3=-10, tagcol4=-100,tagcol5=-1000,tagCol6="empty",tagCol7="empty1" sql alter table $tb set tag tagCol1=0 sql select * from $mt where tagCol1 = true @@ -75,15 +77,11 @@ if $rows != 0 then return -1 endi -sql select * from $mt where tagCol4 = -1000 +sql select * from $mt where tagCol5 = -1000 if $rows != 0 then return -1 endi -sql select * from $mt where tagCol5 = -10000 -if $rows != 0 then - return -1 -endi sql select * from $mt where tagCol6 = "empty" if $rows != 0 then @@ -95,6 +93,42 @@ if $rows != 0 then return -1 endi +sql insert into $tbj values (now, 1) +sql select * from $mt where tagCol1 = true +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol2 = -1 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol3 = -10 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol4 = -100 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol5 = -1000 +if $rows != 1 then + return -1 +endi + + +sql select * from $mt where tagCol6 = "empty" +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol7 = "empty1" +if $rows != 1 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT From d38657118dcf82762bdc5ab710e5f7ab961643f0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 13:50:43 +0800 Subject: [PATCH 33/76] update test case --- tests/script/tsim/tag/change_multi_tag.sim | 48 ++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim index 13a4bdb1d8..0ba238b05c 100644 --- a/tests/script/tsim/tag/change_multi_tag.sim +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -131,4 +131,52 @@ if $rows != 1 then return -1 endi +sql alter table $tbj set tag tagCol1=true +sql alter table $tb set tag tagCol1=true + +sql select * from $mt where tagCol1 = true +if $rows != 3 then + return -1 +endi + +sql alter table $tb set tag tagCol1=false + +sql alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=-10000,tagCol6="empty1",tagCol7="empty2" + +sql select * from $mt where tagCol1 = true +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol2 = -10 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol3 = -100 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol4 = -1000 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol5 = -10000 +if $rows != 1 then + return -1 +endi + + +sql select * from $mt where tagCol6 = "empty1" +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol7 = "empty2" +if $rows != 1 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT From 022544d226c01a397b6dc60879120039bcfe0e5c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 15:01:04 +0800 Subject: [PATCH 34/76] update test case --- source/libs/parser/src/parTranslater.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index c0f6ab124a..ed696ad236 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -15391,6 +15391,7 @@ static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStm TAOS_CHECK_GOTO(code, NULL, _err); if 
(taosArrayPush(pReq->pMultiTag, &val) == NULL) { + tfreeMultiTagUpateVal((void*)&val); TAOS_CHECK_GOTO(terrno, NULL, _err); } } From 1a05a4cd7d798932137c2b25f1ee729dc72da397 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 17:44:06 +0800 Subject: [PATCH 35/76] update test case --- tests/system-test/0-others/empty_identifier.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/system-test/0-others/empty_identifier.py b/tests/system-test/0-others/empty_identifier.py index a576781d86..3644269b38 100644 --- a/tests/system-test/0-others/empty_identifier.py +++ b/tests/system-test/0-others/empty_identifier.py @@ -145,7 +145,6 @@ class TDTestCase: 'select * from ``', 'alter table meters add column `` int', 'alter table meters drop column ``', - 'alter table t0 set tag `` = ""', 'alter stable meters add tag `` int', 'alter stable meters rename tag cc ``', 'alter stable meters drop tag ``', From e87570c03688cb8518d51aa3b0aae9969edcccaf Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 26 Nov 2024 20:50:37 +0800 Subject: [PATCH 36/76] fix invalid free --- source/dnode/vnode/src/meta/metaTable.c | 4 ++++ tests/script/tsim/tag/change_multi_tag.sim | 2 ++ 2 files changed, 6 insertions(+) diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 54476d339f..94349048ad 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2049,7 +2049,9 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe } if (tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData) != 0) { + tdbTbcClose(pUidIdxc); metaError("meta/table: failed to get uid index, uid:%" PRId64, uid); + return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; } oversion = ((SUidIdxVal *)pData)[0].version; @@ -2302,7 +2304,9 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA } if (tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData) != 0) { + tdbTbcClose(pUidIdxc); metaError("meta/table: failed to get uid index, uid:%" PRId64, uid); + return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; } oversion = ((SUidIdxVal *)pData)[0].version; diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim index 0ba238b05c..9370afc785 100644 --- a/tests/script/tsim/tag/change_multi_tag.sim +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -179,4 +179,6 @@ if $rows != 1 then return -1 endi +sql alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL + system sh/exec.sh -n dnode1 -s stop -x SIGINT From 2aac26e0a9b492a85f65fb60cde86e93d031531c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 07:56:24 +0800 Subject: [PATCH 37/76] fix partial failure --- source/dnode/vnode/src/meta/metaTable.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 94349048ad..e6ad1d03fc 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2074,6 +2074,9 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe if (tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData) != 0) { metaError("meta/table: failed to get tb db, uid:%" PRId64, uid); + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); + return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; } if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) { @@ -2329,6 +2332,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, 
SVAlterTbReq *pA if (tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData) != 0) { metaError("meta/table: failed to get tb db, uid:%" PRId64, uid); + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); } if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) { From 90d63ea5a2b389a466fd6f798d7804bc90d1c915 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 07:59:16 +0800 Subject: [PATCH 38/76] fix partial failure --- source/libs/parser/src/parAstCreater.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 8b0fc19739..e05a399d32 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -2427,12 +2427,6 @@ static SNode* createAlterTableStmtFinalize(SNode* pRealTable, SAlterTableStmt* p nodesDestroyNode(pRealTable); return (SNode*)pStmt; } -static SNode* createAlterTableMultiStmtFinalize(SNode* pRealTable, SAlterTableMultiStmt* pStmt) { - strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName); - strcpy(pStmt->tableName, ((SRealTableNode*)pRealTable)->table.tableName); - nodesDestroyNode(pRealTable); - return (SNode*)pStmt; -} SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable, SNode* pOptions) { CHECK_PARSER_STATUS(pCxt); From 89ee604083c75c58b14fcadfcf9304c4875fd7a8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 09:01:02 +0800 Subject: [PATCH 39/76] add test case --- tests/script/tsim/tag/change_multi_tag.sim | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim index 9370afc785..30ae1c653a 100644 --- a/tests/script/tsim/tag/change_multi_tag.sim +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -181,4 +181,25 @@ endi sql alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL +sql alter table $mt drop tag tagCol7 +sql alter table $mt drop tag tagCol3 + +sql alter table $mt add tag tagCol8 int + +sql_error alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL +sql_error alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL + +sql alter table $tbj set tag tagCol8 = 8 + +sql select * from $mt where tagCol4 = -1000 +if $rows != 1 then + return -1 +endi + +sql select * from $mt where tagCol8 = 8 +if $rows != 1 then + return -1 +endi + + system sh/exec.sh -n dnode1 -s stop -x SIGINT From a84a849ef760cbb81a67c81e0c85a95277d46c75 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 09:12:17 +0800 Subject: [PATCH 40/76] add test case --- source/dnode/vnode/src/meta/metaTable.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index e6ad1d03fc..2a4b2baf45 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2334,6 +2334,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaError("meta/table: failed to get tb db, uid:%" PRId64, uid); tdbTbcClose(pUidIdxc); tdbTbcClose(pTbDbc); + return terrno = TSDB_CODE_INVALID_MSG; } if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) { From 221eac50caa75e3645b13648bd5b9eef65fd25bd Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 09:28:01 +0800 Subject: [PATCH 41/76] refactor code --- 
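[Reviewer note, not part of the commit message] This refactor drops the temporary updateTagColumnIds array from metaUpdateTableMultiTagVal: the function now walks the super table's tag schema once, counts how many of the requested tag names it finds, and compares that count with the number of entries in the request hash; any mismatch is reported as TSDB_CODE_VND_COL_NOT_EXISTS. The snippet below is a minimal, self-contained sketch of that validation idea for the multi-tag SET TAG form added earlier in this series; checkRequestedTags, schemaTags and reqTags are illustrative names only — the real code operates on SSchema entries and an SHashObj, not plain string arrays.

    /* Sketch only: models the count-and-compare check, not the real meta code. */
    #include <stdio.h>
    #include <string.h>

    static int checkRequestedTags(const char *schemaTags[], int nSchema,
                                  const char *reqTags[], int nReq) {
      int count = 0;
      for (int i = 0; i < nSchema; ++i) {        /* one pass over the tag schema */
        for (int j = 0; j < nReq; ++j) {
          if (strcmp(schemaTags[i], reqTags[j]) == 0) {
            ++count;                             /* requested tag exists in the schema */
            break;
          }
        }
      }
      return (count == nReq) ? 0 : -1;           /* -1 stands in for "column not exists" */
    }

    int main(void) {
      const char *schema[] = {"tagCol1", "tagCol2", "tagCol3"};
      const char *ok[]     = {"tagCol1", "tagCol3"};
      const char *bad[]    = {"tagCol1", "tagCol9"};
      printf("ok : %d\n", checkRequestedTags(schema, 3, ok, 2));  /* prints 0  */
      printf("bad: %d\n", checkRequestedTags(schema, 3, bad, 2)); /* prints -1 */
      return 0;
    }

Compared with the previous version, this avoids allocating and freeing an SArray on every multi-tag update and keeps a single failure path when a tag name in ALTER TABLE ... SET TAG t1 = v1, t2 = v2 does not exist in the schema.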
source/dnode/vnode/src/meta/metaTable.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 2a4b2baf45..8256b5e402 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2023,7 +2023,6 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe const void *pData = NULL; int nData = 0; SHashObj *pTagTable = NULL; - SArray *updateTagColumnIds = NULL; // search name index ret = tdbTbGet(pMeta->pNameIdx, pAlterTbReq->tbName, strlen(pAlterTbReq->tbName) + 1, &pVal, &nVal); @@ -2128,12 +2127,11 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe goto _err; } } - int32_t nUpdateTagVal = taosHashGetSize(pTagTable); - updateTagColumnIds = taosArrayInit(nUpdateTagVal, sizeof(int32_t)); SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; SSchema *pColumn = NULL; int32_t iCol = 0; + int32_t count = 0; for (;;) { pColumn = NULL; @@ -2141,14 +2139,11 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe if (iCol >= pTagSchema->nCols) break; pColumn = &pTagSchema->pSchema[iCol]; if (taosHashGet(pTagTable, pColumn->name, strlen(pColumn->name)) != NULL) { - if (taosArrayPush(updateTagColumnIds, &iCol) == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } + count++; } iCol++; } - if (taosArrayGetSize(updateTagColumnIds) != nUpdateTagVal) { + if (count != taosHashGetSize(pTagTable)) { terrno = TSDB_CODE_VND_COL_NOT_EXISTS; goto _err; } @@ -2253,7 +2248,6 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe tdbTbcClose(pTbDbc); tdbTbcClose(pUidIdxc); taosHashCleanup(pTagTable); - taosArrayDestroy(updateTagColumnIds); return 0; _err: @@ -2264,7 +2258,6 @@ _err: tdbTbcClose(pTbDbc); tdbTbcClose(pUidIdxc); taosHashCleanup(pTagTable); - taosArrayDestroy(updateTagColumnIds); return -1; } static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { From 9125e60a5aba81913607de7a1487a253f2c73a33 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 14:14:52 +0800 Subject: [PATCH 42/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 4 ++-- tests/army/tmq/a.py | 13 ------------- tests/parallel_test/tdgpt_cases.task | 4 ++-- tests/script/tsim/analytics/basic0.sim | 2 +- 4 files changed, 5 insertions(+), 18 deletions(-) delete mode 100644 tests/army/tmq/a.py diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 45da12ad76..a197d9a9a8 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -78,7 +78,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" |grep -v "tsim/analytics" || : ''', returnStdout: true ).trim() @@ -569,7 +569,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c/ ) { + if ( file_no_doc_changed =~ 
/forecastoperator.c|anomalywindowoperator.c|tsim\/analytics/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 diff --git a/tests/army/tmq/a.py b/tests/army/tmq/a.py deleted file mode 100644 index 9236e3a5b3..0000000000 --- a/tests/army/tmq/a.py +++ /dev/null @@ -1,13 +0,0 @@ -import click - -@click.command() -@click.option('--count', default=1, help='Number of greetings.') -@click.option('--name', prompt='Your name', - help='The person to greet.') -def hello(count, name): - """Simple program that greets NAME for a total of COUNT times.""" - for x in range(count): - click.echo('Hello %s!' % name) - -if __name__ == '__main__': - hello() \ No newline at end of file diff --git a/tests/parallel_test/tdgpt_cases.task b/tests/parallel_test/tdgpt_cases.task index e028d13fb8..015f2a005a 100644 --- a/tests/parallel_test/tdgpt_cases.task +++ b/tests/parallel_test/tdgpt_cases.task @@ -1,6 +1,6 @@ #Column Define #caseID,rerunTimes,Run with Sanitizer,casePath,caseCommand -#NA,NA,y or n,script,./test.sh -f tsim/user/basic.sim +#NA,NA,y or n,script,./test.sh -f tsim/analytics/basic0.sim #tdgpt-test -,,y,script,./test.sh -f tsim/query/timeline.sim +,,y,script,./test.sh -f tsim/analytics/basic0.sim \ No newline at end of file diff --git a/tests/script/tsim/analytics/basic0.sim b/tests/script/tsim/analytics/basic0.sim index a4fe6354ae..77c9184e8f 100644 --- a/tests/script/tsim/analytics/basic0.sim +++ b/tests/script/tsim/analytics/basic0.sim @@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== create anode -sql create anode '127.0.0.1:6090' +sql create anode '192.168.1.116:6050' sql show anodes if $rows != 1 then From b0e3f24953e94f3ed0ebdd2a55004a0cef09e8df Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 14:40:27 +0800 Subject: [PATCH 43/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 4 ++-- tests/parallel_test/tdgpt_cases.task | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index a197d9a9a8..85683a6350 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -78,7 +78,7 @@ def check_docs(){ file_only_tdgpt_change_except = sh ( script: ''' cd ${WKC} - git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" |grep -v "tsim/analytics" || : + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || : ''', returnStdout: true ).trim() @@ -569,7 +569,7 @@ pipeline { cd ${WKC}/tests/parallel_test ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + ''' ''' - if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics/ ) { + if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task/ ) { sh ''' cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 diff --git a/tests/parallel_test/tdgpt_cases.task b/tests/parallel_test/tdgpt_cases.task index 015f2a005a..bb3c15f307 100644 --- a/tests/parallel_test/tdgpt_cases.task +++ b/tests/parallel_test/tdgpt_cases.task @@ -3,4 +3,4 @@ #NA,NA,y or n,script,./test.sh -f tsim/analytics/basic0.sim #tdgpt-test -,,y,script,./test.sh -f 
tsim/analytics/basic0.sim \ No newline at end of file +,,y,script,./test.sh -f tsim/analytics/basic0.sim From 52c87f5b5b2566a574f0149ab0bc490cf0818465 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 14:45:12 +0800 Subject: [PATCH 44/76] ci: set test step in ci when tdgpt file changed --- Jenkinsfile2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 85683a6350..63ebf3a6ed 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -574,7 +574,7 @@ pipeline { cd ${WKC}/tests/parallel_test export DEFAULT_RETRY_TIME=2 date - ''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + ''' + timeout 600 time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 300 ''' + extra_param + ''' ''' } if ( file_only_tdgpt_change_except != '' ) { From 779b97e29174bf0172aa267b67d267bb5bb61918 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 14:49:37 +0800 Subject: [PATCH 45/76] ci: set test step in ci when tdgpt file changed --- tests/parallel_test/tdgpt_cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/tdgpt_cases.task b/tests/parallel_test/tdgpt_cases.task index bb3c15f307..41a9c3b356 100644 --- a/tests/parallel_test/tdgpt_cases.task +++ b/tests/parallel_test/tdgpt_cases.task @@ -3,4 +3,4 @@ #NA,NA,y or n,script,./test.sh -f tsim/analytics/basic0.sim #tdgpt-test -,,y,script,./test.sh -f tsim/analytics/basic0.sim +,,n,script,./test.sh -f tsim/analytics/basic0.sim From 0378062c4ecbb430829e9c30c0559174f50a3186 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 15:00:47 +0800 Subject: [PATCH 46/76] doc: update doc --- docs/en/14-reference/03-taos-sql/03-table.md | 2 +- docs/zh/14-reference/03-taos-sql/03-table.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/14-reference/03-taos-sql/03-table.md b/docs/en/14-reference/03-taos-sql/03-table.md index 561f479aa3..e6a704a974 100644 --- a/docs/en/14-reference/03-taos-sql/03-table.md +++ b/docs/en/14-reference/03-taos-sql/03-table.md @@ -161,7 +161,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause alter_table_clause: { alter_table_options - | SET TAG tag_name = new_tag_value + | SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value... } alter_table_options: diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md index 40e2802fcd..4bf1bd5b87 100644 --- a/docs/zh/14-reference/03-taos-sql/03-table.md +++ b/docs/zh/14-reference/03-taos-sql/03-table.md @@ -171,7 +171,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause alter_table_clause: { alter_table_options - | SET TAG tag_name = new_tag_value + | SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value... 
} alter_table_options: From 1f96f3ad7229c6d47a57c48ec84b32d6429a9404 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 15:07:59 +0800 Subject: [PATCH 47/76] fix mem leak --- source/client/src/clientRawBlockWrite.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 924b1ce202..b209ff2eca 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -823,6 +823,9 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { end: uDebug("alter table return"); + if (vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL) { + taosArrayDestroy(vAlterTbReq.pMultiTag); + } tDecoderClear(&decoder); *pJson = json; } From 6997386455b673a3e8863471a4034868ac19c103 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 15:22:29 +0800 Subject: [PATCH 48/76] support subscribe --- source/client/src/clientRawBlockWrite.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index b209ff2eca..08ecf103cd 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -772,10 +772,13 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { cJSON* tags = cJSON_CreateArray(); RAW_NULL_CHECK(tags); for (int32_t i = 0; i < nTags; i++) { + cJSON* member = cJSON_CreateObject(); + RAW_NULL_CHECK(member); + SMultiTagUpateVal* pTagVal = taosArrayGet(vAlterTbReq.pMultiTag, i); cJSON* tagName = cJSON_CreateString(pTagVal->tagName); RAW_NULL_CHECK(tagName); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tags, "colName", tagName)); + RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colName", tagName)); if (pTagVal->tagType == TSDB_DATA_TYPE_JSON) { uError("processAlterTable isJson false"); @@ -799,12 +802,13 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { } cJSON* colValue = cJSON_CreateString(buf); RAW_NULL_CHECK(colValue); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tags, "colValue", colValue)); + RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValue", colValue)); taosMemoryFree(buf); } cJSON* isNullCJson = cJSON_CreateBool(isNull); RAW_NULL_CHECK(isNullCJson); - RAW_FALSE_CHECK(cJSON_AddItemToObject(tags, "colValueNull", isNullCJson)); + RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValueNull", isNullCJson)); + RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, member)); } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); break; From fcec7c522d692e93fbd2413a755514be99da4dac Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 27 Nov 2024 16:12:57 +0800 Subject: [PATCH 49/76] support subscribe --- source/client/src/clientRawBlockWrite.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 08ecf103cd..6331224c3a 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -620,7 +620,10 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { cJSON* type = cJSON_CreateString("alter"); RAW_NULL_CHECK(type); RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type)); - cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? 
"child" : "normal"); + cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL || + vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL + ? "child" + : "normal"); RAW_NULL_CHECK(tableType); RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType)); cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName); From a64c6b6ce13d5b561e3cf151f3433c799bec3aa9 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 27 Nov 2024 19:09:38 +0800 Subject: [PATCH 50/76] fix:[TD-33048] add ts to cols if dataFormat is true in schemaless to avoid schemal is old --- source/client/src/clientSml.c | 25 ++++++----- utils/test/c/sml_test.c | 81 +++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 10 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 8602421ed0..911e3664f5 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -233,7 +233,7 @@ int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSml goto END; } SML_CHECK_CODE(smlBuildSTableMeta(info->dataFormat, sMeta)); - for (int i = 1; i < pTableMeta->tableInfo.numOfTags + pTableMeta->tableInfo.numOfColumns; i++) { + for (int i = 0; i < pTableMeta->tableInfo.numOfTags + pTableMeta->tableInfo.numOfColumns; i++) { SSchema *col = pTableMeta->schema + i; SSmlKv kv = {.key = col->name, .keyLen = strlen(col->name), .type = col->type}; if (col->type == TSDB_DATA_TYPE_NCHAR) { @@ -772,22 +772,27 @@ END: RETURN } -static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool isTag) { +static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SHashObj *hashTmp = taosHashInit(length, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); SML_CHECK_NULL(hashTmp); - int32_t i = 0; - for (; i < length; i++) { - SML_CHECK_CODE(taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &i, SHORT_BYTES)); + for (int32_t i = 0; i < length; i++) { + SML_CHECK_CODE(taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &schema[i], sizeof(SSchema))); } - i = isTag ? 0 : 1; - for (; i < taosArrayGetSize(cols); i++) { + for (int32_t i = 0; i < taosArrayGetSize(cols); i++) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i); SML_CHECK_NULL(kv); - if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) { + SSchema *sTmp = taosHashGet(hashTmp, kv->key, kv->keyLen); + if (sTmp == NULL) { SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } + if ((IS_VAR_DATA_TYPE(kv->type) && kv->length + VARSTR_HEADER_SIZE > sTmp->bytes) || + (!IS_VAR_DATA_TYPE(kv->type) && kv->length != sTmp->bytes)){ + uError("column %s (type %s) bytes invalid. 
db bytes:%d, kv bytes:%zu", sTmp->name, + tDataTypes[sTmp->type].name, sTmp->bytes, kv->length); + SML_CHECK_CODE(TSDB_CODE_INTERNAL_ERROR); + } } END: @@ -1132,8 +1137,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { } if (needCheckMeta) { - SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, sTableData->tags, true)); - SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false)); + SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, sTableData->tags)); + SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols)); } taosMemoryFreeClear(sTableData->tableMeta); diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index a8d4fafb03..bf04352232 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -2253,6 +2253,83 @@ int sml_ts5528_test(){ return 0; } +int sml_td33048_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "drop database if exists td33048"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create database if not exists td33048"); + taos_free_result(pRes); + + // check column name duplication + const char *sql[] = { + "alarm_record,tag=alarm_record uid=\"3+8001+c939604c\",deviceId=\"3\",alarmId=\"8001\",alarmStatus=\"false\",lotNo=\"2411A0302\",subMode=\"11\",occurTime=\"2024-11-25 09:31:52.702\" 1732527117484", + }; + pRes = taos_query(taos, "use td33048"); + taos_free_result(pRes); + pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); + int code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + ASSERT(code == 0); + taos_free_result(pRes); + + // check tag name duplication + const char *sql1[] = { + "alarm_record,tag=alarm_record uid=\"2+100012+303fe9b5\",deviceId=\"2\",alarmId=\"100012\",alarmStatus=\"false\",lotNo=\"2411A0202\",subMode=\"11\",occurTime=\"2024-11-25 09:31:55.591\" 1732527119493", + }; + pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); + code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + ASSERT(code == 0); + taos_free_result(pRes); + + pRes = taos_query(taos, "select * from alarm_record"); + code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + ASSERT(code == 0); + taos_free_result(pRes); + + taos_close(taos); + + return code; +} + +int sml_td17324_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "drop database if exists gcbacaefqk"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create database if not exists gcbacaefqk PRECISION 'ns'"); + taos_free_result(pRes); + + pRes = taos_query(taos, "use gcbacaefqk"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create stable gcbacaefqk.test_stb(_ts timestamp, f int) tags(t1 bigint)"); + taos_free_result(pRes); + // check column name duplication + const char *sql[] = { + "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1732700000364000000", + "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1732700000361000000", + 
"test_stb,t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1732700000364316532" + }; + + pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + int code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + ASSERT(code == 0); + taos_free_result(pRes); + + taos_close(taos); + + return code; +} + int main(int argc, char *argv[]) { if (argc == 2) { taos_options(TSDB_OPTION_CONFIGDIR, argv[1]); @@ -2262,6 +2339,10 @@ int main(int argc, char *argv[]) { ASSERT(!ret); ret = sml_ts5528_test(); ASSERT(!ret); + ret = sml_td33048_Test(); + ASSERT(!ret); + ret = sml_td17324_Test(); + ASSERT(!ret); ret = sml_td29691_Test(); ASSERT(ret); ret = sml_td29373_Test(); From ca5604633d832cd45a3a8d40af1548b3d52c608e Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 19:28:27 +0800 Subject: [PATCH 51/76] ci: add test case for tdgpt --- tests/system-test/9-tdgpt/test_gpt.py | 65 +++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 tests/system-test/9-tdgpt/test_gpt.py diff --git a/tests/system-test/9-tdgpt/test_gpt.py b/tests/system-test/9-tdgpt/test_gpt.py new file mode 100644 index 0000000000..099003dbdd --- /dev/null +++ b/tests/system-test/9-tdgpt/test_gpt.py @@ -0,0 +1,65 @@ +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +import taos + + + +class TDTestCase: + clientCfgDict = {'debugFlag': 135} + updatecfgDict = { + "debugFlag" : "135", + "queryBufferSize" : 10240, + 'clientCfg' : clientCfgDict + } + + def init(self, conn, logSql, replicaVal=1): + self.replicaVar = int(replicaVal) + tdLog.debug(f"start to excute {__file__}") + self.conn = conn + tdSql.init(conn.cursor(), False) + self.passwd = {'root':'taosdata', + 'test':'test'} + + def prepare_anode_data(self): + tdSql.execute(f"create anode '127.0.0.1:6090'") + tdSql.execute(f"create database db_gpt") + tdSql.execute(f"create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") + tdSql.execute(f"create table ct1 using stb tags(1000);") + tdSql.execute(f"insert into ct1(ts, c1) values(now-1a, 5)(now+1a, 14)(now+2a, 15)(now+3a, 15)(now+4a, 14);") + tdSql.execute(f"insert into ct1(ts, c1) values(now+5a, 19)(now+6a, 17)(now+7a, 16)(now+8a, 20)(now+9a, 22);") + tdSql.execute(f"insert into ct1(ts, c1) values(now+10a, 8)(now+11a, 21)(now+12a, 28)(now+13a, 11)(now+14a, 9);") + tdSql.execute(f"insert into ct1(ts, c1) values(now+15a, 29)(now+16a, 40);") + + + def test_forecast(self): + """ + Test forecast + """ + tdLog.info(f"Test forecast") + tdSql.query(f"SELECT _frowts, FORECAST(c1, \"algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5,d=1\") from ct1 ;") + + + def test_anomaly_window(self, uname): + """ + Test anomaly window + """ + tdLog.info(f"Test anomaly window") + tdSql.query(f"SELECT _wstart, _wend, SUM(c1) FROM ct1 ANOMALY_WINDOW(c1, \"algo=iqr\");") + + def run(self): + self.prepare_anode_data() + self.test_forecast() + self.test_anomaly_window() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + + From f840db1e966a9b6d2fa1d9b38a10fbee43cb06da Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 19:36:24 +0800 Subject: [PATCH 52/76] ci: add test case for tdgpt --- tests/system-test/9-tdgpt/test_gpt.py | 16 ++++++++-------- 1 file changed, 8 
insertions(+), 8 deletions(-) diff --git a/tests/system-test/9-tdgpt/test_gpt.py b/tests/system-test/9-tdgpt/test_gpt.py index 099003dbdd..662e64dc7e 100644 --- a/tests/system-test/9-tdgpt/test_gpt.py +++ b/tests/system-test/9-tdgpt/test_gpt.py @@ -25,12 +25,12 @@ class TDTestCase: def prepare_anode_data(self): tdSql.execute(f"create anode '127.0.0.1:6090'") tdSql.execute(f"create database db_gpt") - tdSql.execute(f"create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") - tdSql.execute(f"create table ct1 using stb tags(1000);") - tdSql.execute(f"insert into ct1(ts, c1) values(now-1a, 5)(now+1a, 14)(now+2a, 15)(now+3a, 15)(now+4a, 14);") - tdSql.execute(f"insert into ct1(ts, c1) values(now+5a, 19)(now+6a, 17)(now+7a, 16)(now+8a, 20)(now+9a, 22);") - tdSql.execute(f"insert into ct1(ts, c1) values(now+10a, 8)(now+11a, 21)(now+12a, 28)(now+13a, 11)(now+14a, 9);") - tdSql.execute(f"insert into ct1(ts, c1) values(now+15a, 29)(now+16a, 40);") + tdSql.execute(f"create table if not exists db_gpt.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") + tdSql.execute(f"create table db_gpt.ct1 using db_gpt.stb tags(1000);") + tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now-1a, 5)(now+1a, 14)(now+2a, 15)(now+3a, 15)(now+4a, 14);") + tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+5a, 19)(now+6a, 17)(now+7a, 16)(now+8a, 20)(now+9a, 22);") + tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+10a, 8)(now+11a, 21)(now+12a, 28)(now+13a, 11)(now+14a, 9);") + tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+15a, 29)(now+16a, 40);") def test_forecast(self): @@ -38,7 +38,7 @@ class TDTestCase: Test forecast """ tdLog.info(f"Test forecast") - tdSql.query(f"SELECT _frowts, FORECAST(c1, \"algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5,d=1\") from ct1 ;") + tdSql.query(f"SELECT _frowts, FORECAST(c1, \"algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5,d=1\") from db_gpt.ct1 ;") def test_anomaly_window(self, uname): @@ -46,7 +46,7 @@ class TDTestCase: Test anomaly window """ tdLog.info(f"Test anomaly window") - tdSql.query(f"SELECT _wstart, _wend, SUM(c1) FROM ct1 ANOMALY_WINDOW(c1, \"algo=iqr\");") + tdSql.query(f"SELECT _wstart, _wend, SUM(c1) FROM db_gpt.ct1 ANOMALY_WINDOW(c1, \"algo=iqr\");") def run(self): self.prepare_anode_data() From 160e65306b07441e8167b1f3cd7fe85c67a10f5d Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 19:39:30 +0800 Subject: [PATCH 53/76] ci: add test case for tdgpt --- tests/system-test/9-tdgpt/test_gpt.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/system-test/9-tdgpt/test_gpt.py b/tests/system-test/9-tdgpt/test_gpt.py index 662e64dc7e..16ab6bb22f 100644 --- a/tests/system-test/9-tdgpt/test_gpt.py +++ b/tests/system-test/9-tdgpt/test_gpt.py @@ -39,14 +39,16 @@ class TDTestCase: """ tdLog.info(f"Test forecast") tdSql.query(f"SELECT _frowts, FORECAST(c1, \"algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5,d=1\") from db_gpt.ct1 ;") + tdSql.checkRows(10) - - def test_anomaly_window(self, uname): + def test_anomaly_window(self): """ Test anomaly window """ tdLog.info(f"Test anomaly window") tdSql.query(f"SELECT _wstart, _wend, SUM(c1) FROM db_gpt.ct1 ANOMALY_WINDOW(c1, \"algo=iqr\");") + tdSql.checkData(0,2,40) + def run(self): self.prepare_anode_data() From a2f8a822dabd49052654ecfb925685543faef928 Mon Sep 17 00:00:00 2001 From: facetosea <25808407@qq.com> Date: Wed, 27 Nov 2024 19:51:27 
+0800 Subject: [PATCH 54/76] remove some check --- source/libs/executor/inc/operator.h | 8 -------- source/libs/executor/src/anomalywindowoperator.c | 3 --- source/libs/executor/src/countwindowoperator.c | 3 --- source/libs/executor/src/eventwindowoperator.c | 3 --- source/libs/executor/src/executorInt.c | 1 - source/libs/executor/src/groupoperator.c | 7 ------- source/libs/executor/src/hashjoinoperator.c | 4 ---- source/libs/executor/src/projectoperator.c | 1 - 8 files changed, 30 deletions(-) diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index 5ceedbe542..f2e542e7cd 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -202,14 +202,6 @@ void * getOperatorParam(int32_t opType, SOperatorParam* param, int32_t i void doKeepTuple(SWindowRowsSup* pRowSup, int64_t ts, uint64_t groupId); void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsList, int32_t rowIndex, uint64_t groupId); -#define CHECK_CONDITION_FAILED(c) \ - do { \ - if (!(c)) { \ - qError("function:%s condition failed, Line:%d", __FUNCTION__, __LINE__); \ - return TSDB_CODE_APP_ERROR; \ - } \ - } while (0) - #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index 71a38c739d..94cc5d9129 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -171,8 +171,6 @@ _error: } static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { - CHECK_CONDITION_FAILED(pOperator->info != NULL); - CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SAnomalyWindowOperatorInfo* pInfo = pOperator->info; @@ -183,7 +181,6 @@ static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRe int64_t st = taosGetTimestampUs(); int32_t numOfBlocks = taosArrayGetSize(pSupp->blocks); - CHECK_CONDITION_FAILED(pRes != NULL); blockDataCleanup(pRes); while (1) { diff --git a/source/libs/executor/src/countwindowoperator.c b/source/libs/executor/src/countwindowoperator.c index cb7459744f..542a7c89a9 100644 --- a/source/libs/executor/src/countwindowoperator.c +++ b/source/libs/executor/src/countwindowoperator.c @@ -225,8 +225,6 @@ _end: } static int32_t countWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { - CHECK_CONDITION_FAILED(pOperator->info != NULL); - CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SCountWindowOperatorInfo* pInfo = pOperator->info; @@ -234,7 +232,6 @@ static int32_t countWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** SExprSupp* pExprSup = &pOperator->exprSupp; int32_t order = pInfo->binfo.inputTsOrder; SSDataBlock* pRes = pInfo->binfo.pRes; - CHECK_CONDITION_FAILED(pRes != NULL); blockDataCleanup(pRes); diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c index 83b202fed6..e68a91d97d 100644 --- a/source/libs/executor/src/eventwindowoperator.c +++ b/source/libs/executor/src/eventwindowoperator.c @@ -182,8 +182,6 @@ void destroyEWindowOperatorInfo(void* param) { } static int32_t eventWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { - CHECK_CONDITION_FAILED(pOperator->info != NULL); - CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SEventWindowOperatorInfo* pInfo = pOperator->info; @@ 
-193,7 +191,6 @@ static int32_t eventWindowAggregateNext(SOperatorInfo* pOperator, SSDataBlock** int32_t order = pInfo->binfo.inputTsOrder; SSDataBlock* pRes = pInfo->binfo.pRes; - CHECK_CONDITION_FAILED(pRes != NULL); blockDataCleanup(pRes); diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index 4a1d26d875..1b823bf69d 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -255,7 +255,6 @@ static int32_t doSetInputDataBlockInfo(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SqlFunctionCtx* pCtx = pExprSup->pCtx; - CHECK_CONDITION_FAILED(pExprSup->numOfExprs <= 0 || pCtx != NULL); for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) { pCtx[i].order = order; pCtx[i].input.numOfRows = pBlock->info.rows; diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index c832cfbb4e..fec35c3371 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -444,8 +444,6 @@ _end: } static int32_t hashGroupbyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { - CHECK_CONDITION_FAILED(pOperator->info != NULL); - CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1005,14 +1003,11 @@ static int32_t hashPartitionNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) return TSDB_CODE_SUCCESS; } - CHECK_CONDITION_FAILED(pOperator->info != NULL); - CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SPartitionOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->binfo.pRes; - CHECK_CONDITION_FAILED(pRes != NULL); if (pOperator->status == OP_RES_TO_RETURN) { (*ppRes) = buildPartitionResult(pOperator); @@ -1464,8 +1459,6 @@ static int32_t doStreamHashPartitionNext(SOperatorInfo* pOperator, SSDataBlock** int32_t lino = 0; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamPartitionOperatorInfo* pInfo = pOperator->info; - CHECK_CONDITION_FAILED(pInfo != NULL); - CHECK_CONDITION_FAILED(pTaskInfo != NULL); if (pOperator->status == OP_EXEC_DONE) { (*ppRes) = NULL; diff --git a/source/libs/executor/src/hashjoinoperator.c b/source/libs/executor/src/hashjoinoperator.c index 12f90097c5..06498d73e7 100644 --- a/source/libs/executor/src/hashjoinoperator.c +++ b/source/libs/executor/src/hashjoinoperator.c @@ -906,7 +906,6 @@ static int32_t hJoinBuildHash(struct SOperatorInfo* pOperator, bool* queryDone) SHJoinOperatorInfo* pJoin = pOperator->info; SSDataBlock* pBlock = NULL; int32_t code = TSDB_CODE_SUCCESS; - CHECK_CONDITION_FAILED(pJoin != NULL); while (true) { pBlock = getNextBlockFromDownstream(pOperator, pJoin->pBuild->downStreamIdx); @@ -991,15 +990,12 @@ void hJoinSetDone(struct SOperatorInfo* pOperator) { } static int32_t hJoinMainProcess(struct SOperatorInfo* pOperator, SSDataBlock** pResBlock) { - CHECK_CONDITION_FAILED(pOperator->info != NULL); - CHECK_CONDITION_FAILED(pOperator->pTaskInfo != NULL); SHJoinOperatorInfo* pJoin = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SSDataBlock* pRes = pJoin->finBlk; int64_t st = 0; - CHECK_CONDITION_FAILED(pRes != NULL); QRY_PARAM_CHECK(pResBlock); if (pOperator->cost.openCost == 0) { diff --git a/source/libs/executor/src/projectoperator.c 
b/source/libs/executor/src/projectoperator.c index 5030b8a148..226cde059b 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -564,7 +564,6 @@ SSDataBlock* doApplyIndefinitFunction1(SOperatorInfo* pOperator) { int32_t doApplyIndefinitFunction(SOperatorInfo* pOperator, SSDataBlock** pResBlock) { QRY_PARAM_CHECK(pResBlock); - CHECK_CONDITION_FAILED(pOperator->info != NULL); SIndefOperatorInfo* pIndefInfo = pOperator->info; SOptrBasicInfo* pInfo = &pIndefInfo->binfo; SExprSupp* pSup = &pOperator->exprSupp; From c0884a9b82024f7e76322c307d79b8429f57a64f Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 27 Nov 2024 19:57:44 +0800 Subject: [PATCH 55/76] fix:[TD-33048] add ts to cols if dataFormat is true in schemaless to avoid schemal is old --- tests/system-test/2-query/sml_TS-3724.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/system-test/2-query/sml_TS-3724.py b/tests/system-test/2-query/sml_TS-3724.py index b537ad9b9a..9511ee8aa1 100644 --- a/tests/system-test/2-query/sml_TS-3724.py +++ b/tests/system-test/2-query/sml_TS-3724.py @@ -14,7 +14,7 @@ sys.path.append("./7-tmq") from tmqCommon import * class TDTestCase: - updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost', 'smlTsDefaultName': "times"}, 'fqdn': 'localhost'} + updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost'}, 'fqdn': 'localhost'} print("===================: ", updatecfgDict) def init(self, conn, logSql, replicaVar=1): @@ -58,7 +58,7 @@ class TDTestCase: tdSql.query(f"select distinct tbname from {dbname}.readings") tdSql.checkRows(4) - tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by times") + tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 3, "kk") tdSql.checkData(1, 3, "") @@ -67,7 +67,7 @@ class TDTestCase: tdSql.query(f"select distinct tbname from {dbname}.`sys_if_bytes_out`") tdSql.checkRows(2) - tdSql.query(f"select * from {dbname}.t_f67972b49aa8adf8bca5d0d54f0d850d order by times") + tdSql.query(f"select * from {dbname}.t_f67972b49aa8adf8bca5d0d54f0d850d order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 1, 1.300000000) tdSql.checkData(1, 1, 13.000000000) @@ -80,7 +80,7 @@ class TDTestCase: tdSql.query(f"select distinct tbname from {dbname}.`sys_cpu_nice`") tdSql.checkRows(3) - tdSql.query(f"select * from {dbname}.`sys_cpu_nice` order by times") + tdSql.query(f"select * from {dbname}.`sys_cpu_nice` order by _ts") tdSql.checkRows(4) tdSql.checkData(0, 1, 13.000000000) tdSql.checkData(0, 2, "web01") From c511979c9e96aa1588830099c607a685023a1d46 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 20:27:30 +0800 Subject: [PATCH 56/76] ci: add test case for tdgpt --- packaging/smokeTest/test_server_tdgpt.py | 134 +++++++++++++++++++++ packaging/smokeTest/test_server_unix_tdgpt | 2 + 2 files changed, 136 insertions(+) create mode 100644 packaging/smokeTest/test_server_tdgpt.py create mode 100644 packaging/smokeTest/test_server_unix_tdgpt diff --git a/packaging/smokeTest/test_server_tdgpt.py b/packaging/smokeTest/test_server_tdgpt.py new file mode 100644 index 0000000000..a97a2c6650 --- /dev/null +++ b/packaging/smokeTest/test_server_tdgpt.py @@ -0,0 +1,134 @@ +import pytest +import subprocess +import os +from versionCheckAndUninstallforPytest import UninstallTaos +import platform +import re +import time +import signal +import logging + + + 
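+# Test-driver overview: the smoke-test case list is read from
+# test_server_unix_tdgpt (see below), one "<dir>,<command>" entry per line;
+# setup_module stages the installed taos* binaries into
+# ../../debug/build/bin, renaming them for OEM builds such as ProDB, and
+# each case is then executed through the run_command fixture, whose return
+# code must be zero for TestServer.test_execute_cases to pass.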
+system = platform.system() +current_path = os.path.abspath(os.path.dirname(__file__)) + +with open("%s/test_server_unix_tdgpt" % current_path) as f: + cases = f.read().splitlines() + +OEM = ["ProDB"] + + +@pytest.fixture(scope="module") +def get_config(request): + verMode = request.config.getoption("--verMode") + taosVersion = request.config.getoption("--tVersion") + baseVersion = request.config.getoption("--baseVersion") + sourcePath = request.config.getoption("--sourcePath") + config = { + "verMode": verMode, + "taosVersion": taosVersion, + "baseVersion": baseVersion, + "sourcePath": sourcePath, + "system": platform.system(), + "arch": platform.machine() + } + return config + + +@pytest.fixture(scope="module") +def setup_module(get_config): + def run_cmd(command): + print("CMD:", command) + result = subprocess.run(command, capture_output=True, text=True, shell=True) + print("STDOUT:", result.stdout) + print("STDERR:", result.stderr) + print("Return Code:", result.returncode) + assert result.returncode == 0 + return result + + # setup before module tests + config = get_config + if config["system"] == "Windows": + cmd = r"mkdir ..\..\debug\build\bin" + else: + cmd = "mkdir -p ../../debug/build/bin/" + subprocess.getoutput(cmd) + if config["system"] == "Linux" or config["system"] == "Darwin" : # add tmq_sim + cmd = "cp -rf ../../../debug/build/bin/tmq_sim ../../debug/build/bin/." + subprocess.getoutput(cmd) + if config["system"] == "Darwin": + cmd = "sudo cp -rf /usr/local/bin/taos* ../../debug/build/bin/" + elif config["system"] == "Windows": + cmd = r"xcopy C:\TDengine\taos*.exe ..\..\debug\build\bin /Y" + else: + if config["baseVersion"] in OEM: + cmd = '''sudo find /usr/bin -name 'prodb*' -exec sh -c 'for file; do cp "$file" "../../debug/build/bin/taos${file##/usr/bin/%s}"; done' sh {} +''' % ( + config["baseVersion"].lower()) + else: + cmd = "sudo cp /usr/bin/taos* ../../debug/build/bin/" + run_cmd(cmd) + if config["baseVersion"] in OEM: # mock OEM + cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "ln -s /usr/bin/prodb /usr/local/bin/taos" + subprocess.getoutput(cmd) + + # yield + # + # name = "taos" + # if config["baseVersion"] in OEM: + # name = config["baseVersion"].lower() + # subprocess.getoutput("rm /usr/local/bin/taos") + # subprocess.getoutput("pkill taosd") + # UninstallTaos(config["taosVersion"], config["verMode"], True, name) + + +# use pytest fixture to exec case +@pytest.fixture(params=cases) +def run_command(request): + commands = request.param + if commands.strip().startswith("#"): + pytest.skip("This case has been marked as skipped") + d, command = commands.strip().split(",") + if system == "Windows": + cmd = r"cd %s\..\..\tests\%s && %s" % (current_path, d, command) + else: + cmd = "cd %s/../../tests/%s&&sudo %s" % (current_path, d, command) + print(cmd) + result = subprocess.run(cmd, capture_output=True, text=True, shell=True) + return { + "command": command, + "stdout": result.stdout, + "stderr": result.stderr, + "returncode": result.returncode + } + + +class TestServer: + + @pytest.mark.all + def 
test_execute_cases(self, setup_module, run_command): + # assert the result + if run_command['returncode'] != 0: + print(f"Running command: {run_command['command']}") + print("STDOUT:", run_command['stdout']) + print("STDERR:", run_command['stderr']) + print("Return Code:", run_command['returncode']) + else: + print(f"Running command: {run_command['command']}") + if len(run_command['stdout']) > 1000: + print("STDOUT:", run_command['stdout'][:1000] + "...") + else: + print("STDOUT:", run_command['stdout']) + print("STDERR:", run_command['stderr']) + print("Return Code:", run_command['returncode']) + + assert run_command[ + 'returncode'] == 0, f"Command '{run_command['command']}' failed with return code {run_command['returncode']}" diff --git a/packaging/smokeTest/test_server_unix_tdgpt b/packaging/smokeTest/test_server_unix_tdgpt new file mode 100644 index 0000000000..248e5205b6 --- /dev/null +++ b/packaging/smokeTest/test_server_unix_tdgpt @@ -0,0 +1,2 @@ +system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py + From 990a01788305500bcd052b795d794b43f73a5361 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 20:38:09 +0800 Subject: [PATCH 57/76] ci: add test case for tdgpt --- packaging/smokeTest/{test_server_tdgpt.py => test_anode.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packaging/smokeTest/{test_server_tdgpt.py => test_anode.py} (100%) diff --git a/packaging/smokeTest/test_server_tdgpt.py b/packaging/smokeTest/test_anode.py similarity index 100% rename from packaging/smokeTest/test_server_tdgpt.py rename to packaging/smokeTest/test_anode.py From 8427db23f55e690f918d2c7714a051374f130d7e Mon Sep 17 00:00:00 2001 From: haoranchen Date: Wed, 27 Nov 2024 21:50:16 +0800 Subject: [PATCH 58/76] Update 04-machine-learning.md --- .../06-TDgpt/05-anomaly-detection/04-machine-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md index d72b8e70a9..ec76d6a0a3 100644 --- a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md +++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/04-machine-learning.md @@ -9,7 +9,7 @@ Autoencoder[1]: TDgpt 内置使用自编码器(Autoencoder)的异 --- 在 options 中增加 model 的名称,ad_autoencoder_foo, 针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测 SELECT COUNT(*), _WSTART FROM foo -ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo'); +ANOMALY_WINDOW(col1, 'algo=encoder, model=ad_autoencoder_foo'); ``` ### 参考文献 From 9a23d34437bb2a17460c54540997c6b88c1cdcd7 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 27 Nov 2024 22:01:22 +0800 Subject: [PATCH 59/76] ci: add test case for tdgpt --- packaging/smokeTest/test_server_unix_tdgpt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packaging/smokeTest/test_server_unix_tdgpt b/packaging/smokeTest/test_server_unix_tdgpt index 248e5205b6..6d099d6d02 100644 --- a/packaging/smokeTest/test_server_unix_tdgpt +++ b/packaging/smokeTest/test_server_unix_tdgpt @@ -1,2 +1 @@ -system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py - +system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py \ No newline at end of file From 4571f593a10a69f929aa213a1e0db3c874b14b8f Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 27 Nov 2024 22:12:42 +0800 Subject: [PATCH 60/76] fix:[TD-33048] add ts to cols if dataFormat is true in schemaless to avoid schemal is old --- source/client/src/clientSml.c | 3 +-- 1 file changed, 1 insertion(+), 
2 deletions(-) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 911e3664f5..0b1637bebf 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -787,8 +787,7 @@ static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols) { if (sTmp == NULL) { SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } - if ((IS_VAR_DATA_TYPE(kv->type) && kv->length + VARSTR_HEADER_SIZE > sTmp->bytes) || - (!IS_VAR_DATA_TYPE(kv->type) && kv->length != sTmp->bytes)){ + if (IS_VAR_DATA_TYPE(kv->type) && kv->length + VARSTR_HEADER_SIZE > sTmp->bytes){ uError("column %s (type %s) bytes invalid. db bytes:%d, kv bytes:%zu", sTmp->name, tDataTypes[sTmp->type].name, sTmp->bytes, kv->length); SML_CHECK_CODE(TSDB_CODE_INTERNAL_ERROR); From 4fcb3a86894ed0d2040e20651caa9ab14a1a0c41 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 28 Nov 2024 11:36:54 +0800 Subject: [PATCH 61/76] ci: add test case for tdgpt --- tests/parallel_test/tdgpt_cases.task | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/parallel_test/tdgpt_cases.task b/tests/parallel_test/tdgpt_cases.task index 41a9c3b356..82911ed100 100644 --- a/tests/parallel_test/tdgpt_cases.task +++ b/tests/parallel_test/tdgpt_cases.task @@ -4,3 +4,5 @@ #tdgpt-test ,,n,script,./test.sh -f tsim/analytics/basic0.sim +,,n,system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py + From c23dcd10acf50a216d1ea310518c5ff9cb625b24 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 28 Nov 2024 11:46:11 +0800 Subject: [PATCH 62/76] ci: add test case for tdgpt --- tests/parallel_test/tdgpt_cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/tdgpt_cases.task b/tests/parallel_test/tdgpt_cases.task index 82911ed100..69123f4aac 100644 --- a/tests/parallel_test/tdgpt_cases.task +++ b/tests/parallel_test/tdgpt_cases.task @@ -4,5 +4,5 @@ #tdgpt-test ,,n,script,./test.sh -f tsim/analytics/basic0.sim -,,n,system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py +#,,n,system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py From c879d59b6cc8e024d0d5c520831828db303b3c65 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Thu, 28 Nov 2024 14:17:12 +0800 Subject: [PATCH 63/76] fix: change task status from succ to partsucc --- include/util/taoserror.h | 1 + source/libs/qworker/src/qwDbg.c | 4 ++++ source/libs/qworker/src/qworker.c | 4 ++++ source/util/src/terror.c | 1 + 4 files changed, 10 insertions(+) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 6cedaeeef1..6caac066de 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -640,6 +640,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE TAOS_DEF_ERROR_CODE(0, 0x0735) #define TSDB_CODE_QRY_FILTER_RANGE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0736) #define TSDB_CODE_QRY_FILTER_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0737) +#define TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS TAOS_DEF_ERROR_CODE(0, 0x0738) // grant #define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) diff --git a/source/libs/qworker/src/qwDbg.c b/source/libs/qworker/src/qwDbg.c index 897080df3e..5cef253cae 100644 --- a/source/libs/qworker/src/qwDbg.c +++ b/source/libs/qworker/src/qwDbg.c @@ -61,6 +61,10 @@ int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, break; case JOB_TASK_STATUS_SUCC: + if (newStatus == JOB_TASK_STATUS_PART_SUCC) { + QW_TASK_DLOG("task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); + return 
TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS; + } if (newStatus != JOB_TASK_STATUS_DROP && newStatus != JOB_TASK_STATUS_FAIL) { QW_ERR_JRET(TSDB_CODE_APP_ERROR); } diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 13e1d0e231..aae31480e2 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -684,6 +684,10 @@ _return: if (TSDB_CODE_SUCCESS == code && QW_PHASE_POST_QUERY == phase) { code = qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PART_SUCC, ctx->dynamicTask); + if (code == TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS && ctx->queryRsped) { + QW_TASK_DLOG("skip error: %s. ", tstrerror(code)); + code = TSDB_CODE_SUCCESS; + } ctx->queryGotData = true; } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 9e8a85d301..7e4454edf8 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -511,6 +511,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_NOT_SUPPORT_TYPE, "Not supported range t TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE, "Wrong operator type") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_RANGE_ERROR, "Wrong filter range") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_INVALID_TYPE, "Invalid filter type") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS, "Change task status from partial success to success") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired") From baced053e2ba6aed4b1298388151108e0270edbd Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Thu, 28 Nov 2024 14:18:11 +0800 Subject: [PATCH 64/76] fix: error desc --- source/util/src/terror.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 7e4454edf8..3b49163a71 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -511,7 +511,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_NOT_SUPPORT_TYPE, "Not supported range t TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE, "Wrong operator type") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_RANGE_ERROR, "Wrong filter range") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_INVALID_TYPE, "Invalid filter type") -TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS, "Change task status from partial success to success") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS, "Change task status from success to partial success") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired") From 95a1005e1f53b8c0efdf05f7c06b639a9ccda0d4 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 28 Nov 2024 15:09:20 +0800 Subject: [PATCH 65/76] update test case and remove invalid code --- source/client/src/clientRawBlockWrite.c | 4 +-- source/dnode/vnode/src/meta/metaTable.c | 10 -------- tests/script/tsim/tag/change_multi_tag.sim | 30 +++++++++++++++++----- 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 6331224c3a..3a23d38375 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -755,9 +755,9 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { } cJSON* colValue = cJSON_CreateString(buf); + taosMemoryFree(buf); RAW_NULL_CHECK(colValue); RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValue", colValue)); - taosMemoryFree(buf); } cJSON* isNullCJson = cJSON_CreateBool(isNull); @@ -804,9 +804,9 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { goto end; } cJSON* colValue = cJSON_CreateString(buf); + 
taosMemoryFree(buf); RAW_NULL_CHECK(colValue); RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValue", colValue)); - taosMemoryFree(buf); } cJSON* isNullCJson = cJSON_CreateBool(isNull); RAW_NULL_CHECK(isNullCJson); diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 8256b5e402..65e520bb4a 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -2215,11 +2215,6 @@ static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbRe metaError("meta/table: failed to update tag idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); } - if (NULL == ctbEntry.ctbEntry.pTags) { - metaError("meta/table: null tags, update tag val failed."); - goto _err; - } - SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid}; if (tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags, ((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn) < 0) { @@ -2454,11 +2449,6 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaError("meta/table: failed to update tag idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid); } - if (NULL == ctbEntry.ctbEntry.pTags) { - metaError("meta/table: null tags, update tag val failed."); - goto _err; - } - SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid}; if (tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags, ((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn) < 0) { diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim index 30ae1c653a..5bd8cba916 100644 --- a/tests/script/tsim/tag/change_multi_tag.sim +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -34,15 +34,30 @@ sql create table $ntable (ts timestamp, f int) sql insert into $tb values(now, 1) sql insert into $tb values(now, 1) -sql_error alter table $mt set tag tgcol1 = 1,tagcol2 = 2, tag3 = 4 # set tag value on supertable -sql_error alter table $ntable set tag f = 10 # set normal table value -sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2 # dumplicate tag name -sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2 # not exist tag + +# invalid sql +sql_error alter table $mt set tag tgcol1 = 1, +sql_error alter table $mt set tag , +sql_error alter table $mt set tag tgcol1=10,tagcol2= +#set tag value on supertable +sql_error alter table $mt set tag tgcol1 = 1,tagcol2 = 2, tag3 = 4 +#set normal table value +sql_error alter table $ntable set tag f = 10 +# duplicate tag name +sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2 +sql_error alter table $tbj set tag tagCol1=1,tagCol5=10, tagCol5=3 +# not exist tag +sql_error alter table $tbj set tag tagNotExist = 1,tagCol1 = 2 +sql_error alter table $tbj set tag tagCol1 = 2, tagNotExist = 1 +sql_error alter table $tbj set tagNotExist = 1 +sql_error alter table $tbj set tagNotExist = NULL, sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx" +# invalid tag value sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx", tagCol7="yyyyyyyyyyyyyyyyyyyyyyyyy" -sql_error alter table $tbj set tag tagCol1=1,tagCol5=10, tagCol1=3 +# invalid data type sql_error alter table $tbj set tag tagCol5="xxxx" -sql alter table $tbj set tag tagCol1 = 100, tagCol2 = 100 + +sql alter table $tbj set tag tagCol1 = 100, tagCol2 = 100 sql select * from $mt where tagCol2 = 100 if $rows != 0 then @@ -186,8 +201,9 @@ sql alter table $mt drop tag tagCol3 sql alter table $mt add tag 
tagCol8 int +#set not exist tag and value sql_error alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL -sql_error alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL +sql_error alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL sql alter table $tbj set tag tagCol8 = 8 From c21ff8fdfeb80153e11c728473392d1c22910338 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 28 Nov 2024 16:11:08 +0800 Subject: [PATCH 66/76] support escape and update test case --- source/libs/parser/src/parAstCreater.c | 1 + tests/script/tsim/tag/change_multi_tag.sim | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index e05a399d32..1217618c0a 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -2547,6 +2547,7 @@ SNode* createAlterSingleTagColumnNode(SAstCreateContext* pCtx, SToken* pTagName, pCtx->errCode = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT, (SNode**)&pStmt); CHECK_MAKE_NODE(pStmt); pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_TAG_VAL; + CHECK_NAME(checkColumnName(pCtx, pTagName)); COPY_STRING_FORM_ID_TOKEN(pStmt->colName, pTagName); pStmt->pVal = (SValueNode*)pVal; pStmt->pNodeListTagValue = NULL; diff --git a/tests/script/tsim/tag/change_multi_tag.sim b/tests/script/tsim/tag/change_multi_tag.sim index 5bd8cba916..470b49d4dc 100644 --- a/tests/script/tsim/tag/change_multi_tag.sim +++ b/tests/script/tsim/tag/change_multi_tag.sim @@ -55,7 +55,11 @@ sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx" # invalid tag value sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx", tagCol7="yyyyyyyyyyyyyyyyyyyyyyyyy" # invalid data type -sql_error alter table $tbj set tag tagCol5="xxxx" + +# escape +sql_error alter table $tbj set tag `tagCol1`=true +sql_error alter table $tbj set tag `tagCol1`=true,`tagCol2`=1,`tagNotExist`=10 +sql_error alter table $tbj set tag `tagCol1`=true,`tagCol2`=1,tagcol1=true sql alter table $tbj set tag tagCol1 = 100, tagCol2 = 100 @@ -195,6 +199,7 @@ if $rows != 1 then endi sql alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL +sql alter table $tbj set tag `tagcol1`=true,`tagcol2`=-10,`tagcol3`=-100, `tagcol4`=-1000,`tagcol5`=NULL,`tagcol6`=NULL,`tagcol7`=NULL sql alter table $mt drop tag tagCol7 sql alter table $mt drop tag tagCol3 From 03616aa5093f8b0b49ee065aa3e782e2a7c12b6b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 28 Nov 2024 17:31:11 +0800 Subject: [PATCH 67/76] doc: fix typo in gpt doc --- docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md index dc0a534706..5b49db330e 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md @@ -52,7 +52,7 @@ class _MyAnomalyDetectionService(AbstractAnomalyDetectionService): ```SQL --- 对 col 列进行异常检测,通过指定 algo 参数为 myad 来调用新添加的异常检测类 -SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col, 'algo=myad') +SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col, 'algo=myad') ``` 如果是第一次启动该 Anode, 请按照 [TDgpt 安装部署](../../management/) 里的步骤先将该 Anode 添加到 TDengine 系统中。 From aa9802890cf29249a1b31fc654b83b4a5302da4d Mon Sep 
17 00:00:00 2001 From: Haojun Liao Date: Thu, 28 Nov 2024 17:31:52 +0800 Subject: [PATCH 68/76] doc: fix typo in gpt doc --- docs/zh/06-advanced/06-TDgpt/06-dev/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md index b7f048cefc..8834dba7c6 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md @@ -21,7 +21,7 @@ Anode的主要目录结构如下图所示 . ├── cfg ├── model -│   └── ac_detection +│   └── ad_detection ├── release ├── script └── taosanalytics From 70521e6b190498c0514465d46cada5d84765a39e Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Thu, 28 Nov 2024 18:31:45 +0800 Subject: [PATCH 69/76] add run all ci cases script --- tests/run_all_ci_cases.sh | 159 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) create mode 100644 tests/run_all_ci_cases.sh diff --git a/tests/run_all_ci_cases.sh b/tests/run_all_ci_cases.sh new file mode 100644 index 0000000000..486c47ff4c --- /dev/null +++ b/tests/run_all_ci_cases.sh @@ -0,0 +1,159 @@ +#!/bin/bash + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +TDENGINE_DIR=/root/TDinternal/community + + +#echo "TDENGINE_DIR = $TDENGINE_DIR" +today=`date +"%Y%m%d"` +TDENGINE_ALLCI_REPORT=$TDENGINE_DIR/tests/all-ci-report-$today.log + + +function runCasesOneByOne () { + while read -r line; do + if [[ "$line" != "#"* ]]; then + cmd=`echo $line | cut -d',' -f 5` + if [[ "$2" == "sim" ]] && [[ $line == *"script"* ]]; then + case=`echo $cmd | cut -d' ' -f 3` + start_time=`date +%s` + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \ + echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT \ + || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + end_time=`date +%s` + echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT + if $case failed + + elif [[ "$line" == *"$2"* ]]; then + if [[ "$cmd" == *"pytest.sh"* ]]; then + cmd=`echo $cmd | cut -d' ' -f 2-20` + fi + case=`echo $cmd | cut -d' ' -f 4-20` + start_time=`date +%s` + date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \ + echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \ + echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT + end_time=`date +%s` + echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT + fi + fi + done < $1 +} + +function runUnitTest() { + echo "=== Run unit test case ===" + echo " $TDENGINE_DIR/debug" + cd $TDENGINE_DIR/debug + ctest -j12 + echo "3.0 unit test done" +} + +function runSimCases() { + echo "=== Run sim cases ===" + + cd $TDENGINE_DIR/tests/script + runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/cases-test.task sim + + totalSuccess=`grep 'sim success' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalSuccess" -gt "0" ]; then + echo "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi + + totalFailed=`grep 'sim failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalFailed" -ne "0" ]; then + echo "### Total $totalFailed SIM test case(s) failed! 
###" | tee -a $TDENGINE_ALLCI_REPORT + fi +} + +function runPythonCases() { + echo "=== Run python cases ===" + + cd $TDENGINE_DIR/tests/parallel_test + sed -i '/compatibility.py/d' cases-test.task + + # army + cd $TDENGINE_DIR/tests/army + runCasesOneByOne ../parallel_test/cases-test.task army + + # system-test + cd $TDENGINE_DIR/tests/system-test + runCasesOneByOne ../parallel_test/cases-test.task system-test + + # develop-test + cd $TDENGINE_DIR/tests/develop-test + runCasesOneByOne ../parallel_test/cases-test.task develop-test + + totalSuccess=`grep 'py success' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalSuccess" -gt "0" ]; then + echo "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi + + totalFailed=`grep 'py failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l` + if [ "$totalFailed" -ne "0" ]; then + echo "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT + fi +} + + +function runTest() { + echo "run Test" + + cd $TDENGINE_DIR + [ -d sim ] && rm -rf sim + [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT + + runUnitTest + runSimCases + runPythonCases + + stopTaosd + cd $TDENGINE_DIR/tests/script + find . -name '*.sql' | xargs rm -f + + cd $TDENGINE_DIR/tests/pytest + find . -name '*.sql' | xargs rm -f +} + +function stopTaosd { + echo "Stop taosd start" + systemctl stop taosd + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done + echo "Stop tasod end" +} + +function stopTaosadapter { + echo "Stop taosadapter" + systemctl stop taosadapter.service + PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosadapter + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done + echo "Stop tasoadapter end" + +} + +WORK_DIR=/root/ + +date >> $WORK_DIR/date.log +echo "Run ALL CI Test Cases" | tee -a $WORK_DIR/date.log + +stopTaosd + +runTest + +date >> $WORK_DIR/date.log +echo "End of CI Test Cases" | tee -a $WORK_DIR/date.log \ No newline at end of file From 0675a308cd66515f3b1c6b90458498e3ad0b770e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 28 Nov 2024 18:34:40 +0800 Subject: [PATCH 70/76] doc: fix some typos. 
--- docs/zh/06-advanced/06-TDgpt/06-dev/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md index 8834dba7c6..6db7d13079 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md @@ -63,7 +63,7 @@ Anode采用算法自动加载模式,因此只识别符合命名约定的 Pytho ```SQL --- algo 后面的参数 name 即为类属性 `name` -SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=name') +SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name') ``` ## 添加具有模型的分析算法 @@ -78,5 +78,5 @@ SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=name') ```SQL --- 在 options 中增加 model 的名称,ad_autoencoder_foo, 针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测 -SELECT COUNT(*), _WSTART FROM foo ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo'); +SELECT COUNT(*), _WSTART FROM foo ANOMALY_WINDOW(col1, 'algo=encoder, model=ad_autoencoder_foo'); ``` From 95124d201e81201b385145d2fc85b03cff8cea3e Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Thu, 28 Nov 2024 18:58:42 +0800 Subject: [PATCH 71/76] ci: add smoking scripts for docs --- packaging/smokeTest/test_smoking_selfhost.sh | 94 ++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 packaging/smokeTest/test_smoking_selfhost.sh diff --git a/packaging/smokeTest/test_smoking_selfhost.sh b/packaging/smokeTest/test_smoking_selfhost.sh new file mode 100644 index 0000000000..d8099dfaea --- /dev/null +++ b/packaging/smokeTest/test_smoking_selfhost.sh @@ -0,0 +1,94 @@ +#!/bin/bash + +# Define log file and result files +LOG_FILE="test_server.log" +SUCCESS_FILE="success.txt" +FAILED_FILE="failed.txt" + +# Initialize/clear result files +> "$SUCCESS_FILE" +> "$FAILED_FILE" +> "$LOG_FILE" + +# Switch to the target directory +TARGET_DIR="../../tests/system-test/" + +echo "===== Changing Directory to $TARGET_DIR =====" | tee -a "$LOG_FILE" + +if cd "$TARGET_DIR"; then + echo "Successfully changed directory to $TARGET_DIR" | tee -a "$LOG_FILE" +else + echo "ERROR: Failed to change directory to $TARGET_DIR" | tee -a "$LOG_FILE" + exit 1 +fi + +# Define the Python commands to execute :case list +commands=( + "python3 ./test.py -f 2-query/join.py" + "python3 ./test.py -f 1-insert/insert_column_value.py" + "python3 ./test.py -f 2-query/primary_ts_base_5.py" + "python3 ./test.py -f 2-query/case_when.py" + "python3 ./test.py -f 2-query/partition_limit_interval.py" + "python3 ./test.py -f 2-query/fill.py" + "python3 ./test.py -f query/query_basic.py -N 3" + "python3 ./test.py -f 7-tmq/basic5.py" + "python3 ./test.py -f 8-stream/stream_basic.py" + "python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3" +) + +# Counters +total=${#commands[@]} +success_count=0 +fail_count=0 + +# Execute each command +for cmd in "${commands[@]}" +do + echo "===== Executing Command: $cmd =====" | tee -a "$LOG_FILE" + # Execute the command and append output and errors to the log file + eval "$cmd" >> "$LOG_FILE" 2>&1 + exit_code=$? 
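+    # A zero exit code records the case in $SUCCESS_FILE below; any other
+    # value records it in $FAILED_FILE and is counted in the final report.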
+ + if [ $exit_code -eq 0 ]; then + echo "SUCCESS: $cmd" | tee -a "$LOG_FILE" + echo "$cmd" >> "$SUCCESS_FILE" + ((success_count++)) + else + echo "FAILED: $cmd" | tee -a "$LOG_FILE" + echo "$cmd" >> "$FAILED_FILE" + ((fail_count++)) + fi + echo "" | tee -a "$LOG_FILE" # Add an empty line for separation +done + +# Generate the final report +echo "===== Test Completed =====" | tee -a "$LOG_FILE" +echo "Total Commands Executed: $total" | tee -a "$LOG_FILE" +echo "Successful: $success_count" | tee -a "$LOG_FILE" +echo "Failed: $fail_count" | tee -a "$LOG_FILE" + +if [ $fail_count -ne 0 ]; then + echo "" | tee -a "$LOG_FILE" + echo "The following commands failed:" | tee -a "$LOG_FILE" + cat "$FAILED_FILE" | tee -a "$LOG_FILE" +else + echo "All commands executed successfully." | tee -a "$LOG_FILE" +fi + +# Optional: Generate a separate report file +echo "" > "report.txt" +echo "===== Test Report =====" >> "report.txt" +echo "Total Commands Executed: $total" >> "report.txt" +echo "Successful: $success_count" >> "report.txt" +echo "Failed: $fail_count" >> "report.txt" + +if [ $fail_count -ne 0 ]; then + echo "" >> "report.txt" + echo "The following commands failed:" >> "report.txt" + cat "$FAILED_FILE" >> "report.txt" +else + echo "All commands executed successfully." >> "report.txt" +fi + +echo "Detailed logs can be found in $LOG_FILE" +echo "Test report can be found in report.txt" \ No newline at end of file From 521ca93cc0f31fa56aa8814a30904463d98ed9db Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 28 Nov 2024 19:27:53 +0800 Subject: [PATCH 72/76] doc: update error codes --- docs/zh/14-reference/09-error-code.md | 48 +++++++++++++-------------- source/util/src/terror.c | 1 + 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/docs/zh/14-reference/09-error-code.md b/docs/zh/14-reference/09-error-code.md index fbd347b6af..685967ef83 100644 --- a/docs/zh/14-reference/09-error-code.md +++ b/docs/zh/14-reference/09-error-code.md @@ -11,32 +11,32 @@ description: TDengine 服务端的错误码列表和详细说明 ## rpc -| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 | -| ---------- | -------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | -| 0x8000000B | 无法正常收发请求 | 1. 网络不通 2. 多次重试、依然不能执行REQ | 1. 检查网络 2. 分析日志,具体原因比较复杂 | -| 0x80000013 | 客户端和服务端之间的时间时间相差太大, 默认900s | 1. 客户端和服务端不在同一个时区 2. 客户端和服务端在同一个时区,但是两者的时间不同步、相差太大 | 1. 调整到同一个时区 2. 校准客户端和服务端的时间 | -| 0x80000015 | 无法解析FQDN | 设置了无效的fqdn | 检查fqdn 的设置 | -| 0x80000017 | 当前端口被占用 | 端口P已经被某个服务占用的情况下,新启的服务依然尝试绑定端口P | 1. 改动新服务的服务端口 2. 杀死之前占用端口的服务 | -| 0x80000018 | 由于网络抖动/ REQ 请求时间过长导致系统主动摘掉REQ 所使用的conn | 1. 网络抖动 2. REQ 请求时间过长,大于900s | 1. 设置系统的最大超时时长 2. 检查REQ的请求时长 | -| 0x80000019 | 暂时没有用到这个错误码 | | | -| 0x80000020 | 多次重试之后,所有dnode 依然都链接不上 | 1. 所有的节点都挂了 2. 有节点挂了,但是存活的节点都不是master 节点 | 1. 查看taosd 的状态、分析taosd 挂掉的原因或者分析存活的taosd 为什么不是主 | -| 0x80000021 | 多次重试之后,所有dnode 依然都链接不上 | 1. 网络异常 2. req请求时间太长,服务端可能发生死锁等问题。系统自己断开了链接 | 1. 检查网络 2. 检查req 的执行时间 | -| 0x80000022 | 达到了可用链接上线。 | 1. 并发太高、占用链接已经到达上线。 2. 服务端的BUG,导致conn 一直不释放, | 1. 提高tsNumOfRpcSessions这个值。 2. tsTimeToGetAvailableConn 3. 
分析服务端不释放的conn 的原因 | +| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 | +| ---------- | -----------------------------| --- | --- | +| 0x8000000B | Unable to establish connection | 1.网络不通 2.多次重试、依然不能执行请求 | 1.检查网络 2.分析日志,具体原因比较复杂 | +| 0x80000013 | Client and server's time is not synchronized | 1.客户端和服务端不在同一个时区 2.客户端和服务端在同一个时区,但是两者的时间不同步,相差超过 900 秒 | 1.调整到同一个时区 2.校准客户端和服务端的时间| +| 0x80000015 | Unable to resolve FQDN | 设置了无效的 fqdn | 检查fqdn 的设置 | +| 0x80000017 | Port already in use | 端口已经被某个服务占用的情况下,新启的服务依然尝试绑定该端口 | 1.改动新服务的服务端口 2.杀死之前占用端口的服务 | +| 0x80000018 | Conn is broken | 由于网络抖动或者请求时间过长(超过 900 秒),导致系统主动摘掉连接 | 1.设置系统的最大超时时长 2.检查请求时长 | +| 0x80000019 | Conn read timeout | 未启用 | | +| 0x80000020 | some vnode/qnode/mnode(s) out of service | 多次重试之后,仍然无法连接到集群,可能是所有的节点都宕机了,或者存活的节点不是 Leader 节点 | 1.查看 taosd 的状态、分析 taosd 宕机的原因 2.分析存活的 taosd 为什么无法选取 Leader | +| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | 多次重试之后,仍然无法连接到集群,可能是网络异常、请求时间太长、服务端死锁等问题 | 1.检查网络 2.请求的执行时间 | +| 0x80000022 | rpc open too many session | 1.并发太高导致占用链接已经到达上限 2.服务端的 BUG,导致连接一直不释放 | 1.调整配置参数 numOfRpcSessions 2.调整配置参数 timeToGetAvailableConn 3.分析服务端不释放的连接的原因 | ## common -| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 | -| ---------- | --------------------------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -| 0x80000100 | Operation not supported | 1. 操作不被支持、不允许的场景 | 1. 检查操作是否有误,确认该功能是否被支持 | -| 0x80000102 | Out of Memory | 1. 客户端或服务端内存分配失败的场景 | 1. 检查客户端、服务端内存是否充足 | -| 0x80000104 | Data file corrupted | 1. 存储数据文件损坏 2. udf文件无法创建 | 1. 联系涛思客户支持 2. 确认服务端对临时目录有读写创建文件权限 | -| 0x80000106 | too many Ref Objs | 无可用ref资源 | 保留现场和日志,github上报issue | -| 0x80000107 | Ref ID is removed | 引用的ref资源已经释放 | 保留现场和日志,github上报issue | -| 0x80000108 | Invalid Ref ID | 无效ref ID | 保留现场和日志,github上报issue | -| 0x8000010A | Ref is not there | ref信息不存在 | 保留现场和日志,github上报issue | -| 0x80000110 | | | | -| 0x80000111 | Action in progress | 操作进行中 | 1. 等待操作完成 2. 根据需要取消操作 3. 当超出合理时间仍然未完成可保留现场和日志,或联系客户支持 | +| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 | +| ---------- | -----------------------------| --- | --- | +| 0x80000100 | Operation not supported | 操作不被支持、不允许的场景 | 检查操作是否有误,确认该功能是否被支持 | +| 0x80000102 | Out of Memory | 客户端或服务端内存分配失败的场景 | 检查客户端、服务端内存是否充足 | +| 0x80000104 | Data file corrupted | 1.存储数据文件损坏 2.udf 文件无法创建 | 1.联系涛思客户支持 2.确认服务端对临时目录有读写创建文件权限 | +| 0x80000106 | too many Ref Objs | 无可用ref资源 | 保留现场和日志,github 上报 issue | +| 0x80000107 | Ref ID is removed | 引用的ref资源已经释放 | 保留现场和日志,github 上报 issue | +| 0x80000108 | Invalid Ref ID | 无效ref ID | 保留现场和日志,github 上报 issue | +| 0x8000010A | Ref is not there | ref 信息不存在 | 保留现场和日志,github 上报 issue | +| 0x80000110 | Unexpected generic error | 系统内部错误 | 保留现场和日志,github 上报 issue | +| 0x80000111 | Action in progress | 操作进行中 | 1.等待操作完成 2.根据需要取消操作 3.当超出合理时间仍然未完成可保留现场和日志,或联系客户支持 | | 0x80000112 | Out of range | 配置参数超出允许值范围 | 更改参数 | | 0x80000115 | Invalid message | 消息错误 | 1. 检查是否存在节点间版本不一致 2. 保留现场和日志,github上报issue | | 0x80000116 | Invalid message len | 消息长度错误 | 1. 检查是否存在节点间版本不一致 2. 
保留现场和日志,github上报issue | @@ -309,11 +309,11 @@ description: TDengine 服务端的错误码列表和详细说明 | 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 | | ---------- | ---------------------------- | ----------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | -| 0x80000903 | Sync timeout | 场景1:发生了切主;旧主节点上已经开始协商但尚未达成一致的请求将超时。 场景2:从节点响应超时,导致协商超时。 | 检查集群状态,例如:show vgroups;查看服务端日志,以及服务端节点之间的网络状况。 | +| 0x80000903 | Sync timeout | 场景1:发生了切主 旧主节点上已经开始协商但尚未达成一致的请求将超时。 场景2:从节点响应超时,导致协商超时。 | 检查集群状态,例如:show vgroups 查看服务端日志,以及服务端节点之间的网络状况。 | | 0x8000090C | Sync leader is unreachable | 场景1:选主过程中 场景2:客户端请求路由到了从节点,且重定向失败 场景3:客户端或服务端网络配置错误 | 检查集群状态、网络配置、应用程序访问状态等。查看服务端日志,以及服务端节点之间的网络状况。 | | 0x8000090F | Sync new config error | 成员变更配置错误 | 内部错误,用户无法干预 | | 0x80000911 | Sync not ready to propose | 场景1:恢复未完成 | 检查集群状态,例如:show vgroups。查看服务端日志,以及服务端节点之间的网络状况。 | -| 0x80000914 | Sync leader is restoring | 场景1:发生了切主;选主后,日志重演中 | 检查集群状态,例如:show vgroups。查看服务端日志,观察恢复进度。 | +| 0x80000914 | Sync leader is restoring | 场景1:发生了切主 选主后,日志重演中 | 检查集群状态,例如:show vgroups。查看服务端日志,观察恢复进度。 | | 0x80000915 | Sync invalid snapshot msg | 快照复制消息错误 | 服务端内部错误 | | 0x80000916 | Sync buffer is full | 场景1:客户端请求并发数特别大,超过了服务端处理能力,或者因为网络和CPU资源严重不足,或者网络连接问题等。 | 检查集群状态,系统资源使用率(例如磁盘IO、CPU、网络通信等),以及节点之间网络连接状况。 | | 0x80000917 | Sync write stall | 场景1:状态机执行被阻塞,例如因系统繁忙,磁盘IO资源严重不足,或落盘失败等 | 检查集群状态,系统资源使用率(例如磁盘IO和CPU等),以及是否发生了落盘失败等。 | diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 9e8a85d301..e468d4d5ae 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -53,6 +53,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_BROKEN_LINK, "Conn is broken") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_TIMEOUT, "Conn read timeout") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED, "some vnode/qnode/mnode(s) out of service") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SOMENODE_BROKEN_LINK, "some vnode/qnode/mnode(s) conn is broken") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MAX_SESSIONS, "rpc open too many session") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_ERROR, "rpc network error") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_BUSY, "rpc network busy") From a274c398aae675f5956f203932a89bffae881d39 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 28 Nov 2024 18:47:30 +0800 Subject: [PATCH 73/76] doc: add some exec examples. 
--- docs/zh/06-advanced/06-TDgpt/02-management.md | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index 9aaa123299..82af1ab350 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -107,12 +107,33 @@ node_url 是提供服务的 Anode 的 IP 和 PORT组成的字符串, 例如:`c 列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`等属性。 ```sql SHOW ANODES; + +taos> show anodes; + id | url | status | create_time | update_time | +================================================================================================================== + 1 | 192.168.0.1:6090 | ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 | +Query OK, 1 row(s) in set (0.037205s) + ``` #### 查看提供的时序数据分析服务 ```SQL SHOW ANODES FULL; + +taos> show anodes full; + id | type | algo | +============================================================================ + 1 | anomaly-detection | shesd | + 1 | anomaly-detection | iqr | + 1 | anomaly-detection | ksigma | + 1 | anomaly-detection | lof | + 1 | anomaly-detection | grubbs | + 1 | anomaly-detection | encoder | + 1 | forecast | holtwinters | + 1 | forecast | arima | +Query OK, 8 row(s) in set (0.008796s) + ``` #### 刷新集群中的分析算法缓存 From 3d632784463e697aded2020f47698c7834eec0d7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 28 Nov 2024 19:15:57 +0800 Subject: [PATCH 74/76] doc: add more details about add algorithms with training models. --- docs/zh/06-advanced/06-TDgpt/06-dev/index.md | 29 +++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md index 6db7d13079..072a66c7d3 100644 --- a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md +++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md @@ -21,7 +21,7 @@ Anode的主要目录结构如下图所示 . ├── cfg ├── model -│   └── ad_detection +│   └── ad_autoencoder ├── release ├── script └── taosanalytics @@ -72,11 +72,32 @@ SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name') 将具有模型的分析算法添加到 Anode 中,首先需要在 `model` 目录中建立该算法对应的目录(目录名称可自拟),将采用该算法针对不同的输入时间序列数据生成的训练模型均需要保存在该目录下,同时目录名称要在分析算法中确定,以便能够固定加载该目录下的分析模型。为了确保模型能够正常读取加载,存储的模型使用`joblib`库进行序列化保存。 下面以自编码器(Autoencoder)为例,说明如何添加要预先训练的模型进行异常检测。 -首先我们在`model`目录中创建一个目录 -- `ad_detection`,该目录将用来保存所有使用自编码器训练的模型。然后,我们使用自编码器对 foo 表的时间序列数据进行训练,得到模型 ad_autoencoder_foo,使用 `joblib`序列化以后保存在`ad_detection` 目录中。 +首先我们在 `model `目录中创建一个目录 -- `ad_autoencoder` (见上图目录结构),该目录将用来保存所有使用自编码器训练的模型。然后,我们使用自编码器对 foo 表的时间序列数据进行训练,得到模型 针对 foo 表的模型,我们将其命名为 `ad_autoencoder_foo`,使用 `joblib`序列化该模型以后保存在 `ad_autoencoder` 目录中。如下图所示,ad_autoencoder_foo 由两个文件构成,分别是模型文件 (ad_autoencoder_foo.dat) 和模型文件描述文件 (ad_autoencoder_foo.info)。 -使用 SQL 调用已经保存的模型,需要在调用参数中指定模型名称``model=ad_autoencoder_foo`,而 `algo=encoder` 是确定调用的自编码器生成的模型(这里的`encoder`说明调用的是自编码器算法模型,该名称是添加算法的时候在代码中定义)以便能够调用该模型。 +```bash +. 
+├── cfg +├── model +│   └── ad_autoencoder +│   ├── ad_autoencoder_foo.dat +│   └── ad_autoencoder_foo.info +├── release +├── script +└── taosanalytics + ├── algo + │   ├── ad + │   └── fc + ├── misc + └── test + +``` + +接下来说明如何使用 SQL 调用该模型。 +通过设置参数 `algo=ad_encoder` 告诉分析平台要调用自编码器算法训练的模型(自编码器算法在可用算法列表中),因此直接指定即可。此外还需要指定自编码器针对某数据集训练的确定的模型,此时我们需要使用已经保存的模型 `ad_autoencoder_foo` ,因此需要添加参数 `model=ad_autoencoder_foo` 以便能够调用该模型。 ```SQL --- 在 options 中增加 model 的名称,ad_autoencoder_foo, 针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测 -SELECT COUNT(*), _WSTART FROM foo ANOMALY_WINDOW(col1, 'algo=encoder, model=ad_autoencoder_foo'); +SELECT COUNT(*), _WSTART +FROM foo +ANOMALY_WINDOW(col1, 'algo=ad_encoder, model=ad_autoencoder_foo'); ``` From 041de11054fe8965e1d906b7741b37f5fa20cc17 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 28 Nov 2024 19:16:45 +0800 Subject: [PATCH 75/76] doc: fix some typos. --- docs/zh/06-advanced/06-TDgpt/02-management.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md index 82af1ab350..ef1206fc04 100644 --- a/docs/zh/06-advanced/06-TDgpt/02-management.md +++ b/docs/zh/06-advanced/06-TDgpt/02-management.md @@ -129,7 +129,7 @@ taos> show anodes full; 1 | anomaly-detection | ksigma | 1 | anomaly-detection | lof | 1 | anomaly-detection | grubbs | - 1 | anomaly-detection | encoder | + 1 | anomaly-detection | ad_encoder | 1 | forecast | holtwinters | 1 | forecast | arima | Query OK, 8 row(s) in set (0.008796s) From 43c2fc263adfc16f9c8c4e829bc06d6368adb489 Mon Sep 17 00:00:00 2001 From: Alex Duan <51781608+DuanKuanJun@users.noreply.github.com> Date: Thu, 28 Nov 2024 20:40:28 +0800 Subject: [PATCH 76/76] Update 08-taos-cli.md error_code add taos-CLI error code guide item --- docs/zh/14-reference/02-tools/08-taos-cli.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/zh/14-reference/02-tools/08-taos-cli.md b/docs/zh/14-reference/02-tools/08-taos-cli.md index a6f2f7ae05..18b4b604f4 100644 --- a/docs/zh/14-reference/02-tools/08-taos-cli.md +++ b/docs/zh/14-reference/02-tools/08-taos-cli.md @@ -89,6 +89,9 @@ taos -h h1.taos.com -s "use db; show tables;" 也可以通过配置文件中的参数设置来控制 TDengine CLI 的行为。可用配置参数请参考[客户端配置](../../components/taosc) +## 错误代码表 +在 TDengine 3.3.5.0 版本后 TDengine CLI 在返回的错误信息中包含了具体的错误代码,用户可到 TDengine 官网的错误代码详细说明页面查找具体原因及解决措施,见:[错误码参考表](../error_code/) + ## TDengine CLI TAB 键补全 - TAB 键前为空命令状态下按 TAB 键,会列出 TDengine CLI 支持的所有命令