diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index b2aff6fd7e..bcb40f4175 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -262,6 +262,9 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf, c int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId, tb_uid_t suid); +bool alreadyAddGroupId(char* ctbName); +bool isAutoTableName(char* ctbName); +void buildCtbNameAddGruopId(char* ctbName, uint64_t groupId); char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId); int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf); diff --git a/include/dnode/vnode/tqCommon.h b/include/dnode/vnode/tqCommon.h index 39e0c07406..50458d684f 100644 --- a/include/dnode/vnode/tqCommon.h +++ b/include/dnode/vnode/tqCommon.h @@ -32,8 +32,9 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve bool isLeader, bool restored); int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen); int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader); -int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks); +int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta); int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta); +int32_t tqStreamTasksGetTotalNum(SStreamMeta* pMeta); int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, SRpcMsg* pMsg); int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg); int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* pMsg, bool fromVnode); diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 1dbc8f2f76..669340f9e5 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -189,7 +189,8 @@ typedef struct TsdReader { typedef struct SStoreCacheReader { int32_t (*openReader)(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols, - SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr); + SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr, + SArray *pFuncTypeList); void *(*closeReader)(void *pReader); int32_t (*retrieveRows)(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds, SArray *pTableUidList); diff --git a/include/libs/function/function.h b/include/libs/function/function.h index ffaad69373..8863201094 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -249,6 +249,10 @@ typedef struct SPoint { int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2, int32_t inputType); +#define LEASTSQUARES_DOUBLE_ITEM_LENGTH 25 +#define LEASTSQUARES_BUFF_LENGTH 128 +#define DOUBLE_PRECISION_DIGITS "16e" + #ifdef __cplusplus } #endif diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 7fbdbfb211..891084419b 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -149,6 +149,8 @@ void nodesRewriteExprPostOrder(SNode** pNode, FNodeRewriter rewriter, void* pCon void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext); bool nodesEqualNode(const SNode* a, const SNode* b); +bool nodeListNodeEqual(const SNodeList* a, const SNode* b); + bool nodesMatchNode(const SNode* pSub, const SNode* pNode); SNode* 
nodesCloneNode(const SNode* pNode); diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 5b32ec9529..e9d4db7606 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -120,6 +120,7 @@ typedef struct SScanLogicNode { bool onlyMetaCtbIdx; // for tag scan with no tbname bool filesetDelimited; // returned blocks delimited by fileset bool isCountByTag; // true if selectstmt hasCountFunc & part by tag/tbname + SArray* pFuncTypes; // for last, last_row } SScanLogicNode; typedef struct SJoinLogicNode { @@ -155,6 +156,7 @@ typedef struct SProjectLogicNode { SNodeList* pProjections; char stmtName[TSDB_TABLE_NAME_LEN]; bool ignoreGroupId; + bool inputIgnoreGroup; } SProjectLogicNode; typedef struct SIndefRowsFuncLogicNode { @@ -402,6 +404,7 @@ typedef struct SLastRowScanPhysiNode { bool groupSort; bool ignoreNull; SNodeList* pTargets; + SArray* pFuncTypes; } SLastRowScanPhysiNode; typedef SLastRowScanPhysiNode STableCountScanPhysiNode; @@ -448,6 +451,7 @@ typedef struct SProjectPhysiNode { SNodeList* pProjections; bool mergeDataBlock; bool ignoreGroupId; + bool inputIgnoreGroup; } SProjectPhysiNode; typedef struct SIndefRowsFuncPhysiNode { diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 331679a3d3..9647c0adac 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -89,6 +89,7 @@ typedef struct SColumnNode { typedef struct SColumnRefNode { ENodeType type; + SDataType resType; char colName[TSDB_COL_NAME_LEN]; } SColumnRefNode; @@ -532,6 +533,7 @@ int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, EColle typedef bool (*FFuncClassifier)(int32_t funcId); int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs); +int32_t nodesCollectSelectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList* pFuncs); int32_t nodesCollectSpecialNodes(SSelectStmt* pSelect, ESqlClause clause, ENodeType type, SNodeList** pNodes); diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 8b5bf1abb1..f79a0a0718 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -21,12 +21,12 @@ extern "C" { #endif +#include "systable.h" #include "tarray.h" #include "thash.h" #include "tlog.h" #include "tmsg.h" #include "tmsgcb.h" -#include "systable.h" typedef enum { JOB_TASK_STATUS_NULL = 0, @@ -90,8 +90,7 @@ typedef struct SExecResult { void* res; } SExecResult; - -#pragma pack(push, 1) +#pragma pack(push, 1) typedef struct SCTableMeta { uint64_t uid; uint64_t suid; @@ -100,8 +99,7 @@ typedef struct SCTableMeta { } SCTableMeta; #pragma pack(pop) - -#pragma pack(push, 1) +#pragma pack(push, 1) typedef struct STableMeta { // BEGIN: KEEP THIS PART SAME WITH SCTableMeta uint64_t uid; @@ -137,8 +135,8 @@ typedef struct SDBVgInfo { int8_t hashMethod; int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT int64_t stateTs; - SHashObj* vgHash; // key:vgId, value:SVgroupInfo - SArray* vgArray; // SVgroupInfo + SHashObj* vgHash; // key:vgId, value:SVgroupInfo + SArray* vgArray; // SVgroupInfo } SDBVgInfo; typedef struct SUseDbOutput { @@ -173,6 +171,7 @@ typedef struct SDataBuf { void* pData; uint32_t len; void* handle; + int64_t handleRefId; SEpSet* pEpSet; } SDataBuf; @@ -283,7 +282,7 @@ void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst); int32_t 
cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst); void freeVgInfo(SDBVgInfo* vgInfo); -void freeDbCfgInfo(SDbCfgInfo *pInfo); +void freeDbCfgInfo(SDbCfgInfo* pInfo); extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen, void* (*mallocFp)(int64_t)); @@ -314,7 +313,9 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t ((_code) == TSDB_CODE_SYN_NOT_LEADER || (_code) == TSDB_CODE_SYN_RESTORING || (_code) == TSDB_CODE_SYN_INTERNAL_ERROR) #define SYNC_OTHER_LEADER_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_MNODE_NOT_FOUND) -#define NO_RET_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED) +#define NO_RET_REDIRECT_ERROR(_code) \ + ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || \ + (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED) #define NEED_REDIRECT_ERROR(_code) \ (NO_RET_REDIRECT_ERROR(_code) || SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || \ diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 993bb0a2a0..c1131a62a4 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -62,9 +62,10 @@ typedef struct SStreamTask SStreamTask; typedef struct SStreamQueue SStreamQueue; typedef struct SStreamTaskSM SStreamTaskSM; -#define SSTREAM_TASK_VER 2 +#define SSTREAM_TASK_VER 3 #define SSTREAM_TASK_INCOMPATIBLE_VER 1 #define SSTREAM_TASK_NEED_CONVERT_VER 2 +#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 enum { STREAM_STATUS__NORMAL = 0, @@ -689,9 +690,9 @@ typedef struct STaskStatusEntry { int64_t verStart; // start version in WAL, only valid for source task int64_t verEnd; // end version in WAL, only valid for source task int64_t processedVer; // only valid for source task - int64_t activeCheckpointId; // current active checkpoint id + int64_t checkpointId; // current active checkpoint id int32_t chkpointTransId; // checkpoint trans id - bool checkpointFailed; // denote if the checkpoint is failed or not + int8_t checkpointFailed; // denote if the checkpoint is failed or not bool inputQChanging; // inputQ is changing or not int64_t inputQUnchangeCounter; double inputQUsed; // in MiB @@ -800,7 +801,7 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_ int64_t* oldStage); int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList); void streamTaskResetUpstreamStageInfo(SStreamTask* pTask); -bool streamTaskAllUpstreamClosed(SStreamTask* pTask); +bool streamTaskIsAllUpstreamClosed(SStreamTask* pTask); bool streamTaskSetSchedStatusWait(SStreamTask* pTask); int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask); int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask); @@ -825,8 +826,7 @@ int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue); // common int32_t streamRestoreParam(SStreamTask* pTask); -int32_t streamResetParamForScanHistory(SStreamTask* pTask); -void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta); +void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask); void streamTaskResume(SStreamTask* pTask); int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask); void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet); @@ -837,6 +837,7 @@ int32_t streamTaskReloadState(SStreamTask* pTask); void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId); void 
streamTaskOpenAllUpstreamInput(SStreamTask* pTask); int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key); +bool streamTaskIsSinkTask(const SStreamTask* pTask); void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask); void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc); @@ -873,6 +874,8 @@ void streamMetaStartHb(SStreamMeta* pMeta); bool streamMetaTaskInTimer(SStreamMeta* pMeta); int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready); +int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta); + void streamMetaRLock(SStreamMeta* pMeta); void streamMetaRUnLock(SStreamMeta* pMeta); void streamMetaWLock(SStreamMeta* pMeta); @@ -884,6 +887,8 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta); int32_t streamMetaStartAllTasks(SStreamMeta* pMeta); int32_t streamMetaStopAllTasks(SStreamMeta* pMeta); int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); +bool streamMetaAllTasksReady(const SStreamMeta* pMeta); +tmr_h streamTimerGetInstance(); // checkpoint int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq); diff --git a/include/os/osFile.h b/include/os/osFile.h index 503535a454..4d760a791f 100644 --- a/include/os/osFile.h +++ b/include/os/osFile.h @@ -117,6 +117,8 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName); int32_t taosSetFileHandlesLimit(); +int32_t taosLinkFile(char *src, char *dst); + #ifdef __cplusplus } #endif diff --git a/include/util/taoserror.h b/include/util/taoserror.h index b41078dfbf..b5389e60d3 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -744,6 +744,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_VIEW_QUERY TAOS_DEF_ERROR_CODE(0, 0x266C) #define TSDB_CODE_PAR_COL_QUERY_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x266D) #define TSDB_CODE_PAR_VIEW_CONFLICT_WITH_TABLE TAOS_DEF_ERROR_CODE(0, 0x266E) +#define TSDB_CODE_PAR_ORDERBY_AMBIGUOUS TAOS_DEF_ERROR_CODE(0, 0x266F) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) //planner diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 28a29c2138..d4f89d6b54 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -285,8 +285,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { SRetrieveTableRsp* pRsp = NULL; - int8_t biMode = atomic_load_8(&pRequest->pTscObj->biMode); - int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, biMode); + int8_t biMode = atomic_load_8(&pRequest->pTscObj->biMode); + int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, biMode); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } @@ -324,7 +324,8 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { return; } - int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, atomic_load_8(&pRequest->pTscObj->biMode)); + int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, + atomic_load_8(&pRequest->pTscObj->biMode)); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } @@ -492,7 +493,8 @@ void 
setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t pResInfo->userFields[i].bytes = pSchema[i].bytes; pResInfo->userFields[i].type = pSchema[i].type; - if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || pSchema[i].type == TSDB_DATA_TYPE_VARBINARY || pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) { + if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || pSchema[i].type == TSDB_DATA_TYPE_VARBINARY || + pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) { pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE; } else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) { pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; @@ -1103,9 +1105,9 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat SSqlCallbackWrapper* pWrapper) { int32_t code = TSDB_CODE_SUCCESS; pRequest->type = pQuery->msgType; - SArray* pMnodeList = NULL; - SQueryPlan* pDag = NULL; - int64_t st = taosGetTimestampUs(); + SArray* pMnodeList = NULL; + SQueryPlan* pDag = NULL; + int64_t st = taosGetTimestampUs(); if (!pRequest->parseOnly) { pMnodeList = taosArrayInit(4, sizeof(SQueryNodeLoad)); @@ -1278,7 +1280,7 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList, bool isView) { if (isView) { for (int32_t i = 0; i < tbNum; ++i) { SName* pViewName = taosArrayGet(tbList, i); - char dbFName[TSDB_DB_FNAME_LEN]; + char dbFName[TSDB_DB_FNAME_LEN]; tNameGetFullDbName(pViewName, dbFName); catalogRemoveViewMeta(pCatalog, dbFName, 0, pViewName->tname, 0); } @@ -1510,8 +1512,12 @@ int32_t doProcessMsgFromServer(void* param) { updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); - SDataBuf buf = { - .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = pEpSet}; + SDataBuf buf = {.msgType = pMsg->msgType, + .len = pMsg->contLen, + .pData = NULL, + .handle = pMsg->info.handle, + .handleRefId = pMsg->info.refId, + .pEpSet = pEpSet}; if (pMsg->contLen > 0) { buf.pData = taosMemoryCalloc(1, pMsg->contLen); @@ -2538,13 +2544,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) { if (code != TSDB_CODE_SUCCESS) { pRequest->code = code; taosMemoryFreeClear(pResultInfo->pData); - pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0); + pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0); return; } if (pRequest->code != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pResultInfo->pData); - pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0); + pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0); return; } @@ -2565,12 +2571,12 @@ static void fetchCallback(void* pResult, void* param, int32_t code) { atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen); } - pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, pResultInfo->numOfRows); + pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, pResultInfo->numOfRows); } void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param) { pRequest->body.fetchFp = fp; - ((SSyncQueryParam *)pRequest->body.interParam)->userParam = param; + ((SSyncQueryParam*)pRequest->body.interParam)->userParam = param; SReqResultInfo* pResultInfo = &pRequest->body.resInfo; @@ -2611,7 +2617,7 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t 
fp, void* param void doRequestCallback(SRequestObj* pRequest, int32_t code) { pRequest->inCallback = true; int64_t this = pRequest->self; - pRequest->body.queryFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, code); + pRequest->body.queryFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, code); SRequestObj* pReq = acquireRequest(this); if (pReq != NULL) { pReq->inCallback = false; @@ -2619,11 +2625,11 @@ void doRequestCallback(SRequestObj* pRequest, int32_t code) { } } -int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) { +int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, + SParseSqlRes* pRes) { #ifndef TD_ENTERPRISE return TSDB_CODE_SUCCESS; #else return clientParseSqlImpl(param, dbName, sql, parseOnly, effectiveUser, pRes); #endif } - diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index b99654172c..f6aef5aa26 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -57,10 +57,6 @@ void taos_cleanup(void) { tscStopCrashReport(); - int32_t id = clientReqRefPool; - clientReqRefPool = -1; - taosCloseRef(id); - hbMgrCleanUp(); catalogDestroy(); @@ -70,6 +66,12 @@ void taos_cleanup(void) { qCleanupKeywordsTable(); nodesDestroyAllocatorSet(); + cleanupTaskQueue(); + + int32_t id = clientReqRefPool; + clientReqRefPool = -1; + taosCloseRef(id); + id = clientConnRefPool; clientConnRefPool = -1; taosCloseRef(id); @@ -77,8 +79,6 @@ void taos_cleanup(void) { rpcCleanup(); tscDebug("rpc cleanup"); - cleanupTaskQueue(); - taosConvDestroy(); tscInfo("all local resources released"); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 054cff560f..f0d1630480 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2118,6 +2118,30 @@ _end: return TSDB_CODE_SUCCESS; } +void buildCtbNameAddGruopId(char* ctbName, uint64_t groupId){ + char tmp[TSDB_TABLE_NAME_LEN] = {0}; + snprintf(tmp, TSDB_TABLE_NAME_LEN, "_%"PRIu64, groupId); + ctbName[TSDB_TABLE_NAME_LEN - strlen(tmp) - 1] = 0; // put groupId to the end + strcat(ctbName, tmp); +} + +bool isAutoTableName(char* ctbName){ + return (strlen(ctbName) == 34 && ctbName[0] == 't' && ctbName[1] == '_'); +} + +bool alreadyAddGroupId(char* ctbName){ + size_t len = strlen(ctbName); + size_t _location = len - 1; + while(_location > 0){ + if(ctbName[_location] < '0' || ctbName[_location] > '9'){ + break; + } + _location--; + } + + return ctbName[_location] == '_' && len - 1 - _location > 15; //15 means the min length of groupid +} + char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) { char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1); if (!pBuf) { diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index c74007edd3..6c58fb746b 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -40,7 +40,7 @@ typedef struct SNodeEntry { typedef struct SVgroupChangeInfo { SHashObj *pDBMap; - SArray * pUpdateNodeList; // SArray + SArray *pUpdateNodeList; // SArray } SVgroupChangeInfo; static int32_t mndNodeCheckSentinel = 0; @@ -89,8 +89,8 @@ static void freeCheckpointCandEntry(void *); static SSdbRaw *mndStreamActionEncode(SStreamObj *pStream); static SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw); -SSdbRaw * mndStreamSeqActionEncode(SStreamObj *pStream); -SSdbRow * 
mndStreamSeqActionDecode(SSdbRaw *pRaw); +SSdbRaw *mndStreamSeqActionEncode(SStreamObj *pStream); +SSdbRow *mndStreamSeqActionDecode(SSdbRaw *pRaw); static int32_t mndStreamSeqActionInsert(SSdb *pSdb, SStreamSeq *pStream); static int32_t mndStreamSeqActionDelete(SSdb *pSdb, SStreamSeq *pStream); static int32_t mndStreamSeqActionUpdate(SSdb *pSdb, SStreamSeq *pOldStream, SStreamSeq *pNewStream); @@ -219,9 +219,9 @@ STREAM_ENCODE_OVER: SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) { terrno = TSDB_CODE_OUT_OF_MEMORY; - SSdbRow * pRow = NULL; + SSdbRow *pRow = NULL; SStreamObj *pStream = NULL; - void * buf = NULL; + void *buf = NULL; int8_t sver = 0; if (sdbGetRawSoftVer(pRaw, &sver) != 0) { @@ -301,7 +301,7 @@ static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pOldStream, SStream } SStreamObj *mndAcquireStream(SMnode *pMnode, char *streamName) { - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = sdbAcquire(pSdb, SDB_STREAM, streamName); if (pStream == NULL && terrno == TSDB_CODE_SDB_OBJ_NOT_THERE) { terrno = TSDB_CODE_MND_STREAM_NOT_EXIST; @@ -356,7 +356,7 @@ static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) { } static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, SCMCreateStreamReq *pCreate) { - SNode * pAst = NULL; + SNode *pAst = NULL; SQueryPlan *pPlan = NULL; mInfo("stream:%s to create", pCreate->name); @@ -596,7 +596,7 @@ int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStr static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStreamObj *pStream, const char *user) { SStbObj *pStb = NULL; - SDbObj * pDb = NULL; + SDbObj *pDb = NULL; SMCreateStbReq createReq = {0}; tstrncpy(createReq.name, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); @@ -685,9 +685,10 @@ _OVER: return -1; } -static int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool* hasEpset, int32_t taskId, int32_t nodeId) { +static int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool *hasEpset, int32_t taskId, int32_t nodeId) { *hasEpset = false; + pEpSet->numOfEps = 0; if (nodeId == SNODE_HANDLE) { SSnodeObj *pObj = NULL; void *pIter = NULL; @@ -773,7 +774,7 @@ int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) static int32_t checkForNumOfStreams(SMnode *pMnode, SStreamObj *pStreamObj) { // check for number of existed tasks int32_t numOfStream = 0; SStreamObj *pStream = NULL; - void * pIter = NULL; + void *pIter = NULL; while ((pIter = sdbFetch(pMnode->pSdb, SDB_STREAM, pIter, (void **)&pStream)) != NULL) { if (pStream->sourceDbUid == pStreamObj->sourceDbUid) { @@ -804,7 +805,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; SStreamObj streamObj = {0}; - char * sql = NULL; + char *sql = NULL; int32_t sqlLen = 0; terrno = TSDB_CODE_SUCCESS; @@ -931,7 +932,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { } _OVER: - if (terrno != TSDB_CODE_SUCCESS && terrno != TSDB_CODE_ACTION_IN_PROGRESS) { + if (terrno != TSDB_CODE_SUCCESS && terrno != TSDB_CODE_ACTION_IN_PROGRESS) { mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr()); } @@ -947,8 +948,8 @@ _OVER: int64_t mndStreamGenChkpId(SMnode *pMnode) { SStreamObj *pStream = NULL; - void * pIter = NULL; - SSdb * pSdb = pMnode->pSdb; + void *pIter = NULL; + SSdb *pSdb = pMnode->pSdb; int64_t maxChkpId = 0; while (1) { pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); @@ -966,7 +967,7 @@ 
int64_t mndStreamGenChkpId(SMnode *pMnode) { static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; if (sdbGetSize(pSdb, SDB_STREAM) <= 0) { return 0; } @@ -982,7 +983,7 @@ static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq) { static int32_t mndProcessStreamRemainChkptTmr(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; if (sdbGetSize(pSdb, SDB_STREAM) <= 0) { return 0; } @@ -1023,7 +1024,7 @@ static int32_t mndBuildStreamCheckpointSourceReq2(void **pBuf, int32_t *pLen, in return -1; } - void * abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); + void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); SEncoder encoder; tEncoderInit(&encoder, abuf, tlen); tEncodeStreamCheckpointSourceReq(&encoder, &req); @@ -1077,7 +1078,7 @@ static int32_t mndProcessStreamCheckpointTrans(SMnode *pMnode, SStreamObj *pStre // 1. redo action: broadcast checkpoint source msg for all source vg int32_t totLevel = taosArrayGetSize(pStream->tasks); for (int32_t i = 0; i < totLevel; i++) { - SArray * pLevel = taosArrayGetP(pStream->tasks, i); + SArray *pLevel = taosArrayGetP(pStream->tasks, i); SStreamTask *p = taosArrayGetP(pLevel, 0); if (p->info.taskLevel == TASK_LEVEL__SOURCE) { @@ -1091,7 +1092,7 @@ static int32_t mndProcessStreamCheckpointTrans(SMnode *pMnode, SStreamObj *pStre goto _ERR; } - void * buf; + void *buf; int32_t tlen; if (mndBuildStreamCheckpointSourceReq2(&buf, &tlen, pTask->info.nodeId, checkpointId, pTask->id.streamId, pTask->id.taskId, pTrans->id) < 0) { @@ -1143,7 +1144,7 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream int32_t totLevel = taosArrayGetSize(pStream->tasks); for (int32_t i = 0; i < totLevel; i++) { - SArray * pLevel = taosArrayGetP(pStream->tasks, i); + SArray *pLevel = taosArrayGetP(pStream->tasks, i); SStreamTask *pTask = taosArrayGetP(pLevel, 0); if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { @@ -1160,7 +1161,7 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream return -1; } - void * buf; + void *buf; int32_t tlen; if (mndBuildStreamCheckpointSourceReq2(&buf, &tlen, pTask->info.nodeId, chkptId, pTask->id.streamId, pTask->id.taskId, pTrans->id) < 0) { @@ -1279,7 +1280,7 @@ static int32_t mndCheckNodeStatus(SMnode *pMnode) { } for (int32_t i = 0; i < taosArrayGetSize(execInfo.pTaskList); ++i) { - STaskId * p = taosArrayGet(execInfo.pTaskList, i); + STaskId *p = taosArrayGet(execInfo.pTaskList, i); STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, p, sizeof(*p)); if (pEntry == NULL) { continue; @@ -1298,9 +1299,9 @@ static int32_t mndCheckNodeStatus(SMnode *pMnode) { } static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) { - SMnode * pMnode = pReq->info.node; - SSdb * pSdb = pMnode->pSdb; - void * pIter = NULL; + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; SStreamObj *pStream = NULL; int32_t code = 0; @@ -1322,7 +1323,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) { static int32_t mndProcessStreamCheckpointInCandid(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; - void * pIter = NULL; + void *pIter = NULL; int32_t code = 0; taosThreadMutexLock(&execInfo.lock); @@ -1368,7 +1369,7 @@ static int32_t mndProcessStreamCheckpointInCandid(SRpcMsg *pReq) { } static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { - SMnode * pMnode = pReq->info.node; + SMnode *pMnode = pReq->info.node; 
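Note on the epset handling in the nearby mndStream.c hunks: extractNodeEpset() now clears pEpSet->numOfEps on entry, and callers such as mndPauseStreamTask/mndResumeStreamTask declare `SEpSet epset = {0}`, so a failed lookup can no longer leave an uninitialized endpoint set behind. The following standalone sketch only illustrates that pattern; the struct and lookup function are simplified stand-ins, not the TDengine definitions.

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>
#include <string.h>

typedef struct { int numOfEps; char fqdn[2][32]; } EpSetSketch;  /* stand-in for SEpSet */

/* Stand-in for extractNodeEpset(): may fail, in which case the output must stay unusable. */
static int lookupEpset(int nodeId, EpSetSketch *out, int *hasEpset) {
  *hasEpset = 0;
  out->numOfEps = 0;            /* same idea as the added `pEpSet->numOfEps = 0;` */
  if (nodeId < 0) return -1;    /* lookup failed: caller must not send with this epset */
  out->numOfEps = 1;
  strcpy(out->fqdn[0], "node-1");
  *hasEpset = 1;
  return 0;
}

int main(void) {
  EpSetSketch epset = {0};      /* mirrors `SEpSet epset = {0};` in the patched callers */
  int hasEpset = 0;
  if (lookupEpset(-1, &epset, &hasEpset) != 0 || !hasEpset) {
    /* with zero-initialization, numOfEps is 0 here instead of stack garbage */
    printf("no endpoint, skip sending (numOfEps=%d)\n", epset.numOfEps);
    return 0;
  }
  printf("send to %s\n", epset.fqdn[0]);
  return 0;
}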
SStreamObj *pStream = NULL; SMDropStreamReq dropReq = {0}; @@ -1525,7 +1526,7 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { } int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams) { - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; SDbObj *pDb = mndAcquireDb(pMnode, dbName); if (pDb == NULL) { terrno = TSDB_CODE_MND_DB_NOT_SELECTED; @@ -1533,7 +1534,7 @@ int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams) } int32_t numOfStreams = 0; - void * pIter = NULL; + void *pIter = NULL; while (1) { SStreamObj *pStream = NULL; pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); @@ -1552,8 +1553,8 @@ int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams) } static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { - SMnode * pMnode = pReq->info.node; - SSdb * pSdb = pMnode->pSdb; + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; int32_t numOfRows = 0; SStreamObj *pStream = NULL; @@ -1728,7 +1729,7 @@ static int32_t setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SS colDataSetVal(pColInfo, numOfRows, (const char *)vbuf, false); // output queue -// sprintf(buf, queueInfoStr, pe->outputQUsed, pe->outputRate); + // sprintf(buf, queueInfoStr, pe->outputQUsed, pe->outputRate); // STR_TO_VARSTR(vbuf, buf); // pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1764,8 +1765,8 @@ static int32_t getNumOfTasks(SArray *pTaskList) { } static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) { - SMnode * pMnode = pReq->info.node; - SSdb * pSdb = pMnode->pSdb; + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; int32_t numOfRows = 0; SStreamObj *pStream = NULL; @@ -1790,7 +1791,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock int32_t numOfLevels = taosArrayGetSize(pLevel); for (int32_t j = 0; j < numOfLevels; j++) { SStreamTask *pTask = taosArrayGetP(pLevel, j); - int32_t code = setTaskAttrInResBlock(pStream, pTask, pBlock, numOfRows); + int32_t code = setTaskAttrInResBlock(pStream, pTask, pBlock, numOfRows); if (code == TSDB_CODE_SUCCESS) { numOfRows++; } @@ -1824,7 +1825,8 @@ static int32_t mndPauseStreamTask(SMnode *pMnode, STrans *pTrans, SStreamTask *p pReq->taskId = pTask->id.taskId; pReq->streamId = pTask->id.streamId; - SEpSet epset; + SEpSet epset = {0}; + mDebug("pause node:%d, epset:%d", pTask->info.nodeId, epset.numOfEps); bool hasEpset = false; int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS) { @@ -1870,12 +1872,14 @@ int32_t mndPauseAllStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStre return 0; } -static int32_t mndPersistStreamLog(STrans *pTrans, const SStreamObj *pStream, int8_t status) { - SStreamObj streamObj = {0}; - memcpy(streamObj.name, pStream->name, TSDB_STREAM_FNAME_LEN); - streamObj.status = status; +static int32_t mndPersistStreamLog(STrans *pTrans, SStreamObj *pStream, int8_t status) { + // SStreamObj streamObj = {0}; + // memcpy(streamObj.name, pStream->name, TSDB_STREAM_FNAME_LEN); + taosWLockLatch(&pStream->lock); + pStream->status = status; + SSdbRaw *pCommitRaw = mndStreamActionEncode(pStream); - SSdbRaw *pCommitRaw = mndStreamActionEncode(&streamObj); + taosWUnLockLatch(&pStream->lock); if (pCommitRaw == NULL) return -1; if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { 
mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); @@ -1886,7 +1890,7 @@ static int32_t mndPersistStreamLog(STrans *pTrans, const SStreamObj *pStream, in } static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { - SMnode * pMnode = pReq->info.node; + SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; SMPauseStreamReq pauseReq = {0}; @@ -1990,7 +1994,7 @@ static int32_t mndResumeStreamTask(STrans *pTrans, SMnode *pMnode, SStreamTask * pReq->streamId = pTask->id.streamId; pReq->igUntreated = igUntreated; - SEpSet epset; + SEpSet epset = {0}; bool hasEpset = false; int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS) { @@ -2028,7 +2032,7 @@ int32_t mndResumeAllStreamTasks(STrans *pTrans, SMnode *pMnode, SStreamObj *pStr } static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { - SMnode * pMnode = pReq->info.node; + SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; SMResumeStreamReq pauseReq = {0}; @@ -2146,7 +2150,7 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha return -1; } - void * abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); + void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); SEncoder encoder; tEncoderInit(&encoder, abuf, tlen); tEncodeStreamTaskUpdateMsg(&encoder, &req); @@ -2213,7 +2217,7 @@ static int32_t createStreamUpdateTrans(SStreamObj *pStream, SVgroupChangeInfo *p for (int32_t k = 0; k < numOfTasks; ++k) { SStreamTask *pTask = taosArrayGetP(pLevel, k); - void * pBuf = NULL; + void *pBuf = NULL; int32_t len = 0; streamTaskUpdateEpsetInfo(pTask, pInfo->pUpdateNodeList); doBuildStreamTaskUpdateMsg(&pBuf, &len, pInfo, pTask->info.nodeId, &pTask->id, pTrans->id); @@ -2292,8 +2296,8 @@ static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pP } static SArray *mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady) { - SSdb * pSdb = pMnode->pSdb; - void * pIter = NULL; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; SVgObj *pVgroup = NULL; *allReady = true; @@ -2361,7 +2365,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange STrans *pTrans = NULL; // conflict check for nodeUpdate trans, here we randomly chose one stream to add into the trans pool - while(1) { + while (1) { pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); if (pIter == NULL) { break; @@ -2371,13 +2375,12 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange sdbRelease(pSdb, pStream); if (conflict) { - mWarn("nodeUpdate trans in progress, current nodeUpdate ignored"); + mError("nodeUpdate conflict with other trans, current nodeUpdate ignored"); sdbCancelFetch(pSdb, pIter); return -1; } } - while (1) { pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); if (pIter == NULL) { @@ -2439,9 +2442,9 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange } static SArray *extractNodeListFromStream(SMnode *pMnode) { - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = NULL; - void * pIter = NULL; + void *pIter = NULL; SHashObj *pHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); while (1) { @@ -2488,9 +2491,9 @@ static SArray *extractNodeListFromStream(SMnode *pMnode) { } static void doExtractTasksFromStream(SMnode *pMnode) { - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = NULL; - void * pIter = NULL; + void *pIter = 
NULL; while (1) { pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); @@ -2542,7 +2545,7 @@ int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot) { int32_t numOfTask = taosArrayGetSize(execInfo.pTaskList); for (int32_t i = 0; i < numOfTask; ++i) { - STaskId * pId = taosArrayGet(execInfo.pTaskList, i); + STaskId *pId = taosArrayGet(execInfo.pTaskList, i); STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, pId, sizeof(*pId)); if (pEntry->nodeId == SNODE_HANDLE) continue; @@ -2585,14 +2588,22 @@ int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot) { // kill all trans in the dst DB static void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo) { + mDebug("start to clear checkpoints in all Dbs"); + void *pIter = NULL; while ((pIter = taosHashIterate(pChangeInfo->pDBMap, pIter)) != NULL) { char *pDb = (char *)pIter; size_t len = 0; - void * pKey = taosHashGetKey(pDb, &len); + void *pKey = taosHashGetKey(pDb, &len); + char *p = strndup(pKey, len); + + mDebug("clear checkpoint trans in Db:%s", p); doKillCheckpointTrans(pMnode, pKey, len); + taosMemoryFree(p); } + + mDebug("complete clear checkpoints in Dbs"); } // this function runs by only one thread, so it is not multi-thread safe @@ -2647,7 +2658,7 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) { execInfo.pNodeList = pNodeSnapshot; execInfo.ts = ts; } else { - mDebug("unexpect code during create nodeUpdate trans, code:%s", tstrerror(code)); + mError("unexpected code during create nodeUpdate trans, code:%s", tstrerror(code)); taosArrayDestroy(pNodeSnapshot); } } else { @@ -2670,7 +2681,7 @@ typedef struct SMStreamNodeCheckMsg { static int32_t mndProcessNodeCheck(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; if (sdbGetSize(pSdb, SDB_STREAM) <= 0) { return 0; } @@ -2694,7 +2705,7 @@ void saveStreamTasksInfo(SStreamObj *pStream, SStreamExecInfo *pExecNode) { SStreamTask *pTask = taosArrayGetP(pLevel, j); STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId}; - void * p = taosHashGet(pExecNode->pTaskMap, &id, sizeof(id)); + void *p = taosHashGet(pExecNode->pTaskMap, &id, sizeof(id)); if (p == NULL) { STaskStatusEntry entry = {0}; streamTaskStatusInit(&entry, pTask); @@ -2718,7 +2729,7 @@ void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode) { SStreamTask *pTask = taosArrayGetP(pLevel, j); STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId}; - void * p = taosHashGet(pExecNode->pTaskMap, &id, sizeof(id)); + void *p = taosHashGet(pExecNode->pTaskMap, &id, sizeof(id)); if (p != NULL) { taosHashRemove(pExecNode->pTaskMap, &id, sizeof(id)); @@ -2791,8 +2802,8 @@ int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) { pReq->taskId = pTask->id.taskId; pReq->streamId = pTask->id.streamId; - SEpSet epset; - bool hasEpset = false; + SEpSet epset = {0}; + bool hasEpset = false; int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS) { taosMemoryFree(pReq); @@ -2836,12 +2847,14 @@ int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) { return TSDB_CODE_ACTION_IN_PROGRESS; } -void killTransImpl(SMnode* pMnode, int32_t transId, const char* pDbName) { +void killTransImpl(SMnode *pMnode, int32_t transId, const char *pDbName) { STrans *pTrans = mndAcquireTrans(pMnode, transId); if (pTrans != NULL) { mInfo("kill active transId:%d in Db:%s", transId, pDbName); 
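Note on the checkpoint-trans cleanup hunks here (killAllCheckpointTrans / doKillCheckpointTrans): the DB name comes from a hash-map key that is length-delimited rather than NUL-terminated, so it is strndup'ed before being logged or handed to killTransImpl. A small standalone sketch of that idiom, assuming POSIX strndup and generic names rather than the TDengine APIs:

/* Illustrative sketch, not part of the patch. */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void useDbName(const char *key, size_t len) {
  char *dup = strndup(key, len);   /* same idea as `char *p = strndup(pKey, len);` */
  if (dup == NULL) return;
  printf("clear checkpoint trans in Db:%s\n", dup);  /* safe: dup is NUL-terminated */
  free(dup);
}

int main(void) {
  const char buf[] = {'d', 'b', '1', 'X', 'X'};  /* key bytes, no terminating NUL */
  useDbName(buf, 3);
  return 0;
}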
mndKillTrans(pMnode, pTrans); mndReleaseTrans(pMnode, pTrans); + } else { + mError("failed to acquire trans in Db:%s, transId:%d", pDbName, transId); } } @@ -2858,7 +2871,7 @@ int32_t doKillCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t len) { return TSDB_CODE_SUCCESS; } - char* pDupDBName = strndup(pDBName, len); + char *pDupDBName = strndup(pDBName, len); killTransImpl(pMnode, pTransInfo->transId, pDupDBName); taosMemoryFree(pDupDBName); @@ -2867,15 +2880,7 @@ int32_t doKillCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t len) { static int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int64_t streamId, int32_t transId) { int32_t code = TSDB_CODE_SUCCESS; - - STrans *pTrans = mndAcquireTrans(pMnode, transId); - if (pTrans != NULL) { - mInfo("kill checkpoint transId:%d to reset task status", transId); - mndKillTrans(pMnode, pTrans); - mndReleaseTrans(pMnode, pTrans); - } else { - mError("failed to acquire checkpoint trans:%d", transId); - } + killTransImpl(pMnode, transId, ""); SStreamObj *pStream = mndGetStreamObj(pMnode, streamId); if (pStream == NULL) { @@ -2913,38 +2918,38 @@ static SStreamTask *mndGetStreamTask(STaskId *pId, SStreamObj *pStream) { return NULL; } -//static bool needDropRelatedFillhistoryTask(STaskStatusEntry *pTaskEntry, SStreamExecInfo *pExecNode) { -// if (pTaskEntry->status == TASK_STATUS__STREAM_SCAN_HISTORY && pTaskEntry->statusLastDuration >= 10) { -// if (!pTaskEntry->inputQChanging && pTaskEntry->inputQUnchangeCounter > 10) { -// int32_t numOfReady = 0; -// int32_t numOfTotal = 0; -// for (int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) { -// STaskId *pId = taosArrayGet(pExecNode->pTaskList, k); -// if (pTaskEntry->id.streamId == pId->streamId) { -// numOfTotal++; +// static bool needDropRelatedFillhistoryTask(STaskStatusEntry *pTaskEntry, SStreamExecInfo *pExecNode) { +// if (pTaskEntry->status == TASK_STATUS__STREAM_SCAN_HISTORY && pTaskEntry->statusLastDuration >= 10) { +// if (!pTaskEntry->inputQChanging && pTaskEntry->inputQUnchangeCounter > 10) { +// int32_t numOfReady = 0; +// int32_t numOfTotal = 0; +// for (int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) { +// STaskId *pId = taosArrayGet(pExecNode->pTaskList, k); +// if (pTaskEntry->id.streamId == pId->streamId) { +// numOfTotal++; // -// if (pTaskEntry->id.taskId != pId->taskId) { -// STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, pId, sizeof(*pId)); -// if (pEntry->status == TASK_STATUS__READY) { -// numOfReady++; -// } -// } -// } -// } +// if (pTaskEntry->id.taskId != pId->taskId) { +// STaskStatusEntry *pEntry = taosHashGet(execInfo.pTaskMap, pId, sizeof(*pId)); +// if (pEntry->status == TASK_STATUS__READY) { +// numOfReady++; +// } +// } +// } +// } // -// if (numOfReady > 0) { -// mDebug("stream:0x%" PRIx64 -// " %d tasks are ready, %d tasks in stream-scan-history for more than 50s, drop related fill-history task", -// pTaskEntry->id.streamId, numOfReady, numOfTotal - numOfReady); -// return true; -// } else { -// return false; -// } -// } -// } +// if (numOfReady > 0) { +// mDebug("stream:0x%" PRIx64 +// " %d tasks are ready, %d tasks in stream-scan-history for more than 50s, drop related fill-history +// task", pTaskEntry->id.streamId, numOfReady, numOfTotal - numOfReady); +// return true; +// } else { +// return false; +// } +// } +// } // -// return false; -//} +// return false; +// } // currently only handle the sink task // 1. 
sink task, drop related fill-history task msg is missing @@ -2974,7 +2979,7 @@ static int32_t mndDropRelatedFillhistoryTask(SMnode *pMnode, STaskStatusEntry *p mDebug("build and send drop related fill-history task for task:0x%x", pTask->id.taskId); - SEpSet epset; + SEpSet epset = {0}; bool hasEpset = false; int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId); if (code != TSDB_CODE_SUCCESS) { @@ -3027,11 +3032,11 @@ static void updateStageInfo(STaskStatusEntry *pTaskEntry, int64_t stage) { } int32_t mndProcessStreamHb(SRpcMsg *pReq) { - SMnode * pMnode = pReq->info.node; + SMnode *pMnode = pReq->info.node; SStreamHbMsg req = {0}; bool checkpointFailed = false; - int64_t activeCheckpointId = 0; + int64_t checkpointId = 0; int64_t streamId = 0; int32_t transId = 0; @@ -3093,14 +3098,17 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { } streamTaskStatusCopy(pTaskEntry, p); - if (p->activeCheckpointId != 0) { - if (activeCheckpointId != 0) { - ASSERT(activeCheckpointId == p->activeCheckpointId); + if (p->checkpointId != 0) { + if (checkpointId != 0) { + ASSERT(checkpointId == p->checkpointId); } else { - activeCheckpointId = p->activeCheckpointId; + checkpointId = p->checkpointId; } if (p->checkpointFailed) { + mError("stream task:0x%" PRIx64 " checkpointId:%" PRIx64 " transId:%d failed, kill it", p->id.taskId, + p->checkpointId, p->chkpointTransId); + checkpointFailed = p->checkpointFailed; streamId = p->id.streamId; transId = p->chkpointTransId; @@ -3122,17 +3130,17 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { // current checkpoint is failed, rollback from the checkpoint trans // kill the checkpoint trans and then set all tasks status to be normal - if (checkpointFailed && activeCheckpointId != 0) { + if (checkpointFailed && checkpointId != 0) { bool allReady = true; SArray *p = mndTakeVgroupSnapshot(pMnode, &allReady); taosArrayDestroy(p); if (allReady || snodeChanged) { // if the execInfo.activeCheckpoint == 0, the checkpoint is restoring from wal - mInfo("checkpointId:%" PRId64 " failed, issue task-reset trans to reset all tasks status", activeCheckpointId); + mInfo("checkpointId:%" PRId64 " failed, issue task-reset trans to reset all tasks status", checkpointId); mndResetStatusFromCheckpoint(pMnode, streamId, transId); } else { - mInfo("not all vgroups are ready, wait for next HB from stream tasks"); + mInfo("not all vgroups are ready, wait for next HB from stream tasks to reset the task status"); } } @@ -3146,9 +3154,10 @@ void freeCheckpointCandEntry(void *param) { SCheckpointCandEntry *pEntry = param; taosMemoryFreeClear(pEntry->pName); } + SStreamObj *mndGetStreamObj(SMnode *pMnode, int64_t streamId) { - void * pIter = NULL; - SSdb * pSdb = pMnode->pSdb; + void *pIter = NULL; + SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = NULL; while ((pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream)) != NULL) { diff --git a/source/dnode/mnode/impl/src/mndStreamTrans.c b/source/dnode/mnode/impl/src/mndStreamTrans.c index 847f55a8fd..a6dd1c4856 100644 --- a/source/dnode/mnode/impl/src/mndStreamTrans.c +++ b/source/dnode/mnode/impl/src/mndStreamTrans.c @@ -89,7 +89,7 @@ bool mndStreamTransConflictCheck(SMnode* pMnode, int64_t streamUid, const char* } if (strcmp(tInfo.name, MND_STREAM_CHECKPOINT_NAME) == 0) { - if (strcmp(pTransName, MND_STREAM_DROP_NAME) != 0) { + if ((strcmp(pTransName, MND_STREAM_DROP_NAME) != 0) && (strcmp(pTransName, MND_STREAM_TASK_RESET_NAME) != 0)) { mWarn("conflict with other transId:%d streamUid:0x%" PRIx64 ", 
trans:%s", tInfo.transId, tInfo.streamUid, tInfo.name); terrno = TSDB_CODE_MND_TRANS_CONFLICT; diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 6eb5d7e592..1055aa0874 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -1831,10 +1831,12 @@ static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDb int32_t newDnodeId) { mInfo("vgId:%d, will add 1 vnode, replica:%d dnode:%d", pVgroup->vgId, pVgroup->replica, newDnodeId); + // assoc dnode SVnodeGid *pGid = &pVgroup->vnodeGid[pVgroup->replica]; pVgroup->replica++; pGid->dnodeId = newDnodeId; pGid->syncState = TAOS_SYNC_STATE_OFFLINE; + pGid->nodeRole = TAOS_SYNC_ROLE_LEARNER; SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); if (pVgRaw == NULL) return -1; @@ -1844,10 +1846,20 @@ static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDb } (void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY); + // learner for (int32_t i = 0; i < pVgroup->replica - 1; ++i) { if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[i].dnodeId) != 0) return -1; } if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pGid) != 0) return -1; + + // voter + pGid->nodeRole = TAOS_SYNC_ROLE_VOTER; + if (mndAddAlterVnodeTypeAction(pMnode, pTrans, pDb, pVgroup, pGid->dnodeId) != 0) return -1; + for (int32_t i = 0; i < pVgroup->replica - 1; ++i) { + if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[i].dnodeId) != 0) return -1; + } + + // confirm if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1; return 0; diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 4815dfe65c..4284b0838e 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -145,8 +145,7 @@ FAIL: } int32_t sndInit(SSnode *pSnode) { - int32_t numOfTasks = 0; - tqStreamTaskResetStatus(pSnode->pMeta, &numOfTasks); + streamMetaResetTaskStatus(pSnode->pMeta); streamMetaStartAllTasks(pSnode->pMeta); return 0; } diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 9a4e2edf8d..97cf0ffebc 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -175,7 +175,8 @@ void tsdbReaderSetNotifyCb(STsdbReader* pReader, TsdReaderNotifyCbFn not int32_t tsdbReuseCacherowsReader(void *pReader, void *pTableIdList, int32_t numOfTables); int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols, - SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr); + SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr, + SArray* pFuncTypeList); int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds, SArray *pTableUids); void *tsdbCacherowsReaderClose(void *pReader); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 435b12bc06..cded4ddd7c 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -107,7 +107,6 @@ struct STQ { TTB* pExecStore; TTB* pCheckStore; SStreamMeta* pStreamMeta; - void* tqTimer; }; int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle); @@ -146,7 +145,7 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore); // tqSink int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* 
deleteReq, - const char* pIdStr); + const char* pIdStr, bool newSubTableRule); void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data); // tqOffset @@ -165,7 +164,7 @@ int32_t setDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, const char* id); SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t numOfCols, - SSDataBlock* pDataBlock, SArray* pTagArray); + SSDataBlock* pDataBlock, SArray* pTagArray, bool newSubTableRule); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index fd4cc7eaa5..138bcbb133 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -703,7 +703,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma code = terrno; TSDB_CHECK_CODE(code, lino, _exit); } - code = tqBuildDeleteReq(pSma->pVnode->pTq, NULL, output, &deleteReq, ""); + code = tqBuildDeleteReq(pSma->pVnode->pTq, NULL, output, &deleteReq, "", true); TSDB_CHECK_CODE(code, lino, _exit); code = tdRSmaProcessDelReq(pSma, suid, pItem->level, &deleteReq); TSDB_CHECK_CODE(code, lino, _exit); diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c index 289986e01f..f537ede8c1 100644 --- a/source/dnode/vnode/src/sma/smaTimeRange.c +++ b/source/dnode/vnode/src/sma/smaTimeRange.c @@ -188,7 +188,7 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema * if (pDataBlock->info.type == STREAM_DELETE_RESULT) { pDeleteReq->suid = suid; pDeleteReq->deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq)); - code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, pDeleteReq, ""); + code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, pDeleteReq, "", true); TSDB_CHECK_CODE(code, lino, _exit); continue; } @@ -196,7 +196,7 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema * SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = SUBMIT_REQ_AUTO_CREATE_TABLE,}; int32_t cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1; - tbData.pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, cid, pDataBlock, tagArray); + tbData.pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, cid, pDataBlock, tagArray, true); { uint64_t groupId = pDataBlock->info.id.groupId; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index a53b322753..9ae4fa5e19 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -26,18 +26,6 @@ static FORCE_INLINE bool tqIsHandleExec(STqHandle* pHandle) { return TMQ_HANDLE_ static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_EXEC; } static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_IDLE; } -static int32_t tqTimerInit(STQ* pTq) { - pTq->tqTimer = taosTmrInit(100, 100, 1000, "TQ"); - if (pTq->tqTimer == NULL) { - return -1; - } - return 0; -} - -static void tqTimerCleanUp(STQ* pTq) { - taosTmrCleanUp(pTq->tqTimer); -} - void tqDestroyTqHandle(void* data) { STqHandle* pData = (STqHandle*)data; qDestroyTask(pData->execHandle.task); @@ -118,7 +106,6 @@ int32_t tqInitialize(STQ* pTq) { return -1; } - tqTimerInit(pTq); return 0; } @@ -149,7 +136,6 @@ void tqClose(STQ* pTq) { taosMemoryFree(pTq->path); tqMetaClose(pTq); streamMetaClose(pTq->pStreamMeta); - 
tqTimerCleanUp(pTq); qDebug("end to close tq"); taosMemoryFree(pTq); @@ -742,8 +728,6 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) { return code; } - streamTaskOpenAllUpstreamInput(pTask); - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { STaskId taskId = {0}; if (pTask->info.fillHistory) { @@ -1126,6 +1110,19 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) pRsp->info.handle = NULL; SStreamCheckpointSourceReq req = {0}; + SDecoder decoder; + tDecoderInit(&decoder, (uint8_t*)msg, len); + if (tDecodeStreamCheckpointSourceReq(&decoder, &req) < 0) { + code = TSDB_CODE_MSG_DECODE_ERROR; + tDecoderClear(&decoder); + tqError("vgId:%d failed to decode checkpoint-source msg, code:%s", vgId, tstrerror(code)); + SRpcMsg rsp = {0}; + buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0); + tmsgSendRsp(&rsp); // error occurs + return code; + } + tDecoderClear(&decoder); + if (!vnodeIsRoleLeader(pTq->pVnode)) { tqDebug("vgId:%d not leader, ignore checkpoint-source msg, s-task:0x%x", vgId, req.taskId); SRpcMsg rsp = {0}; @@ -1142,19 +1139,6 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) return TSDB_CODE_SUCCESS; } - SDecoder decoder; - tDecoderInit(&decoder, (uint8_t*)msg, len); - if (tDecodeStreamCheckpointSourceReq(&decoder, &req) < 0) { - code = TSDB_CODE_MSG_DECODE_ERROR; - tDecoderClear(&decoder); - tqError("vgId:%d failed to decode checkpoint-source msg, code:%s", vgId, tstrerror(code)); - SRpcMsg rsp = {0}; - buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0); - tmsgSendRsp(&rsp); // error occurs - return code; - } - tDecoderClear(&decoder); - SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.taskId); if (pTask == NULL) { tqError("vgId:%d failed to find s-task:0x%x, ignore checkpoint msg. 
it may have been destroyed already", vgId, diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index bfa8cfdb53..0b05573aae 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -596,10 +596,12 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol if (IS_STR_DATA_TYPE(pColVal->type)) { char val[65535 + 2] = {0}; - if (pColVal->value.pData != NULL) { - memcpy(varDataVal(val), pColVal->value.pData, pColVal->value.nData); + if(COL_VAL_IS_VALUE(pColVal)){ + if (pColVal->value.pData != NULL) { + memcpy(varDataVal(val), pColVal->value.pData, pColVal->value.nData); + } varDataSetLen(val, pColVal->value.nData); - code = colDataSetVal(pColumnInfoData, rowIndex, val, !COL_VAL_IS_VALUE(pColVal)); + code = colDataSetVal(pColumnInfoData, rowIndex, val, false); } else { colDataSetNULL(pColumnInfoData, rowIndex); } @@ -869,22 +871,8 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas sourceIdx++; } else if (pCol->cid == pColData->info.colId) { tColDataGetValue(pCol, i, &colVal); - - if (IS_STR_DATA_TYPE(colVal.type)) { - if (colVal.value.pData != NULL) { - char val[65535 + 2]; - memcpy(varDataVal(val), colVal.value.pData, colVal.value.nData); - varDataSetLen(val, colVal.value.nData); - if (colDataSetVal(pColData, curRow - lastRow, val, !COL_VAL_IS_VALUE(&colVal)) < 0) { - goto FAIL; - } - } else { - colDataSetNULL(pColData, curRow - lastRow); - } - } else { - if (colDataSetVal(pColData, curRow - lastRow, (void*)&colVal.value.val, !COL_VAL_IS_VALUE(&colVal)) < 0) { - goto FAIL; - } + if(doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS){ + goto FAIL; } sourceIdx++; targetIdx++; @@ -967,23 +955,10 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas if (colVal.cid < pColData->info.colId) { sourceIdx++; } else if (colVal.cid == pColData->info.colId) { - if (IS_STR_DATA_TYPE(colVal.type)) { - if (colVal.value.pData != NULL) { - char val[65535 + 2]; - memcpy(varDataVal(val), colVal.value.pData, colVal.value.nData); - varDataSetLen(val, colVal.value.nData); - if (colDataSetVal(pColData, curRow - lastRow, val, !COL_VAL_IS_VALUE(&colVal)) < 0) { - goto FAIL; - } - } else { - colDataSetNULL(pColData, curRow - lastRow); - } - } else { - if (colDataSetVal(pColData, curRow - lastRow, (void*)&colVal.value.val, !COL_VAL_IS_VALUE(&colVal)) < 0) { - goto FAIL; - } + if(doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS){ + goto FAIL; } - sourceIdx++; + sourceIdx++; targetIdx++; } } diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index c2e48d5d92..7fcb86d84a 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -41,10 +41,10 @@ static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const ch static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName, int32_t numOfTags); static SArray* createDefaultTagColName(); static void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName, - int64_t gid); + int64_t gid, bool newSubTableRule); int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq, - const char* pIdStr) { + const char* pIdStr, bool newSubTableRule) { int32_t totalRows = pDataBlock->info.rows; SColumnInfoData* pStartTsCol = taosArrayGet(pDataBlock->pDataBlock, 
START_TS_COLUMN_INDEX); SColumnInfoData* pEndTsCol = taosArrayGet(pDataBlock->pDataBlock, END_TS_COLUMN_INDEX); @@ -68,6 +68,12 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p if (varTbName != NULL && varTbName != (void*)-1) { name = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN); memcpy(name, varDataVal(varTbName), varDataLen(varTbName)); + if(newSubTableRule && + !isAutoTableName(name) && + !alreadyAddGroupId(name) && + groupId != 0) { + buildCtbNameAddGruopId(name, groupId); + } } else if (stbFullName) { name = buildCtbNameByGroupId(stbFullName, groupId); } else { @@ -173,9 +179,18 @@ SArray* createDefaultTagColName() { } void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName, - int64_t gid) { + int64_t gid, bool newSubTableRule) { if (pDataBlock->info.parTbName[0]) { - pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName); + if(newSubTableRule && + !isAutoTableName(pDataBlock->info.parTbName) && + !alreadyAddGroupId(pDataBlock->info.parTbName) && + gid != 0) { + pCreateTableReq->name = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN); + strcpy(pCreateTableReq->name, pDataBlock->info.parTbName); + buildCtbNameAddGruopId(pCreateTableReq->name, gid); + }else{ + pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName); + } } else { pCreateTableReq->name = buildCtbNameByGroupId(stbFullName, gid); } @@ -247,7 +262,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S ASSERT(gid == *(int64_t*)pGpIdData); } - setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid); + setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER); taosArrayPush(reqs.pArray, pCreateTbReq); tqDebug("s-task:%s build create table:%s msg complete", pTask->id.idStr, pCreateTbReq->name); @@ -357,7 +372,8 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* int64_t suid) { SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))}; - int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr); + int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, + pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -411,7 +427,7 @@ bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbNam } SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t numOfCols, - SSDataBlock* pDataBlock, SArray* pTagArray) { + SSDataBlock* pDataBlock, SArray* pTagArray, bool newSubTableRule) { SVCreateTbReq* pCreateTbReq = taosMemoryCalloc(1, sizeof(SVCreateTbReq)); if (pCreateTbReq == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -436,7 +452,7 @@ SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, in pCreateTbReq->ctb.tagName = createDefaultTagColName(); // set table name - setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, pDataBlock->info.id.groupId); + setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, pDataBlock->info.id.groupId, newSubTableRule); return pCreateTbReq; } @@ -637,6 +653,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat tqDebug("s-task:%s vgId:%d, gropuId:%" PRIu64 " datablock table name is null, set name:%s", id, vgId, groupId, dstTableName); } else { + tstrncpy(dstTableName, 
pTableSinkInfo->name.data, pTableSinkInfo->name.len + 1); if (pTableSinkInfo->uid != 0) { tqDebug("s-task:%s write %d rows into groupId:%" PRIu64 " dstTable:%s(uid:%" PRIu64 ")", id, numOfRows, groupId, dstTableName, pTableSinkInfo->uid); @@ -649,10 +666,17 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat if (dstTableName[0] == 0) { memset(dstTableName, 0, TSDB_TABLE_NAME_LEN); buildCtbNameByGroupIdImpl(stbFullName, groupId, dstTableName); + }else{ + if(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && + !isAutoTableName(dstTableName) && + !alreadyAddGroupId(dstTableName) && + groupId != 0) { + buildCtbNameAddGruopId(dstTableName, groupId); + } } int32_t nameLen = strlen(dstTableName); - pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen); + pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1); if (pTableSinkInfo == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -690,7 +714,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE; pTableData->pCreateTbReq = - buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray); + buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray, pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER); taosArrayDestroy(pTagArray); if (pTableData->pCreateTbReq == NULL) { @@ -719,7 +743,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat } } - return TSDB_CODE_SUCCESS; + return TDB_CODE_SUCCESS; } int32_t setDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock, diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index 2a600d08ef..cdb5cc26f8 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -95,10 +95,14 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) { pParam->pTq = pTq; pParam->numOfTasks = numOfTasks; + + tmr_h pTimer = streamTimerGetInstance(); + ASSERT(pTimer); + if (pMeta->scanInfo.scanTimer == NULL) { - pMeta->scanInfo.scanTimer = taosTmrStart(doStartScanWal, idleDuration, pParam, pTq->tqTimer); + pMeta->scanInfo.scanTimer = taosTmrStart(doStartScanWal, idleDuration, pParam, pTimer); } else { - taosTmrReset(doStartScanWal, idleDuration, pParam, pTq->tqTimer, &pMeta->scanInfo.scanTimer); + taosTmrReset(doStartScanWal, idleDuration, pParam, pTimer, &pMeta->scanInfo.scanTimer); } return TSDB_CODE_SUCCESS; @@ -175,11 +179,11 @@ int32_t tqStopStreamTasksAsync(STQ* pTq) { SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); if (pRunReq == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - tqError("vgId:%d failed to create msg to stop tasks, code:%s", vgId, terrstr()); + tqError("vgId:%d failed to create msg to stop tasks async, code:%s", vgId, terrstr()); return -1; } - tqDebug("vgId:%d create msg to stop tasks", vgId); + tqDebug("vgId:%d create msg to stop all tasks async", vgId); pRunReq->head.vgId = vgId; pRunReq->streamId = 0; diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 48eca5c349..437c61f666 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -76,33 +76,6 @@ int32_t tqStreamOneTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t stream return 0; } -int32_t tqUpdateNodeEpsetAsync(SStreamMeta* pMeta, SMsgCb* 
cb, int64_t streamId, int32_t taskId) { - int32_t vgId = pMeta->vgId; - - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); - if (numOfTasks == 0) { - tqDebug("vgId:%d no stream tasks existed to run", vgId); - return 0; - } - - SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq)); - if (pRunReq == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - tqError("vgId:%d failed to create msg to start task:0x%x, code:%s", vgId, taskId, terrstr()); - return -1; - } - - tqDebug("vgId:%d update s-task:0x%x nodeEpset async", vgId, taskId); - pRunReq->head.vgId = vgId; - pRunReq->streamId = streamId; - pRunReq->taskId = taskId; - pRunReq->reqType = STREAM_EXEC_T_UPDATE_TASK_EPSET; - - SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)}; - tmsgPutToQueue(cb, STREAM_QUEUE, &msg); - return 0; -} - int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored) { int32_t vgId = pMeta->vgId; char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); @@ -130,8 +103,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM STaskId id = {.streamId = req.streamId, .taskId = req.taskId}; SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (ppTask == NULL || *ppTask == NULL) { - tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", vgId, - req.taskId); + tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped", vgId, req.taskId); rsp.code = TSDB_CODE_SUCCESS; streamMetaWUnLock(pMeta); @@ -151,6 +123,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", idstr, req.transId); } + // duplicate update epset msg received, discard this redundant message STaskUpdateEntry entry = {.streamId = req.streamId, .taskId = req.taskId, .transId = req.transId}; void* pReqTask = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry)); @@ -162,7 +135,6 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM return rsp.code; } - // the following two functions should not be executed within the scope of meta lock to avoid deadlock streamTaskUpdateEpsetInfo(pTask, req.pNodeList); streamTaskResetStatus(pTask); @@ -186,11 +158,6 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM streamMetaSaveTask(pMeta, *ppHTask); } -#if 0 - if (streamMetaCommit(pMeta) < 0) { - // persist to disk - } -#endif } else { tqDebug("s-task:%s vgId:%d not save since restore not finish", idstr, vgId); } @@ -198,7 +165,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqDebug("s-task:%s vgId:%d start to stop task after save task", idstr, vgId); streamTaskStop(pTask); - // keep the already handled info + // keep the already updated info taosHashPut(pMeta->updateInfo.pTasks, &entry, sizeof(entry), NULL, 0); if (ppHTask != NULL) { @@ -227,6 +194,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM updateTasks, (numOfTasks - updateTasks)); streamMetaWUnLock(pMeta); } else { + if (streamMetaCommit(pMeta) < 0) { + // persist to disk + } + if (!restored) { tqDebug("vgId:%d vnode restore not completed, not start the tasks, clear the start after nodeUpdate flag", vgId); pMeta->startInfo.tasksWillRestart = 0; @@ -723,25 +694,21 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* 
pMeta, char* msg, int32_t msgLen return 0; } -int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks) { +int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta) { int32_t vgId = pMeta->vgId; - *numOfTasks = taosArrayGetSize(pMeta->pTaskList); + int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); - tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, *numOfTasks); - if (*numOfTasks == 0) { + tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, numOfTasks); + if (numOfTasks == 0) { return TSDB_CODE_SUCCESS; } - for (int32_t i = 0; i < (*numOfTasks); ++i) { + for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); streamTaskResetStatus(*pTask); - -// if ((*pTask)->info.fillHistory == 1) { -// streamResetParamForScanHistory(*pTask); -// } } return 0; @@ -795,8 +762,7 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { } if (isLeader && !tsDisableStream) { - int32_t numOfTasks = 0; - tqStreamTaskResetStatus(pMeta, &numOfTasks); + streamMetaResetTaskStatus(pMeta); streamMetaWUnLock(pMeta); streamMetaStartAllTasks(pMeta); @@ -854,8 +820,8 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead if (pTask != NULL) { // even in halt status, the data in inputQ must be processed char* p = NULL; if (streamTaskReadyToRun(pTask, &p)) { - tqDebug("vgId:%d s-task:%s start to process block from inputQ, next checked ver:%" PRId64, vgId, pTask->id.idStr, - pTask->chkInfo.nextProcessVer); + tqDebug("vgId:%d s-task:%s status:%s start to process block from inputQ, next checked ver:%" PRId64, vgId, pTask->id.idStr, + p, pTask->chkInfo.nextProcessVer); streamExecTask(pTask); } else { int8_t status = streamTaskSetSchedStatusInactive(pTask); @@ -874,23 +840,31 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta) { STaskStartInfo* pStartInfo = &pMeta->startInfo; - int32_t vgId = pMeta->vgId; + int32_t vgId = pMeta->vgId; streamMetaWLock(pMeta); if (pStartInfo->taskStarting == 1) { tqDebug("vgId:%d already in start tasks procedure in other thread, restartCounter:%d, do nothing", vgId, pMeta->startInfo.restartCount); } else { // not in starting procedure - if (pStartInfo->restartCount > 0) { + bool allReady = streamMetaAllTasksReady(pMeta); + + if ((pStartInfo->restartCount > 0) && (!allReady)) { + // if all tasks are ready now, do NOT restart again, and reset the value of pStartInfo->restartCount pStartInfo->restartCount -= 1; tqDebug("vgId:%d role:%d need to restart all tasks again, restartCounter:%d", vgId, pMeta->role, pStartInfo->restartCount); - streamMetaWUnLock(pMeta); + restartStreamTasks(pMeta, (pMeta->role == NODE_ROLE_LEADER)); return TSDB_CODE_SUCCESS; } else { - tqDebug("vgId:%d start all tasks completed in callbackFn", pMeta->vgId); + if (pStartInfo->restartCount == 0) { + tqDebug("vgId:%d start all tasks completed in callbackFn, restartCount is 0", pMeta->vgId); + } else if (allReady) { + pStartInfo->restartCount = 0; + tqDebug("vgId:%d all tasks are ready, reset restartCounter 0, not restart tasks", vgId); + } } } @@ -932,7 +906,7 @@ int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg){ } tqDebug("s-task:%s receive pause msg from mnode", pTask->id.idStr); - streamTaskPause(pTask, pMeta); + 
streamTaskPause(pMeta, pTask); SStreamTask* pHistoryTask = NULL; if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { @@ -949,7 +923,7 @@ int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg){ tqDebug("s-task:%s fill-history task handle paused along with related stream task", pHistoryTask->id.idStr); - streamTaskPause(pHistoryTask, pMeta); + streamTaskPause(pMeta, pHistoryTask); streamMetaReleaseTask(pMeta, pHistoryTask); } @@ -998,8 +972,7 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t } else if (status == TASK_STATUS__UNINIT) { // todo: fill-history task init ? if (pTask->info.fillHistory == 0) { - EStreamTaskEvent event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT; - streamTaskHandleEvent(pTask->status.pSM, event); + streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); } } @@ -1023,4 +996,8 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m } return code; +} + +int32_t tqStreamTasksGetTotalNum(SStreamMeta* pMeta) { + return taosArrayGetSize(pMeta->pTaskList); } \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 9b29329cf3..d86219542f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ #include "cos.h" +#include "functionMgt.h" #include "tsdb.h" #include "tsdbDataFileRW.h" #include "tsdbReadUtil.h" @@ -894,19 +895,56 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr } int num_keys = TARRAY_SIZE(remainCols); - int16_t *aCols = taosMemoryMalloc(num_keys * sizeof(int16_t)); int16_t *slotIds = taosMemoryMalloc(num_keys * sizeof(int16_t)); + int16_t *lastColIds = taosMemoryMalloc(num_keys * sizeof(int16_t)); + int16_t *lastSlotIds = taosMemoryMalloc(num_keys * sizeof(int16_t)); + int16_t *lastrowColIds = taosMemoryMalloc(num_keys * sizeof(int16_t)); + int16_t *lastrowSlotIds = taosMemoryMalloc(num_keys * sizeof(int16_t)); + SArray* lastTmpColArray = NULL; + SArray* lastTmpIndexArray = NULL; + SArray* lastrowTmpColArray = NULL; + SArray* lastrowTmpIndexArray = NULL; + + int lastIndex = 0; + int lastrowIndex = 0; + for (int i = 0; i < num_keys; ++i) { SIdxKey *idxKey = taosArrayGet(remainCols, i); - aCols[i] = idxKey->key.cid; slotIds[i] = pr->pSlotIds[idxKey->idx]; + if (idxKey->key.ltype == CACHESCAN_RETRIEVE_LAST >> 3) { + if(NULL == lastTmpIndexArray) { + lastTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t)); + } + taosArrayPush(lastTmpIndexArray, &(i)); + lastColIds[lastIndex] = idxKey->key.cid; + lastSlotIds[lastIndex] = pr->pSlotIds[idxKey->idx]; + lastIndex++; + } else { + if(NULL == lastrowTmpIndexArray) { + lastrowTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t)); + } + taosArrayPush(lastrowTmpIndexArray, &(i)); + lastrowColIds[lastrowIndex] = idxKey->key.cid; + lastrowSlotIds[lastrowIndex] = pr->pSlotIds[idxKey->idx]; + lastrowIndex++; + } } - if (ltype) { - mergeLastCid(uid, pTsdb, &pTmpColArray, pr, aCols, num_keys, slotIds); - } else { - mergeLastRowCid(uid, pTsdb, &pTmpColArray, pr, aCols, num_keys, slotIds); + pTmpColArray = taosArrayInit(lastIndex + lastrowIndex, sizeof(SLastCol)); + + if(lastTmpIndexArray != NULL) { + mergeLastCid(uid, pTsdb, &lastTmpColArray, pr, lastColIds, lastIndex, lastSlotIds); + for(int i = 0; i < taosArrayGetSize(lastTmpColArray); i++) { + taosArrayInsert(pTmpColArray, 
*(int32_t*)taosArrayGet(lastTmpIndexArray, i), taosArrayGet(lastTmpColArray, i)); + } + } + + if(lastrowTmpIndexArray != NULL) { + mergeLastCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds); + for(int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) { + taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i)); + } } SLRUCache *pCache = pTsdb->lruCache; @@ -965,9 +1003,18 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr rocksMayWrite(pTsdb, false, true, false); } + taosArrayDestroy(lastrowTmpIndexArray); + taosArrayDestroy(lastrowTmpColArray); + taosArrayDestroy(lastTmpIndexArray); + taosArrayDestroy(lastTmpColArray); + + taosMemoryFree(lastColIds); + taosMemoryFree(lastSlotIds); + taosMemoryFree(lastrowColIds); + taosMemoryFree(lastrowSlotIds); + taosArrayDestroy(pTmpColArray); - taosMemoryFree(aCols); taosMemoryFree(slotIds); return code; @@ -1057,6 +1104,15 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache int16_t cid = ((int16_t *)TARRAY_DATA(pCidList))[i]; SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid}; + // for select last_row, last case + int32_t funcType = FUNCTION_TYPE_CACHE_LAST; + if (pr->pFuncTypeList != NULL && taosArrayGetSize(pr->pFuncTypeList) > i) { + funcType = ((int32_t *)TARRAY_DATA(pr->pFuncTypeList))[i]; + } + if (((pr->type & CACHESCAN_RETRIEVE_LAST) == CACHESCAN_RETRIEVE_LAST) && FUNCTION_TYPE_CACHE_LAST_ROW == funcType) { + int8_t tempType = CACHESCAN_RETRIEVE_LAST_ROW | (pr->type ^ CACHESCAN_RETRIEVE_LAST); + key->ltype = (tempType & CACHESCAN_RETRIEVE_LAST) >> 3; + } LRUHandle *h = taosLRUCacheLookup(pCache, key, ROCKS_KEY_LEN); if (h) { diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index f668ea5f72..d05e184fd8 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ +#include "functionMgt.h" #include "taoserror.h" #include "tarray.h" #include "tcommon.h" @@ -33,31 +34,69 @@ static void setFirstLastResColToNull(SColumnInfoData* pCol, int32_t row) { taosMemoryFree(buf); } +static void saveOneRowForLastRaw(SLastCol* pColVal, SCacheRowsReader* pReader, const int32_t slotId, + SColumnInfoData* pColInfoData, int32_t numOfRows) { + SColVal* pVal = &pColVal->colVal; + + // allNullRow = false; + if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { + if (!COL_VAL_IS_VALUE(&pColVal->colVal)) { + colDataSetNULL(pColInfoData, numOfRows); + } else { + varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData); + + memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData); + colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false); + } + } else { + colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal)); + } + return; +} + static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds, const int32_t* dstSlotIds, void** pRes, const char* idStr) { int32_t numOfRows = pBlock->info.rows; // bool allNullRow = true; if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) { + uint64_t ts = TSKEY_MIN; SFirstLastRes* p = NULL; col_id_t colId = -1; + + SArray* funcTypeBlockArray = taosArrayInit(pReader->numOfCols, sizeof(int32_t)); for (int32_t i = 0; i < pReader->numOfCols; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]); + int32_t funcType = FUNCTION_TYPE_CACHE_LAST; + if (pReader->pFuncTypeList != NULL && taosArrayGetSize(pReader->pFuncTypeList) > i) { + funcType = *(int32_t*)taosArrayGet(pReader->pFuncTypeList, i); + } + taosArrayInsert(funcTypeBlockArray, dstSlotIds[i], taosArrayGet(pReader->pFuncTypeList, i)); + if (slotIds[i] == -1) { + if (FUNCTION_TYPE_CACHE_LAST_ROW == funcType) { + colDataSetNULL(pColInfoData, numOfRows); + continue; + } setFirstLastResColToNull(pColInfoData, numOfRows); continue; } int32_t slotId = slotIds[i]; SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i); colId = pColVal->colVal.cid; + + if (FUNCTION_TYPE_CACHE_LAST_ROW == funcType) { + saveOneRowForLastRaw(pColVal, pReader, slotId, pColInfoData, numOfRows); + continue; + } + p = (SFirstLastRes*)varDataVal(pRes[i]); p->ts = pColVal->ts; ts = p->ts; p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal); // allNullRow = p->isNull & allNullRow; - if (!p->isNull) { if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { varDataSetLen(p->buf, pColVal->colVal.value.nData); @@ -77,6 +116,13 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p } for (int32_t idx = 0; idx < taosArrayGetSize(pBlock->pDataBlock); ++idx) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, idx); + if (idx < funcTypeBlockArray->size) { + int32_t funcType = *(int32_t*)taosArrayGet(funcTypeBlockArray, idx); + if (FUNCTION_TYPE_CACHE_LAST_ROW == funcType) { + continue; + } + } + if (pCol->info.colId == PRIMARYKEY_TIMESTAMP_COL_ID && pCol->info.type == TSDB_DATA_TYPE_TIMESTAMP) { if (ts == TSKEY_MIN) { colDataSetNULL(pCol, numOfRows); @@ -95,6 +141,7 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p // pBlock->info.rows += allNullRow ? 
0 : 1; ++pBlock->info.rows; + taosArrayDestroy(funcTypeBlockArray); } else if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)) { for (int32_t i = 0; i < pReader->numOfCols; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]); @@ -105,21 +152,8 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p continue; } SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i); - SColVal* pVal = &pColVal->colVal; - // allNullRow = false; - if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { - if (!COL_VAL_IS_VALUE(&pColVal->colVal)) { - colDataSetNULL(pColInfoData, numOfRows); - } else { - varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData); - - memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData); - colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false); - } - } else { - colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal)); - } + saveOneRowForLastRaw(pColVal, pReader, slotId, pColInfoData, numOfRows); } // pBlock->info.rows += allNullRow ? 0 : 1; @@ -175,7 +209,8 @@ int32_t tsdbReuseCacherowsReader(void* reader, void* pTableIdList, int32_t numOf } int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols, - SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr) { + SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr, + SArray* pFuncTypeList) { *pReader = NULL; SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader)); if (p == NULL) { @@ -190,6 +225,7 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, p->numOfCols = numOfCols; p->pCidList = pCidList; p->pSlotIds = pSlotIds; + p->pFuncTypeList = pFuncTypeList; if (numOfTables == 0) { *pReader = p; @@ -391,7 +427,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 if (!COL_VAL_IS_VALUE(&p->colVal)) { hasNotNullRow = false; } - continue; + // For all of cols is null, the last null col of last table will be save + if (i != pr->numOfTables - 1 || k != pr->numOfCols - 1 || hasRes) { + continue; + } } hasRes = true; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 8a7ec981e8..609f38cead 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -4100,9 +4100,9 @@ void tsdbReaderClose2(STsdbReader* pReader) { size_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap); if (pReader->status.pTableMap != NULL) { destroyAllBlockScanInfo(pReader->status.pTableMap); - clearBlockScanInfoBuf(&pReader->blockInfoBuf); pReader->status.pTableMap = NULL; } + clearBlockScanInfoBuf(&pReader->blockInfoBuf); if (pReader->pFileReader != NULL) { tsdbDataFileReaderClose(&pReader->pFileReader); @@ -4120,8 +4120,10 @@ void tsdbReaderClose2(STsdbReader* pReader) { taosMemoryFreeClear(pReader->status.uidList.tableUidList); qTrace("tsdb/reader-close: %p, untake snapshot", pReader); - tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, true); - pReader->pReadSnap = NULL; + void* p = pReader->pReadSnap; + if ((p == atomic_val_compare_exchange_ptr((void**)&pReader->pReadSnap, p, NULL)) && (p != NULL)) { + tsdbUntakeReadSnap2(pReader, p, true); + } tsem_destroy(&pReader->resumeAfterSuspend); tsdbReleaseReader(pReader); @@ -4195,8 +4197,12 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { doSuspendCurrentReader(pReader); } 
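Editorial note on the snapshot-release change: the tsdbReaderClose2 hunk above, and the tsdbReaderSuspend2 hunk that continues just below, both guard pReader->pReadSnap with a compare-and-swap so that tsdbUntakeReadSnap2 is called by at most one of the racing paths. Below is a minimal, self-contained sketch of that release-exactly-once pattern. It is not the TDengine implementation: it uses C11 stdatomic instead of the atomic_val_compare_exchange_ptr wrapper, and all names (snapshot_t, reader_t, untake_snapshot, release_snapshot_once) are hypothetical stand-ins.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct snapshot_t { int dummy; } snapshot_t;    /* hypothetical stand-in for STsdbReadSnap        */

typedef struct reader_t {
  _Atomic(snapshot_t *) snap;                           /* hypothetical stand-in for pReader->pReadSnap   */
} reader_t;

static void untake_snapshot(snapshot_t *s) {            /* hypothetical stand-in for tsdbUntakeReadSnap2  */
  free(s);
}

/* Close and suspend may both try to release the same snapshot. Whoever wins the
 * compare-and-swap observes the old non-NULL pointer and releases it; the other
 * caller sees NULL (or loses the CAS) and returns without touching freed memory. */
static void release_snapshot_once(reader_t *r) {
  snapshot_t *p = atomic_load(&r->snap);
  if (p != NULL && atomic_compare_exchange_strong(&r->snap, &p, (snapshot_t *)NULL)) {
    untake_snapshot(p);
  }
}

int main(void) {
  reader_t r;
  atomic_init(&r.snap, (snapshot_t *)malloc(sizeof(snapshot_t)));  /* "take" a snapshot                     */
  release_snapshot_once(&r);                                       /* e.g. the suspend path                 */
  release_snapshot_once(&r);                                       /* e.g. the close path: no double free  */
  printf("snapshot %s\n", atomic_load(&r.snap) == NULL ? "released once" : "still held");
  return 0;
}

The same idea is what the diff expresses with "make sure only release once": the pointer field itself is the ownership token, and only the caller that successfully swaps it to NULL is allowed to free the snapshot.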
- tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); - pReader->pReadSnap = NULL; + // make sure only release once + void* p = pReader->pReadSnap; + if ((p == atomic_val_compare_exchange_ptr((void**)&pReader->pReadSnap, p, NULL)) && (p != NULL)) { + tsdbUntakeReadSnap2(pReader, p, false); + } + if (pReader->bFilesetDelimited) { pReader->status.memTableMinKey = INT64_MAX; pReader->status.memTableMaxKey = INT64_MIN; diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h index 16ee90823b..c27e9ebe04 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h +++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h @@ -348,6 +348,7 @@ typedef struct SCacheRowsReader { STsdbReadSnap* pReadSnap; char* idstr; int64_t lastTs; + SArray* pFuncTypeList; } SCacheRowsReader; int32_t tsdbCacheGetBatch(STsdb* pTsdb, tb_uid_t uid, SArray* pLastArray, SCacheRowsReader* pr, int8_t ltype); diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index e6a80757ff..8844e358d5 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -570,9 +570,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId); } else { vInfo("vgId:%d sync restore finished, start to launch stream task(s)", pVnode->config.vgId); - int32_t numOfTasks = 0; - tqStreamTaskResetStatus(pMeta, &numOfTasks); - + int32_t numOfTasks = tqStreamTasksGetTotalNum(pMeta); if (numOfTasks > 0) { if (pMeta->startInfo.taskStarting == 1) { pMeta->startInfo.restartCount += 1; @@ -580,6 +578,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) pMeta->startInfo.restartCount); } else { pMeta->startInfo.taskStarting = 1; + streamMetaWUnLock(pMeta); tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false); return; diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 640ed2f2f9..92de5c4364 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -82,6 +82,7 @@ typedef struct SColMatchItem { int32_t dstSlotId; bool needOutput; SDataType dataType; + int32_t funcType; } SColMatchItem; typedef struct SColMatchInfo { diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index c3f47cde9d..72da249f50 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -200,6 +200,7 @@ typedef struct SExchangeInfo { uint64_t self; SLimitInfo limitInfo; int64_t openedTs; // start exec time stamp, todo: move to SLoadRemoteDataInfo + char* pTaskId; } SExchangeInfo; typedef struct SScanInfo { diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 2d0a044559..715c354873 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -273,6 +273,7 @@ SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { } int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) { + int32_t code = TSDB_CODE_SUCCESS; for (int32_t k = 0; k < pOperator->exprSupp.numOfExprs; ++k) { if (functionNeedToExecute(&pCtx[k])) { // todo add a dummy funtion to avoid process check @@ -280,7 +281,13 @@ int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) { continue; } - int32_t code = pCtx[k].fpSet.process(&pCtx[k]); + if ((&pCtx[k])->input.pData[0] == 
NULL) { + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + qError("%s aggregate function error happens, input data is NULL.", GET_TASKID(pOperator->pTaskInfo)); + } else { + code = pCtx[k].fpSet.process(&pCtx[k]); + } + if (code != TSDB_CODE_SUCCESS) { qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code)); return code; @@ -562,7 +569,12 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC } else { int32_t code = TSDB_CODE_SUCCESS; if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) { - code = pCtx[k].fpSet.process(&pCtx[k]); + if ((&pCtx[k])->input.pData[0] == NULL) { + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + qError("%s apply functions error, input data is NULL.", GET_TASKID(taskInfo)); + } else { + code = pCtx[k].fpSet.process(&pCtx[k]); + } if (code != TSDB_CODE_SUCCESS) { qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code)); diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 9d4c20493a..63fcfba7c1 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -21,6 +21,7 @@ #include "tmsg.h" #include "executorInt.h" +#include "functionMgt.h" #include "operator.h" #include "querytask.h" #include "tcompare.h" @@ -44,6 +45,7 @@ typedef struct SCacheRowsScanInfo { SArray* pCidList; int32_t indexOfBufferedRes; STableListInfo* pTableList; + SArray* pFuncTypeList; } SCacheRowsScanInfo; static SSDataBlock* doScanCache(SOperatorInfo* pOperator); @@ -105,9 +107,15 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe } SArray* pCidList = taosArrayInit(numOfCols, sizeof(int16_t)); + pInfo->pFuncTypeList = taosArrayInit(taosArrayGetSize(pScanNode->pFuncTypes), sizeof(int32_t)); + taosArrayAddAll(pInfo->pFuncTypeList, pScanNode->pFuncTypes); + for (int i = 0; i < TARRAY_SIZE(pInfo->matchInfo.pList); ++i) { SColMatchItem* pColInfo = taosArrayGet(pInfo->matchInfo.pList, i); taosArrayPush(pCidList, &pColInfo->colId); + if (pInfo->pFuncTypeList != NULL && taosArrayGetSize(pInfo->pFuncTypeList) > i) { + pColInfo->funcType = *(int32_t*)taosArrayGet(pInfo->pFuncTypeList, i); + } } pInfo->pCidList = pCidList; @@ -132,7 +140,7 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe uint64_t suid = tableListGetSuid(pTableListInfo); code = pInfo->readHandle.api.cacheFn.openReader(pInfo->readHandle.vnode, pInfo->retrieveType, pList, totalTables, taosArrayGetSize(pInfo->matchInfo.pList), pCidList, pInfo->pSlotIds, - suid, &pInfo->pLastrowReader, pTaskInfo->id.str); + suid, &pInfo->pLastrowReader, pTaskInfo->id.str, pScanNode->pFuncTypes); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -274,7 +282,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { if (NULL == pInfo->pLastrowReader) { code = pInfo->readHandle.api.cacheFn.openReader(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num, taosArrayGetSize(pInfo->matchInfo.pList), pInfo->pCidList, pInfo->pSlotIds, suid, &pInfo->pLastrowReader, - pTaskInfo->id.str); + pTaskInfo->id.str, pInfo->pFuncTypeList); if (code != TSDB_CODE_SUCCESS) { pInfo->currentGroupIndex += 1; taosArrayClear(pInfo->pUidList); @@ -333,6 +341,7 @@ void destroyCacheScanOperator(void* param) { taosMemoryFree(pInfo->pSlotIds); taosMemoryFree(pInfo->pDstSlotIds); taosArrayDestroy(pInfo->pCidList); + taosArrayDestroy(pInfo->pFuncTypeList); taosArrayDestroy(pInfo->pUidList); 
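Editorial note on the cache-scan plumbing in the hunks around this point: createCacherowsScanOperator above copies pScanNode->pFuncTypes into pInfo->pFuncTypeList and stamps each SColMatchItem with its funcType, so the reader can decide per output column whether to serve the cached last (latest non-null) value or the cached last_row value; the removeRedundantTsCol hunk that follows keeps the timestamp column when the request is last_row. The sketch below only illustrates the index-aligned per-column dispatch idea, not the actual reader; every type and value in it (cache_func_t, col_req_t, cache_slot_t, readFromCache) is a simplified, hypothetical stand-in for SColMatchItem, FUNCTION_TYPE_CACHE_LAST and FUNCTION_TYPE_CACHE_LAST_ROW.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for FUNCTION_TYPE_CACHE_LAST / FUNCTION_TYPE_CACHE_LAST_ROW */
typedef enum { CACHE_FUNC_LAST = 0, CACHE_FUNC_LAST_ROW = 1 } cache_func_t;

/* hypothetical, simplified stand-in for one column-match entry plus the
 * index-aligned entry of the function-type list */
typedef struct {
  int16_t      colId;
  cache_func_t funcType;   /* last(col) or last_row(col) for this output column */
} col_req_t;

/* hypothetical cached state kept per column */
typedef struct {
  double lastValue;        /* latest non-null value of the column           */
  double lastRowValue;     /* value of the column in the most recent row    */
} cache_slot_t;

/* One pass over the request list; entry i of the function-type list must stay
 * aligned with entry i of the column list, which is why the operator copies
 * both in the same order. */
static void readFromCache(const col_req_t *reqs, int n, const cache_slot_t *cache) {
  for (int i = 0; i < n; ++i) {
    const cache_slot_t *slot = &cache[reqs[i].colId];
    double v = (reqs[i].funcType == CACHE_FUNC_LAST_ROW) ? slot->lastRowValue : slot->lastValue;
    printf("col %d (%s) -> %g\n", (int)reqs[i].colId,
           reqs[i].funcType == CACHE_FUNC_LAST_ROW ? "last_row" : "last", v);
  }
}

int main(void) {
  cache_slot_t cache[2] = { {10.0, 12.0}, {7.0, 7.0} };
  /* e.g. SELECT last(c0), last_row(c0), last(c1) FROM t */
  col_req_t reqs[] = { {0, CACHE_FUNC_LAST}, {0, CACHE_FUNC_LAST_ROW}, {1, CACHE_FUNC_LAST} };
  readFromCache(reqs, 3, cache);
  return 0;
}

Keeping the two lists index-aligned is what lets tsdbCacheGetBatch and saveOneRow pick the right cached slot without an extra lookup per column.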
taosArrayDestroy(pInfo->matchInfo.pList); tableListDestroy(pInfo->pTableList); @@ -405,6 +414,8 @@ int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pC SSlotDescNode* pDesc = (SSlotDescNode*)nodesListGetNode(pList, slotId); if (pDesc->dataType.type != TSDB_DATA_TYPE_TIMESTAMP) { taosArrayPush(pMatchInfo, pColInfo); + } else if (FUNCTION_TYPE_CACHE_LAST_ROW == pColInfo->funcType){ + taosArrayPush(pMatchInfo, pColInfo); } } diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 2797cb2d82..06dd43e170 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -260,14 +260,17 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const return TSDB_CODE_SUCCESS; } + int32_t len = strlen(id) + 1; + pInfo->pTaskId = taosMemoryCalloc(1, len); + strncpy(pInfo->pTaskId, id, len); for (int32_t i = 0; i < numOfSources; ++i) { SSourceDataInfo dataInfo = {0}; dataInfo.status = EX_SOURCE_DATA_NOT_READY; - dataInfo.taskId = id; + dataInfo.taskId = pInfo->pTaskId; dataInfo.index = i; SSourceDataInfo* pDs = taosArrayPush(pInfo->pSourceDataInfo, &dataInfo); if (pDs == NULL) { - taosArrayDestroy(pInfo->pSourceDataInfo); + taosArrayDestroyEx(pInfo->pSourceDataInfo, freeSourceDataInfo); return TSDB_CODE_OUT_OF_MEMORY; } } @@ -383,6 +386,8 @@ void doDestroyExchangeOperatorInfo(void* param) { tSimpleHashCleanup(pExInfo->pHashSources); tsem_destroy(&pExInfo->ready); + taosMemoryFreeClear(pExInfo->pTaskId); + taosMemoryFreeClear(param); } @@ -782,7 +787,7 @@ int32_t addSingleExchangeSource(SOperatorInfo* pOperator, SExchangeOperatorBasic if (pIdx->inUseIdx < 0) { SSourceDataInfo dataInfo = {0}; dataInfo.status = EX_SOURCE_DATA_NOT_READY; - dataInfo.taskId = GET_TASKID(pOperator->pTaskInfo); + dataInfo.taskId = pExchangeInfo->pTaskId; dataInfo.index = pIdx->srcIdx; dataInfo.pSrcUidList = taosArrayDup(pBasicParam->uidList, NULL); dataInfo.srcOpType = pBasicParam->srcOpType; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 111784316a..9c60432478 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1343,7 +1343,6 @@ int32_t extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod c.colId = pColNode->colId; c.srcSlotId = pColNode->slotId; c.dstSlotId = pNode->slotId; - c.dataType = pColNode->node.resType; taosArrayPush(pList, &c); } } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 6f1593a121..0d1adc4308 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -621,6 +621,10 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo } else { pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot); } + + if(pRes == NULL) { + st = taosGetTimestampUs(); + } int32_t rowsThreshold = pTaskInfo->pSubplan->rowsThreshold; if (!pTaskInfo->pSubplan->dynamicRowThreshold || 4096 <= pTaskInfo->pSubplan->rowsThreshold) { diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index d965f16862..cc83ecd84e 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -27,6 +27,7 @@ typedef struct SProjectOperatorInfo { SLimitInfo limitInfo; bool mergeDataBlocks; SSDataBlock* pFinalRes; + bool inputIgnoreGroup; } SProjectOperatorInfo; typedef struct SIndefOperatorInfo { @@ -109,7 +110,8 
@@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys pInfo->pFinalRes = createOneDataBlock(pResBlock, false); pInfo->binfo.inputTsOrder = pProjPhyNode->node.inputTsOrder; pInfo->binfo.outputTsOrder = pProjPhyNode->node.outputTsOrder; - + pInfo->inputIgnoreGroup = pProjPhyNode->inputIgnoreGroup; + if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM || pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) { pInfo->mergeDataBlocks = false; } else { @@ -300,6 +302,10 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { return pBlock; } + if (pProjectInfo->inputIgnoreGroup) { + pBlock->info.id.groupId = 0; + } + int32_t status = discardGroupDataBlock(pBlock, pLimitInfo); if (status == PROJECT_RETRIEVE_CONTINUE) { continue; diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index e4d04c3bdb..7f914755a0 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -2905,6 +2905,14 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh goto _error; } + pInfo->twAggSup = (STimeWindowAggSupp){ + .waterMark = pSessionNode->window.watermark, + .calTrigger = pSessionNode->window.triggerType, + .maxTs = INT64_MIN, + .minTs = INT64_MAX, + .deleteMark = getDeleteMark(&pSessionNode->window, 0), + }; + code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, pSessionNode->gap, pTaskInfo->streamInfo.pState, 0, 0, &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI); @@ -2912,13 +2920,6 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh goto _error; } - pInfo->twAggSup = (STimeWindowAggSupp){ - .waterMark = pSessionNode->window.watermark, - .calTrigger = pSessionNode->window.triggerType, - .maxTs = INT64_MIN, - .minTs = INT64_MAX, - }; - initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); pInfo->primaryTsIndex = ((SColumnNode*)pSessionNode->window.pTspk)->slotId; @@ -3787,6 +3788,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys .calTrigger = pStateNode->window.triggerType, .maxTs = INT64_MIN, .minTs = INT64_MAX, + .deleteMark = getDeleteMark(&pStateNode->window, 0), }; initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); diff --git a/source/libs/function/inc/fnLog.h b/source/libs/function/inc/fnLog.h index 9658b6b782..150d145f50 100644 --- a/source/libs/function/inc/fnLog.h +++ b/source/libs/function/inc/fnLog.h @@ -1,6 +1,17 @@ -// -// Created by slzhou on 22-4-20. -// +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ #ifndef TDENGINE_FNLOG_H #define TDENGINE_FNLOG_H diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index cbeca3c9be..79b2e6fb73 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -258,13 +258,24 @@ static int32_t addDbPrecisonParam(SNodeList** pList, uint8_t precision) { return TSDB_CODE_SUCCESS; } +static SDataType* getSDataTypeFromNode(SNode* pNode) { + if (pNode == NULL) return NULL; + if (nodesIsExprNode(pNode)) { + return &((SExprNode*)pNode)->resType; + } else if (QUERY_NODE_COLUMN_REF == pNode->type) { + return &((SColumnRefNode*)pNode)->resType; + } else { + return NULL; + } +} + // There is only one parameter of numeric type, and the return type is parameter type static int32_t translateInOutNum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } else if (IS_NULL_TYPE(paraType)) { @@ -281,7 +292,7 @@ static int32_t translateInNumOutDou(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -296,8 +307,8 @@ static int32_t translateIn2NumOutDou(SFunctionNode* pFunc, char* pErrBuf, int32_ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if ((!IS_NUMERIC_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) || (!IS_NUMERIC_TYPE(para2Type) && !IS_NULL_TYPE(para2Type))) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -313,12 +324,12 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - if (TSDB_DATA_TYPE_VARBINARY == pPara1->resType.type || !IS_STR_DATA_TYPE(pPara1->resType.type)) { + SDataType* pRestType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + if (TSDB_DATA_TYPE_VARBINARY == pRestType->type || !IS_STR_DATA_TYPE(pRestType->type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - pFunc->node.resType = (SDataType){.bytes = pPara1->resType.bytes, .type = pPara1->resType.type}; + pFunc->node.resType = (SDataType){.bytes = pRestType->bytes, .type = pRestType->type}; return TSDB_CODE_SUCCESS; } @@ -327,8 +338,8 @@ static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - 
SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - if (TSDB_DATA_TYPE_VARBINARY == pPara1->resType.type || !IS_STR_DATA_TYPE(pPara1->resType.type)) { + SDataType* pRestType1 = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + if (TSDB_DATA_TYPE_VARBINARY == pRestType1->type || !IS_STR_DATA_TYPE(pRestType1->type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -342,8 +353,8 @@ static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len numOfSpaces = countTrailingSpaces(pValue, isLtrim); } - int32_t resBytes = pPara1->resType.bytes - numOfSpaces; - pFunc->node.resType = (SDataType){.bytes = resBytes, .type = pPara1->resType.type}; + int32_t resBytes = pRestType1->bytes - numOfSpaces; + pFunc->node.resType = (SDataType){.bytes = resBytes, .type = pRestType1->type}; return TSDB_CODE_SUCCESS; } @@ -361,13 +372,13 @@ static int32_t translateLogarithm(SFunctionNode* pFunc, char* pErrBuf, int32_t l return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } if (2 == numOfParams) { - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_NUMERIC_TYPE(para2Type) && !IS_NULL_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -390,7 +401,7 @@ static int32_t translateSum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -413,7 +424,7 @@ static int32_t translateAvgPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -441,7 +452,7 @@ static int32_t translateAvgMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t le return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (TSDB_DATA_TYPE_BINARY != paraType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -456,7 +467,7 @@ static int32_t translateStddevPartial(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if 
(!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -470,7 +481,7 @@ static int32_t translateStddevMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (TSDB_DATA_TYPE_BINARY != paraType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -529,7 +540,7 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -538,7 +549,7 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, i); pValue->notReserved = true; - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; if (!IS_NUMERIC_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -591,15 +602,15 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t pValue->notReserved = true; - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } // param2 if (3 == numOfParams) { - uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; if (!IS_STR_DATA_TYPE(para3Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -638,15 +649,15 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int pValue->notReserved = true; - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } // param2 if (3 == numOfParams) { - uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; if (!IS_STR_DATA_TYPE(para3Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -668,14 +679,14 @@ static int32_t 
translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int if (3 != numOfParams && 2 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (TSDB_DATA_TYPE_BINARY != para1Type || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } if (3 == numOfParams) { - uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; if (!IS_STR_DATA_TYPE(para3Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -729,8 +740,8 @@ static int32_t translateTopBot(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -753,7 +764,7 @@ static int32_t translateTopBot(SFunctionNode* pFunc, char* pErrBuf, int32_t len) pValue->notReserved = true; // set result type - SDataType* pType = &((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; + SDataType* pType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); pFunc->node.resType = (SDataType){.bytes = pType->bytes, .type = pType->type}; return TSDB_CODE_SUCCESS; } @@ -783,7 +794,7 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -797,7 +808,7 @@ static int32_t translateSpreadImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (isPartial) { if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -873,7 +884,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_TIMESTAMP_TYPE(paraType)) { return 
invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -881,6 +892,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t // param1 if (2 == numOfParams) { SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -889,7 +901,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t pValue->notReserved = true; - paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -907,7 +919,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (TSDB_DATA_TYPE_BINARY != paraType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -948,13 +960,13 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le pValue->notReserved = true; } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } - pFunc->node.resType = (SDataType){.bytes = 64, .type = TSDB_DATA_TYPE_BINARY}; + pFunc->node.resType = (SDataType){.bytes = LEASTSQUARES_BUFF_LENGTH, .type = TSDB_DATA_TYPE_BINARY}; return TSDB_CODE_SUCCESS; } @@ -1142,15 +1154,15 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } // param1 ~ param3 - if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY || - ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY || - !IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type)) { + if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != TSDB_DATA_TYPE_BINARY || + getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BINARY || + !IS_INTEGER_TYPE(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 3))->type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1200,15 +1212,15 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } // param1 ~ param3 - if 
(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY || - ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY || - !IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type)) { + if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != TSDB_DATA_TYPE_BINARY || + getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BINARY || + !IS_INTEGER_TYPE(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 3))->type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1254,7 +1266,7 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type != TSDB_DATA_TYPE_BINARY) { + if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type != TSDB_DATA_TYPE_BINARY) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1319,7 +1331,7 @@ static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1342,9 +1354,9 @@ static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t pValue->notReserved = true; } - if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY || - (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT && - ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) { + if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != TSDB_DATA_TYPE_BINARY || + (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BIGINT && + getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_DOUBLE)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1359,7 +1371,7 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1385,14 +1397,14 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32 pValue->notReserved = true; } - if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY || - (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT && - ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) { + if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != TSDB_DATA_TYPE_BINARY || + (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BIGINT && + 
getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_DOUBLE)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } if (numOfParams == 4 && - ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) { + getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 3))->type != TSDB_DATA_TYPE_BIGINT) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1420,7 +1432,7 @@ static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; uint8_t resType; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -1447,8 +1459,7 @@ static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; // param1 SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { @@ -1462,7 +1473,7 @@ static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { pValue->notReserved = true; - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_NUMERIC_TYPE(colType) || !IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1476,8 +1487,8 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - uint8_t colType = pCol->resType.type; + SDataType* pSDataType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + uint8_t colType = pSDataType->type; // param1 SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); @@ -1492,14 +1503,14 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len) pValue->notReserved = true; - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } // set result type if (IS_STR_DATA_TYPE(colType)) { - pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType}; + pFunc->node.resType = (SDataType){.bytes = pSDataType->bytes, .type = colType}; } else { pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; } @@ -1513,8 +1524,8 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - uint8_t colType = pCol->resType.type; + SDataType* pSDataType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + uint8_t colType = pSDataType->type; // param1 & param2 for 
(int32_t i = 1; i < numOfParams; ++i) { @@ -1534,7 +1545,7 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { pValue->notReserved = true; - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; if (!IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1542,7 +1553,7 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { // set result type if (IS_STR_DATA_TYPE(colType)) { - pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType}; + pFunc->node.resType = (SDataType){.bytes = pSDataType->bytes, .type = colType}; } else { pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; } @@ -1554,7 +1565,7 @@ static int32_t translateDerivative(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; // param1 SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); @@ -1596,7 +1607,7 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -1614,7 +1625,7 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } static int32_t translateIrateImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (isPartial) { if (3 != LIST_LENGTH(pFunc->pParameterList)) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); @@ -1661,14 +1672,14 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 0)); - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if ((!IS_NUMERIC_TYPE(paraType) && !IS_BOOLEAN_TYPE(paraType)) || QUERY_NODE_VALUE == nodeType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } if (2 == numOfParams) { nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 1)); - paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_INTEGER_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1742,26 +1753,26 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l for (int32_t i = 0; i < numOfParams; ++i) { uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i)); - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + uint8_t 
paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; if (IS_NULL_TYPE(paraType) && QUERY_NODE_VALUE == nodeType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } - pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; + pFunc->node.resType = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); return TSDB_CODE_SUCCESS; } static int32_t translateFirstLastImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { // first(col_list) will be rewritten as first(col) SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - uint8_t paraType = ((SExprNode*)pPara)->resType.type; - int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes; + uint8_t paraType = getSDataTypeFromNode(pPara)->type; + int32_t paraBytes = getSDataTypeFromNode(pPara)->bytes; if (isPartial) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); for (int32_t i = 0; i < numOfParams; ++i) { uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i)); - uint8_t pType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + uint8_t pType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; if (IS_NULL_TYPE(pType) && QUERY_NODE_VALUE == nodeType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1816,7 +1827,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_INTEGER_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType && !IS_TIMESTAMP_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -1824,7 +1835,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { // param1 if (numOfParams == 2) { - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1868,7 +1879,7 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) { + if (!IS_STR_DATA_TYPE(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1899,7 +1910,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t /* For concat/concat_ws function, if params have NCHAR type, promote the final result to NCHAR */ for (int32_t i = 0; i < numOfParams; ++i) { SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); - uint8_t paraType = ((SExprNode*)pPara)->resType.type; + uint8_t paraType = getSDataTypeFromNode(pPara)->type; if (TSDB_DATA_TYPE_VARBINARY == paraType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1914,8 +1925,8 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t for (int32_t i = 0; i < numOfParams; ++i) { SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); - uint8_t paraType = 
((SExprNode*)pPara)->resType.type; - int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes; + uint8_t paraType = getSDataTypeFromNode(pPara)->type; + int32_t paraBytes = getSDataTypeFromNode(pPara)->bytes; int32_t factor = 1; if (IS_NULL_TYPE(paraType)) { resultType = TSDB_DATA_TYPE_VARCHAR; @@ -2023,7 +2034,7 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t l } // param0 - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -2064,13 +2075,13 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (para1Type == TSDB_DATA_TYPE_VARBINARY || !IS_STR_DATA_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } if (2 == numOfParams) { - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -2101,8 +2112,8 @@ static int32_t translateToTimestamp(SFunctionNode* pFunc, char* pErrBuf, int32_t if (LIST_LENGTH(pFunc->pParameterList) != 2) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if (!IS_STR_DATA_TYPE(para1Type) || !IS_STR_DATA_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -2115,8 +2126,8 @@ static int32_t translateToChar(SFunctionNode* pFunc, char* pErrBuf, int32_t len) if (LIST_LENGTH(pFunc->pParameterList) != 2) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; // currently only support to_char(timestamp, str) if (!IS_STR_DATA_TYPE(para2Type) || !IS_TIMESTAMP_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -2131,8 +2142,8 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type 
= getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && !IS_TIMESTAMP_TYPE(para1Type)) || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -2150,7 +2161,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_ } if (3 == numOfParams) { - uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; if (!IS_INTEGER_TYPE(para3Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -2185,14 +2196,14 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le } for (int32_t i = 0; i < 2; ++i) { - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } if (3 == numOfParams) { - if (!IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type)) { + if (!IS_INTEGER_TYPE(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } @@ -2240,7 +2251,7 @@ static int32_t translateInStrOutGeom(SFunctionNode* pFunc, char* pErrBuf, int32_ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (!IS_STR_DATA_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -2255,7 +2266,7 @@ static int32_t translateInGeomOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; if (para1Type != TSDB_DATA_TYPE_GEOMETRY && !IS_NULL_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -2270,8 +2281,8 @@ static int32_t translateIn2NumOutGeom(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if ((!IS_NUMERIC_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) || (!IS_NUMERIC_TYPE(para2Type) && !IS_NULL_TYPE(para2Type))) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -2287,8 +2298,8 @@ static int32_t translateIn2GeomOutBool(SFunctionNode* pFunc, char* pErrBuf, int3 return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = 
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; if ((para1Type != TSDB_DATA_TYPE_GEOMETRY && !IS_NULL_TYPE(para1Type)) || (para2Type != TSDB_DATA_TYPE_GEOMETRY && !IS_NULL_TYPE(para2Type))) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 390190e8db..000f634fe5 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -832,6 +832,7 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return code; } +#ifdef BUILD_NO_CALL int32_t setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex) { if (pCtx->subsidiaries.num <= 0) { return TSDB_CODE_SUCCESS; @@ -847,6 +848,7 @@ int32_t setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32 return TSDB_CODE_SUCCESS; } +#endif int32_t setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex) { if (pCtx->subsidiaries.num <= 0) { @@ -1574,9 +1576,19 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { param12 /= param[1][1]; - char buf[512] = {0}; + char buf[LEASTSQUARES_BUFF_LENGTH] = {0}; + char slopBuf[64] = {0}; + char interceptBuf[64] = {0}; + int n = snprintf(slopBuf, 64, "%.6lf", param02); + if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) { + snprintf(slopBuf, 64, "%." DOUBLE_PRECISION_DIGITS, param02); + } + n = snprintf(interceptBuf, 64, "%.6lf", param12); + if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) { + snprintf(interceptBuf, 64, "%." 
DOUBLE_PRECISION_DIGITS, param12); + } size_t len = - snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param02, param12); + snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%s, intercept:%s}", slopBuf, interceptBuf); varDataSetLen(buf, len); colDataSetVal(pCol, currentRow, buf, pResInfo->isNullRes); @@ -2125,7 +2137,7 @@ bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { } static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowIndex) { - if (pTsColInfo == NULL) { + if (pTsColInfo == NULL || pTsColInfo->pData == NULL) { return 0; } diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index ccb75e9341..f559d90604 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -241,6 +241,29 @@ static SVgroupsInfo* vgroupsInfoClone(const SVgroupsInfo* pSrc) { return pDst; } +static SArray* functParamClone(const SArray* pSrc) { + int32_t len = sizeof(SArray) + pSrc->capacity * pSrc->elemSize; + + SArray* pDst = taosArrayInit(pSrc->capacity, pSrc->elemSize); + if (NULL == pDst) { + return NULL; + } + for (int i = 0; i < TARRAY_SIZE(pSrc); ++i) { + SFunctParam* pFunctParam = taosArrayGet(pSrc, i); + SFunctParam* pNewFunctParam = (SFunctParam*)taosArrayPush(pDst, pFunctParam); + + if (NULL == pNewFunctParam) { + return NULL; + } + pNewFunctParam->type = pFunctParam->type; + pNewFunctParam->pCol = taosMemoryCalloc(1, sizeof(SColumn)); + memcpy(pNewFunctParam->pCol, pFunctParam->pCol, sizeof(SColumn)); + } + + return pDst; +} + + static int32_t realTableNodeCopy(const SRealTableNode* pSrc, SRealTableNode* pDst) { COPY_BASE_OBJECT_FIELD(table, tableNodeCopy); CLONE_OBJECT_FIELD(pMeta, tableMetaClone); @@ -425,6 +448,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) { COPY_SCALAR_FIELD(onlyMetaCtbIdx); COPY_SCALAR_FIELD(filesetDelimited); COPY_SCALAR_FIELD(isCountByTag); + CLONE_OBJECT_FIELD(pFuncTypes, functParamClone); return TSDB_CODE_SUCCESS; } @@ -462,6 +486,7 @@ static int32_t logicProjectCopy(const SProjectLogicNode* pSrc, SProjectLogicNode CLONE_NODE_LIST_FIELD(pProjections); COPY_CHAR_ARRAY_FIELD(stmtName); COPY_SCALAR_FIELD(ignoreGroupId); + COPY_SCALAR_FIELD(inputIgnoreGroup); return TSDB_CODE_SUCCESS; } @@ -704,6 +729,15 @@ static int32_t physiPartitionCopy(const SPartitionPhysiNode* pSrc, SPartitionPhy return TSDB_CODE_SUCCESS; } +static int32_t physiProjectCopy(const SProjectPhysiNode* pSrc, SProjectPhysiNode* pDst) { + COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); + CLONE_NODE_LIST_FIELD(pProjections); + COPY_SCALAR_FIELD(mergeDataBlock); + COPY_SCALAR_FIELD(ignoreGroupId); + COPY_SCALAR_FIELD(inputIgnoreGroup); + return TSDB_CODE_SUCCESS; +} + static int32_t dataBlockDescCopy(const SDataBlockDescNode* pSrc, SDataBlockDescNode* pDst) { COPY_SCALAR_FIELD(dataBlockId); CLONE_NODE_LIST_FIELD(pSlots); @@ -936,6 +970,9 @@ SNode* nodesCloneNode(const SNode* pNode) { case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: code = physiPartitionCopy((const SPartitionPhysiNode*)pNode, (SPartitionPhysiNode*)pDst); break; + case QUERY_NODE_PHYSICAL_PLAN_PROJECT: + code = physiProjectCopy((const SProjectPhysiNode*)pNode, (SProjectPhysiNode*)pDst); + break; default: break; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 3f75c6724e..efb637f17e 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ 
-786,6 +786,7 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { static const char* jkProjectLogicPlanProjections = "Projections"; static const char* jkProjectLogicPlanIgnoreGroupId = "IgnoreGroupId"; +static const char* jkProjectLogicPlanInputIgnoreGroup= "InputIgnoreGroup"; static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) { const SProjectLogicNode* pNode = (const SProjectLogicNode*)pObj; @@ -797,6 +798,9 @@ static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkProjectLogicPlanIgnoreGroupId, pNode->ignoreGroupId); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkProjectLogicPlanInputIgnoreGroup, pNode->inputIgnoreGroup); + } return code; } @@ -811,7 +815,9 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkProjectLogicPlanIgnoreGroupId, &pNode->ignoreGroupId); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkProjectLogicPlanInputIgnoreGroup, &pNode->inputIgnoreGroup); + } return code; } @@ -1786,6 +1792,24 @@ static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { static const char* jkLastRowScanPhysiPlanGroupTags = "GroupTags"; static const char* jkLastRowScanPhysiPlanGroupSort = "GroupSort"; static const char* jkLastRowScanPhysiPlanTargets = "Targets"; +static const char* jkLastRowScanPhysiPlanFuncType = "FuncType"; +static const char* jkLastRowScanPhysiPlanFuncTypes = "FuncTypes"; + +static int32_t funcTypeToJson(const void* pObj, SJson* pJson) { + const int32_t* pNode = (const int32_t*)pObj; + + int32_t code = tjsonAddIntegerToObject(pJson, jkLastRowScanPhysiPlanFuncType, *pNode); + return code; +} + +static int32_t jsonToFuncType(const SJson* pJson, void* pObj) { + int32_t* pNode = (int32_t*)pObj; + + int32_t code = tjsonGetIntValue(pJson, jkLastRowScanPhysiPlanFuncType, pNode); + return code; +} + + static int32_t physiLastRowScanNodeToJson(const void* pObj, SJson* pJson) { const SLastRowScanPhysiNode* pNode = (const SLastRowScanPhysiNode*)pObj; @@ -1800,6 +1824,9 @@ static int32_t physiLastRowScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkLastRowScanPhysiPlanTargets, pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddTArray(pJson, jkLastRowScanPhysiPlanFuncTypes, funcTypeToJson, pNode->pFuncTypes); + } return code; } @@ -1817,6 +1844,9 @@ static int32_t jsonToPhysiLastRowScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkLastRowScanPhysiPlanTargets, &pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToTArray(pJson, jkLastRowScanPhysiPlanFuncTypes, jsonToFuncType, &pNode->pFuncTypes, sizeof(int32_t)); + } return code; } @@ -2045,6 +2075,7 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) { static const char* jkProjectPhysiPlanProjections = "Projections"; static const char* jkProjectPhysiPlanMergeDataBlock = "MergeDataBlock"; static const char* jkProjectPhysiPlanIgnoreGroupId = "IgnoreGroupId"; +static const char* jkProjectPhysiPlanInputIgnoreGroup = "InputIgnoreGroup"; static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) { const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj; @@ -2059,7 +2090,9 @@ static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) { if 
(TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanIgnoreGroupId, pNode->ignoreGroupId); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanInputIgnoreGroup, pNode->inputIgnoreGroup); + } return code; } @@ -2076,7 +2109,9 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanIgnoreGroupId, &pNode->ignoreGroupId); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanInputIgnoreGroup, &pNode->inputIgnoreGroup); + } return code; } diff --git a/source/libs/nodes/src/nodesEqualFuncs.c b/source/libs/nodes/src/nodesEqualFuncs.c index f755b8cb8c..241da85267 100644 --- a/source/libs/nodes/src/nodesEqualFuncs.c +++ b/source/libs/nodes/src/nodesEqualFuncs.c @@ -194,3 +194,21 @@ bool nodesEqualNode(const SNode* a, const SNode* b) { return false; } + + bool nodeListNodeEqual(const SNodeList* a, const SNode* b) { + if (NULL == a || NULL == b) { + return false; + } + + if (LIST_LENGTH(a) < 1) { + return false; + } + + SNode *na; + FOREACH(na, a) { + if (nodesEqualNode(na, b)) { + return true; + } + } + return false; +} diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index a4837899fd..7e46a2e0f2 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -65,10 +65,14 @@ typedef int32_t (*FSetObject)(STlv* pTlv, void* pObj); static int32_t nodeToMsg(const void* pObj, STlvEncoder* pEncoder); static int32_t nodeListToMsg(const void* pObj, STlvEncoder* pEncoder); +static int32_t SArrayToMsg(const void* pObj, STlvEncoder* pEncoder); + static int32_t msgToNode(STlvDecoder* pDecoder, void** pObj); static int32_t msgToNodeFromTlv(STlv* pTlv, void** pObj); static int32_t msgToNodeList(STlvDecoder* pDecoder, void** pObj); static int32_t msgToNodeListFromTlv(STlv* pTlv, void** pObj); +static int32_t msgToSArray(STlv* pTlv, void** pObj); + static int32_t initTlvEncoder(STlvEncoder* pEncoder) { pEncoder->allocSize = NODES_MSG_DEFAULT_LEN; @@ -2053,7 +2057,8 @@ enum { PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, PHY_LAST_ROW_SCAN_CODE_GROUP_SORT, PHY_LAST_ROW_SCAN_CODE_IGNULL, - PHY_LAST_ROW_SCAN_CODE_TARGETS + PHY_LAST_ROW_SCAN_CODE_TARGETS, + PHY_LAST_ROW_SCAN_CODE_FUNCTYPES }; static int32_t physiLastRowScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -2072,6 +2077,9 @@ static int32_t physiLastRowScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_LAST_ROW_SCAN_CODE_TARGETS, nodeListToMsg, pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_LAST_ROW_SCAN_CODE_FUNCTYPES, SArrayToMsg, pNode->pFuncTypes); + } return code; } @@ -2098,6 +2106,10 @@ static int32_t msgToPhysiLastRowScanNode(STlvDecoder* pDecoder, void* pObj) { case PHY_LAST_ROW_SCAN_CODE_TARGETS: code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets); break; + case PHY_LAST_ROW_SCAN_CODE_FUNCTYPES: + code = msgToSArray(pTlv, (void**)&pNode->pFuncTypes); + break; + default: break; } @@ -2355,7 +2367,8 @@ enum { PHY_PROJECT_CODE_BASE_NODE = 1, PHY_PROJECT_CODE_PROJECTIONS, PHY_PROJECT_CODE_MERGE_DATA_BLOCK, - PHY_PROJECT_CODE_IGNORE_GROUP_ID + PHY_PROJECT_CODE_IGNORE_GROUP_ID, + PHY_PROJECT_CODE_INPUT_IGNORE_GROUP }; static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -2371,6 +2384,9 @@ static int32_t 
physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_INPUT_IGNORE_GROUP, pNode->inputIgnoreGroup); + } return code; } @@ -2394,6 +2410,9 @@ static int32_t msgToPhysiProjectNode(STlvDecoder* pDecoder, void* pObj) { case PHY_PROJECT_CODE_IGNORE_GROUP_ID: code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId); break; + case PHY_PROJECT_CODE_INPUT_IGNORE_GROUP: + code = tlvDecodeBool(pTlv, &pNode->inputIgnoreGroup); + break; default: break; } @@ -4393,6 +4412,31 @@ static int32_t nodeListToMsg(const void* pObj, STlvEncoder* pEncoder) { return TSDB_CODE_SUCCESS; } +enum { + SARRAY_CODE_CAPACITY = 1, + SARRAY_CODE_ELEMSIZE, + SARRAY_CODE_SIZE, + SARRAY_CODE_PDATA +}; + +static int32_t SArrayToMsg(const void* pObj, STlvEncoder* pEncoder) { + const SArray* pArray = (const SArray*)pObj; + int32_t code = TSDB_CODE_SUCCESS; + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI32(pEncoder, SARRAY_CODE_CAPACITY, pArray->capacity); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI32(pEncoder, SARRAY_CODE_ELEMSIZE, pArray->elemSize); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI32(pEncoder, SARRAY_CODE_SIZE, pArray->size); + } + if (TSDB_CODE_SUCCESS == code && pArray->capacity * pArray->elemSize > 0 && pArray->pData != NULL) { + code = tlvEncodeBinary(pEncoder, SARRAY_CODE_PDATA, pArray->pData, pArray->capacity * pArray->elemSize); + } + return code; +} + static int32_t msgToNodeList(STlvDecoder* pDecoder, void** pObj) { SNodeList* pList = nodesMakeList(); @@ -4413,6 +4457,67 @@ static int32_t msgToNodeList(STlvDecoder* pDecoder, void** pObj) { return code; } +static int32_t msgToSArray(STlv* pTlv, void** pObj){ + SArray* pArray = NULL; + uint32_t capacity = 0; + uint32_t elemSize = 0; + uint32_t actualSize; + int32_t decodeFieldNum = 0;; + int32_t code = TSDB_CODE_SUCCESS; + STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value}; + STlv* pTlvTemp = NULL; + STlv* pDataTlv = NULL; + + tlvForEach(&decoder, pTlvTemp, code) { + switch (pTlvTemp->type) { + case SARRAY_CODE_CAPACITY: + code = tlvDecodeI32(pTlvTemp, &capacity); + break; + case SARRAY_CODE_ELEMSIZE: + code = tlvDecodeI32(pTlvTemp, &elemSize); + break; + case SARRAY_CODE_SIZE: + code = tlvDecodeI32(pTlvTemp, &actualSize); + break; + case SARRAY_CODE_PDATA: + if (decodeFieldNum < 3) { + pDataTlv = pTlvTemp; + break; + } + pArray = taosArrayInit(capacity, elemSize); + if (NULL == pArray) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pArray->size = actualSize; + if (TSDB_CODE_SUCCESS != code || pTlvTemp == NULL) { + taosArrayDestroy(pArray); + return TSDB_CODE_OUT_OF_MEMORY; + } + code = tlvDecodeBinary(pTlvTemp, pArray->pData); + break; + default: + break; + } + decodeFieldNum++; + } + + if (pDataTlv != NULL) { + pArray = taosArrayInit(capacity, elemSize); + if (NULL == pArray) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pArray->size = actualSize; + if (TSDB_CODE_SUCCESS != code || pTlvTemp == NULL) { + taosArrayDestroy(pArray); + return TSDB_CODE_OUT_OF_MEMORY; + } + code = tlvDecodeBinary(pDataTlv, pArray->pData); + } + *pObj = pArray; + return code; +} + + static int32_t msgToNodeListFromTlv(STlv* pTlv, void** pObj) { STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value}; return msgToNodeList(&decoder, pObj); diff --git a/source/libs/nodes/src/nodesUtilFuncs.c 
b/source/libs/nodes/src/nodesUtilFuncs.c index 222539ec36..e746649add 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -295,7 +295,7 @@ SNode* nodesMakeNode(ENodeType type) { case QUERY_NODE_LEFT_VALUE: return makeNode(type, sizeof(SLeftValueNode)); case QUERY_NODE_COLUMN_REF: - return makeNode(type, sizeof(SColumnDefNode)); + return makeNode(type, sizeof(SColumnRefNode)); case QUERY_NODE_WHEN_THEN: return makeNode(type, sizeof(SWhenThenNode)); case QUERY_NODE_CASE_WHEN: @@ -676,6 +676,8 @@ static void destroyTableCfg(STableCfg* pCfg) { static void destroySmaIndex(void* pIndex) { taosMemoryFree(((STableIndexInfo*)pIndex)->expr); } +static void destroyFuncParam(void* pValue) { taosMemoryFree(((SFunctParam*)pValue)->pCol); } + static void destroyHintValue(EHintOption option, void* value) { switch (option) { default: @@ -1175,6 +1177,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyList(pLogicNode->pGroupTags); nodesDestroyList(pLogicNode->pTags); nodesDestroyNode(pLogicNode->pSubtable); + taosArrayDestroyEx(pLogicNode->pFuncTypes, destroyFuncParam); break; } case QUERY_NODE_LOGIC_PLAN_JOIN: { @@ -1302,6 +1305,7 @@ void nodesDestroyNode(SNode* pNode) { destroyScanPhysiNode((SScanPhysiNode*)pNode); nodesDestroyList(pPhyNode->pGroupTags); nodesDestroyList(pPhyNode->pTargets); + taosArrayDestroy(pPhyNode->pFuncTypes); break; } case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: @@ -2114,6 +2118,7 @@ static EDealRes collectFuncs(SNode* pNode, void* pContext) { FOREACH(pn, pCxt->pFuncs) { if (nodesEqualNode(pn, pNode)) { bFound = true; + break; } } if (!bFound) { @@ -2136,6 +2141,20 @@ static int32_t funcNodeEqual(const void* pLeft, const void* pRight, size_t len) return nodesEqualNode(*(const SNode**)pLeft, *(const SNode**)pRight) ? 
0 : 1; } +int32_t nodesCollectSelectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList* pFuncs) { + if (NULL == pSelect || NULL == pFuncs) { + return TSDB_CODE_FAILED; + } + + SCollectFuncsCxt cxt = {.errCode = TSDB_CODE_SUCCESS, + .classifier = classifier, + .tableAlias = tableAlias, + .pFuncs = pFuncs}; + + nodesWalkSelectStmt(pSelect, clause, collectFuncs, &cxt); + return cxt.errCode; +} + int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs) { if (NULL == pSelect || NULL == pFuncs) { return TSDB_CODE_FAILED; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index e407cc1e81..70e4744644 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1085,22 +1085,31 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** pCol, bool* pFound) { SNodeList* pProjectionList = getProjectListFromCurrStmt(pCxt->pCurrStmt); SNode* pNode; + SNode* pFoundNode = NULL; + *pFound = false; FOREACH(pNode, pProjectionList) { SExprNode* pExpr = (SExprNode*)pNode; if (0 == strcmp((*pCol)->colName, pExpr->userAlias)) { - SColumnRefNode* pColRef = (SColumnRefNode*)nodesMakeNode(QUERY_NODE_COLUMN_REF); - if (NULL == pColRef) { - pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; + if (true == *pFound) { + if(nodesEqualNode(pFoundNode, pNode)) { + continue; + } + pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ORDERBY_AMBIGUOUS, (*pCol)->colName); return DEAL_RES_ERROR; } - strcpy(pColRef->colName, pExpr->aliasName); - nodesDestroyNode(*(SNode**)pCol); - *(SNode**)pCol = (SNode*)pColRef; *pFound = true; - return DEAL_RES_CONTINUE; + pFoundNode = pNode; } } - *pFound = false; + if (*pFound) { + SNode* pNew = nodesCloneNode(pFoundNode); + if (NULL == pNew) { + pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; + return DEAL_RES_ERROR; + } + nodesDestroyNode(*(SNode**)pCol); + *(SNode**)pCol = (SNode*)pNew; + } return DEAL_RES_CONTINUE; } @@ -2744,6 +2753,31 @@ static EDealRes doCheckAggColCoexist(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } +static int32_t checkIsEmptyResult(STranslateContext* pCxt, SSelectStmt* pSelect) { + if (pSelect->timeRange.skey > pSelect->timeRange.ekey + && !pSelect->hasCountFunc) { + pSelect->isEmptyResult = true; + } + return TSDB_CODE_SUCCESS; +} + +static int32_t resetSelectFuncNumWithoutDup(SSelectStmt* pSelect) { + if (pSelect->selectFuncNum <= 1) return TSDB_CODE_SUCCESS; + pSelect->selectFuncNum = 0; + SNodeList* pNodeList = nodesMakeList(); + int32_t code = nodesCollectSelectFuncs(pSelect, SQL_CLAUSE_FROM, NULL, fmIsSelectFunc, pNodeList); + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyList(pNodeList); + return code; + } + SNode* pNode = NULL; + FOREACH(pNode, pNodeList) { + pSelect->selectFuncNum = calcSelectFuncNum((SFunctionNode*)pNode, pSelect->selectFuncNum); + } + nodesDestroyList(pNodeList); + return TSDB_CODE_SUCCESS; +} + static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL != pSelect->pGroupByList || NULL != pSelect->pWindow || (!pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc && !pSelect->hasInterpFunc)) { @@ -2757,7 +2791,8 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect) if (!pSelect->isDistinct) { nodesRewriteExprs(pSelect->pOrderByList, 
doCheckAggColCoexist, &cxt); } - if (1 == pSelect->selectFuncNum && !pSelect->hasOtherVectorFunc) { + if (((!cxt.existCol && 0 < pSelect->selectFuncNum) || (cxt.existCol && 1 == pSelect->selectFuncNum) ) + && !pSelect->hasOtherVectorFunc) { return rewriteColsToSelectValFunc(pCxt, pSelect); } if (cxt.existCol) { @@ -3454,13 +3489,7 @@ static int32_t translateOrderByPosition(STranslateContext* pCxt, SNodeList* pPro } else if (0 == pos || pos > LIST_LENGTH(pProjectionList)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_NUMBER_OF_SELECT); } else { - SColumnRefNode* pCol = (SColumnRefNode*)nodesMakeNode(QUERY_NODE_COLUMN_REF); - if (NULL == pCol) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); - } - strcpy(pCol->colName, ((SExprNode*)nodesListGetNode(pProjectionList, pos - 1))->aliasName); - ((SOrderByExprNode*)pNode)->pExpr = (SNode*)pCol; - nodesDestroyNode(pExpr); + // No longer using SColumnRefNode, processing in replaceOrderByAliasImpl function } } else { *pOther = true; @@ -4405,9 +4434,6 @@ static int32_t translateWhere(STranslateContext* pCxt, SSelectStmt* pSelect) { if (TSDB_CODE_SUCCESS == code) { code = getQueryTimeRange(pCxt, pSelect->pWhere, &pSelect->timeRange); } - if (TSDB_CODE_SUCCESS == code && pSelect->timeRange.skey > pSelect->timeRange.ekey) { - pSelect->isEmptyResult = true; - } if (pSelect->pWhere != NULL) { setTableVgroupsFromEqualTbnameCond(pCxt, pSelect); } @@ -4483,12 +4509,12 @@ typedef struct SReplaceOrderByAliasCxt { static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) { SReplaceOrderByAliasCxt* pCxt = pContext; - if (QUERY_NODE_COLUMN_REF == nodeType(*pNode)) { - SNodeList* pProjectionList = pCxt->pProjectionList; - SNode* pProject = NULL; + SNodeList* pProjectionList = pCxt->pProjectionList; + SNode* pProject = NULL; + if (QUERY_NODE_COLUMN == nodeType(*pNode)) { FOREACH(pProject, pProjectionList) { SExprNode* pExpr = (SExprNode*)pProject; - if (0 == strcmp(((SColumnRefNode*)*pNode)->colName, pExpr->aliasName)) { + if (0 == strcmp(((SColumnRefNode*)*pNode)->colName, pExpr->userAlias) && nodeType(*pNode) == nodeType(pProject)) { SNode* pNew = nodesCloneNode(pProject); if (NULL == pNew) { pCxt->pTranslateCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; @@ -4500,7 +4526,29 @@ static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } } + } else if (QUERY_NODE_ORDER_BY_EXPR == nodeType(*pNode)) { + STranslateContext* pTransCxt = pCxt->pTranslateCxt; + SNode* pExpr = ((SOrderByExprNode*)*pNode)->pExpr; + if (QUERY_NODE_VALUE == nodeType(pExpr)) { + SValueNode* pVal = (SValueNode*)pExpr; + if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) { + return pTransCxt->errCode; + } + int32_t pos = getPositionValue(pVal); + if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) { + SNode* pNew = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1)); + if (NULL == pNew) { + pCxt->pTranslateCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; + return DEAL_RES_ERROR; + } + ((SExprNode*)pNew)->orderAlias = true; + ((SOrderByExprNode*)*pNode)->pExpr = pNew; + nodesDestroyNode(pExpr); + return DEAL_RES_CONTINUE; + } + } } + return DEAL_RES_CONTINUE; } @@ -4573,6 +4621,10 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect code = translateOrderBy(pCxt, pSelect); } if (TSDB_CODE_SUCCESS == code) { + code = checkIsEmptyResult(pCxt, pSelect); + } + if (TSDB_CODE_SUCCESS == code) { + resetSelectFuncNumWithoutDup(pSelect); code = checkAggColCoexist(pCxt, pSelect); } if 
(TSDB_CODE_SUCCESS == code) { diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 26e6111074..dfe33ce55e 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -190,6 +190,8 @@ static char* getSyntaxErrFormat(int32_t errCode) { return "invalid ip range"; case TSDB_CODE_OUT_OF_MEMORY: return "Out of memory"; + case TSDB_CODE_PAR_ORDERBY_AMBIGUOUS: + return "ORDER BY \"%s\" is ambiguous"; default: return "Unknown error"; } diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp index 53d97d0699..390faab537 100644 --- a/source/libs/parser/test/parSelectTest.cpp +++ b/source/libs/parser/test/parSelectTest.cpp @@ -413,6 +413,28 @@ TEST_F(ParserSelectTest, semanticCheck) { run("SELECT c1 FROM t1 order by COUNT(*)", TSDB_CODE_PAR_NOT_SINGLE_GROUP); + run("SELECT COUNT(*) FROM t1 order by COUNT(*)"); + + run("SELECT COUNT(*) FROM t1 order by last(c2)"); + + run("SELECT c1 FROM t1 order by last(ts)"); + + run("SELECT ts FROM t1 order by last(ts)"); + + run("SELECT c2 FROM t1 order by last(ts)"); + + run("SELECT * FROM t1 order by last(ts)"); + + run("SELECT last(ts) FROM t1 order by last(ts)"); + + run("SELECT last(ts), ts, c1 FROM t1 order by last(ts)"); + + run("SELECT ts, last(ts) FROM t1 order by last(ts)"); + + run("SELECT first(ts), c2 FROM t1 order by last(c1)", TSDB_CODE_PAR_NOT_SINGLE_GROUP); + + run("SELECT c1 FROM t1 order by concat(c2, 'abc')"); + // TSDB_CODE_PAR_NOT_SELECTED_EXPRESSION run("SELECT distinct c1, c2 FROM t1 WHERE c1 > 0 order by ts", TSDB_CODE_PAR_NOT_SELECTED_EXPRESSION); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 730dcd9352..dfb9343cc8 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2501,17 +2501,30 @@ static bool lastRowScanOptCheckColNum(int32_t lastColNum, col_id_t lastColId, return true; } -static bool lastRowScanOptCheckFuncList(SLogicNode* pNode, bool* hasOtherFunc) { +static bool isNeedSplitCacheLastFunc(SFunctionNode* pFunc, SScanLogicNode* pScan) { + int32_t funcType = pFunc->funcType; + if ((FUNCTION_TYPE_LAST_ROW != funcType || (FUNCTION_TYPE_LAST_ROW == funcType && TSDB_CACHE_MODEL_LAST_VALUE == pScan->cacheLastMode)) && + (FUNCTION_TYPE_LAST != funcType || (FUNCTION_TYPE_LAST == funcType && (TSDB_CACHE_MODEL_LAST_ROW == pScan->cacheLastMode || + QUERY_NODE_OPERATOR == nodeType(nodesListGetNode(pFunc->pParameterList, 0)) || QUERY_NODE_VALUE == nodeType(nodesListGetNode(pFunc->pParameterList, 0))))) && + FUNCTION_TYPE_SELECT_VALUE != funcType && FUNCTION_TYPE_GROUP_KEY != funcType) { + return true; + } + return false; +} + +static bool lastRowScanOptCheckFuncList(SLogicNode* pNode, int8_t cacheLastModel, bool* hasOtherFunc) { bool hasNonPKSelectFunc = false; SNode* pFunc = NULL; int32_t lastColNum = 0, selectNonPKColNum = 0; col_id_t lastColId = -1, selectNonPKColId = -1; + SScanLogicNode* pScan = (SScanLogicNode*)nodesListGetNode(((SAggLogicNode*)pNode)->node.pChildren, 0); + uint32_t needSplitFuncCount = 0; FOREACH(pFunc, ((SAggLogicNode*)pNode)->pAggFuncs) { SFunctionNode* pAggFunc = (SFunctionNode*)pFunc; + SNode* pParam = nodesListGetNode(pAggFunc->pParameterList, 0); if (FUNCTION_TYPE_LAST == pAggFunc->funcType) { - SNode* pPar = nodesListGetNode(pAggFunc->pParameterList, 0); - if (QUERY_NODE_COLUMN == nodeType(pPar)) { - SColumnNode* pCol = (SColumnNode*)pPar; + if (QUERY_NODE_COLUMN == nodeType(pParam)) { + SColumnNode* pCol = 
(SColumnNode*)pParam; if (pCol->colType != COLUMN_TYPE_COLUMN) { return false; } @@ -2520,13 +2533,18 @@ static bool lastRowScanOptCheckFuncList(SLogicNode* pNode, bool* hasOtherFunc) { lastColNum++; } } - if (QUERY_NODE_VALUE == nodeType(nodesListGetNode(pAggFunc->pParameterList, 0))) { + else if (QUERY_NODE_VALUE == nodeType(pParam) || QUERY_NODE_OPERATOR == nodeType(pParam)) { + needSplitFuncCount++; + *hasOtherFunc = true; + } + if (!lastRowScanOptCheckColNum(lastColNum, lastColId, selectNonPKColNum, selectNonPKColId)) { return false; } - if (!lastRowScanOptCheckColNum(lastColNum, lastColId, selectNonPKColNum, selectNonPKColId)) - return false; + if (TSDB_CACHE_MODEL_LAST_ROW == cacheLastModel) { + needSplitFuncCount++; + *hasOtherFunc = true; + } } else if (FUNCTION_TYPE_SELECT_VALUE == pAggFunc->funcType) { - SNode* pParam = nodesListGetNode(pAggFunc->pParameterList, 0); if (QUERY_NODE_COLUMN == nodeType(pParam)) { SColumnNode* pCol = (SColumnNode*)pParam; if (COLUMN_TYPE_COLUMN == pCol->colType && PRIMARYKEY_TIMESTAMP_COL_ID != pCol->colId) { @@ -2548,15 +2566,21 @@ static bool lastRowScanOptCheckFuncList(SLogicNode* pNode, bool* hasOtherFunc) { } } else if (FUNCTION_TYPE_LAST_ROW != pAggFunc->funcType) { *hasOtherFunc = true; + needSplitFuncCount++; + } else if (FUNCTION_TYPE_LAST_ROW == pAggFunc->funcType && TSDB_CACHE_MODEL_LAST_VALUE == cacheLastModel) { + *hasOtherFunc = true; + needSplitFuncCount++; } } + if (needSplitFuncCount >= ((SAggLogicNode*)pNode)->pAggFuncs->length) { + return false; + } return true; } static bool lastRowScanOptCheckLastCache(SAggLogicNode* pAgg, SScanLogicNode* pScan) { - // Only one of LAST and LASTROW can appear - if (pAgg->hasLastRow == pAgg->hasLast || (!pAgg->hasLast && !pAgg->hasLastRow) || NULL != pAgg->pGroupKeys || NULL != pScan->node.pConditions || + if ((pAgg->hasLastRow == pAgg->hasLast && !pAgg->hasLastRow) || (!pAgg->hasLast && !pAgg->hasLastRow) || NULL != pAgg->pGroupKeys || NULL != pScan->node.pConditions || !hasSuitableCache(pScan->cacheLastMode, pAgg->hasLastRow, pAgg->hasLast) || IS_TSWINDOW_SPECIFIED(pScan->scanRange)) { return false; @@ -2578,7 +2602,7 @@ static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) { } bool hasOtherFunc = false; - if (!lastRowScanOptCheckFuncList(pNode, &hasOtherFunc)) { + if (!lastRowScanOptCheckFuncList(pNode, pScan->cacheLastMode, &hasOtherFunc)) { return false; } @@ -2593,6 +2617,7 @@ typedef struct SLastRowScanOptSetColDataTypeCxt { bool doAgg; SNodeList* pLastCols; SNodeList* pOtherCols; + int32_t funcType; } SLastRowScanOptSetColDataTypeCxt; static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) { @@ -2615,7 +2640,7 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) { return DEAL_RES_CONTINUE; } -static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCols, bool erase) { +static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCols, SNodeList* pLastRowCols, bool erase) { SNode* pTarget = NULL; WHERE_EACH(pTarget, pTargets) { bool found = false; @@ -2627,6 +2652,10 @@ static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCo break; } } + if (!found && nodeListNodeEqual(pLastRowCols, pTarget)) { + found = true; + } + if (!found && erase) { ERASE_NODE(pTargets); continue; @@ -2635,7 +2664,7 @@ static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCo } } -static void lastRowScanOptRemoveUslessTargets(SNodeList* pTargets, SNodeList* pList1, SNodeList* pList2) 
{ +static void lastRowScanOptRemoveUslessTargets(SNodeList* pTargets, SNodeList* pList1, SNodeList* pList2, SNodeList* pList3) { SNode* pTarget = NULL; WHERE_EACH(pTarget, pTargets) { bool found = false; @@ -2654,6 +2683,11 @@ static void lastRowScanOptRemoveUslessTargets(SNodeList* pTargets, SNodeList* pL } } } + + if (!found && nodeListNodeEqual(pList3, pTarget)) { + found = true; + } + if (!found) { ERASE_NODE(pTargets); continue; @@ -2662,6 +2696,33 @@ static void lastRowScanOptRemoveUslessTargets(SNodeList* pTargets, SNodeList* pL } } +static int32_t lastRowScanBuildFuncTypes(SScanLogicNode* pScan, SColumnNode* pColNode, int32_t funcType) { + SFunctParam* pFuncTypeParam = taosMemoryCalloc(1, sizeof(SFunctParam)); + if (NULL == pFuncTypeParam) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pFuncTypeParam->type = funcType; + if (NULL == pScan->pFuncTypes) { + pScan->pFuncTypes = taosArrayInit(pScan->pScanCols->length, sizeof(SFunctParam)); + if (NULL == pScan->pFuncTypes) { + taosMemoryFree(pFuncTypeParam); + return TSDB_CODE_OUT_OF_MEMORY; + } + } + + pFuncTypeParam->pCol = taosMemoryCalloc(1, sizeof(SColumn)); + if (NULL == pFuncTypeParam->pCol) { + taosMemoryFree(pFuncTypeParam); + return TSDB_CODE_OUT_OF_MEMORY; + } + pFuncTypeParam->pCol->colId = pColNode->colId; + strcpy(pFuncTypeParam->pCol->name, pColNode->colName); + taosArrayPush(pScan->pFuncTypes, pFuncTypeParam); + + taosMemoryFree(pFuncTypeParam); + return TSDB_CODE_SUCCESS; +} + static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) { SAggLogicNode* pAgg = (SAggLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, lastRowScanOptMayBeOptimized); @@ -2673,10 +2734,16 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic SNode* pNode = NULL; SColumnNode* pPKTsCol = NULL; SColumnNode* pNonPKCol = NULL; + SScanLogicNode* pScan = (SScanLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0); + pScan->scanType = SCAN_TYPE_LAST_ROW; + pScan->igLastNull = pAgg->hasLast ? true : false; + SArray* isDuplicateCol = taosArrayInit(pScan->pScanCols->length, sizeof(bool)); + SNodeList* pLastRowCols = NULL; FOREACH(pNode, pAgg->pAggFuncs) { SFunctionNode* pFunc = (SFunctionNode*)pNode; int32_t funcType = pFunc->funcType; + SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, 0); if (FUNCTION_TYPE_LAST_ROW == funcType || FUNCTION_TYPE_LAST == funcType) { int32_t len = snprintf(pFunc->functionName, sizeof(pFunc->functionName), FUNCTION_TYPE_LAST_ROW == funcType ? 
"_cache_last_row" : "_cache_last"); @@ -2686,6 +2753,61 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic nodesClearList(cxt.pLastCols); return code; } + cxt.funcType = pFunc->funcType; + // add duplicate cols which be removed for both last_row, last + if (pAgg->hasLast && pAgg->hasLastRow) { + if (QUERY_NODE_COLUMN == nodeType(pParamNode)) { + SNode* pColNode = NULL; + int i = 0; + FOREACH(pColNode, pScan->pScanCols) { + bool isDup = false; + bool* isDuplicate = taosArrayGet(isDuplicateCol, i); + if (NULL == isDuplicate) { + taosArrayInsert(isDuplicateCol, i, &isDup); + isDuplicate = taosArrayGet(isDuplicateCol, i); + } + i++; + if (nodesEqualNode(pParamNode, pColNode)) { + if (*isDuplicate) { + if (0 == strncmp(((SColumnNode*)pColNode)->colName, "#dup_col.", 9)) { + continue; + } + SNode* newColNode = nodesCloneNode(pColNode); + sprintf(((SColumnNode*)newColNode)->colName, "#dup_col.%p", newColNode); + sprintf(((SColumnNode*)pParamNode)->colName, "#dup_col.%p", newColNode); + + nodesListAppend(pScan->pScanCols, newColNode); + isDup = true; + taosArrayInsert(isDuplicateCol, pScan->pScanCols->length, &isDup); + nodesListAppend(pScan->node.pTargets, nodesCloneNode(newColNode)); + if (funcType != FUNCTION_TYPE_LAST) { + nodesListMakeAppend(&pLastRowCols, nodesCloneNode(newColNode)); + } + + lastRowScanBuildFuncTypes(pScan, (SColumnNode*)newColNode, pFunc->funcType); + } else { + isDup = true; + *isDuplicate = isDup; + if (funcType != FUNCTION_TYPE_LAST && !nodeListNodeEqual(cxt.pLastCols, pColNode)) { + nodesListMakeAppend(&pLastRowCols, nodesCloneNode(pColNode)); + } + lastRowScanBuildFuncTypes(pScan, (SColumnNode*)pColNode, pFunc->funcType); + } + continue; + }else if (nodeListNodeEqual(pFunc->pParameterList, pColNode)) { + if (funcType != FUNCTION_TYPE_LAST && ((SColumnNode*)pColNode)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + !nodeListNodeEqual(pLastRowCols, pColNode)) { + nodesListMakeAppend(&pLastRowCols, nodesCloneNode(pColNode)); + + lastRowScanBuildFuncTypes(pScan, (SColumnNode*)pColNode, pFunc->funcType); + isDup = true; + *isDuplicate = isDup; + } + } + } + } + } + if (FUNCTION_TYPE_LAST == funcType) { nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptSetColDataType, &cxt); nodesListErase(pFunc->pParameterList, nodesListGetCell(pFunc->pParameterList, 1)); @@ -2707,15 +2829,13 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic } } - SScanLogicNode* pScan = (SScanLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0); - pScan->scanType = SCAN_TYPE_LAST_ROW; - pScan->igLastNull = pAgg->hasLast ? 
true : false; if (NULL != cxt.pLastCols) { cxt.doAgg = false; - lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols, true); + cxt.funcType = FUNCTION_TYPE_CACHE_LAST; + lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols, pLastRowCols, true); nodesWalkExprs(pScan->pScanPseudoCols, lastRowScanOptSetColDataType, &cxt); - lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols, false); - lastRowScanOptRemoveUslessTargets(pScan->node.pTargets, cxt.pLastCols, cxt.pOtherCols); + lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols, pLastRowCols, false); + lastRowScanOptRemoveUslessTargets(pScan->node.pTargets, cxt.pLastCols, cxt.pOtherCols, pLastRowCols); if (pPKTsCol && pScan->node.pTargets->length == 1) { // when select last(ts),ts from ..., we add another ts to targets sprintf(pPKTsCol->colName, "#sel_val.%p", pPKTsCol); @@ -2728,10 +2848,12 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic } nodesClearList(cxt.pLastCols); } + pAgg->hasLastRow = false; pAgg->hasLast = false; pCxt->optimized = true; + taosArrayDestroy(isDuplicateCol); return TSDB_CODE_SUCCESS; } @@ -2749,7 +2871,7 @@ static bool splitCacheLastFuncOptMayBeOptimized(SLogicNode* pNode) { } bool hasOtherFunc = false; - if (!lastRowScanOptCheckFuncList(pNode, &hasOtherFunc)) { + if (!lastRowScanOptCheckFuncList(pNode, pScan->cacheLastMode, &hasOtherFunc)) { return false; } @@ -2770,6 +2892,16 @@ static int32_t splitCacheLastFuncOptCreateAggLogicNode(SAggLogicNode** pNewAgg, pNew->hasLastRow = false; pNew->hasLast = false; + SNode* pFuncNode = NULL; + FOREACH(pFuncNode, pFunc) { + SFunctionNode* pFunc = (SFunctionNode*)pFuncNode; + if (FUNCTION_TYPE_LAST_ROW == pFunc->funcType) { + pNew->hasLastRow = true; + } else if (FUNCTION_TYPE_LAST == pFunc->funcType) { + pNew->hasLast = true; + } + } + pNew->hasTimeLineFunc = pAgg->hasTimeLineFunc; pNew->hasGroupKeyOptimized = false; pNew->onlyHasKeepOrderFunc = pAgg->onlyHasKeepOrderFunc; @@ -2894,21 +3026,31 @@ static int32_t splitCacheLastFuncOptimize(SOptimizeContext* pCxt, SLogicSubplan* if (NULL == pAgg) { return TSDB_CODE_SUCCESS; } - + SScanLogicNode* pScan = (SScanLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0); SNode* pNode = NULL; SNodeList* pAggFuncList = NULL; + { + bool hasLast = false; + bool hasLastRow = false; WHERE_EACH(pNode, pAgg->pAggFuncs) { SFunctionNode* pFunc = (SFunctionNode*)pNode; int32_t funcType = pFunc->funcType; - if (FUNCTION_TYPE_LAST_ROW != funcType && FUNCTION_TYPE_LAST != funcType && - FUNCTION_TYPE_SELECT_VALUE != funcType && FUNCTION_TYPE_GROUP_KEY != funcType) { + + if (isNeedSplitCacheLastFunc(pFunc, pScan)) { nodesListMakeStrictAppend(&pAggFuncList, nodesCloneNode(pNode)); ERASE_NODE(pAgg->pAggFuncs); continue; } + if (FUNCTION_TYPE_LAST_ROW == funcType ) { + hasLastRow = true; + } else if (FUNCTION_TYPE_LAST == funcType) { + hasLast = true; + } WHERE_NEXT; } + pAgg->hasLast = hasLast; + pAgg->hasLastRow = hasLastRow; } if (NULL == pAggFuncList) { @@ -2980,6 +3122,7 @@ static bool mergeProjectsMayBeOptimized(SLogicNode* pNode) { NULL != pChild->pConditions || NULL != pChild->pLimit || NULL != pChild->pSlimit) { return false; } + return true; } @@ -3018,7 +3161,9 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) { static int32_t mergeProjectsOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SLogicNode* pSelfNode) { SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSelfNode->pChildren, 0); - + if 
(((SProjectLogicNode*)pChild)->ignoreGroupId) { + ((SProjectLogicNode*)pSelfNode)->inputIgnoreGroup = true; + } SMergeProjectionsContext cxt = {.pChildProj = (SProjectLogicNode*)pChild, .errCode = TSDB_CODE_SUCCESS}; nodesRewriteExprs(((SProjectLogicNode*)pSelfNode)->pProjections, mergeProjectionsExpr, &cxt); int32_t code = cxt.errCode; @@ -3183,7 +3328,11 @@ static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPu } case QUERY_NODE_LOGIC_PLAN_SCAN: if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) { - swapLimit(pNodeWithLimit, pNodeLimitPushTo); + if (((SProjectLogicNode*)pNodeWithLimit)->inputIgnoreGroup) { + cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT); + } else { + swapLimit(pNodeWithLimit, pNodeLimitPushTo); + } return true; } default: diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 8b9bfcfa22..c2014e97dc 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -562,9 +562,36 @@ static int32_t createLastRowScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSu pScan->groupSort = pScanLogicNode->groupSort; pScan->ignoreNull = pScanLogicNode->igLastNull; + vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); - return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); + int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); + + if (TSDB_CODE_SUCCESS == code && pScanLogicNode->pFuncTypes != NULL) { + pScan->pFuncTypes = taosArrayInit(taosArrayGetSize(pScanLogicNode->pFuncTypes), sizeof(int32_t)); + if (NULL == pScan->pFuncTypes) { + return TSDB_CODE_OUT_OF_MEMORY; + } + SNode* pTargetNode = NULL; + int funcTypeIndex = 0; + FOREACH(pTargetNode, ((SScanPhysiNode*)pScan)->pScanCols) { + if (((STargetNode*)pTargetNode)->pExpr->type != QUERY_NODE_COLUMN) { + continue; + } + SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pTargetNode)->pExpr; + + for (int i = 0; i < TARRAY_SIZE(pScanLogicNode->pFuncTypes); ++i) { + SFunctParam* pFunctParam = taosArrayGet(pScanLogicNode->pFuncTypes, i); + if (pColNode->colId == pFunctParam->pCol->colId && + 0 == strncmp(pColNode->colName, pFunctParam->pCol->name, strlen(pColNode->colName))) { + taosArrayInsert(pScan->pFuncTypes, funcTypeIndex, &pFunctParam->type); + break; + } + } + funcTypeIndex++; + } + } + return code; } static int32_t createTableCountScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, @@ -1432,6 +1459,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild pProject->mergeDataBlock = projectCanMergeDataBlock(pProjectLogicNode); pProject->ignoreGroupId = pProjectLogicNode->ignoreGroupId; + pProject->inputIgnoreGroup = pProjectLogicNode->inputIgnoreGroup; int32_t code = TSDB_CODE_SUCCESS; if (0 == LIST_LENGTH(pChildren)) { diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 987a1dc051..3e9c261446 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -1272,8 +1272,10 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) static int32_t stbSplGetSplitNodeForScan(SStableSplitInfo* pInfo, SLogicNode** pSplitNode) { *pSplitNode = pInfo->pSplitNode; - if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) && - NULL == 
pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) { + if (NULL != pInfo->pSplitNode->pParent && + QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) && + NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit && + !((SProjectLogicNode*)pInfo->pSplitNode->pParent)->inputIgnoreGroup) { *pSplitNode = pInfo->pSplitNode->pParent; if (NULL != pInfo->pSplitNode->pLimit) { (*pSplitNode)->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit); diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index e713043b80..2e44c75c17 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2427,9 +2427,19 @@ int32_t leastSQRScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarPa matrix12 /= matrix[1][1]; - char buf[64] = {0}; - size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", matrix02, - matrix12); + char buf[LEASTSQUARES_BUFF_LENGTH] = {0}; + char slopBuf[64] = {0}; + char interceptBuf[64] = {0}; + int n = snprintf(slopBuf, 64, "%.6lf", matrix02); + if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) { + snprintf(slopBuf, 64, "%." DOUBLE_PRECISION_DIGITS, matrix02); + } + n = snprintf(interceptBuf, 64, "%.6lf", matrix12); + if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) { + snprintf(interceptBuf, 64, "%." DOUBLE_PRECISION_DIGITS, matrix12); + } + size_t len = + snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%s, intercept:%s}", slopBuf, interceptBuf); varDataSetLen(buf, len); colDataSetVal(pOutputData, 0, buf, false); } diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 3b7a76bfc7..1c90e61ea3 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -52,10 +52,10 @@ typedef enum { SCH_ALL, } SCH_POLICY; -#define SCHEDULE_DEFAULT_MAX_JOB_NUM 1000 -#define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000 -#define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ -#define SCHEDULE_DEFAULT_MAX_NODE_NUM 20 +#define SCHEDULE_DEFAULT_MAX_JOB_NUM 1000 +#define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000 +#define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ +#define SCHEDULE_DEFAULT_MAX_NODE_NUM 20 #define SCH_DEFAULT_TASK_TIMEOUT_USEC 5000000 #define SCH_MAX_TASK_TIMEOUT_USEC 300000000 @@ -68,8 +68,9 @@ typedef struct SSchDebug { } SSchDebug; typedef struct SSchTrans { - void *pTrans; - void *pHandle; + void *pTrans; + void *pHandle; + int64_t pHandleId; } SSchTrans; typedef struct SSchHbTrans { @@ -299,7 +300,7 @@ typedef struct SSchJob { void *fetchRes; // TODO free it or not bool fetched; bool noMoreRetry; - int64_t resNumOfRows; // from int32_t to int64_t + int64_t resNumOfRows; // from int32_t to int64_t SSchResInfo userRes; char *sql; SQueryProfileSummary summary; @@ -334,14 +335,15 @@ extern SSchedulerMgmt schMgmt; (!SCH_IS_DATA_BIND_QRY_TASK(_task))) #define SCH_UPDATE_REDIRECT_CODE(job, _code) atomic_val_compare_exchange_32(&((job)->redirectCode), 0, _code) -#define SCH_GET_REDIRECT_CODE(job, _code) (((!NO_RET_REDIRECT_ERROR(_code)) || (job)->redirectCode == 0) ? (_code) : (job)->redirectCode) +#define SCH_GET_REDIRECT_CODE(job, _code) \ + (((!NO_RET_REDIRECT_ERROR(_code)) || (job)->redirectCode == 0) ? 
(_code) : (job)->redirectCode) #define SCH_SET_TASK_STATUS(task, st) atomic_store_8(&(task)->status, st) #define SCH_GET_TASK_STATUS(task) atomic_load_8(&(task)->status) #define SCH_GET_TASK_STATUS_STR(task) jobTaskStatusStr(SCH_GET_TASK_STATUS(task)) #define SCH_TASK_ALREADY_LAUNCHED(task) (SCH_GET_TASK_STATUS(task) >= JOB_TASK_STATUS_EXEC) -#define SCH_TASK_EXEC_DONE(task) (SCH_GET_TASK_STATUS(task) >= JOB_TASK_STATUS_PART_SUCC) +#define SCH_TASK_EXEC_DONE(task) (SCH_GET_TASK_STATUS(task) >= JOB_TASK_STATUS_PART_SUCC) #define SCH_GET_TASK_HANDLE(_task) ((_task) ? (_task)->handle : NULL) #define SCH_SET_TASK_HANDLE(_task, _handle) ((_task)->handle = (_handle)) @@ -362,8 +364,8 @@ extern SSchedulerMgmt schMgmt; #define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl) #define SCH_TASK_NEED_FLOW_CTRL(_job, _task) \ (SCH_IS_DATA_BIND_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEVEL_UNFINISHED((_task)->level)) -#define SCH_FETCH_TYPE(_pSrcTask) (SCH_IS_DATA_BIND_QRY_TASK(_pSrcTask) ? TDMT_SCH_FETCH : TDMT_SCH_MERGE_FETCH) -#define SCH_TASK_NEED_FETCH(_task) ((_task)->plan->subplanType != SUBPLAN_TYPE_MODIFY) +#define SCH_FETCH_TYPE(_pSrcTask) (SCH_IS_DATA_BIND_QRY_TASK(_pSrcTask) ? TDMT_SCH_FETCH : TDMT_SCH_MERGE_FETCH) +#define SCH_TASK_NEED_FETCH(_task) ((_task)->plan->subplanType != SUBPLAN_TYPE_MODIFY) #define SCH_MULTI_LEVEL_LAUNCHED(_job) ((_job)->levelIdx != ((_job)->levelNum - 1)) #define SCH_SET_JOB_TYPE(_job, type) \ @@ -380,25 +382,26 @@ extern SSchedulerMgmt schMgmt; #define SCH_JOB_NEED_WAIT(_job) (!SCH_IS_QUERY_JOB(_job)) #define SCH_JOB_NEED_DROP(_job) (SCH_IS_QUERY_JOB(_job)) #define SCH_IS_EXPLAIN_JOB(_job) (EXPLAIN_MODE_ANALYZE == (_job)->attr.explainMode) -#define SCH_NETWORK_ERR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED) +#define SCH_NETWORK_ERR(_code) \ + ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || \ + (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED) #define SCH_REDIRECT_MSGTYPE(_msgType) \ ((_msgType) == TDMT_SCH_LINK_BROKEN || (_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || \ (_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH) #define SCH_LOW_LEVEL_NETWORK_ERR(_job, _task, _code) \ - (SCH_NETWORK_ERR(_code) && ((_task)->level->level == (_job)->levelIdx)) + (SCH_NETWORK_ERR(_code) && ((_task)->level->level == (_job)->levelIdx)) #define SCH_TOP_LEVEL_NETWORK_ERR(_job, _task, _code) \ - (SCH_NETWORK_ERR(_code) && ((_task)->level->level > (_job)->levelIdx)) -#define SCH_TASK_RETRY_NETWORK_ERR(_task, _code) \ - (SCH_NETWORK_ERR(_code) && (_task)->redirectCtx.inRedirect) + (SCH_NETWORK_ERR(_code) && ((_task)->level->level > (_job)->levelIdx)) +#define SCH_TASK_RETRY_NETWORK_ERR(_task, _code) (SCH_NETWORK_ERR(_code) && (_task)->redirectCtx.inRedirect) -#define SCH_JOB_NEED_RETRY(_job, _task, _msgType, _code) \ - (SCH_REDIRECT_MSGTYPE(_msgType) && SCH_TOP_LEVEL_NETWORK_ERR(_job, _task, _code)) -#define SCH_TASKSET_NEED_RETRY(_job, _task, _msgType, _code) \ - (SCH_REDIRECT_MSGTYPE(_msgType) && \ - (NEED_SCHEDULER_REDIRECT_ERROR(_code) || SCH_LOW_LEVEL_NETWORK_ERR((_job), (_task), (_code)) || SCH_TASK_RETRY_NETWORK_ERR((_task), (_code)))) +#define SCH_JOB_NEED_RETRY(_job, _task, _msgType, _code) \ + (SCH_REDIRECT_MSGTYPE(_msgType) && SCH_TOP_LEVEL_NETWORK_ERR(_job, _task, _code)) +#define SCH_TASKSET_NEED_RETRY(_job, _task, _msgType, _code) \ + (SCH_REDIRECT_MSGTYPE(_msgType) && \ + 
(NEED_SCHEDULER_REDIRECT_ERROR(_code) || SCH_LOW_LEVEL_NETWORK_ERR((_job), (_task), (_code)) || \ + SCH_TASK_RETRY_NETWORK_ERR((_task), (_code)))) #define SCH_TASK_NEED_RETRY(_msgType, _code) \ - ((SCH_REDIRECT_MSGTYPE(_msgType) && SCH_NETWORK_ERR(_code)) || (_code) == TSDB_CODE_SCH_TIMEOUT_ERROR) - + ((SCH_REDIRECT_MSGTYPE(_msgType) && SCH_NETWORK_ERR(_code)) || (_code) == TSDB_CODE_SCH_TIMEOUT_ERROR) #define SCH_IS_LEVEL_UNFINISHED(_level) ((_level)->taskLaunchedNum < (_level)->taskNum) #define SCH_GET_CUR_EP(_addr) (&(_addr)->epSet.eps[(_addr)->epSet.inUse]) @@ -488,50 +491,51 @@ extern SSchedulerMgmt schMgmt; #define TD_RWLATCH_WRITE_FLAG_COPY 0x40000000 -#define SCH_LOCK(type, _lock) \ - do { \ - if (SCH_READ == (type)) { \ - ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before read lock"); \ - SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - taosRLockLatch(_lock); \ - SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - ASSERTS(atomic_load_32(_lock) > 0, "invalid lock value after read lock"); \ - } else { \ - ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before write lock"); \ - SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - taosWLockLatch(_lock); \ - SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - ASSERTS(atomic_load_32(_lock) == TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value after write lock"); \ - } \ +#define SCH_LOCK(type, _lock) \ + do { \ + if (SCH_READ == (type)) { \ + ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before read lock"); \ + SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + taosRLockLatch(_lock); \ + SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + ASSERTS(atomic_load_32(_lock) > 0, "invalid lock value after read lock"); \ + } else { \ + ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before write lock"); \ + SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + taosWLockLatch(_lock); \ + SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + ASSERTS(atomic_load_32(_lock) == TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value after write lock"); \ + } \ } while (0) -#define SCH_UNLOCK(type, _lock) \ - do { \ - if (SCH_READ == (type)) { \ - ASSERTS(atomic_load_32((_lock)) > 0, "invalid lock value before read unlock"); \ - SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - taosRUnLockLatch(_lock); \ - SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after read unlock"); \ - } else { \ - ASSERTS(atomic_load_32((_lock)) & TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value before write unlock"); \ - SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - taosWUnLockLatch(_lock); \ - SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ - ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after write unlock"); \ - } \ +#define SCH_UNLOCK(type, _lock) \ + do { \ + if (SCH_READ == (type)) { \ + ASSERTS(atomic_load_32((_lock)) > 0, "invalid lock value before read unlock"); \ + SCH_LOCK_DEBUG("SCH RULOCK%p:%d, 
%s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + taosRUnLockLatch(_lock); \ + SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after read unlock"); \ + } else { \ + ASSERTS(atomic_load_32((_lock)) & TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value before write unlock"); \ + SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + taosWUnLockLatch(_lock); \ + SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ + ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after write unlock"); \ + } \ } while (0) -#define SCH_RESET_JOB_LEVEL_IDX(_job) do { \ - (_job)->levelIdx = (_job)->levelNum - 1; \ - SCH_JOB_DLOG("set job levelIdx to %d", (_job)->levelIdx); \ -} while (0) +#define SCH_RESET_JOB_LEVEL_IDX(_job) \ + do { \ + (_job)->levelIdx = (_job)->levelNum - 1; \ + SCH_JOB_DLOG("set job levelIdx to %d", (_job)->levelIdx); \ + } while (0) void schDeregisterTaskHb(SSchJob *pJob, SSchTask *pTask); void schCleanClusterHb(void *pTrans); int32_t schLaunchTask(SSchJob *job, SSchTask *task); int32_t schDelayLaunchTask(SSchJob *pJob, SSchTask *pTask); -int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType, void* param); +int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType, void *param); SSchJob *schAcquireJob(int64_t refId); int32_t schReleaseJob(int64_t refId); void schFreeFlowCtrl(SSchJob *pJob); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index f987420ece..5c67c7974f 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -505,7 +505,7 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) { taosMemoryFree(pMsg->pEpSet); SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; - SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL}; + SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL, .pHandleId = 0}; SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans)); SCH_ERR_RET(schBuildAndSendHbMsg(&hbParam->nodeEpId, NULL)); @@ -537,6 +537,7 @@ int32_t schHandleHbCallback(void *param, SDataBuf *pMsg, int32_t code) { SSchTrans trans = {0}; trans.pTrans = pParam->pTrans; trans.pHandle = pMsg->handle; + trans.pHandleId = pMsg->handleRefId; SCH_ERR_JRET(schUpdateHbConnection(&rsp.epId, &trans)); SCH_ERR_JRET(schProcessOnTaskStatusRsp(&rsp.epId, rsp.taskStatus)); diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c index df9cde6b7a..39c54ea731 100644 --- a/source/libs/scheduler/src/schUtil.c +++ b/source/libs/scheduler/src/schUtil.c @@ -42,7 +42,7 @@ char *schDumpEpSet(SEpSet *pEpSet) { } int32_t maxSize = 1024; - char *str = taosMemoryMalloc(maxSize); + char *str = taosMemoryMalloc(maxSize); if (NULL == str) { return NULL; } @@ -73,7 +73,7 @@ char *schGetOpStr(SCH_OP_TYPE type) { } void schFreeHbTrans(SSchHbTrans *pTrans) { - rpcReleaseHandle(pTrans->trans.pHandle, TAOS_CONN_CLIENT); + rpcReleaseHandle((void *)pTrans->trans.pHandleId, TAOS_CONN_CLIENT); schFreeRpcCtx(&pTrans->rpcCtx); } @@ -209,7 +209,7 @@ int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask) { if (!tsEnableQueryHb) { return TSDB_CODE_SUCCESS; } - + SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); SQueryNodeEpId epId = {0}; @@ 
-234,7 +234,8 @@ int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) { hb = taosHashGet(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId)); if (NULL == hb) { SCH_UNLOCK(SCH_READ, &schMgmt.hbLock); - qInfo("taosHashGet hb connection not exists, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn, epId->ep.port); + qInfo("taosHashGet hb connection not exists, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn, + epId->ep.port); SCH_ERR_RET(TSDB_CODE_APP_ERROR); } diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 8a8904e916..3e4919d60d 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -48,8 +48,8 @@ extern "C" { #define stError(...) do { if (stDebugFlag & DEBUG_ERROR) { taosPrintLog("STM ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0) #define stWarn(...) do { if (stDebugFlag & DEBUG_WARN) { taosPrintLog("STM WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0) #define stInfo(...) do { if (stDebugFlag & DEBUG_INFO) { taosPrintLog("STM ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0) -#define stDebug(...) do { if (stDebugFlag & DEBUG_DEBUG) { taosPrintLog("STM ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0) -#define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0) +#define stDebug(...) do { if (stDebugFlag & DEBUG_DEBUG) { taosPrintLog("STM ", DEBUG_DEBUG, stDebugFlag, __VA_ARGS__); }} while(0) +#define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, stDebugFlag, __VA_ARGS__); }} while(0) // clang-format on typedef struct { @@ -120,7 +120,7 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask); void streamTaskSetCheckpointFailedId(SStreamTask* pTask); int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask); int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, float quotaRate, const char*); -STaskId streamTaskExtractKey(const SStreamTask* pTask); +STaskId streamTaskGetTaskId(const SStreamTask* pTask); void streamTaskInitForLaunchHTask(SHistoryTaskInfo* pInfo); void streamTaskSetRetryInfoForLaunch(SHistoryTaskInfo* pInfo); int32_t streamTaskBuildScanhistoryRspMsg(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, void** pBuffer, diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index c17567d5fc..7830bbdd39 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -35,6 +35,10 @@ void streamTimerCleanUp() { streamTimer = NULL; } +tmr_h streamTimerGetInstance() { + return streamTimer; +} + char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) { char buf[128] = {0}; sprintf(buf, "0x%" PRIx64 "-0x%x", streamId, taskId); @@ -290,28 +294,6 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq) { void streamTaskInputFail(SStreamTask* pTask) { atomic_store_8(&pTask->inputq.status, TASK_INPUT_STATUS__FAILED); } -void streamTaskOpenAllUpstreamInput(SStreamTask* pTask) { - int32_t num = taosArrayGetSize(pTask->upstreamInfo.pList); - if (num == 0) { - return; - } - - for (int32_t i = 0; i < num; ++i) { - SStreamChildEpInfo* pInfo = taosArrayGetP(pTask->upstreamInfo.pList, i); - pInfo->dataAllowed = true; - } - - pTask->upstreamInfo.numOfClosed = 0; - stDebug("s-task:%s opening up inputQ from upstream tasks", pTask->id.idStr); -} - -void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId) { - SStreamChildEpInfo* pInfo 
= streamTaskGetUpstreamTaskEpInfo(pTask, taskId); - if (pInfo != NULL) { - pInfo->dataAllowed = false; - } -} - SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId) { int32_t num = taosArrayGetSize(pTask->upstreamInfo.pList); for (int32_t i = 0; i < num; ++i) { diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 4b2a9e4a65..bcf289a019 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -41,6 +41,8 @@ void destroyRocksdbCfInst(RocksdbCfInst* inst); int32_t getCfIdx(const char* cfName); STaskDbWrapper* taskDbOpenImpl(char* key, char* statePath, char* dbPath); +int32_t backendCopyFiles(char* src, char* dst); + void destroyCompactFilteFactory(void* arg); void destroyCompactFilte(void* arg); const char* compactFilteFactoryName(void* arg); @@ -218,7 +220,7 @@ int32_t rebuildDirFromCheckpoint(const char* path, int64_t chkpId, char** dst) { taosRemoveDir(state); } taosMkDir(state); - code = copyFiles(chkp, state); + code = backendCopyFiles(chkp, state); stInfo("copy snap file from %s to %s", chkp, state); if (code != 0) { stError("failed to restart stream backend from %s, reason: %s", chkp, tstrerror(TAOS_SYSTEM_ERROR(errno))); @@ -334,7 +336,7 @@ int32_t rebuildFromRemoteChkp_rsync(char* key, char* chkpPath, int64_t chkpId, c if (code != 0) { return code; } - code = copyFiles(chkpPath, defaultPath); + code = backendCopyFiles(chkpPath, defaultPath); return code; } @@ -359,7 +361,7 @@ int32_t rebuildFromRemoteChkp_s3(char* key, char* chkpPath, int64_t chkpId, char if (code == 0) { taosMkDir(defaultPath); - code = copyFiles(chkpPath, defaultPath); + code = backendCopyFiles(chkpPath, defaultPath); } if (code != 0) { @@ -382,35 +384,143 @@ int32_t rebuildFromRemoteChkp(char* key, char* chkpPath, int64_t chkpId, char* d return -1; } -int32_t rebuildFromLocalChkp(char* key, char* chkpPath, int64_t chkpId, char* defaultPath) { - int32_t code = -1; - int32_t len = strlen(defaultPath) + 32; - char* tmp = taosMemoryCalloc(1, len); - sprintf(tmp, "%s%s", defaultPath, "_tmp"); +int32_t copyFiles_create(char* src, char* dst, int8_t type) { + // create and copy file + int32_t err = taosCopyFile(src, dst); - if (taosIsDir(tmp)) taosRemoveDir(tmp); - if (taosIsDir(defaultPath)) taosRenameFile(defaultPath, tmp); + if (errno == EXDEV || errno == ENOTSUP) { + errno = 0; + return 0; + } + return 0; +} +int32_t copyFiles_hardlink(char* src, char* dst, int8_t type) { + // same fs and hard link + return taosLinkFile(src, dst); +} - if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) { - if (taosIsDir(tmp)) { - taosRemoveDir(tmp); +int32_t backendFileCopyFilesImpl(char* src, char* dst) { + const char* current = "CURRENT"; + size_t currLen = strlen(current); + + int32_t code = 0; + int32_t sLen = strlen(src); + int32_t dLen = strlen(dst); + char* srcName = taosMemoryCalloc(1, sLen + 64); + char* dstName = taosMemoryCalloc(1, dLen + 64); + // copy file to dst + + TdDirPtr pDir = taosOpenDir(src); + if (pDir == NULL) { + taosMemoryFree(srcName); + taosMemoryFree(dstName); + errno = 0; + return -1; + } + + TdDirEntryPtr de = NULL; + while ((de = taosReadDir(pDir)) != NULL) { + char* name = taosGetDirEntryName(de); + if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) continue; + + sprintf(srcName, "%s%s%s", src, TD_DIRSEP, name); + sprintf(dstName, "%s%s%s", dst, TD_DIRSEP, name); + + if (strncmp(name, current, strlen(name) <= currLen ? 
strlen(name) : currLen) == 0) { + code = copyFiles_create(srcName, dstName, 0); + if (code != 0) { + stError("failed to copy file, detail: %s to %s reason: %s", srcName, dstName, + tstrerror(TAOS_SYSTEM_ERROR(code))); + goto _ERROR; + } + } else { + code = copyFiles_hardlink(srcName, dstName, 0); + if (code != 0) { + stError("failed to hard link file, detail: %s to %s, reason: %s", srcName, dstName, + tstrerror(TAOS_SYSTEM_ERROR(code))); + goto _ERROR; + } + } + memset(srcName, 0, sLen + 64); + memset(dstName, 0, dLen + 64); + } + + taosMemoryFreeClear(srcName); + taosMemoryFreeClear(dstName); + taosCloseDir(&pDir); + errno = 0; + return 0; +_ERROR: + taosMemoryFreeClear(srcName); + taosMemoryFreeClear(dstName); + taosCloseDir(&pDir); + errno = 0; + return -1; +} +int32_t backendCopyFiles(char* src, char* dst) { + return backendFileCopyFilesImpl(src, dst); + // // opt later, just hard link + // int32_t sLen = strlen(src); + // int32_t dLen = strlen(dst); + // char* srcName = taosMemoryCalloc(1, sLen + 64); + // char* dstName = taosMemoryCalloc(1, dLen + 64); + + // TdDirPtr pDir = taosOpenDir(src); + // if (pDir == NULL) { + // taosMemoryFree(srcName); + // taosMemoryFree(dstName); + // return -1; + // } + + // TdDirEntryPtr de = NULL; + // while ((de = taosReadDir(pDir)) != NULL) { + // char* name = taosGetDirEntryName(de); + // if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) continue; + + // sprintf(srcName, "%s%s%s", src, TD_DIRSEP, name); + // sprintf(dstName, "%s%s%s", dst, TD_DIRSEP, name); + // // if (!taosDirEntryIsDir(de)) { + // // // code = taosCopyFile(srcName, dstName); + // // if (code == -1) { + // // goto _err; + // // } + // // } + // return backendFileCopyFilesImpl(src, dst); + + // memset(srcName, 0, sLen + 64); + // memset(dstName, 0, dLen + 64); + // } + + // _err: + // taosMemoryFreeClear(srcName); + // taosMemoryFreeClear(dstName); + // taosCloseDir(&pDir); + // return code >= 0 ?
0 : -1; + + // return 0; +} +int32_t rebuildFromLocalChkp(char* key, char* chkpPath, int64_t chkpId, char* defaultPath) { + int32_t code = 0; + if (taosIsDir(defaultPath)) { + taosRemoveDir(defaultPath); taosMkDir(defaultPath); - code = copyFiles(chkpPath, defaultPath); + + stInfo("succ to clear stream backend %s", defaultPath); + } + if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) { + code = backendCopyFiles(chkpPath, defaultPath); if (code != 0) { - stError("failed to restart stream backend from %s, reason: %s", chkpPath, tstrerror(TAOS_SYSTEM_ERROR(errno))); + taosRemoveDir(defaultPath); + taosMkDir(defaultPath); + + stError("failed to restart stream backend from %s, reason: %s, start to restart from empty path: %s", chkpPath, + tstrerror(TAOS_SYSTEM_ERROR(errno)), defaultPath); + code = 0; } else { stInfo("start to restart stream backend at checkpoint path: %s", chkpPath); } } - if (code != 0) { - if (taosIsDir(defaultPath)) taosRemoveDir(defaultPath); - if (taosIsDir(tmp)) taosRenameFile(tmp, defaultPath); - } else { - taosRemoveDir(tmp); - } - taosMemoryFree(tmp); return code; } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index cb3f7a3504..eb50efadeb 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -185,6 +185,11 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc int64_t checkpointId = pDataBlock->info.version; const char* id = pTask->id.idStr; int32_t code = TSDB_CODE_SUCCESS; + int32_t vgId = pTask->pMeta->vgId; + + stDebug("s-task:%s vgId:%d start to handle the checkpoint block, checkpointId:%" PRId64 " ver:%" PRId64 + ", current checkpointingId:%" PRId64, + id, vgId, pTask->chkInfo.checkpointId, pTask->chkInfo.checkpointVer, checkpointId); // set task status if (streamTaskGetStatus(pTask)->state != TASK_STATUS__CK) { @@ -330,7 +335,7 @@ int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) { vgId, id, p->info.taskLevel, checkpointId, pCKInfo->checkpointVer, pCKInfo->nextProcessVer, pStatus->name); // save the task if not sink task - if (p->info.taskLevel < TASK_LEVEL__SINK) { + if (p->info.taskLevel <= TASK_LEVEL__SINK) { streamMetaWLock(pMeta); code = streamMetaSaveTask(pMeta, p); @@ -355,6 +360,8 @@ int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) { void streamTaskSetCheckpointFailedId(SStreamTask* pTask) { pTask->chkInfo.failedId = pTask->chkInfo.checkpointingId; + stDebug("s-task:%s mark the checkpointId:%" PRId64 " (transId:%d) failed", pTask->id.idStr, + pTask->chkInfo.checkpointingId, pTask->chkInfo.transId); } int32_t getChkpMeta(char* id, char* path, SArray* list) { @@ -455,7 +462,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) { // sink task do not need to save the status, and generated the checkpoint if (pTask->info.taskLevel != TASK_LEVEL__SINK) { - stDebug("s-task:%s level:%d start gen checkpoint", id, pTask->info.taskLevel); + stDebug("s-task:%s level:%d start gen checkpoint, checkpointId:%" PRId64, id, pTask->info.taskLevel, ckId); code = streamBackendDoCheckpoint(pTask->pBackend, ckId); if (code != TSDB_CODE_SUCCESS) { stError("s-task:%s gen checkpoint:%" PRId64 " failed, code:%s", id, ckId, tstrerror(terrno)); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 09b00e3712..62b18f970e 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -569,25 +569,21 @@ 
int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName)); } } else { - char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN); - if (ctbName == NULL) { - return -1; - } - + char ctbName[TSDB_TABLE_FNAME_LEN] = {0}; if (pDataBlock->info.parTbName[0]) { - snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db, - pDataBlock->info.parTbName); + if(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && + !isAutoTableName(pDataBlock->info.parTbName) && + !alreadyAddGroupId(pDataBlock->info.parTbName) && + groupId != 0){ + buildCtbNameAddGruopId(pDataBlock->info.parTbName, groupId); + } } else { buildCtbNameByGroupIdImpl(pTask->outputInfo.shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName); - snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db, - pDataBlock->info.parTbName); } - + snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName); /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/ SUseDbRsp* pDbInfo = &pTask->outputInfo.shuffleDispatcher.dbInfo; - hashValue = - taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix); - taosMemoryFree(ctbName); + hashValue = taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix); SBlockName bln = {0}; bln.hashValue = hashValue; memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName)); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index a35493a5c4..cc88a33638 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -713,7 +713,13 @@ bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus) { *pStatus = pState->name; } - return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK); + // pause & halt will still run for sink tasks. 
+ if (streamTaskIsSinkTask(pTask)) { + return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK || + st == TASK_STATUS__PAUSE || st == TASK_STATUS__HALT); + } else { + return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK); + } } static void doStreamExecTaskHelper(void* param, void* tmrId) { diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index ee2b70eebe..6e35e39a0a 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -47,6 +47,12 @@ struct SMetaHbInfo { int64_t hbStart; }; +typedef struct STaskInitTs { + int64_t start; + int64_t end; + bool success; +} STaskInitTs; + SMetaRefMgt gMetaRefMgt; void metaRefMgtInit(); @@ -174,10 +180,8 @@ int32_t streamMetaCheckBackendCompatible(SStreamMeta* pMeta) { } if (info.msgVer <= SSTREAM_TASK_INCOMPATIBLE_VER) { ret = STREAM_STATA_NO_COMPATIBLE; - } else if (info.msgVer == SSTREAM_TASK_NEED_CONVERT_VER) { + } else if (info.msgVer >= SSTREAM_TASK_NEED_CONVERT_VER) { ret = STREAM_STATA_NEED_CONVERT; - } else if (info.msgVer == SSTREAM_TASK_VER) { - ret = STREAM_STATA_COMPATIBLE; } tDecoderClear(&decoder); break; @@ -581,7 +585,7 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pTaskId) { int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded) { *pAdded = false; - STaskId id = streamTaskExtractKey(pTask); + STaskId id = streamTaskGetTaskId(pTask); void* p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); if (p != NULL) { return 0; @@ -871,7 +875,7 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) { int32_t taskId = pTask->id.taskId; tFreeStreamTask(pTask); - STaskId id = streamTaskExtractKey(pTask); + STaskId id = streamTaskGetTaskId(pTask); taosArrayPush(pRecycleList, &id); int32_t total = taosArrayGetSize(pRecycleList); @@ -956,7 +960,7 @@ int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq) { if (tEncodeI64(pEncoder, ps->processedVer) < 0) return -1; if (tEncodeI64(pEncoder, ps->verStart) < 0) return -1; if (tEncodeI64(pEncoder, ps->verEnd) < 0) return -1; - if (tEncodeI64(pEncoder, ps->activeCheckpointId) < 0) return -1; + if (tEncodeI64(pEncoder, ps->checkpointId) < 0) return -1; if (tEncodeI8(pEncoder, ps->checkpointFailed) < 0) return -1; if (tEncodeI32(pEncoder, ps->chkpointTransId) < 0) return -1; } @@ -995,8 +999,8 @@ int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq) { if (tDecodeI64(pDecoder, &entry.processedVer) < 0) return -1; if (tDecodeI64(pDecoder, &entry.verStart) < 0) return -1; if (tDecodeI64(pDecoder, &entry.verEnd) < 0) return -1; - if (tDecodeI64(pDecoder, &entry.activeCheckpointId) < 0) return -1; - if (tDecodeI8(pDecoder, (int8_t*)&entry.checkpointFailed) < 0) return -1; + if (tDecodeI64(pDecoder, &entry.checkpointId) < 0) return -1; + if (tDecodeI8(pDecoder, &entry.checkpointFailed) < 0) return -1; if (tDecodeI32(pDecoder, &entry.chkpointTransId) < 0) return -1; entry.id.taskId = taskId; @@ -1109,8 +1113,8 @@ static int32_t metaHeartbeatToMnodeImpl(SStreamMeta* pMeta) { } if ((*pTask)->chkInfo.checkpointingId != 0) { - entry.checkpointFailed = ((*pTask)->chkInfo.failedId >= (*pTask)->chkInfo.checkpointingId); - entry.activeCheckpointId = (*pTask)->chkInfo.checkpointingId; + entry.checkpointFailed = ((*pTask)->chkInfo.failedId >= (*pTask)->chkInfo.checkpointingId)? 
1:0; + entry.checkpointId = (*pTask)->chkInfo.checkpointingId; entry.chkpointTransId = (*pTask)->chkInfo.transId; if (entry.checkpointFailed) { @@ -1302,28 +1306,28 @@ void streamMetaResetStartInfo(STaskStartInfo* pStartInfo) { } void streamMetaRLock(SStreamMeta* pMeta) { - stTrace("vgId:%d meta-rlock", pMeta->vgId); +// stTrace("vgId:%d meta-rlock", pMeta->vgId); taosThreadRwlockRdlock(&pMeta->lock); } void streamMetaRUnLock(SStreamMeta* pMeta) { - stTrace("vgId:%d meta-runlock", pMeta->vgId); +// stTrace("vgId:%d meta-runlock", pMeta->vgId); int32_t code = taosThreadRwlockUnlock(&pMeta->lock); if (code != TSDB_CODE_SUCCESS) { stError("vgId:%d meta-runlock failed, code:%d", pMeta->vgId, code); } else { - stDebug("vgId:%d meta-runlock completed", pMeta->vgId); +// stDebug("vgId:%d meta-runlock completed", pMeta->vgId); } } void streamMetaWLock(SStreamMeta* pMeta) { - stTrace("vgId:%d meta-wlock", pMeta->vgId); +// stTrace("vgId:%d meta-wlock", pMeta->vgId); taosThreadRwlockWrlock(&pMeta->lock); - stTrace("vgId:%d meta-wlock completed", pMeta->vgId); +// stTrace("vgId:%d meta-wlock completed", pMeta->vgId); } void streamMetaWUnLock(SStreamMeta* pMeta) { - stTrace("vgId:%d meta-wunlock", pMeta->vgId); +// stTrace("vgId:%d meta-wunlock", pMeta->vgId); taosThreadRwlockUnlock(&pMeta->lock); } @@ -1369,7 +1373,6 @@ SArray* streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta) { SStreamTaskState* pState = streamTaskGetStatus(pTask); if (pState->state == TASK_STATUS__CK) { streamTaskSetCheckpointFailedId(pTask); - stDebug("s-task:%s mark the checkpoint:%"PRId64" failed", pTask->id.idStr, pTask->chkInfo.checkpointingId); } else { stDebug("s-task:%s status:%s not reset the checkpoint", pTask->id.idStr, pState->name); } @@ -1407,35 +1410,18 @@ void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader) } } -int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { - streamMetaRLock(pMeta); +static SArray* prepareBeforeStartTasks(SStreamMeta* pMeta) { + streamMetaWLock(pMeta); - int32_t num = taosArrayGetSize(pMeta->pTaskList); - stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); - if (num == 0) { - streamMetaRUnLock(pMeta); - return TSDB_CODE_SUCCESS; - } + SArray* pTaskList = taosArrayDup(pMeta->pTaskList, NULL); + taosHashClear(pMeta->startInfo.pReadyTaskSet); + taosHashClear(pMeta->startInfo.pFailedTaskSet); + pMeta->startInfo.startTs = taosGetTimestampMs(); - // send hb msg to mnode before closing all tasks. 
- SArray* pTaskList = streamMetaSendMsgBeforeCloseTasks(pMeta); - int32_t numOfTasks = taosArrayGetSize(pTaskList); + streamMetaResetTaskStatus(pMeta); + streamMetaWUnLock(pMeta); - for (int32_t i = 0; i < numOfTasks; ++i) { - SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); - SStreamTask* pTask = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId); - if (pTask == NULL) { - continue; - } - - streamTaskStop(pTask); - streamMetaReleaseTask(pMeta, pTask); - } - - taosArrayDestroy(pTaskList); - - streamMetaRUnLock(pMeta); - return 0; + return pTaskList; } int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { @@ -1444,25 +1430,23 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); stInfo("vgId:%d start to check all %d stream task(s) downstream status", vgId, numOfTasks); + if (numOfTasks == 0) { stInfo("vgId:%d start tasks completed", pMeta->vgId); return TSDB_CODE_SUCCESS; } - SArray* pTaskList = NULL; - streamMetaWLock(pMeta); - pTaskList = taosArrayDup(pMeta->pTaskList, NULL); - taosHashClear(pMeta->startInfo.pReadyTaskSet); - taosHashClear(pMeta->startInfo.pFailedTaskSet); - pMeta->startInfo.startTs = taosGetTimestampMs(); - streamMetaWUnLock(pMeta); - - // broadcast the check downstream tasks msg int64_t now = taosGetTimestampMs(); + SArray* pTaskList = prepareBeforeStartTasks(pMeta); + numOfTasks = taosArrayGetSize(pTaskList); + + // broadcast the check downstream tasks msg for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + // todo: may be we should find the related fill-history task and set it failed. + // todo: use hashTable instead SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId); if (pTask == NULL) { stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId); @@ -1470,8 +1454,6 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { continue; } - // todo: may be we should find the related fill-history task and set it failed. - // fill-history task can only be launched by related stream tasks. STaskExecStatisInfo* pInfo = &pTask->execInfo; if (pTask->info.fillHistory == 1) { @@ -1512,6 +1494,60 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { return code; } +int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { + streamMetaRLock(pMeta); + + int32_t num = taosArrayGetSize(pMeta->pTaskList); + stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); + if (num == 0) { + stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num); + streamMetaRUnLock(pMeta); + return TSDB_CODE_SUCCESS; + } + + int64_t st = taosGetTimestampMs(); + + // send hb msg to mnode before closing all tasks. 
+ SArray* pTaskList = streamMetaSendMsgBeforeCloseTasks(pMeta); + int32_t numOfTasks = taosArrayGetSize(pTaskList); + + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId); + if (pTask == NULL) { + continue; + } + + streamTaskStop(pTask); + streamMetaReleaseTask(pMeta, pTask); + } + + taosArrayDestroy(pTaskList); + + double el = (taosGetTimestampMs() - st) / 1000.0; + stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, num, el); + + streamMetaRUnLock(pMeta); + return 0; +} + +bool streamMetaAllTasksReady(const SStreamMeta* pMeta) { + int32_t num = taosArrayGetSize(pMeta->pTaskList); + for(int32_t i = 0; i < num; ++i) { + STaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); + SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, pTaskId, sizeof(*pTaskId)); + if (ppTask == NULL) { + continue; + } + + if ((*ppTask)->status.downstreamReady == 0) { + return false; + } + } + + return true; +} + int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { int32_t vgId = pMeta->vgId; stInfo("vgId:%d start to task:0x%x by checking downstream status", vgId, taskId); @@ -1547,4 +1583,93 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas streamMetaReleaseTask(pMeta, pTask); return ret; +} + +static void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) { + int32_t vgId = pMeta->vgId; + void* pIter = NULL; + size_t keyLen = 0; + + stInfo("vgId:%d %d tasks check-downstream completed %s", vgId, taosHashGetSize(pTaskSet), + succ ? "success" : "failed"); + + while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) { + STaskInitTs* pInfo = pIter; + void* key = taosHashGetKey(pIter, &keyLen); + + SStreamTask** pTask1 = taosHashGet(pMeta->pTasksMap, key, sizeof(STaskId)); + if (pTask1 == NULL) { + stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); + } else { + stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", (*pTask1)->id.idStr, + (*pTask1)->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed"); + } + } +} + +int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, + int64_t endTs, bool ready) { + STaskStartInfo* pStartInfo = &pMeta->startInfo; + STaskId id = {.streamId = streamId, .taskId = taskId}; + + streamMetaWLock(pMeta); + + if (pStartInfo->taskStarting != 1) { + int64_t el = endTs - startTs; + qDebug("vgId:%d not start all task(s), not record status, s-task:0x%x launch succ:%d elapsed time:%" PRId64 "ms", + pMeta->vgId, taskId, ready, el); + streamMetaWUnLock(pMeta); + return 0; + } + + SHashObj* pDst = ready ? pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet; + + STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready}; + taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); + + int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta); + int32_t numOfRecv = taosHashGetSize(pStartInfo->pReadyTaskSet) + taosHashGetSize(pStartInfo->pFailedTaskSet); + + if (numOfRecv == numOfTotal) { + pStartInfo->readyTs = taosGetTimestampMs(); + pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? 
pStartInfo->readyTs - pStartInfo->startTs : 0; + + stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64 + ", readyTs:%" PRId64 " total elapsed time:%.2fs", + pMeta->vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs, + pStartInfo->elapsedTime / 1000.0); + + // print the initialization elapsed time and info + displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true); + displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false); + streamMetaResetStartInfo(pStartInfo); + streamMetaWUnLock(pMeta); + + pStartInfo->completeFn(pMeta); + } else { + streamMetaWUnLock(pMeta); + stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", pMeta->vgId, taskId, + ready, numOfRecv, numOfTotal); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) { + int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + + stDebug("vgId:%d reset all %d stream task(s) status to be uninit", pMeta->vgId, numOfTasks); + if (numOfTasks == 0) { + return TSDB_CODE_SUCCESS; + } + + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); + + STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; + SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + streamTaskResetStatus(*pTask); + } + + return 0; } \ No newline at end of file diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index 90823cd937..78929c365e 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -154,7 +154,6 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu *numOfBlocks = 0; *blockSize = 0; - // todo remove it // no available token in bucket for sink task, let's wait for a little bit if (taskLevel == TASK_LEVEL__SINK && (!streamTaskExtractAvailableToken(pTask->outputInfo.pTokenBucket, pTask->id.idStr))) { stDebug("s-task:%s no available token in bucket for sink data, wait for 10ms", id); diff --git a/source/libs/stream/src/streamStart.c b/source/libs/stream/src/streamStart.c index 84f1dbb4d7..5e1566c1e1 100644 --- a/source/libs/stream/src/streamStart.c +++ b/source/libs/stream/src/streamStart.c @@ -35,12 +35,6 @@ typedef struct STaskRecheckInfo { void* checkTimer; } STaskRecheckInfo; -typedef struct STaskInitTs { - int64_t start; - int64_t end; - bool success; -} STaskInitTs; - static int32_t streamSetParamForScanHistory(SStreamTask* pTask); static void streamTaskSetRangeStreamCalc(SStreamTask* pTask); static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated); @@ -546,15 +540,6 @@ int32_t streamSetParamForScanHistory(SStreamTask* pTask) { return qSetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor); } -int32_t streamResetParamForScanHistory(SStreamTask* pTask) { - stDebug("s-task:%s reset operator option for scan-history data", pTask->id.idStr); - if (pTask->exec.pExecutor != NULL) { - return qResetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor); - } else { - return TSDB_CODE_SUCCESS; - } -} - int32_t streamRestoreParam(SStreamTask* pTask) { stDebug("s-task:%s restore operator param after scan-history", pTask->id.idStr); return qRestoreStreamOperatorOption(pTask->exec.pExecutor); @@ -1134,97 +1119,3 @@ void streamTaskSetRangeStreamCalc(SStreamTask* pTask) { } } -void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta) { - 
streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_PAUSE); - - int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1); - stInfo("vgId:%d s-task:%s pause stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num); - - // in case of fill-history task, stop the tsdb file scan operation. - if (pTask->info.fillHistory == 1) { - void* pExecutor = pTask->exec.pExecutor; - qKillTask(pExecutor, TSDB_CODE_SUCCESS); - } - - stDebug("vgId:%d s-task:%s set pause flag and pause task", pMeta->vgId, pTask->id.idStr); -} - -void streamTaskResume(SStreamTask* pTask) { - SStreamTaskState prevState = *streamTaskGetStatus(pTask); - SStreamMeta* pMeta = pTask->pMeta; - - if (prevState.state == TASK_STATUS__PAUSE || prevState.state == TASK_STATUS__HALT) { - streamTaskRestoreStatus(pTask); - - char* pNew = streamTaskGetStatus(pTask)->name; - if (prevState.state == TASK_STATUS__PAUSE) { - int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1); - stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num); - } else { - stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, prevState.name); - } - } else { - stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, prevState.name); - } -} - -static void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) { - int32_t vgId = pMeta->vgId; - void* pIter = NULL; - size_t keyLen = 0; - - stInfo("vgId:%d %d tasks check-downstream completed %s", vgId, taosHashGetSize(pTaskSet), - succ ? "success" : "failed"); - - while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) { - STaskInitTs* pInfo = pIter; - void* key = taosHashGetKey(pIter, &keyLen); - - SStreamTask** pTask1 = taosHashGet(pMeta->pTasksMap, key, sizeof(STaskId)); - if (pTask1 == NULL) { - stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); - } else { - stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", (*pTask1)->id.idStr, - (*pTask1)->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed"); - } - } -} - -int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, - int64_t endTs, bool ready) { - STaskStartInfo* pStartInfo = &pMeta->startInfo; - STaskId id = {.streamId = streamId, .taskId = taskId}; - - streamMetaWLock(pMeta); - SHashObj* pDst = ready ? pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet; - - STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready}; - taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); - - int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta); - int32_t numOfRecv = taosHashGetSize(pStartInfo->pReadyTaskSet) + taosHashGetSize(pStartInfo->pFailedTaskSet); - - if (numOfRecv == numOfTotal) { - pStartInfo->readyTs = taosGetTimestampMs(); - pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? 
pStartInfo->readyTs - pStartInfo->startTs : 0; - - stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64 - ", readyTs:%" PRId64 " total elapsed time:%.2fs", - pMeta->vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs, - pStartInfo->elapsedTime / 1000.0); - - // print the initialization elapsed time and info - displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true); - displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false); - streamMetaResetStartInfo(pStartInfo); - streamMetaWUnLock(pMeta); - - pStartInfo->completeFn(pMeta); - } else { - streamMetaWUnLock(pMeta); - stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", pMeta->vgId, taskId, - ready, numOfRecv, numOfTotal); - } - - return TSDB_CODE_SUCCESS; -} diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 776a9db522..19b7359981 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -435,7 +435,6 @@ int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void* int32_t streamStateReleaseBuf(SStreamState* pState, void* pVal, bool used) { // todo refactor - stDebug("streamStateReleaseBuf"); if (!pVal) { return 0; } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 387803273a..de448d78f0 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -30,6 +30,55 @@ static int32_t addToTaskset(SArray* pArray, SStreamTask* pTask) { return 0; } +static int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEpSet) { + char buf[512] = {0}; + + if (pTask->info.nodeId == nodeId) { // execution task should be moved away + epsetAssign(&pTask->info.epSet, pEpSet); + EPSET_TO_STR(pEpSet, buf) + stDebug("s-task:0x%x (vgId:%d) self node epset is updated %s", pTask->id.taskId, nodeId, buf); + } + + // check for the dispatch info and the upstream task info + int32_t level = pTask->info.taskLevel; + if (level == TASK_LEVEL__SOURCE) { + streamTaskUpdateDownstreamInfo(pTask, nodeId, pEpSet); + } else if (level == TASK_LEVEL__AGG) { + streamTaskUpdateUpstreamInfo(pTask, nodeId, pEpSet); + streamTaskUpdateDownstreamInfo(pTask, nodeId, pEpSet); + } else { // TASK_LEVEL__SINK + streamTaskUpdateUpstreamInfo(pTask, nodeId, pEpSet); + } + + return 0; +} + +static void freeItem(void* p) { + SStreamContinueExecInfo* pInfo = p; + rpcFreeCont(pInfo->msg.pCont); +} + +static void freeUpstreamItem(void* p) { + SStreamChildEpInfo** pInfo = p; + taosMemoryFree(*pInfo); +} + +static SStreamChildEpInfo* createStreamTaskEpInfo(const SStreamTask* pTask) { + SStreamChildEpInfo* pEpInfo = taosMemoryMalloc(sizeof(SStreamChildEpInfo)); + if (pEpInfo == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + pEpInfo->childId = pTask->info.selfChildId; + pEpInfo->epSet = pTask->info.epSet; + pEpInfo->nodeId = pTask->info.nodeId; + pEpInfo->taskId = pTask->id.taskId; + pEpInfo->stage = -1; + + return pEpInfo; +} + SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, bool fillHistory, int64_t triggerParam, SArray* pTaskList, bool hasFillhistory) { SStreamTask* pTask = (SStreamTask*)taosMemoryCalloc(1, sizeof(SStreamTask)); @@ -164,7 +213,7 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeI64(pDecoder, &pTask->ver) < 0) return -1; - if (pTask->ver != SSTREAM_TASK_VER) return -1;
+ if (pTask->ver <= SSTREAM_TASK_INCOMPATIBLE_VER) return -1; if (tDecodeI64(pDecoder, &pTask->id.streamId) < 0) return -1; if (tDecodeI32(pDecoder, &pTask->id.taskId) < 0) return -1; @@ -251,7 +300,7 @@ int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo) if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeI64(pDecoder, &pChkpInfo->msgVer) < 0) return -1; - // if (ver != SSTREAM_TASK_VER) return -1; + // if (ver <= SSTREAM_TASK_INCOMPATIBLE_VER) return -1; if (tDecodeI64(pDecoder, &skip64) < 0) return -1; if (tDecodeI32(pDecoder, &skip32) < 0) return -1; @@ -279,7 +328,7 @@ int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId) { int64_t ver; if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeI64(pDecoder, &ver) < 0) return -1; - if (ver != SSTREAM_TASK_VER) return -1; + if (ver <= SSTREAM_TASK_INCOMPATIBLE_VER) return -1; if (tDecodeI64(pDecoder, &pTaskId->streamId) < 0) return -1; @@ -291,16 +340,6 @@ int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId) { return 0; } -static void freeItem(void* p) { - SStreamContinueExecInfo* pInfo = p; - rpcFreeCont(pInfo->msg.pCont); -} - -static void freeUpstreamItem(void* p) { - SStreamChildEpInfo** pInfo = p; - taosMemoryFree(*pInfo); -} - void tFreeStreamTask(SStreamTask* pTask) { char* p = NULL; int32_t taskId = pTask->id.taskId; @@ -475,14 +514,6 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i } taosThreadMutexInit(&pTask->lock, &attr); - // if (pTask->info.fillHistory == 1) { - // // - // } else { - - // } - // if (streamTaskSetDb(pMeta, pTask) != 0) { - // return -1; - // } streamTaskOpenAllUpstreamInput(pTask); pTask->outputInfo.pDownstreamUpdateList = taosArrayInit(4, sizeof(SDownstreamTaskEpset)); @@ -509,22 +540,6 @@ int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask) { } } -static SStreamChildEpInfo* createStreamTaskEpInfo(const SStreamTask* pTask) { - SStreamChildEpInfo* pEpInfo = taosMemoryMalloc(sizeof(SStreamChildEpInfo)); - if (pEpInfo == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; - } - - pEpInfo->childId = pTask->info.selfChildId; - pEpInfo->epSet = pTask->info.epSet; - pEpInfo->nodeId = pTask->info.nodeId; - pEpInfo->taskId = pTask->id.taskId; - pEpInfo->stage = -1; - - return pEpInfo; -} - int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask) { SStreamChildEpInfo* pEpInfo = createStreamTaskEpInfo(pUpstreamTask); if (pEpInfo == NULL) { @@ -622,29 +637,6 @@ int32_t streamTaskStop(SStreamTask* pTask) { return 0; } -int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEpSet) { - char buf[512] = {0}; - - if (pTask->info.nodeId == nodeId) { // execution task should be moved away - epsetAssign(&pTask->info.epSet, pEpSet); - EPSET_TO_STR(pEpSet, buf) - stDebug("s-task:0x%x (vgId:%d) self node epset is updated %s", pTask->id.taskId, nodeId, buf); - } - - // check for the dispath info and the upstream task info - int32_t level = pTask->info.taskLevel; - if (level == TASK_LEVEL__SOURCE) { - streamTaskUpdateDownstreamInfo(pTask, nodeId, pEpSet); - } else if (level == TASK_LEVEL__AGG) { - streamTaskUpdateUpstreamInfo(pTask, nodeId, pEpSet); - streamTaskUpdateDownstreamInfo(pTask, nodeId, pEpSet); - } else { // TASK_LEVEL__SINK - streamTaskUpdateUpstreamInfo(pTask, nodeId, pEpSet); - } - - return 0; -} - int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList) { STaskExecStatisInfo* p = &pTask->execInfo; @@ -677,7 +669,29 @@ void 
streamTaskResetUpstreamStageInfo(SStreamTask* pTask) { stDebug("s-task:%s reset all upstream tasks stage info", pTask->id.idStr); } -bool streamTaskAllUpstreamClosed(SStreamTask* pTask) { +void streamTaskOpenAllUpstreamInput(SStreamTask* pTask) { + int32_t num = taosArrayGetSize(pTask->upstreamInfo.pList); + if (num == 0) { + return; + } + + for (int32_t i = 0; i < num; ++i) { + SStreamChildEpInfo* pInfo = taosArrayGetP(pTask->upstreamInfo.pList, i); + pInfo->dataAllowed = true; + } + + pTask->upstreamInfo.numOfClosed = 0; + stDebug("s-task:%s opening up inputQ for %d upstream tasks", pTask->id.idStr, num); +} + +void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId) { + SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, taskId); + if (pInfo != NULL) { + pInfo->dataAllowed = false; + } +} + +bool streamTaskIsAllUpstreamClosed(SStreamTask* pTask) { return pTask->upstreamInfo.numOfClosed == taosArrayGetSize(pTask->upstreamInfo.pList); } @@ -760,7 +774,7 @@ int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskI return code; } -STaskId streamTaskExtractKey(const SStreamTask* pTask) { +STaskId streamTaskGetTaskId(const SStreamTask* pTask) { STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId}; return id; } @@ -796,8 +810,45 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc) pDst->verEnd = pSrc->verEnd; pDst->sinkQuota = pSrc->sinkQuota; pDst->sinkDataSize = pSrc->sinkDataSize; - pDst->activeCheckpointId = pSrc->activeCheckpointId; + pDst->checkpointId = pSrc->checkpointId; pDst->checkpointFailed = pSrc->checkpointFailed; pDst->chkpointTransId = pSrc->chkpointTransId; } +void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) { + streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_PAUSE); + + int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1); + stInfo("vgId:%d s-task:%s pause stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num); + + // in case of fill-history task, stop the tsdb file scan operation. 
+ if (pTask->info.fillHistory == 1) { + void* pExecutor = pTask->exec.pExecutor; + qKillTask(pExecutor, TSDB_CODE_SUCCESS); + } + + stDebug("vgId:%d s-task:%s set pause flag and pause task", pMeta->vgId, pTask->id.idStr); +} + +void streamTaskResume(SStreamTask* pTask) { + SStreamTaskState prevState = *streamTaskGetStatus(pTask); + SStreamMeta* pMeta = pTask->pMeta; + + if (prevState.state == TASK_STATUS__PAUSE || prevState.state == TASK_STATUS__HALT) { + streamTaskRestoreStatus(pTask); + + char* pNew = streamTaskGetStatus(pTask)->name; + if (prevState.state == TASK_STATUS__PAUSE) { + int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1); + stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num); + } else { + stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, prevState.name); + } + } else { + stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, prevState.name); + } +} + +bool streamTaskIsSinkTask(const SStreamTask* pTask) { + return pTask->info.taskLevel == TASK_LEVEL__SINK; +} diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index aac78cf03b..fb5e02c827 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -527,8 +527,7 @@ bool hasRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen) { } SStreamSnapshot* getSnapshot(SStreamFileState* pFileState) { - int64_t mark = (INT64_MIN + pFileState->deleteMark >= pFileState->maxTs) ? INT64_MIN - : pFileState->maxTs - pFileState->deleteMark; + int64_t mark = (pFileState->deleteMark == INT64_MAX) ? INT64_MIN : pFileState->maxTs - pFileState->deleteMark; clearExpiredRowBuff(pFileState, mark, false); return pFileState->usedBuffs; } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 9352d5662e..ff6401cba8 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -676,7 +676,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ // heartbeat timeout if (syncNodeHeartbeatReplyTimeout(pSyncNode)) { terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY; - sNError(pSyncNode, "failed to sync propose since hearbeat timeout, type:%s, last:%" PRId64 ", cmt:%" PRId64, + sNError(pSyncNode, "failed to sync propose since heartbeat timeout, type:%s, last:%" PRId64 ", cmt:%" PRId64, TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex); return -1; } diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 578e6798e0..d9c8bb21ac 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -345,9 +345,9 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { for (int32_t seq = pSndBuf->cursor + 1; seq < pSndBuf->end; ++seq) { SyncSnapBlock *pBlk = pSndBuf->entries[seq % pSndBuf->size]; - ASSERT(pBlk && !pBlk->acked); + ASSERT(pBlk); int64_t nowMs = taosGetTimestampMs(); - if (nowMs < pBlk->sendTimeMs + SYNC_SNAP_RESEND_MS) { + if (pBlk->acked || nowMs < pBlk->sendTimeMs + SYNC_SNAP_RESEND_MS) { continue; } if (syncSnapSendMsg(pSender, pBlk->seq, pBlk->pBlock, pBlk->blockLen, 0) != 0) { diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 246605694b..15b35030d3 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -414,6 +414,7 @@ void cliHandleResp(SCliConn* conn) { // buf's mem alread 
translated to transMsg.pCont if (!CONN_NO_PERSIST_BY_APP(conn)) { transMsg.info.handle = (void*)conn->refId; + transMsg.info.refId = (int64_t)(void*)conn->refId; tDebug("%s conn %p ref by app", CONN_GET_INST_LABEL(conn), conn); } diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 39de117339..f4e35c5b7f 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -18,8 +18,8 @@ #include "zlib.h" #ifdef WINDOWS -#include #include +#include #include #include #define F_OK 0 @@ -50,7 +50,7 @@ typedef struct TdFile { TdThreadRwlock rwlock; int refId; HANDLE hFile; - FILE* fp; + FILE *fp; int32_t tdFileOptions; } TdFile; #else @@ -230,7 +230,7 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *a int32_t code = _stati64(path, &fileStat); #else struct stat fileStat; - int32_t code = stat(path, &fileStat); + int32_t code = stat(path, &fileStat); #endif if (code < 0) { return code; @@ -274,7 +274,7 @@ int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { return -1; } struct stat fileStat; - int32_t code = fstat(pFile->fd, &fileStat); + int32_t code = fstat(pFile->fd, &fileStat); if (code < 0) { printf("taosFStatFile run fstat fail."); return code; @@ -374,7 +374,7 @@ int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) { DWORD bytesRead; if (!ReadFile(pFile->hFile, buf, count, &bytesRead, NULL)) { bytesRead = -1; - } + } #if FILE_WITH_LOCK taosThreadRwlockUnlock(&(pFile->rwlock)); #endif @@ -389,7 +389,7 @@ int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) { taosThreadRwlockWrlock(&(pFile->rwlock)); #endif - DWORD bytesWritten; + DWORD bytesWritten; if (!WriteFile(pFile->hFile, buf, count, &bytesWritten, NULL)) { bytesWritten = -1; } @@ -666,7 +666,7 @@ int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) { } int64_t leftbytes = count; int64_t readbytes; - char * tbuf = (char *)buf; + char *tbuf = (char *)buf; while (leftbytes > 0) { #ifdef WINDOWS @@ -716,7 +716,7 @@ int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) { int64_t nleft = count; int64_t nwritten = 0; - char * tbuf = (char *)buf; + char *tbuf = (char *)buf; while (nleft > 0) { nwritten = write(pFile->fd, (void *)tbuf, (uint32_t)nleft); @@ -1028,7 +1028,7 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in #endif } -#endif // WINDOWS +#endif // WINDOWS TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { FILE *fp = NULL; @@ -1056,7 +1056,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { if (hFile != NULL) CloseHandle(hFile); #else if (fd >= 0) close(fd); -#endif +#endif if (fp != NULL) fclose(fp); return NULL; } @@ -1067,7 +1067,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { pFile->fp = fp; pFile->refId = 0; - #ifdef WINDOWS +#ifdef WINDOWS pFile->hFile = hFile; pFile->tdFileOptions = tdFileOptions; // do nothing, since the property of pmode is set with _O_TEMPORARY; the OS will recycle @@ -1137,7 +1137,7 @@ int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset) #endif return -1; } - DWORD ret = 0; + DWORD ret = 0; OVERLAPPED ol = {0}; ol.OffsetHigh = (uint32_t)((offset & 0xFFFFFFFF00000000LL) >> 0x20); ol.Offset = (uint32_t)(offset & 0xFFFFFFFFLL); @@ -1179,7 +1179,7 @@ int32_t taosFsyncFile(TdFilePtr pFile) { if (pFile->hFile != NULL) { if (pFile->tdFileOptions & TD_FILE_WRITE_THROUGH) { return 0; - } + } return !FlushFileBuffers(pFile->hFile); #else if (pFile->fd >= 0) { @@ -1204,7 
+1204,7 @@ bool taosValidFile(TdFilePtr pFile) { return pFile != NULL && pFile->hFile != NULL; #else return pFile != NULL && pFile->fd > 0; -#endif +#endif } int32_t taosUmaskFile(int32_t maskVal) { @@ -1249,7 +1249,7 @@ int64_t taosGetLineFile(TdFilePtr pFile, char **__restrict ptrBuf) { } bufferSize += 512; - void* newBuf = taosMemoryRealloc(*ptrBuf, bufferSize); + void *newBuf = taosMemoryRealloc(*ptrBuf, bufferSize); if (newBuf == NULL) { taosMemoryFreeClear(*ptrBuf); return -1; @@ -1363,7 +1363,7 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { cmp_end: if (pFile) { - taosCloseFile(&pFile); + taosCloseFile(&pFile); } if (pSrcFile) { taosCloseFile(&pSrcFile); @@ -1386,3 +1386,15 @@ int32_t taosSetFileHandlesLimit() { #endif return 0; } + +int32_t taosLinkFile(char *src, char *dst) { +#ifndef WINDOWS + if (link(src, dst) != 0) { + if (errno == EXDEV || errno == ENOTSUP) { + return -1; + } + return errno; + } +#endif + return 0; +} diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 360689cef3..79b9e9bbed 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -101,8 +101,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STARTING, "Database is starting TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STOPPING, "Database is closing down") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DATA_FMT, "Invalid data format") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CFG_VALUE, "Invalid configuration value") -TAOS_DEFINE_ERROR(TSDB_CODE_IP_NOT_IN_WHITE_LIST, "Not allowed to connect") -TAOS_DEFINE_ERROR(TSDB_CODE_FAILED_TO_CONNECT_S3, "Failed to connect to s3 server") +TAOS_DEFINE_ERROR(TSDB_CODE_IP_NOT_IN_WHITE_LIST, "Not allowed to connect") +TAOS_DEFINE_ERROR(TSDB_CODE_FAILED_TO_CONNECT_S3, "Failed to connect to s3 server") //client TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation") diff --git a/tests/army/community/cluster/snapshot.json b/tests/army/community/cluster/snapshot.json index 64bb2aaf3c..d4f6f00d37 100644 --- a/tests/army/community/cluster/snapshot.json +++ b/tests/army/community/cluster/snapshot.json @@ -6,7 +6,8 @@ "user": "root", "password": "taosdata", "connection_pool_size": 8, - "num_of_records_per_req": 2000, + "num_of_records_per_req": 3000, + "prepared_rand": 3000, "thread_count": 2, "create_table_thread_count": 1, "confirm_parameter_prompt": "no", diff --git a/tests/army/community/cluster/snapshot.py b/tests/army/community/cluster/snapshot.py index 92ecc00726..5b5457be75 100644 --- a/tests/army/community/cluster/snapshot.py +++ b/tests/army/community/cluster/snapshot.py @@ -28,7 +28,9 @@ from frame import * class TDTestCase(TBase): - + updatecfgDict = { + "countAlwaysReturnValue" : "0" + } def insertData(self): tdLog.info(f"insert data.") @@ -42,6 +44,10 @@ class TDTestCase(TBase): self.insert_rows = 100000 self.timestamp_step = 10000 + # create count check table + sql = f"create table {self.db}.ta(ts timestamp, age int) tags(area int)" + tdSql.execute(sql) + def doAction(self): tdLog.info(f"do action.") self.flushDb() @@ -64,7 +70,10 @@ class TDTestCase(TBase): selid = random.choice(vgids) self.balanceVGroupLeaderOn(selid) - + # check count always return value + sql = f"select count(*) from {self.db}.ta" + tdSql.query(sql) + tdSql.checkRows(0) # countAlwaysReturnValue is false # run def run(self): diff --git a/tests/army/community/cmdline/fullopt.py b/tests/army/community/cmdline/fullopt.py index 22667149ae..39d1d581ed 100644 --- a/tests/army/community/cmdline/fullopt.py +++ b/tests/army/community/cmdline/fullopt.py @@ -75,7 +75,6 @@ 
class TDTestCase(TBase): # others etool.exeBinFile("taos", f'-N 200 -l 2048 -s "{sql}" ', wait=False) - etool.exeBinFile("taos", f'-n server', wait=False) def doTaosd(self): @@ -86,11 +85,11 @@ class TDTestCase(TBase): # -s sdb = "./sdb.json" eos.delFile(sdb) - etool.runBinFile("taosd", f"-s -c {cfg}") - self.checkFileExist(sdb) + etool.exeBinFile("taosd", f"-s -c {cfg}") + # -C - etool.runBinFile("taosd", "-C") + etool.exeBinFile("taosd", "-C") # -k rets = etool.runBinFile("taosd", "-C") self.checkListNotEmpty(rets) @@ -120,6 +119,8 @@ class TDTestCase(TBase): # stop taosd test taos as server sc.dnodeStop(idx) etool.exeBinFile("taos", f'-n server', wait=False) + time.sleep(3) + eos.exe("pkill -9 taos") # run def run(self): diff --git a/tests/army/community/query/query_basic.json b/tests/army/community/query/query_basic.json new file mode 100644 index 0000000000..c6a3482cd4 --- /dev/null +++ b/tests/army/community/query/query_basic.json @@ -0,0 +1,61 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 4000, + "prepared_rand": 10000, + "thread_count": 3, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 3, + "replica": 3, + "duration":"3d", + "wal_retention_period": 1, + "wal_retention_size": 1, + "stt_trigger": 1 + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 6, + "insert_rows": 100000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 30000, + "start_timestamp":"2023-10-01 10:00:00", + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti"}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" }, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { "type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { "type": "binary", "name": "bin", "len": 8}, + { "type": "nchar", "name": "nch", "len": 16} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} diff --git a/tests/army/community/query/query_basic.py b/tests/army/community/query/query_basic.py new file mode 100644 index 0000000000..90916a1c6c --- /dev/null +++ b/tests/army/community/query/query_basic.py @@ -0,0 +1,100 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import time +import random + +import taos +import frame +import frame.etool + + +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * + + +class TDTestCase(TBase): + updatecfgDict = { + "keepColumnName" : "1", + "ttlChangeOnWrite" : "1", + "querySmaOptimize": "1" + } + + def insertData(self): + tdLog.info(f"insert data.") + # taosBenchmark run + jfile = etool.curFile(__file__, "query_basic.json") + etool.benchMark(json=jfile) + + tdSql.execute(f"use {self.db}") + tdSql.execute("select database();") + # set insert data information + self.childtable_count = 6 + self.insert_rows = 100000 + self.timestamp_step = 30000 + + + def doQuery(self): + tdLog.info(f"do query.") + + # __group_key + sql = f"select count(*),_group_key(uti),uti from {self.stb} partition by uti" + tdSql.query(sql) + # column index 1 value same with 2 + tdSql.checkSameColumn(1, 2) + + sql = f"select count(*),_group_key(usi),usi from {self.stb} group by usi limit 100;" + tdSql.query(sql) + tdSql.checkSameColumn(1, 2) + + # tail + sql1 = "select ts,ui from d0 order by ts desc limit 5 offset 2;" + sql2 = "select ts,tail(ui,5,2) from d0;" + self.checkSameResult(sql1, sql2) + + # unique + sql1 = "select distinct uti from d0 order by uti;" + sql2 = "select UNIQUE(uti) from d0 order by uti asc;" + self.checkSameResult(sql1, sql2) + + # top + sql1 = "select top(bi,10) from stb;" + sql2 = "select bi from stb where bi is not null order by bi desc limit 10;" + self.checkSameResult(sql1, sql2) + + # run + def run(self): + tdLog.debug(f"start to execute {__file__}") + + # insert data + self.insertData() + + # check insert data correct + self.checkInsertCorrect() + + # check + self.checkConsistency("usi") + + # do action + self.doQuery() + + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/enterprise/s3/s3_basic.json b/tests/army/enterprise/s3/s3_basic.json index e56a18e757..d7544a897c 100644 --- a/tests/army/enterprise/s3/s3_basic.json +++ b/tests/army/enterprise/s3/s3_basic.json @@ -7,6 +7,7 @@ "password": "taosdata", "connection_pool_size": 8, "num_of_records_per_req": 2000, + "prepared_rand": 1000, "thread_count": 2, "create_table_thread_count": 1, "confirm_parameter_prompt": "no", diff --git a/tests/army/frame/autogen.py b/tests/army/frame/autogen.py index 9dca96e7b0..1e041f633d 100644 --- a/tests/army/frame/autogen.py +++ b/tests/army/frame/autogen.py @@ -14,11 +14,12 @@ import time # Auto Gen class # class AutoGen: - def __init__(self): + def __init__(self, fillOne=False): self.ts = 1600000000000 self.batch_size = 100 seed = time.time() % 10000 random.seed(seed) + self.fillOne = fillOne # set start ts def set_start_ts(self, ts): @@ -87,6 +88,23 @@ class AutoGen: return datas + # fill one data + def fillone_data(self, i, marr): + datas = "" + for c in marr: + if datas != "": + datas += "," + + if c == 0: + datas += "%d" % (self.ts + i) + elif c == 12 or c == 13: # binary + datas += '"1"' + else: + datas += '1' + + return datas + + # generate specail wide random string def random_string(self, count): letters = string.ascii_letters @@ -96,7 +114,6
@@ class AutoGen: def create_db(self, dbname, vgroups = 2, replica = 1): self.dbname = dbname tdSql.execute(f'create database {dbname} vgroups {vgroups} replica {replica}') - tdSql.execute(f'use {dbname}') # create table or stable def create_stable(self, stbname, tag_cnt, column_cnt, binary_len, nchar_len): @@ -106,7 +123,7 @@ class AutoGen: self.mtags, tags = self.gen_columns_sql("t", tag_cnt, binary_len, nchar_len) self.mcols, cols = self.gen_columns_sql("c", column_cnt - 1, binary_len, nchar_len) - sql = f"create table {stbname} (ts timestamp, {cols}) tags({tags})" + sql = f"create table {self.dbname}.{stbname} (ts timestamp, {cols}) tags({tags})" tdSql.execute(sql) # create child table @@ -115,7 +132,7 @@ class AutoGen: self.child_name = prename for i in range(cnt): tags_data = self.gen_data(i, self.mtags) - sql = f"create table {prename}{i} using {stbname} tags({tags_data})" + sql = f"create table {self.dbname}.{prename}{i} using {self.dbname}.{stbname} tags({tags_data})" tdSql.execute(sql) tdLog.info(f"create child tables {cnt} ok") @@ -127,17 +144,20 @@ class AutoGen: # loop do for i in range(cnt): - value = self.gen_data(i, self.mcols) + if self.fillOne : + value = self.fillone_data(i, self.mcols) + else: + value = self.gen_data(i, self.mcols) ts += step values += f"({ts},{value}) " if batch_size == 1 or (i > 0 and i % batch_size == 0) : - sql = f"insert into {child_name} values {values}" + sql = f"insert into {self.dbname}.{child_name} values {values}" tdSql.execute(sql) values = "" # end batch if values != "": - sql = f"insert into {child_name} values {values}" + sql = f"insert into {self.dbname}.{child_name} values {values}" tdSql.execute(sql) tdLog.info(f" insert data i={i}") values = "" @@ -159,5 +179,3 @@ class AutoGen: self.insert_data_child(name, cnt, self.batch_size, 0) tdLog.info(f" insert same timestamp ok, child table={self.child_cnt} insert rows={cnt}") - - diff --git a/tests/army/frame/caseBase.py b/tests/army/frame/caseBase.py index ec6b36aa1b..b1500b69e1 100644 --- a/tests/army/frame/caseBase.py +++ b/tests/army/frame/caseBase.py @@ -16,6 +16,7 @@ import os import time import datetime import random +import copy from frame.log import * from frame.sql import * @@ -135,8 +136,9 @@ class TBase: tdSql.checkAgg(sql, self.childtable_count) # check step - sql = f"select count(*) from (select diff(ts) as dif from {self.stb} partition by tbname) where dif != {self.timestamp_step}" - tdSql.checkAgg(sql, 0) + sql = f"select * from (select diff(ts) as dif from {self.stb} partition by tbname) where dif != {self.timestamp_step}" + tdSql.query(sql) + tdSql.checkRows(0) # save agg result def snapshotAgg(self): @@ -156,6 +158,71 @@ class TBase: tdSql.checkAgg(self.sqlFirst, self.first) tdSql.checkAgg(self.sqlLast, self.last) + # self check + def checkConsistency(self, col): + # top with max + sql = f"select max({col}) from {self.stb}" + expect = tdSql.getFirstValue(sql) + sql = f"select top({col}, 5) from {self.stb}" + tdSql.checkFirstValue(sql, expect) + + # bottom with min + sql = f"select min({col}) from {self.stb}" + expect = tdSql.getFirstValue(sql) + sql = f"select bottom({col}, 5) from {self.stb}" + tdSql.checkFirstValue(sql, expect) + + # order by desc limit 1 with last + sql = f"select last({col}) from {self.stb}" + expect = tdSql.getFirstValue(sql) + sql = f"select {col} from {self.stb} order by _c0 desc limit 1" + tdSql.checkFirstValue(sql, expect) + + # order by asc limit 1 with first + sql = f"select first({col}) from {self.stb}" + expect = tdSql.getFirstValue(sql) + sql
= f"select {col} from {self.stb} order by _c0 asc limit 1" + tdSql.checkFirstValue(sql, expect) + + + # check sql1 is same result with sql2 + def checkSameResult(self, sql1, sql2): + tdLog.info(f"sql1={sql1}") + tdLog.info(f"sql2={sql2}") + tdLog.info("compare sql1 same with sql2 ...") + + # sql + rows1 = tdSql.query(sql1,queryTimes=2) + res1 = copy.deepcopy(tdSql.res) + + tdSql.query(sql2,queryTimes=2) + res2 = tdSql.res + + rowlen1 = len(res1) + rowlen2 = len(res2) + errCnt = 0 + + if rowlen1 != rowlen2: + tdLog.exit(f"both row count not equal. rowlen1={rowlen1} rowlen2={rowlen2} ") + return False + + for i in range(rowlen1): + row1 = res1[i] + row2 = res2[i] + collen1 = len(row1) + collen2 = len(row2) + if collen1 != collen2: + tdLog.exit(f"both col count not equal. collen1={collen1} collen2={collen2}") + return False + for j in range(collen1): + if row1[j] != row2[j]: + tdLog.info(f"error both column value not equal. row={i} col={j} col1={row1[j]} col2={row2[j]} .") + errCnt += 1 + + if errCnt > 0: + tdLog.exit(f"sql2 column value different with sql1. different count ={errCnt} ") + + tdLog.info("sql1 same result with sql2.") # # get db information diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index 5cdb3f9f46..d11a0dbb4d 100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -795,7 +795,7 @@ class TDCom: def getOneRow(self, location, containElm): res_list = list() if 0 <= location < tdSql.queryRows: - for row in tdSql.queryResult: + for row in tdSql.res: if row[location] == containElm: res_list.append(row) return res_list @@ -943,7 +943,7 @@ class TDCom: """drop all streams """ tdSql.query("show streams") - stream_name_list = list(map(lambda x: x[0], tdSql.queryResult)) + stream_name_list = list(map(lambda x: x[0], tdSql.res)) for stream_name in stream_name_list: tdSql.execute(f'drop stream if exists {stream_name};') @@ -962,7 +962,7 @@ class TDCom: """drop all databases """ tdSql.query("show databases;") - db_list = list(map(lambda x: x[0], tdSql.queryResult)) + db_list = list(map(lambda x: x[0], tdSql.res)) for dbname in db_list: if dbname not in self.white_list and "telegraf" not in dbname: tdSql.execute(f'drop database if exists `{dbname}`') @@ -1412,7 +1412,7 @@ class TDCom: input_function (str): scalar """ tdSql.query(sql) - res = tdSql.queryResult + res = tdSql.res if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]: tdSql.checkEqual(res[1][1], "DOUBLE") tdSql.checkEqual(res[2][1], "DOUBLE") @@ -1490,7 +1490,7 @@ class TDCom: bigint: bigint-ts """ tdSql.query(f'select cast({str_ts} as bigint)') - return tdSql.queryResult[0][0] + return tdSql.res[0][0] def cast_query_data(self, query_data): """cast query-result for existed-stb @@ -1514,7 +1514,7 @@ class TDCom: tdSql.query(f'select cast("{v}" as binary(6))') else: tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})') - query_data_l[i] = tdSql.queryResult[0][0] + query_data_l[i] = tdSql.res[0][0] else: query_data_l[i] = v nl.append(tuple(query_data_l)) @@ -1566,9 +1566,9 @@ class TDCom: if tag_value_list: dvalue = len(self.tag_type_str.split(',')) - defined_tag_count tdSql.query(sql1) - res1 = tdSql.queryResult + res1 = tdSql.res tdSql.query(sql2) - res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res tdSql.sql = sql1 new_list = list() if tag_value_list: @@ -1601,10 
+1601,10 @@ class TDCom: tdLog.info("query retrying ...") new_list = list() tdSql.query(sql1) - res1 = tdSql.queryResult + res1 = tdSql.res tdSql.query(sql2) - # res2 = tdSql.queryResult - res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + # res2 = tdSql.res + res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res tdSql.sql = sql1 if tag_value_list: @@ -1643,10 +1643,10 @@ class TDCom: tdLog.info("query retrying ...") new_list = list() tdSql.query(sql1) - res1 = tdSql.queryResult + res1 = tdSql.res tdSql.query(sql2) - # res2 = tdSql.queryResult - res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + # res2 = tdSql.res + res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res tdSql.sql = sql1 if tag_value_list: diff --git a/tests/army/frame/server/cluster.py b/tests/army/frame/server/cluster.py index ade8ac39a2..aba2d2e630 100644 --- a/tests/army/frame/server/cluster.py +++ b/tests/army/frame/server/cluster.py @@ -86,10 +86,9 @@ class ConfigureyCluster: count=0 while count < 5: tdSql.query("select * from information_schema.ins_dnodes") - # tdLog.debug(tdSql.queryResult) status=0 for i in range(self.dnodeNums): - if tdSql.queryResult[i][4] == "ready": + if tdSql.res[i][4] == "ready": status+=1 # tdLog.debug(status) diff --git a/tests/army/frame/sql.py b/tests/army/frame/sql.py index eafae9be2d..8b7538321d 100644 --- a/tests/army/frame/sql.py +++ b/tests/army/frame/sql.py @@ -78,6 +78,107 @@ class TDSql: self.cursor.execute(s) time.sleep(2) + + # + # do execute + # + + def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): + self.sql = sql + i=1 + while i <= queryTimes: + try: + self.cursor.execute(sql) + self.res = self.cursor.fetchall() + self.queryRows = len(self.res) + self.queryCols = len(self.cursor.description) + + if count_expected_res is not None: + counter = 0 + while count_expected_res != self.res[0][0]: + self.cursor.execute(sql) + self.res = self.cursor.fetchall() + if counter < queryTimes: + counter += 0.5 + time.sleep(0.5) + else: + return False + if row_tag: + return self.res + return self.queryRows + except Exception as e: + tdLog.notice("Try to query again, query times: %d "%i) + if i == queryTimes: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + i+=1 + time.sleep(1) + pass + + def executeTimes(self, sql, times): + for i in range(times): + try: + return self.cursor.execute(sql) + except BaseException: + time.sleep(1) + continue + + def execute(self, sql, queryTimes=30, show=False): + self.sql = sql + if show: + tdLog.info(sql) + i=1 + while i <= queryTimes: + try: + self.affectedRows = self.cursor.execute(sql) + return self.affectedRows + except Exception as e: + tdLog.notice("Try to execute sql again, query times: %d "%i) + if i == queryTimes: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + i+=1 + time.sleep(1) + pass + + # execute many sql + def executes(self, sqls, queryTimes=30, show=False): + for sql in sqls: + self.execute(sql, queryTimes, show) + + def waitedQuery(self, sql, expectRows, timeout): + tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) + 
self.sql = sql + try: + for i in range(timeout): + self.cursor.execute(sql) + self.res = self.cursor.fetchall() + self.queryRows = len(self.res) + self.queryCols = len(self.cursor.description) + tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) + if self.queryRows >= expectRows: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return (self.queryRows, timeout) + + def is_err_sql(self, sql): + err_flag = True + try: + self.cursor.execute(sql) + except BaseException: + err_flag = False + + return False if err_flag else True + def error(self, sql, expectedErrno = None, expectErrInfo = None): caller = inspect.getframeinfo(inspect.stack()[1][0]) expectErrNotOccured = True @@ -95,7 +196,7 @@ class TDSql: else: self.queryRows = 0 self.queryCols = 0 - self.queryResult = None + self.res = None if expectedErrno != None: if expectedErrno == self.errno: @@ -115,49 +216,25 @@ class TDSql: return self.error_info - def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): + # + # get session + # + + def getData(self, row, col): + self.checkRowCol(row, col) + return self.res[row][col] + + def getResult(self, sql): self.sql = sql - i=1 - while i <= queryTimes: - try: - self.cursor.execute(sql) - self.queryResult = self.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(self.cursor.description) - - if count_expected_res is not None: - counter = 0 - while count_expected_res != self.queryResult[0][0]: - self.cursor.execute(sql) - self.queryResult = self.cursor.fetchall() - if counter < queryTimes: - counter += 0.5 - time.sleep(0.5) - else: - return False - if row_tag: - return self.queryResult - return self.queryRows - except Exception as e: - tdLog.notice("Try to query again, query times: %d "%i) - if i == queryTimes: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, sql, repr(e)) - tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) - i+=1 - time.sleep(1) - pass - - - def is_err_sql(self, sql): - err_flag = True try: self.cursor.execute(sql) - except BaseException: - err_flag = False - - return False if err_flag else True + self.res = self.cursor.fetchall() + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return self.res def getVariable(self, search_attr): ''' @@ -193,29 +270,19 @@ class TDSql: return col_name_list, col_type_list return col_name_list - def waitedQuery(self, sql, expectRows, timeout): - tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) - self.sql = sql - try: - for i in range(timeout): - self.cursor.execute(sql) - self.queryResult = self.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(self.cursor.description) - tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) - if self.queryRows >= expectRows: - return (self.queryRows, i) - time.sleep(1) - except Exception as e: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, sql, repr(e)) - tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise 
Exception(repr(e)) - return (self.queryRows, timeout) - def getRows(self): return self.queryRows + # get first value + def getFirstValue(self, sql) : + self.query(sql) + return self.getData(0, 0) + + + # + # check session + # + def checkRows(self, expectRows): if self.queryRows == expectRows: tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows)) @@ -273,26 +340,26 @@ class TDSql: self.checkRowCol(row, col) - if self.queryResult[row][col] != data: + if self.res[row][col] != data: if self.cursor.istype(col, "TIMESTAMP"): # suppose user want to check nanosecond timestamp if a longer data passed`` if isinstance(data,str) : if (len(data) >= 28): - if self.queryResult[row][col] == _parse_ns_timestamp(data): + if self.res[row][col] == _parse_ns_timestamp(data): if(show): tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) else: - if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc): - # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + if self.res[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc): + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}") if(show): tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) return elif isinstance(data,int): @@ -304,72 +371,72 @@ class TDSql: precision = 'ns' else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) return success = False if precision == 'ms': - dt_obj = self.queryResult[row][col] + dt_obj = self.res[row][col] ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000) if ts == data: success = True elif precision == 'us': - dt_obj = self.queryResult[row][col] + dt_obj = self.res[row][col] ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond) if ts == data: success = True elif precision == 'ns': - if data == self.queryResult[row][col]: + if data == self.res[row][col]: success = True if success: if(show): tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) return elif isinstance(data,datetime.datetime): - dt_obj = self.queryResult[row][col] + dt_obj = self.res[row][col] delt_data = 
data-datetime.datetime.fromtimestamp(0,data.tzinfo) - delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo) + delt_result = self.res[row][col] - datetime.datetime.fromtimestamp(0,self.res[row][col].tzinfo) if delt_data == delt_result: if(show): tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) return else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) - if str(self.queryResult[row][col]) == str(data): - # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + if str(self.res[row][col]) == str(data): + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}") if(show): tdLog.info("check successfully") return elif isinstance(data, float): - if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: - # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + if abs(data) >= 1 and abs((self.res[row][col] - data) / data) <= 0.000001: + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}") if(show): tdLog.info("check successfully") - elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: - # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") + elif abs(data) < 1 and abs(self.res[row][col] - data) <= 0.000001: + # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}") if(show): tdLog.info("check successfully") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) return else: caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) + args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) if(show): tdLog.info("check successfully") @@ -397,23 +464,23 @@ class TDSql: def checkDataNoExit(self, row, col, data): if self.checkRowColNoExit(row, col) == False: return False - if self.queryResult[row][col] != data: + if self.res[row][col] != data: if self.cursor.istype(col, "TIMESTAMP"): # suppose user want to check nanosecond timestamp if a longer data passed if (len(data) >= 28): - if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data): + if pd.to_datetime(self.res[row][col]) == pd.to_datetime(data): return True else: - if self.queryResult[row][col] == _parse_datetime(data): + if self.res[row][col] == _parse_datetime(data): return True return False - if str(self.queryResult[row][col]) == str(data): + if 
str(self.res[row][col]) == str(data): return True elif isinstance(data, float): - if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: + if abs(data) >= 1 and abs((self.res[row][col] - data) / data) <= 0.000001: return True - elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: + elif abs(data) < 1 and abs(self.res[row][col] - data) <= 0.000001: return True else: return False @@ -437,56 +504,6 @@ class TDSql: self.query(sql) self.checkData(row, col, data) - - def getData(self, row, col): - self.checkRowCol(row, col) - return self.queryResult[row][col] - - def getResult(self, sql): - self.sql = sql - try: - self.cursor.execute(sql) - self.queryResult = self.cursor.fetchall() - except Exception as e: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, sql, repr(e)) - tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) - return self.queryResult - - def executeTimes(self, sql, times): - for i in range(times): - try: - return self.cursor.execute(sql) - except BaseException: - time.sleep(1) - continue - - def execute(self, sql, queryTimes=30, show=False): - self.sql = sql - if show: - tdLog.info(sql) - i=1 - while i <= queryTimes: - try: - self.affectedRows = self.cursor.execute(sql) - return self.affectedRows - except Exception as e: - tdLog.notice("Try to execute sql again, query times: %d "%i) - if i == queryTimes: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, sql, repr(e)) - tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) - i+=1 - time.sleep(1) - pass - - # execute many sql - def executes(self, sqls, queryTimes=30, show=False): - for sql in sqls: - self.execute(sql, queryTimes, show) - def checkAffectedRows(self, expectAffectedRows): if self.affectedRows != expectAffectedRows: caller = inspect.getframeinfo(inspect.stack()[1][0]) @@ -544,11 +561,22 @@ class TDSql: def checkAgg(self, sql, expectCnt): self.query(sql) self.checkData(0, 0, expectCnt) - - # get first value - def getFirstValue(self, sql) : + + # expect first value + def checkFirstValue(self, sql, expect): self.query(sql) - return self.getData(0, 0) + self.checkData(0, 0, expect) + + # colIdx1 value same with colIdx2 + def checkSameColumn(self, c1, c2): + for i in range(self.queryRows): + if self.res[i][c1] != self.res[i][c2]: + tdLog.exit(f"Not same. row={i} col1={c1} col2={c2}. {self.res[i][c1]}!={self.res[i][c2]}") + tdLog.info(f"check {self.queryRows} rows two column value same. 
column index [{c1},{c2}]") + + # + # others session + # def get_times(self, time_str, precision="ms"): caller = inspect.getframeinfo(inspect.stack()[1][0]) diff --git a/tests/army/test.py b/tests/army/test.py index a3e28b772d..a94fefe0be 100644 --- a/tests/army/test.py +++ b/tests/army/test.py @@ -560,17 +560,6 @@ if __name__ == "__main__": conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") else: conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) - # tdSql.init(conn.cursor()) - # tdSql.execute("create qnode on dnode 1") - # tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy) - # tdSql.query("show local variables;") - # for i in range(tdSql.queryRows): - # if tdSql.queryResult[i][0] == "queryPolicy" : - # if int(tdSql.queryResult[i][1]) == int(queryPolicy): - # tdLog.info('alter queryPolicy to %d successfully'%queryPolicy) - # else : - # tdLog.debug(tdSql.queryResult) - # tdLog.exit("alter queryPolicy to %d failed"%queryPolicy) cursor = conn.cursor() cursor.execute("create qnode on dnode 1") diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index d52059bc08..1e99b219e0 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -3,7 +3,13 @@ #NA,NA,y or n,script,./test.sh -f tsim/user/basic.sim #unit-test + +archOs=$(arch) +if [[ $archOs =~ "aarch64" ]]; then +,,n,unit-test,bash test.sh +else ,,y,unit-test,bash test.sh +fi # # army-test @@ -11,6 +17,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f enterprise/multi-level/mlevel_basic.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1 ,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2 +,,y,army,./pytest.sh python3 ./test.py -f community/query/query_basic.py -N 3 ,,n,army,python3 ./test.py -f community/cmdline/fullopt.py # @@ -34,8 +41,9 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py #,,n,system-test,python3 ./test.py -f 8-stream/vnode_restart.py -N 4 #,,n,system-test,python3 ./test.py -f 8-stream/snode_restart.py -N 4 -#,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 +,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/project_group.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tbname_vgroup.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_interval.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/compact-col.py @@ -146,6 +154,10 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False @@ -422,6 +434,11 @@ e ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -R 
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py @@ -1235,7 +1252,9 @@ e ,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim ,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim -,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim + +### refactor stream backend, open case after rsma refactored +#,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim ,,y,script,./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim ,,n,script,./test.sh -f tsim/valgrind/checkError1.sim ,,n,script,./test.sh -f tsim/valgrind/checkError2.sim diff --git a/tests/pytest/util/autogen.py b/tests/pytest/util/autogen.py index e1227a680f..72e64dc448 100644 --- a/tests/pytest/util/autogen.py +++ b/tests/pytest/util/autogen.py @@ -14,11 +14,12 @@ import time # Auto Gen class # class AutoGen: - def __init__(self): + def __init__(self, fillOne=False): self.ts = 1600000000000 self.batch_size = 100 seed = time.time() % 10000 random.seed(seed) + self.fillOne = fillOne # set start ts def set_start_ts(self, ts): @@ -87,6 +88,23 @@ class AutoGen: return datas + # fill one data + def fillone_data(self, i, marr): + datas = "" + for c in marr: + if datas != "": + datas += "," + + if c == 0: + datas += "%d" % (self.ts + i) + elif c == 12 or c == 13: # binary + datas += '"1"' + else: + datas += '1' + + return datas + + # generate specail wide random string def random_string(self, count): letters = string.ascii_letters @@ -96,7 +114,7 @@ class AutoGen: def create_db(self, dbname, vgroups = 2, replica = 1): self.dbname = dbname tdSql.execute(f'create database {dbname} vgroups {vgroups} replica {replica}') - tdSql.execute(f'use {dbname}') + tdSql.execute(f"use {dbname}") # create table or stable def create_stable(self, stbname, tag_cnt, column_cnt, binary_len, nchar_len): @@ -106,7 +124,7 @@ class AutoGen: self.mtags, tags = self.gen_columns_sql("t", tag_cnt, binary_len, nchar_len) self.mcols, cols = self.gen_columns_sql("c", column_cnt - 1, binary_len, nchar_len) - sql = f"create table {stbname} (ts timestamp, {cols}) tags({tags})" + sql = f"create table {self.dbname}.{stbname} (ts timestamp, {cols}) tags({tags})" tdSql.execute(sql) # create child table @@ -115,7 +133,7 @@ class AutoGen: self.child_name = prename for i in range(cnt): tags_data = self.gen_data(i, self.mtags) - sql = f"create table {prename}{i} using {stbname} tags({tags_data})" + sql = f"create table {self.dbname}.{prename}{i} using {self.dbname}.{stbname} tags({tags_data})" tdSql.execute(sql) tdLog.info(f"create child tables {cnt} ok") @@ -127,17 +145,20 @@ class AutoGen: # loop do for i in range(cnt): - value = self.gen_data(i, self.mcols) + if self.fillOne : + value = self.fillone_data(i, self.mcols) + else: + value = self.gen_data(i, self.mcols) ts += step values += f"({ts},{value}) " if batch_size == 1 or (i > 0 and i % batch_size == 0) : - sql = f"insert into {child_name} values {values}" + sql = f"insert 
into {self.dbname}.{child_name} values {values}" tdSql.execute(sql) values = "" # end batch if values != "": - sql = f"insert into {child_name} values {values}" + sql = f"insert into {self.dbname}.{child_name} values {values}" tdSql.execute(sql) tdLog.info(f" insert data i={i}") values = "" diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 010f45a573..c4885747d1 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -1845,6 +1845,23 @@ class TDCom: if i == 1: self.record_history_ts = ts_value + def get_subtable(self, tbname_pre): + tdSql.query(f'show tables') + tbname_list = list(map(lambda x:x[0], tdSql.queryResult)) + for tbname in tbname_list: + if tbname_pre in tbname: + return tbname + + def get_subtable_wait(self, tbname_pre): + tbname = self.get_subtable(tbname_pre) + latency = 0 + while tbname is None: + tbname = self.get_subtable(tbname_pre) + if latency < self.stream_timeout: + latency += 1 + time.sleep(1) + return tbname + def is_json(msg): if isinstance(msg, str): try: diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 000040d958..62af9a1af9 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -88,7 +88,7 @@ class TDSql: self.affectedRows = self.cursor.execute(sql) return self.affectedRows except Exception as e: - tdLog.notice("Try to execute sql again, query times: %d "%i) + tdLog.notice("Try to execute sql again, execute times: %d "%i) if i == queryTimes: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, sql, repr(e)) diff --git a/tests/script/sh/checkAsan.sh b/tests/script/sh/checkAsan.sh index 19057d8834..9f67d437e2 100755 --- a/tests/script/sh/checkAsan.sh +++ b/tests/script/sh/checkAsan.sh @@ -3,22 +3,22 @@ set +e #set -x if [[ "$OSTYPE" == "darwin"* ]]; then - TD_OS="Darwin" + TD_OS="Darwin" else - OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2) - len=$(echo ${#OS}) - len=$((len-2)) - TD_OS=$(echo -ne ${OS:1:${len}} | cut -d" " -f1) + OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2) + len=$(echo ${#OS}) + len=$((len - 2)) + TD_OS=$(echo -ne ${OS:1:${len}} | cut -d" " -f1) fi if [[ "$TD_OS" == "Alpine" ]]; then - echo -e "os is Alpine,skip check Asan" - exit 0 + echo -e "os is Alpine,skip check Asan" + exit 0 fi unset LD_PRELOAD -SCRIPT_DIR=`dirname $0` +SCRIPT_DIR=$(dirname $0) cd $SCRIPT_DIR/../ -SCRIPT_DIR=`pwd` +SCRIPT_DIR=$(pwd) IN_TDINTERNAL="community" if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then @@ -27,31 +27,28 @@ else cd ../../ fi -TAOS_DIR=`pwd` +TAOS_DIR=$(pwd) LOG_DIR=$TAOS_DIR/sim/asan +error_num=$(cat ${LOG_DIR}/*.asan | grep "ERROR" | wc -l) - -error_num=`cat ${LOG_DIR}/*.asan | grep "ERROR" | wc -l` - -archOs=`arch` -if [[ $archOs =~ "aarch64" ]]; then - echo "arm64 check mem leak" - memory_leak=`cat ${LOG_DIR}/*.asan | grep "Direct leak" | grep -v "Direct leak of 32 byte"| wc -l` - memory_count=`cat ${LOG_DIR}/*.asan | grep "Direct leak of 32 byte"| wc -l` +archOs=$(arch) +if [[ $archOs =~ "aarch64" ]]; then + echo "arm64 check mem leak" + memory_leak=$(cat ${LOG_DIR}/*.asan | grep "Direct leak" | grep -v "Direct leak of 32 byte" | wc -l) + memory_count=$(cat ${LOG_DIR}/*.asan | grep "Direct leak of 32 byte" | wc -l) if [ $memory_count -eq $error_num ] && [ $memory_leak -eq 0 ]; then echo "reset error_num to 0, ignore: __cxa_thread_atexit_impl leak" error_num=0 - fi + fi else echo "os check mem leak" - memory_leak=`cat ${LOG_DIR}/*.asan | grep "Direct leak" | wc -l` + memory_leak=$(cat ${LOG_DIR}/*.asan | 
grep "Direct leak" | wc -l) fi - -indirect_leak=`cat ${LOG_DIR}/*.asan | grep "Indirect leak" | wc -l` -python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l` +indirect_leak=$(cat ${LOG_DIR}/*.asan | grep "Indirect leak" | wc -l) +python_error=$(cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l) # ignore @@ -64,18 +61,18 @@ python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l` # /root/TDengine/source/libs/scalar/src/sclfunc.c:772:11: runtime error: 3.52344e+09 is outside the range of representable values of type 'int' # /root/TDengine/source/libs/scalar/src/sclfunc.c:753:11: runtime error: 4.75783e+11 is outside the range of representable values of type 'short int' -# TD-20569 +# TD-20569 # /root/TDengine/source/libs/function/src/builtinsimpl.c:856:29: runtime error: signed integer overflow: 9223372036854775806 + 9223372036854775805 cannot be represented in type 'long int' # /root/TDengine/source/libs/scalar/src/sclvector.c:1075:66: runtime error: signed integer overflow: 9223372034707292160 + 1668838476672 cannot be represented in type 'long int' # /root/TDengine/source/common/src/tdataformat.c:1876:7: runtime error: signed integer overflow: 8252423483843671206 + 2406154664059062870 cannot be represented in type 'long int' # /home/chr/TDengine/source/libs/scalar/src/filter.c:3149:14: runtime error: applying non-zero offset 18446744073709551615 to null pointer -# /home/chr/TDengine/source/libs/scalar/src/filter.c:3149:14: runtime error: applying non-zero offset 18446744073709551615 to null pointer +# /home/chr/TDengine/source/libs/scalar/src/filter.c:3149:14: runtime error: applying non-zero offset 18446744073709551615 to null pointer # /home/TDinternal/community/source/libs/scalar/src/sclvector.c:1109:66: runtime error: signed integer overflow: 9223372034707292160 + 1676867897049 cannot be represented in type 'long int' - + #0 0x7f2d64f5a808 in __interceptor_malloc ../../../../src/libsanitizer/asan/asan_malloc_linux.cc:144 #1 0x7f2d63fcf459 in strerror /build/glibc-SzIz7B/glibc-2.31/string/strerror.c:38 -runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type"| grep -v "signed integer overflow" |grep -v "strerror.c"| grep -v "asan_malloc_linux.cc" |grep -v "strerror.c"|wc -l` +runtime_error=$(cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type" | grep -v "signed integer overflow" | grep -v "strerror.c" | grep -v "asan_malloc_linux.cc" | grep -v "strerror.c" | wc -l) echo -e "\033[44;32;1m"asan error_num: $error_num"\033[0m" echo -e "\033[44;32;1m"asan memory_leak: $memory_leak"\033[0m" @@ -86,10 +83,10 @@ echo -e "\033[44;32;1m"asan python error: $python_error"\033[0m" let "errors=$error_num+$memory_leak+$indirect_leak+$runtime_error+$python_error" if [ $errors -eq 0 ]; then - echo -e "\033[44;32;1m"no asan errors"\033[0m" + echo -e "\033[44;32;1m"no asan errors"\033[0m" exit 0 else - echo -e "\033[44;31;1m"asan total errors: $errors"\033[0m" + echo -e "\033[44;31;1m"asan total errors: $errors"\033[0m" if [ $python_error -ne 0 ]; then cat ${LOG_DIR}/*.info fi diff --git a/tests/script/tsim/parser/condition_query.sim b/tests/script/tsim/parser/condition_query.sim index d9bfcb8074..660e91dbfd 100644 --- a/tests/script/tsim/parser/condition_query.sim +++ b/tests/script/tsim/parser/condition_query.sim @@ -1505,7 +1505,7 @@ if $data10 != @21-05-05 18:19:21.000@ then return -1 endi -sql select 
a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by a.ts; if $rows != 4 then return -1 endi @@ -1521,8 +1521,9 @@ endi if $data30 != @21-05-05 18:19:14.000@ then return -1 endi +sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts; -sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by a.ts; if $rows != 3 then return -1 endi @@ -1535,6 +1536,8 @@ endi if $data20 != @21-05-05 18:19:10.000@ then return -1 endi +sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts; +sql select a.ts,a.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts; sql select * from stb1 where c1 is null and c1 is not null; if $rows != 0 then @@ -2469,7 +2472,7 @@ endi if $data10 != @21-05-05 18:19:05.000@ then return -1 endi -sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.u1 < 5 order by ts; +sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.u1 < 5 order by tb1.ts; if $rows != 2 then return -1 endi @@ -2480,7 +2483,7 @@ if $data10 != @21-05-05 18:19:06.000@ then return -1 endi -sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by ts; +sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by tb1.ts; if $rows != 2 then return -1 endi @@ -2491,7 +2494,7 @@ if $data10 != @21-05-05 18:19:07.000@ then return -1 endi -sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by ts; +sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by stb1.ts; if $rows != 9 then return -1 endi @@ -2523,7 +2526,7 @@ if $data80 != @21-05-05 18:19:11.000@ then return -1 endi -sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 and stb1.c1 > 2 and stb2.u1 <=4 order by ts; +sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 and stb1.c1 > 2 and stb2.u1 <=4 order by stb1.ts; if $rows != 3 then return -1 endi diff --git a/tests/script/tsim/query/cache_last.sim b/tests/script/tsim/query/cache_last.sim index 8247a2f723..65eb46de69 100644 --- a/tests/script/tsim/query/cache_last.sim +++ b/tests/script/tsim/query/cache_last.sim @@ -55,7 +55,7 @@ if $rows != 1 then return -1 endi sql explain select count(*), 
last_row(f1), last(f1) from sta; -if $data00 != @-> Aggragate (functions=3 width=24 input_order=desc )@ then +if $data00 != @-> Merge (columns=3 width=24 input_order=unknown output_order=unknown mode=column)@ then return -1 endi sql_error select count(*), last_row(f1), min(f1), f1 from sta; diff --git a/tests/script/tsim/stream/udTableAndTag0.sim b/tests/script/tsim/stream/udTableAndTag0.sim index c81927abcb..e8b51a3846 100644 --- a/tests/script/tsim/stream/udTableAndTag0.sim +++ b/tests/script/tsim/stream/udTableAndTag0.sim @@ -48,16 +48,6 @@ if $rows != 2 then goto loop0 endi -if $data00 != aaa-t1 then - print =====data00=$data00 - goto loop0 -endi - -if $data10 != aaa-t2 then - print =====data10=$data10 - goto loop0 -endi - $loop_count = 0 loop1: @@ -264,17 +254,6 @@ if $rows != 2 then goto loop6 endi -if $data00 != tbn-t1 then - print =====data00=$data00 - goto loop6 -endi - -if $data10 != tbn-t2 then - print =====data10=$data10 - goto loop6 -endi - - print ===== step5 print ===== tag name + table name @@ -312,21 +291,6 @@ if $rows != 3 then goto loop7 endi -if $data00 != tbn-t1 then - print =====data00=$data00 - goto loop7 -endi - -if $data10 != tbn-t2 then - print =====data10=$data10 - goto loop7 -endi - -if $data20 != tbn-t3 then - print =====data20=$data20 - goto loop7 -endi - $loop_count = 0 loop8: diff --git a/tests/script/tsim/stream/udTableAndTag1.sim b/tests/script/tsim/stream/udTableAndTag1.sim index e9dfbaabcf..e6427aab10 100644 --- a/tests/script/tsim/stream/udTableAndTag1.sim +++ b/tests/script/tsim/stream/udTableAndTag1.sim @@ -47,16 +47,6 @@ if $rows != 2 then goto loop0 endi -if $data00 != aaa-1 then - print =====data00=$data00 - goto loop0 -endi - -if $data10 != aaa-2 then - print =====data10=$data10 - goto loop0 -endi - $loop_count = 0 loop1: @@ -264,16 +254,6 @@ if $rows != 2 then goto loop6 endi -if $data00 != tbn-1 then - print =====data00=$data00 - goto loop6 -endi - -if $data10 != tbn-2 then - print =====data10=$data10 - goto loop6 -endi - print ===== step5 print ===== tag name + table name @@ -311,21 +291,6 @@ if $rows != 3 then goto loop7 endi -if $data00 != tbn-t1 then - print =====data00=$data00 - goto loop7 -endi - -if $data10 != tbn-t2 then - print =====data10=$data10 - goto loop7 -endi - -if $data20 != tbn-t3 then - print =====data20=$data20 - goto loop7 -endi - $loop_count = 0 loop8: diff --git a/tests/script/tsim/stream/udTableAndTag2.sim b/tests/script/tsim/stream/udTableAndTag2.sim index 973c55b9ef..8977dc219b 100644 --- a/tests/script/tsim/stream/udTableAndTag2.sim +++ b/tests/script/tsim/stream/udTableAndTag2.sim @@ -415,11 +415,6 @@ if $data00 != aaa then goto loop9 endi -if $data10 != aaa-1 then - print =====data00=$data00 - goto loop9 -endi - $loop_count = 0 loop10: diff --git a/tests/script/tsim/valgrind/checkError8.sim b/tests/script/tsim/valgrind/checkError8.sim index 2f204768eb..3942765eb7 100644 --- a/tests/script/tsim/valgrind/checkError8.sim +++ b/tests/script/tsim/valgrind/checkError8.sim @@ -129,10 +129,10 @@ sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * f sql select * from stb1 where (c6 > 3.0 or c6 < 60) and c6 > 50 and (c6 != 53 or c6 != 63);; sql select ts,c1 from stb1 where (c1 > 60 or c1 < 10 or (c1 > 20 and c1 < 30)) and ts > '2021-05-05 18:19:00.000' and ts < '2021-05-05 18:19:25.000' and c1 != 21 and c1 != 22 order by ts; sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts;; -sql select a.ts,b.ts,a.c1,b.u1,b.u2 
from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts;; -sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts;; -sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by ts;; -sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by ts;; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by a.ts;; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by a.ts;; +sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by tb1.ts;; +sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by stb1.ts;; sql select count(*) from stb1 where tbname like 'tb%' or c1 > 0;; sql select * from stb1 where tbname like 'tb%' and (t1=1 or t2=2 or t3=3) and t1 > 2 order by ts;; diff --git a/tests/script/win-test-file b/tests/script/win-test-file index 744415c89f..b9f250927f 100644 --- a/tests/script/win-test-file +++ b/tests/script/win-test-file @@ -280,7 +280,9 @@ ./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim -./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim + +### refactor stream backend, open case after rsma refactored +#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim ./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim ./test.sh -f tsim/valgrind/checkError1.sim ./test.sh -f tsim/valgrind/checkError2.sim diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py index 1e6b26ada0..b16f511491 100644 --- a/tests/system-test/2-query/csum.py +++ b/tests/system-test/2-query/csum.py @@ -162,6 +162,16 @@ class TDTestCase: case6 = {"col": "c9"} self.checkcsum(**case6) + # unsigned check + case61 = {"col": "c11"} + self.checkcsum(**case61) + case62 = {"col": "c12"} + self.checkcsum(**case62) + case63 = {"col": "c13"} + self.checkcsum(**case63) + case64 = {"col": "c14"} + self.checkcsum(**case64) + # case7~8: nested query case7 = {"table_expr": "(select ts,c1 from db.stb1 order by ts, tbname )"} self.checkcsum(**case7) @@ -317,14 +327,14 @@ class TDTestCase: f"insert into t{i} values (" f"{basetime + (j+1)*10 + i * msec_per_min}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " - f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}', {j},{j},{j},{j} )" ) tdSql.execute( f"insert into t{i} values (" f"{basetime - (j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " - f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + 
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1', {j*2},{j*2},{j*2},{j*2} )" ) tdSql.execute( f"insert into tt{i} values ( {basetime-(j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)} )" @@ -340,8 +350,8 @@ class TDTestCase: tdSql.execute( "create stable db.stb1 (\ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ - c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ - ) \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16),\ + c11 int unsigned, c12 smallint unsigned, c13 tinyint unsigned, c14 bigint unsigned) \ tags(st1 int)" ) tdSql.execute( @@ -373,10 +383,10 @@ class TDTestCase: tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") self.csum_test_table(tbnum) - tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values " - f"({nowtime - (per_table_rows + 1) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") - tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values " - f"({nowtime - (per_table_rows + 2) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7,c11) values " + f"({nowtime - (per_table_rows + 1) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1}, 128)") + tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7,c11) values " + f"({nowtime - (per_table_rows + 2) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1}, 129)") self.csum_current_query() self.csum_error_query() @@ -460,7 +470,7 @@ class TDTestCase: tdSql.checkRows(40) # bug need fix - tdSql.query("select tbname , csum(c1) from db.stb1 partition by tbname") + tdSql.query("select tbname , csum(c1), csum(c12) from db.stb1 partition by tbname") tdSql.checkRows(40) tdSql.query("select tbname , csum(st1) from db.stb1 partition by tbname") tdSql.checkRows(70) @@ -468,7 +478,7 @@ class TDTestCase: tdSql.checkRows(7) # partition by tags - tdSql.query("select st1 , csum(c1) from db.stb1 partition by st1") + tdSql.query("select st1 , csum(c1), csum(c13) from db.stb1 partition by st1") tdSql.checkRows(40) tdSql.query("select csum(c1) from db.stb1 partition by st1") tdSql.checkRows(40) diff --git a/tests/system-test/2-query/last_and_last_row.py b/tests/system-test/2-query/last_and_last_row.py new file mode 100644 index 0000000000..b04b3a75f3 --- /dev/null +++ b/tests/system-test/2-query/last_and_last_row.py @@ -0,0 +1,660 @@ +import datetime +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from math import inf + + +class TDTestCase: + def init(self, conn, logSql, replicaVer=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + + def check_explain_res_has_row(self, plan_str_expect: str, rows, sql): + plan_found = False + for row in rows: + if str(row).find(plan_str_expect) >= 0: + tdLog.debug("plan: [%s] found in: [%s]" % (plan_str_expect, str(row))) + plan_found = True + break + if not plan_found: + tdLog.exit("plan: %s not found in res: [%s] in sql: %s" % (plan_str_expect, str(rows), sql)) + + def check_explain_res_no_row(self, plan_str_not_expect: str, res, sql): + for row in res: + if str(row).find(plan_str_not_expect) >= 0: + tdLog.exit('plan: [%s] found in: [%s] for sql: %s' % (plan_str_not_expect, str(row), sql)) + + def explain_sql(self, sql: str): + sql = "explain " + sql + tdSql.query(sql, queryTimes=1) + return tdSql.queryResult + + def explain_and_check_res(self, sqls, 
hasLastRowScanRes): + for sql, has_last in zip(sqls, hasLastRowScanRes): + res = self.explain_sql(sql) + if has_last == 1: + self.check_explain_res_has_row("Last Row Scan", res, sql) + else: + self.check_explain_res_no_row("Last Row Scan", res, sql) + + def none_model_test(self): + tdSql.execute("drop database if exists last_test_none_model ;") + tdSql.execute("create database last_test_none_model cachemodel 'none';") + tdSql.execute("use last_test_none_model;") + tdSql.execute("create stable last_test_none_model.st(ts timestamp, id int) tags(tid int);") + tdSql.execute("create table last_test_none_model.test_t1 using last_test_none_model.st tags(1);") + tdSql.execute("create table last_test_none_model.test_t2 using last_test_none_model.st tags(2);") + tdSql.execute("create table last_test_none_model.test_t3 using last_test_none_model.st tags(3);") + tdSql.execute("create table last_test_none_model.test_t4 using last_test_none_model.st tags(4);") + + maxRange = 100 + # 2023-11-13 00:00:00.000 + startTs = 1699804800000 + for i in range(maxRange): + insertSqlString = "insert into last_test_none_model.test_t1 values(%d, %d);" % (startTs + i, i) + tdSql.execute(insertSqlString) + + last_ts = startTs + maxRange + tdSql.execute("insert into last_test_none_model.test_t1 (ts) values(%d)" % (last_ts)) + sql = f'select last_row(ts), last(*) from last_test_none_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_no_row("Last Row Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_none_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, None) + tdSql.checkData(0, 3, maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_no_row("Last Row Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from last_test_none_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, maxRange - 1) + tdSql.checkData(0, 2, last_ts) + tdSql.checkData(0, 3, maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_no_row("Last Row Scan", explain_res, sql) + + tdSql.error(f'select last(*), last_row(ts), ts from last_test_none_model.test_t1;') + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_none_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange + 1) + tdSql.checkData(0, 3, None) + tdSql.checkData(0, 4, maxRange - 1) + tdSql.checkData(0, 5, last_ts) + tdSql.checkData(0, 6, maxRange - 1) + + + startTs2 = startTs + 86400000 + for i in range(maxRange): + i = i + 2 * maxRange + insertSqlString = "insert into last_test_none_model.test_t2 values(%d, %d);" % (startTs2 + i, i) + tdSql.execute(insertSqlString) + last_ts2 = startTs2 + maxRange + + startTs3 = startTs + 2 * 86400000 + for i in range(maxRange): + i = i + 3 * maxRange + insertSqlString = "insert into last_test_none_model.test_t3 values(%d, %d);" % (startTs3 + i, i) + tdSql.execute(insertSqlString) + last_ts3 = startTs3 + 4 * maxRange - 1 + + startTs4 = startTs + 3 * 86400000 + for i in range(maxRange): + i = i + 4 * maxRange + insertSqlString = "insert into 
last_test_none_model.test_t4 values(%d, %d);" % (startTs4 + i, i) + tdSql.execute(insertSqlString) + + last_ts4 = startTs4 + 5 * maxRange - 1 + sql = f'select last_row(ts), last(*) from last_test_none_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_no_row("Last Row Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_none_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_no_row("Last Row Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from last_test_none_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, 5 * maxRange - 1) + tdSql.checkData(0, 2, last_ts4) + tdSql.checkData(0, 3, 4 * maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_no_row("Last Row Scan", explain_res, sql) + + tdSql.error(f'select last(*), last_row(ts), ts from last_test_none_model.st;') + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_none_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) + tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + sql = f'select last_row(1), last(2), count(*) , last_row(id), last(id), last(*) from last_test_none_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 2) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) + tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + tdSql.execute("drop table if exists last_test_none_model.test_t4 ;") + tdSql.execute("drop table if exists last_test_none_model.test_t3 ;") + tdSql.execute("drop table if exists last_test_none_model.test_t2 ;") + tdSql.execute("drop table if exists last_test_none_model.test_t1 ;") + tdSql.execute("drop stable if exists last_test_none_model.st;") + tdSql.execute("drop database if exists last_test_none_model;") + + def last_value_model_test(self): + tdSql.execute("create database last_test_last_value_model cachemodel 'last_value' ;") + tdSql.execute("use last_test_last_value_model;") + tdSql.execute("create stable last_test_last_value_model.st(ts timestamp, id int) tags(tid int);") + tdSql.execute("create table last_test_last_value_model.test_t1 using last_test_last_value_model.st tags(1);") + tdSql.execute("create table last_test_last_value_model.test_t2 using last_test_last_value_model.st tags(2);") + tdSql.execute("create table last_test_last_value_model.test_t3 using last_test_last_value_model.st tags(3);") + tdSql.execute("create table last_test_last_value_model.test_t4 using last_test_last_value_model.st tags(4);") + + maxRange = 100 + # 2023-11-13 00:00:00.000 + startTs = 1699804800000 + for i in range(maxRange): + insertSqlString = "insert into last_test_last_value_model.test_t1 values(%d, %d);" % (startTs + i, i) + tdSql.execute(insertSqlString) + + 
last_ts = startTs + maxRange + tdSql.execute("insert into last_test_last_value_model.test_t1 (ts) values(%d)" % (last_ts)) + sql = f'select last_row(ts), last(*) from last_test_last_value_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_last_value_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, None) + tdSql.checkData(0, 3, maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from last_test_last_value_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, maxRange - 1) + tdSql.checkData(0, 2, last_ts) + tdSql.checkData(0, 3, maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_last_value_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange + 1) + tdSql.checkData(0, 3, None) + tdSql.checkData(0, 4, maxRange - 1) + tdSql.checkData(0, 5, last_ts) + tdSql.checkData(0, 6, maxRange - 1) + + startTs2 = startTs + 86400000 + for i in range(maxRange): + i = i + 2 * maxRange + insertSqlString = "insert into last_test_last_value_model.test_t2 values(%d, %d);" % (startTs2 + i, i) + tdSql.execute(insertSqlString) + last_ts2 = startTs2 + maxRange + + startTs3 = startTs + 2 * 86400000 + for i in range(maxRange): + i = i + 3 * maxRange + insertSqlString = "insert into last_test_last_value_model.test_t3 values(%d, %d);" % (startTs3 + i, i) + tdSql.execute(insertSqlString) + last_ts3 = startTs3 + 4 * maxRange - 1 + + startTs4 = startTs + 3 * 86400000 + for i in range(maxRange): + i = i + 4 * maxRange + insertSqlString = "insert into last_test_last_value_model.test_t4 values(%d, %d);" % (startTs4 + i, i) + tdSql.execute(insertSqlString) + + last_ts4 = startTs4 + 5 * maxRange - 1 + sql = f'select last_row(ts), last(*) from last_test_last_value_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_last_value_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from 
last_test_last_value_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, 5 * maxRange - 1) + tdSql.checkData(0, 2, last_ts4) + tdSql.checkData(0, 3, 4 * maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + tdSql.error(f'select last(*), last_row(ts), ts from last_test_last_value_model.st;') + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_last_value_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) + tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + sql = f'select last_row(1), last(2), count(*) , last_row(id), last(id), last(*) from last_test_last_value_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 2) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) + tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + tdSql.execute("drop table if exists last_test_last_value_model.test_t4 ;") + tdSql.execute("drop table if exists last_test_last_value_model.test_t3 ;") + tdSql.execute("drop table if exists last_test_last_value_model.test_t2 ;") + tdSql.execute("drop table if exists last_test_last_value_model.test_t1 ;") + tdSql.execute("drop stable if exists last_test_last_value_model.st;") + tdSql.execute("drop database if exists last_test_last_value_model;") + + def last_row_model_test(self): + tdSql.execute("create database last_test_last_row_model cachemodel 'last_row';") + tdSql.execute("use last_test_last_row_model;") + tdSql.execute("create stable last_test_last_row_model.st(ts timestamp, id int) tags(tid int);") + tdSql.execute("create table last_test_last_row_model.test_t1 using last_test_last_row_model.st tags(1);") + tdSql.execute("create table last_test_last_row_model.test_t2 using last_test_last_row_model.st tags(2);") + tdSql.execute("create table last_test_last_row_model.test_t3 using last_test_last_row_model.st tags(3);") + tdSql.execute("create table last_test_last_row_model.test_t4 using last_test_last_row_model.st tags(4);") + + maxRange = 100 + # 2023-11-13 00:00:00.000 + startTs = 1699804800000 + for i in range(maxRange): + insertSqlString = "insert into last_test_last_row_model.test_t1 values(%d, %d);" % (startTs + i, i) + tdSql.execute(insertSqlString) + + last_ts = startTs + maxRange + tdSql.execute("insert into last_test_last_row_model.test_t1 (ts) values(%d)" % (last_ts)) + sql = f'select last_row(ts), last(*) from last_test_last_row_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_last_row_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, None) + tdSql.checkData(0, 3, maxRange - 1) + + explain_res = 
self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from last_test_last_row_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, maxRange - 1) + tdSql.checkData(0, 2, last_ts) + tdSql.checkData(0, 3, maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_last_row_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange + 1) + tdSql.checkData(0, 3, None) + tdSql.checkData(0, 4, maxRange - 1) + tdSql.checkData(0, 5, last_ts) + tdSql.checkData(0, 6, maxRange - 1) + + startTs2 = startTs + 86400000 + for i in range(maxRange): + i = i + 2 * maxRange + insertSqlString = "insert into last_test_last_row_model.test_t2 values(%d, %d);" % (startTs2 + i, i) + tdSql.execute(insertSqlString) + last_ts2 = startTs2 + maxRange + + startTs3 = startTs + 2 * 86400000 + for i in range(maxRange): + i = i + 3 * maxRange + insertSqlString = "insert into last_test_last_row_model.test_t3 values(%d, %d);" % (startTs3 + i, i) + tdSql.execute(insertSqlString) + last_ts3 = startTs3 + 4 * maxRange - 1 + + startTs4 = startTs + 3 * 86400000 + for i in range(maxRange): + i = i + 4 * maxRange + insertSqlString = "insert into last_test_last_row_model.test_t4 values(%d, %d);" % (startTs4 + i, i) + tdSql.execute(insertSqlString) + + last_ts4 = startTs4 + 5 * maxRange - 1 + sql = f'select last_row(ts), last(*) from last_test_last_row_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_last_row_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from last_test_last_row_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, 5 * maxRange - 1) + tdSql.checkData(0, 2, last_ts4) + tdSql.checkData(0, 3, 4 * maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_has_row("Table Scan", explain_res, sql) + + tdSql.error(f'select last(*), last_row(ts), ts from last_test_last_row_model.st;') + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_last_row_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) 
+ tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + sql = f'select last_row(1), last(2), count(*) , last_row(id), last(id), last(*) from last_test_last_row_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 2) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) + tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + tdSql.execute("drop table if exists last_test_last_row_model.test_t4 ;") + tdSql.execute("drop table if exists last_test_last_row_model.test_t3 ;") + tdSql.execute("drop table if exists last_test_last_row_model.test_t2 ;") + tdSql.execute("drop table if exists last_test_last_row_model.test_t1 ;") + tdSql.execute("drop stable if exists last_test_last_row_model.st;") + tdSql.execute("drop database if exists last_test_last_row_model;") + + def both_model_test(self): + tdSql.execute("create database last_test_both_model cachemodel 'both';") + tdSql.execute("use last_test_both_model;") + tdSql.execute("create stable last_test_both_model.st(ts timestamp, id int) tags(tid int);") + tdSql.execute("create table last_test_both_model.test_t1 using last_test_both_model.st tags(1);") + tdSql.execute("create table last_test_both_model.test_t2 using last_test_both_model.st tags(2);") + tdSql.execute("create table last_test_both_model.test_t3 using last_test_both_model.st tags(3);") + tdSql.execute("create table last_test_both_model.test_t4 using last_test_both_model.st tags(4);") + + maxRange = 100 + # 2023-11-13 00:00:00.000 + startTs = 1699804800000 + for i in range(maxRange): + insertSqlString = "insert into last_test_both_model.test_t1 values(%d, %d);" % (startTs + i, i) + tdSql.execute(insertSqlString) + + last_ts = startTs + maxRange + tdSql.execute("insert into last_test_both_model.test_t1 (ts) values(%d)" % (last_ts)) + sql = f'select last_row(ts), last(*) from last_test_both_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_no_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_both_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, None) + tdSql.checkData(0, 3, maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_no_row("Table Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from last_test_both_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, maxRange - 1) + tdSql.checkData(0, 2, last_ts) + tdSql.checkData(0, 3, maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_both_model.test_t1;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts) + tdSql.checkData(0, 1, last_ts) + tdSql.checkData(0, 2, maxRange + 1) + tdSql.checkData(0, 3, None) + tdSql.checkData(0, 4, maxRange - 1) + tdSql.checkData(0, 5, last_ts) + tdSql.checkData(0, 6, maxRange - 1) + + 
tdSql.error(f'select last(*), last_row(ts), ts from last_test_both_model.test_t1;') + + startTs2 = startTs + 86400000 + for i in range(maxRange): + i = i + 2 * maxRange + insertSqlString = "insert into last_test_both_model.test_t2 values(%d, %d);" % (startTs2 + i, i) + tdSql.execute(insertSqlString) + last_ts2 = startTs2 + maxRange + + startTs3 = startTs + 2 * 86400000 + for i in range(maxRange): + i = i + 3 * maxRange + insertSqlString = "insert into last_test_both_model.test_t3 values(%d, %d);" % (startTs3 + i, i) + tdSql.execute(insertSqlString) + last_ts3 = startTs3 + 4 * maxRange - 1 + + startTs4 = startTs + 3 * 86400000 + for i in range(maxRange): + i = i + 4 * maxRange + insertSqlString = "insert into last_test_both_model.test_t4 values(%d, %d);" % (startTs4 + i, i) + tdSql.execute(insertSqlString) + + last_ts4 = startTs4 + 5 * maxRange - 1 + + sql = f'select last_row(ts), last(*) from last_test_both_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_no_row("Table Scan", explain_res, sql) + + sql = f'select last_row(ts), last(ts), last_row(id), last(id) from last_test_both_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 5 * maxRange - 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + self.check_explain_res_no_row("Table Scan", explain_res, sql) + + sql = f'select last(*), last_row(ts), count(*) from last_test_both_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, 5 * maxRange - 1) + #tdSql.checkData(0, 2, last_ts4) + tdSql.checkData(0, 3, 4 * maxRange + 1) + + explain_res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", explain_res, sql) + + tdSql.error(f'select last(*), last_row(ts), ts from last_test_both_model.st;') + + sql = f'select last_row(ts), last(ts), count(*) , last_row(id), last(id), last(*) from last_test_both_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, last_ts4) + tdSql.checkData(0, 1, last_ts4) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) + tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + sql = f'select last_row(1), last(2), count(*) , last_row(id), last(id), last(*) from last_test_both_model.st;' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 2) + tdSql.checkData(0, 2, 4 * maxRange + 1) + tdSql.checkData(0, 3, 5 * maxRange - 1) + tdSql.checkData(0, 4, 5 * maxRange - 1) + tdSql.checkData(0, 5, last_ts4) + tdSql.checkData(0, 6, 5 * maxRange - 1) + + tdSql.execute("drop table if exists last_test_both_model.test_t4 ;") + tdSql.execute("drop table if exists last_test_both_model.test_t3 ;") + tdSql.execute("drop table if exists last_test_both_model.test_t2 ;") + tdSql.execute("drop table if exists last_test_both_model.test_t1 ;") + tdSql.execute("drop stable if exists last_test_both_model.st;") + tdSql.execute("drop database if exists last_test_both_model;") + + def run(self): + self.none_model_test() + + self.last_value_model_test() + + self.last_row_model_test() + + 
self.both_model_test() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/last_cache_scan.py b/tests/system-test/2-query/last_cache_scan.py index 3881607437..b267cedc9d 100644 --- a/tests/system-test/2-query/last_cache_scan.py +++ b/tests/system-test/2-query/last_cache_scan.py @@ -250,7 +250,7 @@ class TDTestCase: "last_row(c1), last(c1)", "last_row(c1), c1,c3, ts" ] - has_last_row_scan_res = [0,0,1] + has_last_row_scan_res = [1,1,1] sqls = self.format_sqls(sql_template, select_items) self.explain_and_check_res(sqls, has_last_row_scan_res) #res_expect = [None, None, [999, 999, 499, "2018-11-25 19:30:00.000"]] @@ -387,7 +387,7 @@ class TDTestCase: tdSql.query('select last(c1) from meters partition by t1') print(str(tdSql.queryResult)) tdSql.checkCols(1) - tdSql.checkRows(2) + tdSql.checkRows(5) p = subprocess.run(["taos", '-s', "alter table test.meters drop column c1; alter table test.meters add column c2 int"]) p.check_returncode() tdSql.query_success_failed('select last(c1) from meters partition by t1', queryTimes=10, expectErrInfo="Invalid column name: c1") diff --git a/tests/system-test/2-query/nestedQueryInterval.py b/tests/system-test/2-query/nestedQueryInterval.py index 07b5519432..3f1abe666b 100644 --- a/tests/system-test/2-query/nestedQueryInterval.py +++ b/tests/system-test/2-query/nestedQueryInterval.py @@ -1310,81 +1310,81 @@ class TDTestCase: ts = ts + 20 tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 52); + tdSql.checkData(0, 1, 52) #stables - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'stable_1_1',1) \ - nested.stable_1 (ts,tbname,q_bigint) values(now+1a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_smallint) values(now+2a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_tinyint) values(now+3a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_float) values(now+4a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_double) values(now+5a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_bool) values(now+6a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_binary) values(now+7a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_nchar) values(now+8a,'stable_1_1',1)\ - nested.stable_1 (ts,tbname,q_ts) values(now+9a,'stable_1_1',1)\ - nested.stable_null_data (ts,tbname,q_int) values(now,'stable_null_data_1',1) \ - nested.stable_null_data (ts,tbname,q_bigint) values(now+1a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_smallint) values(now+2a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_tinyint) values(now+3a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_float) values(now+4a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_double) values(now+5a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_bool) values(now+6a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_binary) values(now+7a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_nchar) values(now+8a,'stable_null_data_1',1)\ - nested.stable_null_data (ts,tbname,q_ts) values(now+9a,'stable_null_data_1',1)\ - nested.stable_null_childtable (ts,tbname,q_int) values(now,'stable_null_childtable_1',1) \ - nested.stable_null_childtable (ts,tbname,q_bigint) values(now+1a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable 
(ts,tbname,q_smallint) values(now+2a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_tinyint) values(now+3a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_float) values(now+4a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_double) values(now+5a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_bool) values(now+6a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_binary) values(now+7a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_nchar) values(now+8a,'stable_null_childtable_1',1)\ - nested.stable_null_childtable (ts,tbname,q_ts) values(now+9a,'stable_null_childtable_1',1);") + tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values({ts},'stable_1_1',1) \ + nested.stable_1 (ts,tbname,q_bigint) values({ts}+1a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_smallint) values({ts}+2a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_tinyint) values({ts}+3a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_float) values({ts}+4a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_double) values({ts}+5a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_bool) values({ts}+6a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_binary) values({ts}+7a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_nchar) values({ts}+8a,'stable_1_1',1)\ + nested.stable_1 (ts,tbname,q_ts) values({ts}+9a,'stable_1_1',1)\ + nested.stable_null_data (ts,tbname,q_int) values({ts},'stable_null_data_1',1) \ + nested.stable_null_data (ts,tbname,q_bigint) values({ts}+1a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_smallint) values({ts}+2a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_tinyint) values({ts}+3a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_float) values({ts}+4a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_double) values({ts}+5a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_bool) values({ts}+6a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_binary) values({ts}+7a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_nchar) values({ts}+8a,'stable_null_data_1',1)\ + nested.stable_null_data (ts,tbname,q_ts) values({ts}+9a,'stable_null_data_1',1)\ + nested.stable_null_childtable (ts,tbname,q_int) values({ts},'stable_null_childtable_1',1) \ + nested.stable_null_childtable (ts,tbname,q_bigint) values({ts}+1a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_smallint) values({ts}+2a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_tinyint) values({ts}+3a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_float) values({ts}+4a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_double) values({ts}+5a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_bool) values({ts}+6a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_binary) values({ts}+7a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_nchar) values({ts}+8a,'stable_null_childtable_1',1)\ + nested.stable_null_childtable (ts,tbname,q_ts) values({ts}+9a,'stable_null_childtable_1',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(6) - tdSql.checkData(0, 1, 162); - tdSql.checkData(1, 1, 200); + tdSql.checkData(0, 1, 162) + tdSql.checkData(1, 1, 200) 
tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 62); + tdSql.checkData(0, 1, 62) tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(1) - tdSql.checkData(0, 1, 62); + tdSql.checkData(0, 1, 62) #test special character - tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values(now+10a,'!@!@$$^$',1) \ - nested.stable_null_data (ts,tbname,q_int) values(now+10a,'%^$^&^&',1) \ - nested.stable_null_childtable (ts,tbname,q_int) values(now+10a,'$^%$%^&',1);") + tdSql.query(f"insert into nested.stable_1 (ts,tbname,q_int) values({ts}+10a,'!@!@$$^$',1) \ + nested.stable_null_data (ts,tbname,q_int) values({ts}+10a,'%^$^&^&',1) \ + nested.stable_null_childtable (ts,tbname,q_int) values({ts}+10a,'$^%$%^&',1);") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(7) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 162); - tdSql.checkData(2, 1, 200); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 162) + tdSql.checkData(2, 1, 200) tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(2) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 62); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 62) tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(2) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 62); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 62) tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) #test csv sql1 = "select tbname,ts,q_int,q_binary from nested.stable_1 >>'%s/stable_1.csv';" %self.testcasePath @@ -1404,31 +1404,31 @@ class TDTestCase: tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(7) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 162); - tdSql.checkData(2, 1, 200); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 162) + tdSql.checkData(2, 1, 200) tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select count(q_bool) from nested.stable_1;") - tdSql.checkData(0, 0, 0); + tdSql.checkData(0, 0, 0) tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(2) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 62); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 62) tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select count(q_bool) from nested.stable_null_data;") - tdSql.checkData(0, 0, 0); + tdSql.checkData(0, 0, 0) tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(2) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 62); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 62) tdSql.query(f"select count(*) from nested.stable_null_childtable 
where tbname ='$^%$%^&' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select count(q_bool) from nested.stable_null_childtable;") - tdSql.checkData(0, 0, 0); + tdSql.checkData(0, 0, 0) tdSql.query(f"delete from nested.stable_1;") @@ -1440,37 +1440,46 @@ class TDTestCase: tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.checkRows(7) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 162); - tdSql.checkData(2, 1, 200); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 162) + tdSql.checkData(2, 1, 200) tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select count(q_bool) from nested.stable_1;") - tdSql.checkData(0, 0, 0); + tdSql.checkData(0, 0, 0) tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.checkRows(2) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 62); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 62) tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select count(q_bool) from nested.stable_null_data;") - tdSql.checkData(0, 0, 0); + tdSql.checkData(0, 0, 0) tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.checkRows(2) - tdSql.checkData(0, 1, 1); - tdSql.checkData(1, 1, 62); + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 62) tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;") - tdSql.checkData(0, 0, 1); + tdSql.checkData(0, 0, 1) tdSql.query(f"select count(q_bool) from nested.stable_null_childtable;") - tdSql.checkData(0, 0, 0); + tdSql.checkData(0, 0, 0) + + tdSql.query(f"select tbname,count(*) from nested.stable_1 where ts is not null group by tbname order by tbname;") + tdSql.checkRows(7) + tdSql.query(f"select tbname,count(*) from nested.stable_1 where ts is null group by tbname order by tbname;") + tdSql.checkRows(7) + tdSql.query(f"select tbname,last(*) from nested.stable_1 where ts is not null group by tbname order by tbname;") + tdSql.checkRows(3) + tdSql.query(f"select tbname,last(*) from nested.stable_1 where ts is null group by tbname order by tbname;") + tdSql.checkRows(0) #test stable - tdSql.error(f"insert into nested.stable_1 (ts,tbname,q_int) values(now,'stable_1',1) \ - nested.stable_null_data (ts,tbname,q_int) values(now,'stable_null_data',1) \ - nested.stable_null_childtable (ts,tbname,q_int) values(now,'stable_null_childtable',1);") + tdSql.error(f"insert into nested.stable_1 (ts,tbname,q_int) values({ts},'stable_1',1) \ + nested.stable_null_data (ts,tbname,q_int) values({ts},'stable_null_data',1) \ + nested.stable_null_childtable (ts,tbname,q_int) values({ts},'stable_null_childtable',1);") def stop(self): diff --git a/tests/system-test/2-query/orderBy.py b/tests/system-test/2-query/orderBy.py index fa447cbca4..dedc57eab3 100644 --- a/tests/system-test/2-query/orderBy.py +++ b/tests/system-test/2-query/orderBy.py @@ -276,6 +276,33 @@ class TDTestCase: sql2 = "select count(*) as a, count(c2) as b, max(c2) as c, min(c2) as d, sum(c2) as e from st;" self.queryResultSame(sql1, sql2) + def queryOrderByAgg(self): + + tdSql.query("SELECT COUNT(*) FROM t1 order by COUNT(*)") + + tdSql.query("SELECT COUNT(*) FROM t1 order by last(c2)") + + tdSql.query("SELECT c1 FROM t1 order by last(ts)") + + tdSql.query("SELECT ts FROM t1 
order by last(ts)") + + tdSql.query("SELECT last(ts), ts, c1 FROM t1 order by 2") + + tdSql.query("SELECT ts, last(ts) FROM t1 order by last(ts)") + + tdSql.query(f"SELECT * FROM t1 order by last(ts)") + + tdSql.query(f"SELECT last(ts) as t2, ts FROM t1 order by 1") + tdSql.checkRows(1) + + tdSql.query(f"SELECT last(ts), ts FROM t1 order by last(ts)") + tdSql.checkRows(1) + + tdSql.error(f"SELECT first(ts), ts FROM t1 order by last(ts)") + + tdSql.error(f"SELECT last(ts) as t2, ts FROM t1 order by last(t2)") + + # run def run(self): # prepare env @@ -287,6 +314,9 @@ class TDTestCase: # advance self.queryAdvance() + # agg + self.queryOrderByAgg() + # stop def stop(self): diff --git a/tests/system-test/2-query/project_group.py b/tests/system-test/2-query/project_group.py new file mode 100644 index 0000000000..44943e5088 --- /dev/null +++ b/tests/system-test/2-query/project_group.py @@ -0,0 +1,65 @@ +from wsgiref.headers import tspecials +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.batchNum = 5 + self.ts = 1537146000000 + + def run(self): + dbname = "db" + tdSql.prepare() + + tdSql.execute(f'''create table sta(ts timestamp, col1 int, col2 bigint) tags(tg1 int, tg2 binary(20))''') + tdSql.execute(f"create table sta1 using sta tags(1, 'a')") + tdSql.execute(f"create table sta2 using sta tags(2, 'b')") + tdSql.execute(f"create table sta3 using sta tags(3, 'c')") + tdSql.execute(f"create table sta4 using sta tags(4, 'a')") + tdSql.execute(f"insert into sta1 values(1537146000001, 11, 110)") + tdSql.execute(f"insert into sta1 values(1537146000002, 12, 120)") + tdSql.execute(f"insert into sta1 values(1537146000003, 13, 130)") + tdSql.execute(f"insert into sta2 values(1537146000001, 21, 210)") + tdSql.execute(f"insert into sta2 values(1537146000002, 22, 220)") + tdSql.execute(f"insert into sta2 values(1537146000003, 23, 230)") + tdSql.execute(f"insert into sta3 values(1537146000001, 31, 310)") + tdSql.execute(f"insert into sta3 values(1537146000002, 32, 320)") + tdSql.execute(f"insert into sta3 values(1537146000003, 33, 330)") + tdSql.execute(f"insert into sta4 values(1537146000001, 41, 410)") + tdSql.execute(f"insert into sta4 values(1537146000002, 42, 420)") + tdSql.execute(f"insert into sta4 values(1537146000003, 43, 430)") + + tdSql.execute(f'''create table stb(ts timestamp, col1 int, col2 bigint) tags(tg1 int, tg2 binary(20))''') + tdSql.execute(f"create table stb1 using stb tags(1, 'a')") + tdSql.execute(f"create table stb2 using stb tags(2, 'b')") + tdSql.execute(f"create table stb3 using stb tags(3, 'c')") + tdSql.execute(f"create table stb4 using stb tags(4, 'a')") + tdSql.execute(f"insert into stb1 values(1537146000001, 911, 9110)") + tdSql.execute(f"insert into stb1 values(1537146000002, 912, 9120)") + tdSql.execute(f"insert into stb1 values(1537146000003, 913, 9130)") + tdSql.execute(f"insert into stb2 values(1537146000001, 921, 9210)") + tdSql.execute(f"insert into stb2 values(1537146000002, 922, 9220)") + tdSql.execute(f"insert into stb2 values(1537146000003, 923, 9230)") + tdSql.execute(f"insert into stb3 values(1537146000001, 931, 9310)") + tdSql.execute(f"insert into stb3 values(1537146000002, 932, 9320)") + tdSql.execute(f"insert into stb3 values(1537146000003, 933, 9330)") + tdSql.execute(f"insert into stb4 values(1537146000001, 
941, 9410)") + tdSql.execute(f"insert into stb4 values(1537146000002, 942, 9420)") + tdSql.execute(f"insert into stb4 values(1537146000003, 943, 9430)") + + tdSql.query("select * from (select ts, col1 from sta partition by tbname) limit 2"); + tdSql.checkRows(2) + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py index 27096fe5ad..f449597841 100644 --- a/tests/system-test/2-query/sum.py +++ b/tests/system-test/2-query/sum.py @@ -2,6 +2,7 @@ from util.log import * from util.sql import * from util.cases import * from util.dnodes import * +from util.autogen import * INT_COL = "c1" @@ -23,11 +24,11 @@ TS_TYPE_COL = [TS_COL] DBNAME = "db" class TDTestCase: - def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) + self.autoGen = AutoGen(True) def __sum_condition(self): sum_condition = [] @@ -207,9 +208,55 @@ class TDTestCase: ''' ) + def testAllTypes(self): + # create stable and insert + tdLog.info("test all types") + dbname = "sumdb" + stbname = "stb" + colnum = 16 + self.autoGen.set_batch_size(1000) + self.autoGen.create_db(dbname) + self.autoGen.create_stable(stbname, 16, colnum, 8, 16) + self.autoGen.create_child(stbname, "d", 4) + self.autoGen.insert_data(10000) + + # check correct + i = 0 + for c in self.autoGen.mcols: + + if c in [0, 11, 12, 13]: + i += 1 + continue + + # query + col = f"c{i}" + sql = f"select count({col}), sum({col}), avg({col}), max({col}), min({col}), stddev({col}), leastsquares({col},1,9) from {dbname}.{stbname}" + tdSql.query(sql) + # sum + tdSql.checkData(0, 0, 4*10000, True) + # sum + tdSql.checkData(0, 1, 4*10000, True) + # avg + tdSql.checkData(0, 2, 1, True) + # max + tdSql.checkData(0, 3, 1, True) + # min + tdSql.checkData(0, 4, 1, True) + # stddev + tdSql.checkData(0, 5, 0, True) + + sql = f"select twa({col}) from {dbname}.d0" + tdSql.query(sql) + tdSql.checkData(0, 0, 1, True) + + i += 1 + + def run(self): tdSql.prepare() + self.testAllTypes() + tdLog.printNoPrefix("==========step1:create table") self.__create_tb() diff --git a/tests/system-test/2-query/td-28068.py b/tests/system-test/2-query/td-28068.py new file mode 100644 index 0000000000..d77e012d9a --- /dev/null +++ b/tests/system-test/2-query/td-28068.py @@ -0,0 +1,68 @@ +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + + tdSql.execute("drop database if exists td_28068;") + tdSql.execute("create database td_28068;") + tdSql.execute("create database if not exists td_28068;") + tdSql.execute("create stable td_28068.st (ts timestamp, test_case nchar(10), time_cost float, num float) tags (branch nchar(10), scenario nchar(10));") + tdSql.execute("insert into td_28068.ct1 using td_28068.st (branch, scenario) tags ('3.0', 'scenario1') values (now(), 'query1', 1,2);") + tdSql.execute("insert into td_28068.ct1 using td_28068.st (branch, scenario) tags ('3.0', 'scenario1') values (now(), 'query1', 2,3);") + tdSql.execute("insert into td_28068.ct2 using td_28068.st (branch, scenario) tags ('3.0', 'scenario2') values (now(), 'query1', 10,1);") + tdSql.execute("insert into td_28068.ct2 using td_28068.st (branch, scenario) tags ('3.0', 'scenario2') values (now(), 'query1', 
11,5);") + tdSql.execute("insert into td_28068.ct3 using td_28068.st (branch, scenario) tags ('3.1', 'scenario1') values (now(), 'query1', 20,4);") + tdSql.execute("insert into td_28068.ct3 using td_28068.st (branch, scenario) tags ('3.1', 'scenario1') values (now(), 'query1', 30,1);") + tdSql.execute("insert into td_28068.ct4 using td_28068.st (branch, scenario) tags ('3.1', 'scenario2') values (now(), 'query1', 8,8);") + tdSql.execute("insert into td_28068.ct4 using td_28068.st (branch, scenario) tags ('3.1', 'scenario2') values (now(), 'query1', 9,10);") + + def run(self): + tdSql.error('select last(ts) as ts, last(branch) as branch, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch);') + tdSql.error('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch), last(scenario); ') + + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch); ') + tdSql.checkRows(4) + + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) from td_28068.st group by branch, scenario order by last(branch), last(test_case);') + tdSql.checkRows(4) + + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch), last(scenario);') + tdSql.checkRows(4) + + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by branch, scenario order by branch1, scenario1;') + tdSql.checkRows(4) + + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by tbname; ') + tdSql.checkRows(4) + + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by branch, scenario order by test_case;') + tdSql.checkRows(4) + + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case1 from td_28068.st group by branch, scenario order by last(test_case);') + tdSql.checkRows(4) + + tdSql.query('select time_cost, num, time_cost + num as final_cost from td_28068.st partition by branch; ') + tdSql.checkRows(8) + + tdSql.query('select count(*) from td_28068.st partition by branch order by branch; ') + tdSql.checkRows(2) + + tdSql.query('select time_cost, num, time_cost + num as final_cost from td_28068.st order by time_cost;') + tdSql.checkRows(8) + + tdSql.query('select time_cost, num, time_cost + num as final_cost from td_28068.st order by final_cost;') + tdSql.checkRows(8) + + tdSql.execute("drop database if exists td_28068;") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb-false.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb-false.py new file mode 100644 index 0000000000..384959c52b --- /dev/null +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb-false.py @@ -0,0 +1,218 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from 
util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from util.cluster import * +sys.path.append("./7-tmq") +from tmqCommon import * + +from util.cluster import * +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import clusterComCheck + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 10 + self.rowsPerTbl = 1000 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + def getDataPath(self): + selfPath = tdCom.getBuildPath() + + return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*'; + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 60, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdCom.drop_all_db() + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + return + + def restartAndRemoveWal(self, deleteWal): + tdDnodes = cluster.dnodes + tdSql.query("select * from information_schema.ins_vnodes") + for result in tdSql.queryResult: + if result[2] == 'dbt': + tdLog.debug("dnode is %d"%(result[0])) + dnodeId = result[0] + vnodeId = result[1] + + tdDnodes[dnodeId - 1].stoptaosd() + time.sleep(1) + dataPath = self.getDataPath() + dataPath = dataPath%(dnodeId,vnodeId) + tdLog.debug("dataPath:%s"%dataPath) + if deleteWal: + if os.system('rm -rf ' + dataPath) != 0: + tdLog.exit("rm error") + + tdDnodes[dnodeId - 1].starttaosd() + time.sleep(1) + break + tdLog.debug("restart dnode ok") + + def splitVgroups(self): + tdSql.query("select * from information_schema.ins_vnodes") + vnodeId = 0 + for result in tdSql.queryResult: + if result[2] == 'dbt': + vnodeId = result[1] + tdLog.debug("vnode is %d"%(vnodeId)) + break + splitSql = "split vgroup %d" %(vnodeId) + tdLog.debug("splitSql:%s"%(splitSql)) + tdSql.query(splitSql) + tdLog.debug("splitSql ok") + + def tmqCase1(self, deleteWal=False): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 
'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb1', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 120, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + # expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb ") + queryString = "stable %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + # tdSql.query(queryString) + # expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + tdLog.info("create ctb1") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("create ctb2") + paraDict2 = paraDict.copy() + paraDict2['ctbPrefix'] = "ctb2" + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict2['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("insert ctb1 data") + pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict) + + tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartCommitNotifyFromTmqsim() + + #restart dnode & remove wal + self.restartAndRemoveWal(deleteWal) + + # split vgroup + self.splitVgroups() + + + tdLog.info("insert ctb2 data") + pInsertThread1 = tmqCom.asyncInsertDataByInterlace(paraDict2) + pInsertThread.join() + pInsertThread1.join() + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectrowcnt / 2 > resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(2) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + if deleteWal == True: + clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240) + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def run(self): + self.prepareTestEnv() + self.tmqCase1(False) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-false.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-false.py new file mode 100644 index 0000000000..f01bf2558c --- /dev/null +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select-false.py @@ -0,0 +1,222 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from util.cluster import * +sys.path.append("./7-tmq") +from tmqCommon import * + +from util.cluster import * +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import clusterComCheck + + + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 10 + self.rowsPerTbl = 1000 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + def getDataPath(self): + selfPath = tdCom.getBuildPath() + + return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*'; + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 60, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdCom.drop_all_db() + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + return + + def restartAndRemoveWal(self, deleteWal): + tdDnodes = cluster.dnodes + tdSql.query("select * from information_schema.ins_vnodes") + for result in tdSql.queryResult: + if result[2] == 'dbt': + tdLog.debug("dnode is %d"%(result[0])) + dnodeId = result[0] + vnodeId = result[1] + + tdDnodes[dnodeId - 1].stoptaosd() + time.sleep(1) + dataPath = self.getDataPath() + dataPath = dataPath%(dnodeId,vnodeId) + tdLog.debug("dataPath:%s"%dataPath) + if deleteWal: + if os.system('rm -rf ' + dataPath) != 0: + tdLog.exit("rm error") + + tdDnodes[dnodeId - 1].starttaosd() + time.sleep(1) + break + tdLog.debug("restart dnode ok") + + def splitVgroups(self): + tdSql.query("select * from information_schema.ins_vnodes") + vnodeId = 0 + for result in tdSql.queryResult: + if result[2] == 'dbt': + vnodeId = result[1] + 
tdLog.debug("vnode is %d"%(vnodeId)) + break + splitSql = "split vgroup %d" %(vnodeId) + tdLog.debug("splitSql:%s"%(splitSql)) + tdSql.query(splitSql) + tdLog.debug("splitSql ok") + + def tmqCase1(self, deleteWal=False): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb1', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 180, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + # expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + # tdSql.query(queryString) + # expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + tdLog.info("create ctb1") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("create ctb2") + paraDict2 = paraDict.copy() + paraDict2['ctbPrefix'] = "ctb2" + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict2['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("insert ctb1 data") + pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict) + + tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartCommitNotifyFromTmqsim() + + #restart dnode & remove wal + self.restartAndRemoveWal(deleteWal) + + # split vgroup + self.splitVgroups() + + + tdLog.info("insert ctb2 data") + pInsertThread1 = tmqCom.asyncInsertDataByInterlace(paraDict2) + pInsertThread.join() + pInsertThread1.join() + + expectRows = 1 + + + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectrowcnt / 2 > resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, 
resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(2) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + if deleteWal == True: + clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240) + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def run(self): + self.prepareTestEnv() + self.tmqCase1(False) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py index e79a0ca77d..5e11de04cb 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb-select.py @@ -190,6 +190,8 @@ class TDTestCase: pInsertThread1.join() expectRows = 1 + + resultList = tmqCom.selectConsumeResult(expectRows) if expectrowcnt / 2 > resultList[0]: @@ -209,8 +211,6 @@ class TDTestCase: def run(self): self.prepareTestEnv() self.tmqCase1(True) - self.prepareTestEnv() - self.tmqCase1(False) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmqVnodeSplit-stb.py b/tests/system-test/7-tmq/tmqVnodeSplit-stb.py index a96d52ee2b..5aa2054e96 100644 --- a/tests/system-test/7-tmq/tmqVnodeSplit-stb.py +++ b/tests/system-test/7-tmq/tmqVnodeSplit-stb.py @@ -207,8 +207,6 @@ class TDTestCase: def run(self): self.prepareTestEnv() self.tmqCase1(True) - self.prepareTestEnv() - self.tmqCase1(False) def stop(self): tdSql.close() diff --git a/tests/system-test/8-stream/at_once_interval.py b/tests/system-test/8-stream/at_once_interval.py index 8f5438be37..763b88bc69 100644 --- a/tests/system-test/8-stream/at_once_interval.py +++ b/tests/system-test/8-stream/at_once_interval.py @@ -106,14 +106,18 @@ class TDTestCase: ptn_counter = 0 for c1_value in tdSql.queryResult: if partition == "c1": - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition is None: - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) @@ -121,14 +125,18 @@ class TDTestCase: ptn_counter = 0 for c1_value in 
tdSql.queryResult: if partition == "c1": - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition is None: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) @@ -208,8 +216,8 @@ class TDTestCase: self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True) self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL") - # for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: - for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + # for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value) self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True) diff --git a/tests/system-test/8-stream/at_once_interval_ext.py b/tests/system-test/8-stream/at_once_interval_ext.py index e1dc057448..cbb4d7ca5a 100644 --- a/tests/system-test/8-stream/at_once_interval_ext.py +++ b/tests/system-test/8-stream/at_once_interval_ext.py @@ -153,12 +153,15 @@ class TDTestCase: ptn_counter = 0 for c1_value in tdSql.queryResult: if partition == "c1": - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = 
self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 else: tdSql.query(f'select cast(cast(cast({c1_value[1]} as int unsigned) as bigint) as varchar(100))') @@ -166,7 +169,8 @@ class TDTestCase: if subtable == "constant": return else: - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) def run(self): diff --git a/tests/system-test/8-stream/at_once_session.py b/tests/system-test/8-stream/at_once_session.py index 3462561c4e..6f25e5ad97 100644 --- a/tests/system-test/8-stream/at_once_session.py +++ b/tests/system-test/8-stream/at_once_session.py @@ -171,35 +171,43 @@ class TDTestCase: for c1_value in tdSql.queryResult: if c1_value[1] is not None: if partition == "c1": - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": if subtable: abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') else: - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 - tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) if subtable is not None else tdSql.checkEqual(tdSql.queryResult[0][0] >= 0, True) tdSql.query(f'select * from {self.tb_name}') ptn_counter = 0 for c1_value in tdSql.queryResult: if c1_value[1] is not None: if partition == "c1": - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": if subtable: abs_c1_value = 
abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') else: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 - tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) if subtable is not None else tdSql.checkEqual(tdSql.queryResult[0][0] >= 0, True) @@ -224,4 +232,4 @@ class TDTestCase: event = threading.Event() tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/8-stream/at_once_state_window.py b/tests/system-test/8-stream/at_once_state_window.py index 60a4f4e890..933a41553e 100644 --- a/tests/system-test/8-stream/at_once_state_window.py +++ b/tests/system-test/8-stream/at_once_state_window.py @@ -94,15 +94,19 @@ class TDTestCase: for c1_value in tdSql.queryResult: if partition == "c1": if subtable: - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') else: - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') return elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) @@ -111,17 +115,20 @@ class TDTestCase: for c1_value in tdSql.queryResult: if partition == "c1": if subtable: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + tbname = 
self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') else: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') return elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 - tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) diff --git a/tests/system-test/8-stream/pause_resume_test.py b/tests/system-test/8-stream/pause_resume_test.py index 484383f1ce..8b71466f01 100644 --- a/tests/system-test/8-stream/pause_resume_test.py +++ b/tests/system-test/8-stream/pause_resume_test.py @@ -117,14 +117,18 @@ class TDTestCase: ptn_counter = 0 for c1_value in tdSql.queryResult: if partition == "c1": - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition is None: - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) @@ -132,14 +136,18 @@ class TDTestCase: ptn_counter = 0 for c1_value in tdSql.queryResult: if partition == "c1": - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition is None: - tdSql.query(f'select count(*) from 
`{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) diff --git a/tests/system-test/8-stream/window_close_interval.py b/tests/system-test/8-stream/window_close_interval.py index 31a566a0f8..1ac7a61d2f 100644 --- a/tests/system-test/8-stream/window_close_interval.py +++ b/tests/system-test/8-stream/window_close_interval.py @@ -146,12 +146,15 @@ class TDTestCase: ptn_counter = 0 for c1_value in tdSql.queryResult: if partition == "c1": - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count) elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count) elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count) + tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count) ptn_counter += 1 tdSql.checkEqual(tdSql.queryResult[0][0] , self.tdCom.range_count) @@ -161,12 +164,15 @@ class TDTestCase: ptn_counter = 0 for c1_value in tdSql.queryResult: if partition == "c1": - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') elif partition == "abs(c1)": abs_c1_value = abs(c1_value[1]) - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from 
`{tbname}`') elif partition == "tbname" and ptn_counter == 0: - tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') + tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}') + tdSql.query(f'select count(*) from `{tbname}`') ptn_counter += 1 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
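Note on the stream-test hunks above: they replace direct "select count(*) from `<expected subtable>`" queries with a lookup through self.tdCom.get_subtable_wait(...), so each check first waits for the stream to actually create the expected subtable before counting its rows. As a rough sketch only (the helper name, signature, timeout and the information_schema lookup below are assumptions for illustration, not the real util.common implementation), the pattern the edited tests rely on is:

    import time

    def get_subtable_wait(tdSql, tdLog, expected_prefix, timeout=60):
        # poll until a subtable matching the expected name exists, then return its name
        for _ in range(timeout):
            tdSql.query(f"select table_name from information_schema.ins_tables "
                        f"where table_name like '{expected_prefix}%'")
            if tdSql.queryRows > 0:
                return tdSql.queryResult[0][0]
            time.sleep(1)
        tdLog.exit(f"subtable {expected_prefix} not created within {timeout}s")

    # usage mirrors the edited tests:
    # tbname = get_subtable_wait(tdSql, tdLog, f'{tname}_{prefix}{value}{suffix}')
    # tdSql.query(f'select count(*) from `{tbname}`')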