diff --git a/README-CN.md b/README-CN.md index a9bc814e8d..c78050c7bc 100644 --- a/README-CN.md +++ b/README-CN.md @@ -174,6 +174,8 @@ cmake .. -G "NMake Makefiles" nmake ``` +如果你使用的是 Visual Studio 2022 版本, 脚本 `vcvarsall.bat` 的默认安装路径是 `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`。 + ### Mac OS X 系统 安装 Xcode 命令行工具和 cmake. 在 Catalina 和 Big Sur 操作系统上,需要安装 XCode 11.4+ 版本。 diff --git a/README.md b/README.md index 2dea05f09d..48349e891e 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,7 @@ cmake .. -DCPUTYPE=mips64 && cmake --build . ### On Windows platform -If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". +If you use Visual Studio 2013, please open a command window by executing "cmd.exe". Please specify "amd64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat. ```cmd mkdir debug && cd debug @@ -145,7 +145,7 @@ cmake .. -G "NMake Makefiles" nmake ``` -If you use the Visual Studio 2019 or 2017: +If you use Visual Studio 2019 or 2017: please open a command window by executing "cmd.exe". Please specify "x64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat. @@ -164,6 +164,8 @@ cmake .. -G "NMake Makefiles" nmake ``` +If you use Visual Studio 2022, the only change is the default path of `vcvarsall.bat`, which is `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`. + ### On Mac OS X platform Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 31b9936f3e..e80e7e4110 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -118,6 +118,7 @@ execute_process(COMMAND "${CMAKE_COMMAND}" --build . # ================================================================================================ # googletest if(${BUILD_TEST}) + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) add_subdirectory(googletest EXCLUDE_FROM_ALL) target_include_directories( gtest @@ -259,7 +260,7 @@ if(${BUILD_MSVCREGEX}) SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex) endif(${BUILD_MSVCREGEX}) -# msvcregex +# wcwidth if(${BUILD_WCWIDTH}) add_library(wcwidth STATIC "") target_sources(wcwidth diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 454b940862..dedc06a2b9 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -55,8 +55,13 @@ extern int32_t tMsgDict[]; #define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8) #define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff) -#define TMSG_INFO(TYPE) tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] -#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)) +#define TMSG_INFO(TYPE) \ + ((TYPE) >= 0 && \ + ((TYPE) < TDMT_DND_MAX_MSG | (TYPE) < TDMT_MND_MAX_MSG | (TYPE) < TDMT_VND_MAX_MSG | (TYPE) < TDMT_SCH_MAX_MSG | \ + (TYPE) < TDMT_STREAM_MAX_MSG | (TYPE) < TDMT_MON_MAX_MSG | (TYPE) < TDMT_SYNC_MAX_MSG)) \ + ? 
tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \ + : 0 +#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)) typedef uint16_t tmsg_t; @@ -708,6 +713,7 @@ typedef struct { int32_t buffer; // MB int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -736,6 +742,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -1023,8 +1030,10 @@ typedef struct { int64_t clusterId; int64_t rebootTime; int64_t updateTime; - int32_t numOfCores; + float numOfCores; int32_t numOfSupportVnodes; + int64_t memTotal; + int64_t memAvail; char dnodeEp[TSDB_EP_LEN]; SMnodeLoad mload; SQnodeLoad qload; @@ -1079,6 +1088,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -1131,6 +1141,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 371e079913..2130a9c264 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -82,6 +82,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_MND_MSG) TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL) @@ -164,6 +165,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_VND_MSG) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp) @@ -198,6 +200,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_ALTER_HASHRANGE, "alter-hashrange", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "compact", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_TTL_TABLE, "drop-ttl-stb", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_SCH_MSG) TD_DEF_MSG_TYPE(TDMT_SCH_QUERY, "query", NULL, NULL) @@ -209,6 +212,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_SCH_DROP_TASK, "drop-task", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SCH_EXPLAIN, "explain", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_STREAM_MSG) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DEPLOY, "stream-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp) @@ -217,6 +221,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RECOVER, "stream-task-recover", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_MON_MSG) TD_DEF_MSG_TYPE(TDMT_MON_MM_INFO, "monitor-minfo", NULL, NULL) @@ -227,6 +232,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MON_VM_LOAD, "monitor-vload", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MON_MM_LOAD, "monitor-mload", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MON_QM_LOAD, "monitor-qload", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL) 
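Each message segment defined in tmsgdef.h now closes with a *_MAX_MSG sentinel (dnd-max, mnd-max, vnd-max, sch-max, stream-max, monitor-max, sync-max). Those sentinels are what the reworked TMSG_INFO macro in tmsg.h relies on to reject out-of-range message types before indexing tMsgInfo. A minimal sketch of the sentinel-plus-bounds-check pattern follows; the enum, table, and macro names are illustrative, not the TDengine definitions.

```c
/* Illustrative only: a message segment closed by a MAX sentinel, and a lookup
 * that falls back to a default string instead of reading past the table. */
enum { MSG_SEG_FIRST = 0, MSG_SEG_PING, MSG_SEG_PONG, MSG_SEG_MAX };

static const char *segMsgName[] = {"first", "ping", "pong"};

#define SEG_MSG_NAME(t) \
  (((t) >= MSG_SEG_FIRST && (t) < MSG_SEG_MAX) ? segMsgName[(t)] : "unknown")
```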
TD_NEW_MSG_SEG(TDMT_SYNC_MSG) TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT, "sync-timer", NULL, NULL) @@ -251,6 +257,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_SYNC_LEADER_TRANSFER, "sync-leader-transfer", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SYNC_SET_MNODE_STANDBY, "set-mnode-standby", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SYNC_SET_VNODE_STANDBY, "set-vnode-standby", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL) #if defined(TD_MSG_NUMBER_) TDMT_MAX diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 3558e04bbf..debfce5f2d 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -82,6 +82,7 @@ typedef struct SScanLogicNode { typedef struct SJoinLogicNode { SLogicNode node; EJoinType joinType; + SNode* pMergeCondition; SNode* pOnConditions; bool isSingleTableJoin; } SJoinLogicNode; @@ -320,6 +321,7 @@ typedef struct SInterpFuncPhysiNode { SNodeList* pFuncs; STimeWindow timeRange; int64_t interval; + int8_t intervalUnit; EFillMode fillMode; SNode* pFillValues; // SNodeListNode SNode* pTimeSeries; // SColumnNode @@ -328,6 +330,7 @@ typedef struct SInterpFuncPhysiNode { typedef struct SJoinPhysiNode { SPhysiNode node; EJoinType joinType; + SNode* pMergeCondition; SNode* pOnConditions; SNodeList* pTargets; } SJoinPhysiNode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 3478926fef..dd337bcee0 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -376,6 +376,7 @@ void nodesRewriteSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeRewrit typedef enum ECollectColType { COLLECT_COL_TYPE_COL = 1, COLLECT_COL_TYPE_TAG, COLLECT_COL_TYPE_ALL } ECollectColType; int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type, SNodeList** pCols); +int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols); typedef bool (*FFuncClassifier)(int32_t funcId); int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs); diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h index be3d16ab0d..1c73b2c2c8 100644 --- a/include/libs/scheduler/scheduler.h +++ b/include/libs/scheduler/scheduler.h @@ -130,7 +130,7 @@ void schedulerStopQueryHb(void *pTrans); * Free the query job * @param pJob */ -void schedulerFreeJob(int64_t job, int32_t errCode); +void schedulerFreeJob(int64_t* job, int32_t errCode); void schedulerDestroy(void); diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index d07cf0adf1..a93b359ef3 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -31,6 +31,12 @@ extern bool gRaftDetailLog; #define SYNC_INDEX_INVALID -1 #define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF +typedef enum { + SYNC_STRATEGY_NO_SNAPSHOT = 0, + SYNC_STRATEGY_STANDARD_SNAPSHOT = 1, + SYNC_STRATEGY_WAL_FIRST = 2, +} ESyncStrategy; + typedef uint64_t SyncNodeId; typedef int32_t SyncGroupId; typedef int64_t SyncIndex; @@ -48,11 +54,6 @@ typedef enum { TAOS_SYNC_STATE_ERROR = 103, } ESyncState; -typedef enum { - TAOS_SYNC_FSM_CB_SUCCESS = 0, - TAOS_SYNC_FSM_CB_OTHER_ERROR = 1, -} ESyncFsmCbCode; - typedef struct SNodeInfo { uint16_t nodePort; char nodeFqdn[TSDB_FQDN_LEN]; @@ -96,6 +97,11 @@ typedef struct SReConfigCbMeta { } SReConfigCbMeta; +typedef struct SSnapshotParam { + SyncIndex start; + SyncIndex end; +} SSnapshotParam; + typedef struct SSnapshot { void* data; SyncIndex 
lastApplyIndex; @@ -125,7 +131,7 @@ typedef struct SSyncFSM { int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader); int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len); - int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter); + int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void* pWriterParam, void** ppWriter); int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply); int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len); @@ -178,15 +184,15 @@ typedef struct SSyncLogStore { } SSyncLogStore; typedef struct SSyncInfo { - bool isStandBy; - bool snapshotEnable; - SyncGroupId vgId; - int32_t batchSize; - SSyncCfg syncCfg; - char path[TSDB_FILENAME_LEN]; - SWal* pWal; - SSyncFSM* pFsm; - SMsgCb* msgcb; + bool isStandBy; + ESyncStrategy snapshotStrategy; + SyncGroupId vgId; + int32_t batchSize; + SSyncCfg syncCfg; + char path[TSDB_FILENAME_LEN]; + SWal* pWal; + SSyncFSM* pFsm; + SMsgCb* msgcb; int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg); int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg); } SSyncInfo; @@ -205,7 +211,7 @@ SyncGroupId syncGetVgId(int64_t rid); void syncGetEpSet(int64_t rid, SEpSet* pEpSet); void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet); int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak); -// int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize); +int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize); bool syncEnvIsStart(); const char* syncStr(ESyncState state); bool syncIsRestoreFinish(int64_t rid); diff --git a/include/libs/sync/syncTools.h b/include/libs/sync/syncTools.h index 745c63d5b3..7e95623740 100644 --- a/include/libs/sync/syncTools.h +++ b/include/libs/sync/syncTools.h @@ -191,12 +191,12 @@ void syncTimeoutLog2(char* s, const SyncTimeout* pMsg); typedef struct SyncClientRequest { uint32_t bytes; int32_t vgId; - uint32_t msgType; // SyncClientRequest msgType - uint32_t originalRpcType; // user RpcMsg msgType + uint32_t msgType; // TDMT_SYNC_CLIENT_REQUEST + uint32_t originalRpcType; // origin RpcMsg msgType uint64_t seqNum; bool isWeak; - uint32_t dataLen; // user RpcMsg.contLen - char data[]; // user RpcMsg.pCont + uint32_t dataLen; // origin RpcMsg.contLen + char data[]; // origin RpcMsg.pCont } SyncClientRequest; SyncClientRequest* syncClientRequestBuild(uint32_t dataLen); @@ -220,11 +220,6 @@ void syncClientRequestLog(const SyncClientRequest* pMsg); void syncClientRequestLog2(char* s, const SyncClientRequest* pMsg); // --------------------------------------------- -typedef struct SOffsetAndContLen { - int32_t offset; - int32_t contLen; -} SOffsetAndContLen; - typedef struct SRaftMeta { uint64_t seqNum; bool isWeak; @@ -232,20 +227,33 @@ typedef struct SRaftMeta { // block1: // block2: SRaftMeta array -// block3: rpc msg array (with pCont) +// block3: rpc msg array (with pCont pointer) typedef struct SyncClientRequestBatch { uint32_t bytes; int32_t vgId; - uint32_t msgType; // SyncClientRequestBatch msgType + uint32_t msgType; // TDMT_SYNC_CLIENT_REQUEST_BATCH uint32_t dataCount; - uint32_t dataLen; // user RpcMsg.contLen - char data[]; // user RpcMsg.pCont + uint32_t dataLen; + char data[]; // block2, block3 } SyncClientRequestBatch; SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize, int32_t vgId); void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* 
pSyncMsg, SRpcMsg* pRpcMsg); +void syncClientRequestBatchDestroy(SyncClientRequestBatch* pMsg); +void syncClientRequestBatchDestroyDeep(SyncClientRequestBatch* pMsg); +SRaftMeta* syncClientRequestBatchMetaArr(const SyncClientRequestBatch* pSyncMsg); +SRpcMsg* syncClientRequestBatchRpcMsgArr(const SyncClientRequestBatch* pSyncMsg); +SyncClientRequestBatch* syncClientRequestBatchFromRpcMsg(const SRpcMsg* pRpcMsg); +cJSON* syncClientRequestBatch2Json(const SyncClientRequestBatch* pMsg); +char* syncClientRequestBatch2Str(const SyncClientRequestBatch* pMsg); + +// for debug ---------------------- +void syncClientRequestBatchPrint(const SyncClientRequestBatch* pMsg); +void syncClientRequestBatchPrint2(char* s, const SyncClientRequestBatch* pMsg); +void syncClientRequestBatchLog(const SyncClientRequestBatch* pMsg); +void syncClientRequestBatchLog2(char* s, const SyncClientRequestBatch* pMsg); // --------------------------------------------- typedef struct SyncClientRequestReply { @@ -318,12 +326,15 @@ void syncRequestVoteReplyLog(const SyncRequestVoteReply* pMsg); void syncRequestVoteReplyLog2(char* s, const SyncRequestVoteReply* pMsg); // --------------------------------------------- +// data: entry + typedef struct SyncAppendEntries { uint32_t bytes; int32_t vgId; uint32_t msgType; SRaftId srcId; SRaftId destId; + // private data SyncTerm term; SyncIndex prevLogIndex; @@ -354,18 +365,14 @@ void syncAppendEntriesLog2(char* s, const SyncAppendEntries* pMsg); // --------------------------------------------- -// define ahead -/* typedef struct SOffsetAndContLen { int32_t offset; int32_t contLen; } SOffsetAndContLen; -*/ -// block1: SOffsetAndContLen -// block2: SOffsetAndContLen Array -// block3: SRpcMsg Array -// block4: SRpcMsg pCont Array +// data: +// block1: SOffsetAndContLen Array +// block2: entry Array typedef struct SyncAppendEntriesBatch { uint32_t bytes; @@ -382,10 +389,11 @@ typedef struct SyncAppendEntriesBatch { SyncTerm privateTerm; int32_t dataCount; uint32_t dataLen; - char data[]; + char data[]; // block1, block2 } SyncAppendEntriesBatch; -SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t arrSize, int32_t vgId); +SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SSyncRaftEntry** entryPArr, int32_t arrSize, int32_t vgId); +SOffsetAndContLen* syncAppendEntriesBatchMetaTableArray(SyncAppendEntriesBatch* pMsg); void syncAppendEntriesBatchDestroy(SyncAppendEntriesBatch* pMsg); void syncAppendEntriesBatchSerialize(const SyncAppendEntriesBatch* pMsg, char* buf, uint32_t bufLen); void syncAppendEntriesBatchDeserialize(const char* buf, uint32_t len, SyncAppendEntriesBatch* pMsg); @@ -396,8 +404,6 @@ void syncAppendEntriesBatchFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntriesBatch* syncAppendEntriesBatchFromRpcMsg2(const SRpcMsg* pRpcMsg); cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg); char* syncAppendEntriesBatch2Str(const SyncAppendEntriesBatch* pMsg); -void syncAppendEntriesBatch2RpcMsgArray(SyncAppendEntriesBatch* pSyncMsg, SRpcMsg* rpcMsgArr, int32_t maxArrSize, - int32_t* pRetArrSize); // for debug ---------------------- void syncAppendEntriesBatchPrint(const SyncAppendEntriesBatch* pMsg); @@ -477,9 +483,10 @@ typedef struct SyncSnapshotSend { SRaftId destId; SyncTerm term; - SyncIndex lastIndex; // lastIndex of snapshot - SyncTerm lastTerm; // lastTerm of snapshot - SyncIndex lastConfigIndex; + SyncIndex beginIndex; // snapshot.beginIndex + SyncIndex lastIndex; // snapshot.lastIndex + SyncTerm lastTerm; // snapshot.lastTerm 
+ SyncIndex lastConfigIndex; // snapshot.lastConfigIndex SSyncCfg lastConfig; SyncTerm privateTerm; int32_t seq; @@ -617,6 +624,9 @@ int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteRepl int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMsg); int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); +int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatch* pMsg); +int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); + int32_t syncNodeOnSnapshotSendCb(SSyncNode* ths, SyncSnapshotSend* pMsg); int32_t syncNodeOnSnapshotRspCb(SSyncNode* ths, SyncSnapshotRsp* pMsg); @@ -633,7 +643,8 @@ typedef int32_t (*FpOnSnapshotSendCb)(SSyncNode* ths, SyncSnapshotSend* pMsg); typedef int32_t (*FpOnSnapshotRspCb)(SSyncNode* ths, SyncSnapshotRsp* pMsg); // option ---------------------------------- -bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode); // --------------------------------------------- diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index 8471aa8286..48550b890a 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -110,12 +110,15 @@ typedef struct { } SRpcCtx; int32_t rpcInit(); -void rpcCleanup(); -void * rpcOpen(const SRpcInit *pRpc); -void rpcClose(void *); -void * rpcMallocCont(int32_t contLen); -void rpcFreeCont(void *pCont); -void * rpcReallocCont(void *ptr, int32_t contLen); + +void rpcCleanup(); +void *rpcOpen(const SRpcInit *pRpc); + +void rpcClose(void *); +void rpcCloseImpl(void *); +void *rpcMallocCont(int32_t contLen); +void rpcFreeCont(void *pCont); +void *rpcReallocCont(void *ptr, int32_t contLen); // Because taosd supports multi-process mode // These functions should not be used on the server side diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 7130c48348..baa49650fa 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -173,11 +173,12 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_DNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0341) #define TSDB_CODE_MND_TOO_MANY_DNODES TAOS_DEF_ERROR_CODE(0, 0x0342) #define TSDB_CODE_MND_NO_ENOUGH_DNODES TAOS_DEF_ERROR_CODE(0, 0x0343) -#define TSDB_CODE_MND_INVALID_CLUSTER_CFG TAOS_DEF_ERROR_CODE(0, 0x0344) -#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x0345) -#define TSDB_CODE_MND_INVALID_DNODE_CFG TAOS_DEF_ERROR_CODE(0, 0x0346) -#define TSDB_CODE_MND_INVALID_DNODE_EP TAOS_DEF_ERROR_CODE(0, 0x0347) -#define TSDB_CODE_MND_INVALID_DNODE_ID TAOS_DEF_ERROR_CODE(0, 0x0348) +#define TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0344) +#define TSDB_CODE_MND_INVALID_CLUSTER_CFG TAOS_DEF_ERROR_CODE(0, 0x0345) +#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x0346) +#define TSDB_CODE_MND_INVALID_DNODE_CFG TAOS_DEF_ERROR_CODE(0, 0x0347) +#define TSDB_CODE_MND_INVALID_DNODE_EP TAOS_DEF_ERROR_CODE(0, 0x0348) +#define TSDB_CODE_MND_INVALID_DNODE_ID TAOS_DEF_ERROR_CODE(0, 0x0349) // mnode-node #define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0350) @@ -428,6 +429,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SYN_PROPOSE_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0911) #define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912) #define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913) +#define TSDB_CODE_SYN_TIMEOUT 
TAOS_DEF_ERROR_CODE(0, 0x0914) #define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF) // tq diff --git a/include/util/tdef.h b/include/util/tdef.h index 7c7413a421..84bc30b9e7 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -334,6 +334,9 @@ typedef enum ELogicConditionType { #define TSDB_MIN_DB_CACHE_LAST_ROW 0 #define TSDB_MAX_DB_CACHE_LAST_ROW 3 #define TSDB_DEFAULT_CACHE_LAST_ROW 0 +#define TSDB_MIN_DB_LAST_ROW_MEM 1 // MB +#define TSDB_MAX_DB_LAST_ROW_MEM 65536 +#define TSDB_DEFAULT_LAST_ROW_MEM 1 #define TSDB_DB_STREAM_MODE_OFF 0 #define TSDB_DB_STREAM_MODE_ON 1 #define TSDB_DEFAULT_DB_STREAM_MODE 0 diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 53292ed46a..b930f57d92 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -65,7 +65,7 @@ enum { typedef struct SAppInstInfo SAppInstInfo; typedef struct { - char* key; + char* key; // statistics int32_t reportCnt; int32_t connKeyCnt; @@ -177,14 +177,14 @@ typedef struct SReqResultInfo { } SReqResultInfo; typedef struct SRequestSendRecvBody { - tsem_t rspSem; // not used now - __taos_async_fn_t queryFp; - __taos_async_fn_t fetchFp; - void* param; - SDataBuf requestMsg; - int64_t queryJob; // query job, created according to sql query DAG. - int32_t subplanNum; - SReqResultInfo resInfo; + tsem_t rspSem; // not used now + __taos_async_fn_t queryFp; + __taos_async_fn_t fetchFp; + void* param; + SDataBuf requestMsg; + int64_t queryJob; // query job, created according to sql query DAG. + int32_t subplanNum; + SReqResultInfo resInfo; } SRequestSendRecvBody; typedef struct { @@ -284,6 +284,7 @@ static FORCE_INLINE SReqResultInfo* tscGetCurResInfo(TAOS_RES* res) { extern SAppInfo appInfo; extern int32_t clientReqRefPool; extern int32_t clientConnRefPool; +extern void* tscQhandle; __async_send_cb_fn_t getMsgRspHandle(int32_t msgType); @@ -301,7 +302,7 @@ void destroyRequest(SRequestObj* pRequest); SRequestObj* acquireRequest(int64_t rid); int32_t releaseRequest(int64_t rid); int32_t removeRequest(int64_t rid); -void doDestroyRequest(void *p); +void doDestroyRequest(void* p); char* getDbOfConnection(STscObj* pObj); void setConnectionDB(STscObj* pTscObj, const char* db); @@ -336,8 +337,9 @@ int hbHandleRsp(SClientHbBatchRsp* hbRsp); // cluster level SAppHbMgr* appHbMgrInit(SAppInstInfo* pAppInstInfo, char* key); void appHbMgrCleanup(void); -void hbRemoveAppHbMrg(SAppHbMgr **pAppHbMgr); -void closeAllRequests(SHashObj *pRequests); +void hbRemoveAppHbMrg(SAppHbMgr** pAppHbMgr); +void destroyAllRequests(SHashObj* pRequests); +void stopAllRequests(SHashObj* pRequests); // conn level int hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType); @@ -356,6 +358,9 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clie int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx bool qnodeRequired(SRequestObj* pRequest); +void initTscQhandle(); +void cleanupTscQhandle(); + #ifdef __cplusplus } #endif diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index a234311569..797d58e6ef 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -25,6 +25,7 @@ #include "tmsg.h" #include "tref.h" #include "trpc.h" +#include "tsched.h" #include "ttime.h" #define TSC_VAR_NOT_RELEASE 1 @@ -34,9 +35,20 @@ SAppInfo appInfo; int32_t clientReqRefPool = -1; int32_t clientConnRefPool = -1; +void *tscQhandle = NULL; + static TdThreadOnce tscinit = PTHREAD_ONCE_INIT; 
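clientInt.h above now exposes a client-level task queue (tscQhandle) together with initTscQhandle/cleanupTscQhandle, and later in this patch processMsgFromServer wraps each RPC response in an SSchedMsg and hands it to that queue, so response callbacks run on scheduler workers rather than on the transport thread. Below is a compact usage sketch of the tsched.h API as it is exercised by this patch; the queue size, thread count, label, and task body are illustrative.

```c
#include "tsched.h"

/* Illustrative task body; scheduler workers invoke it with the queued message. */
static void demoTask(SSchedMsg *pSchedMsg) {
  void *ctx = pSchedMsg->ahandle;  /* context attached by the producer */
  (void)ctx;
}

static void demoUseScheduler(void) {
  /* queue capacity, worker threads, and label are illustrative values */
  void *qhandle = taosInitScheduler(4096, 5, "demo");

  SSchedMsg schedMsg = {0};
  schedMsg.fp = demoTask;
  schedMsg.ahandle = NULL;  /* producer-owned context, freed by the task if needed */
  taosScheduleTask(qhandle, &schedMsg);

  taosCleanUpScheduler(qhandle);  /* tear down when the queue owner shuts down */
}
```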
volatile int32_t tscInitRes = 0; +void initTscQhandle() { + // init handle + tscQhandle = taosInitScheduler(4096, 5, "tsc"); +} + +void cleanupTscQhandle() { + // destroy handle + taosCleanUpScheduler(tscQhandle); +} static int32_t registerRequest(SRequestObj *pRequest) { STscObj *pTscObj = acquireTscObj(pRequest->pTscObj->id); if (NULL == pTscObj) { @@ -121,12 +133,31 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) { return pDnodeConn; } -void closeAllRequests(SHashObj *pRequests) { +void destroyAllRequests(SHashObj *pRequests) { void *pIter = taosHashIterate(pRequests, NULL); while (pIter != NULL) { int64_t *rid = pIter; - removeRequest(*rid); + SRequestObj *pRequest = acquireRequest(*rid); + if (pRequest) { + destroyRequest(pRequest); + releaseRequest(*rid); + } + + pIter = taosHashIterate(pRequests, pIter); + } +} + +void stopAllRequests(SHashObj *pRequests) { + void *pIter = taosHashIterate(pRequests, NULL); + while (pIter != NULL) { + int64_t *rid = pIter; + + SRequestObj *pRequest = acquireRequest(*rid); + if (pRequest) { + taos_stop_query(pRequest); + releaseRequest(*rid); + } pIter = taosHashIterate(pRequests, pIter); } @@ -153,12 +184,18 @@ void destroyAppInst(SAppInstInfo *pAppInfo) { } void destroyTscObj(void *pObj) { + if (NULL == pObj) { + return; + } + STscObj *pTscObj = pObj; + int64_t tscId = pTscObj->id; + tscTrace("begin to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj); SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType}; hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey); int64_t connNum = atomic_sub_fetch_64(&pTscObj->pAppInfo->numOfConns, 1); - closeAllRequests(pTscObj->pRequests); + destroyAllRequests(pTscObj->pRequests); schedulerStopQueryHb(pTscObj->pAppInfo->pTransporter); tscDebug("connObj 0x%" PRIx64 " p:%p destroyed, remain inst totalConn:%" PRId64, pTscObj->id, pTscObj, pTscObj->pAppInfo->numOfConns); @@ -167,7 +204,9 @@ void destroyTscObj(void *pObj) { destroyAppInst(pTscObj->pAppInfo); } taosThreadMutexDestroy(&pTscObj->mutex); - taosMemoryFreeClear(pTscObj); + taosMemoryFree(pTscObj); + + tscTrace("end to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj); } void *createTscObj(const char *user, const char *auth, const char *db, int32_t connType, SAppInstInfo *pAppInfo) { @@ -261,14 +300,18 @@ int32_t releaseRequest(int64_t rid) { return taosReleaseRef(clientReqRefPool, ri int32_t removeRequest(int64_t rid) { return taosRemoveRef(clientReqRefPool, rid); } void doDestroyRequest(void *p) { - assert(p != NULL); + if (NULL == p) { + return; + } + SRequestObj *pRequest = (SRequestObj *)p; + int64_t reqId = pRequest->self; + tscTrace("begin to destroy request %" PRIx64 " p:%p", reqId, pRequest); + taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self)); - if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob, 0); - } + schedulerFreeJob(&pRequest->body.queryJob, 0); taosMemoryFreeClear(pRequest->msgBuf); taosMemoryFreeClear(pRequest->sqlstr); @@ -284,7 +327,9 @@ void doDestroyRequest(void *p) { if (pRequest->self) { deregisterRequest(pRequest); } - taosMemoryFreeClear(pRequest); + taosMemoryFree(pRequest); + + tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest); } void destroyRequest(SRequestObj *pRequest) { @@ -292,6 +337,8 @@ void destroyRequest(SRequestObj *pRequest) { return; } + taos_stop_query(pRequest); + removeRequest(pRequest->self); } @@ -299,7 +346,7 @@ void taos_init_imp(void) { // In the APIs of other program 
language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. atexit(taos_cleanup); - + initTscQhandle(); errno = TSDB_CODE_SUCCESS; taosSeedRand(taosGetTimestampSec()); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 7d42bbdad9..0fe4274091 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -25,6 +25,7 @@ #include "tmsgtype.h" #include "tpagedbuf.h" #include "tref.h" +#include "tsched.h" static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet); static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest); @@ -56,14 +57,14 @@ static char* getClusterKey(const char* user, const char* auth, const char* ip, i } bool chkRequestKilled(void* param) { - bool killed = false; + bool killed = false; SRequestObj* pRequest = acquireRequest((int64_t)param); if (NULL == pRequest || pRequest->killed) { killed = true; } releaseRequest((int64_t)param); - + return killed; } @@ -278,7 +279,6 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { } pRequest->body.queryFp(pRequest->body.param, pRequest, code); - // pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows); } int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) { @@ -645,9 +645,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList pRequest->body.resInfo.execRes = res.res; if (code != TSDB_CODE_SUCCESS) { - if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob, 0); - } + schedulerFreeJob(&pRequest->body.queryJob, 0); pRequest->code = code; terrno = code; @@ -658,9 +656,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList TDMT_VND_CREATE_TABLE == pRequest->type) { pRequest->body.resInfo.numOfRows = res.numOfRows; - if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob, 0); - } + schedulerFreeJob(&pRequest->body.queryJob, 0); } pRequest->code = res.code; @@ -769,7 +765,7 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) { code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset); break; } - case TDMT_SCH_QUERY: + case TDMT_SCH_QUERY: case TDMT_SCH_MERGE_QUERY: { code = handleQueryExecRes(pRequest, pRes->res, pCatalog, &epset); break; @@ -792,10 +788,7 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) { TDMT_VND_CREATE_TABLE == pRequest->type) { pRequest->body.resInfo.numOfRows = pResult->numOfRows; - if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob, 0); - pRequest->body.queryJob = 0; - } + schedulerFreeJob(&pRequest->body.queryJob, 0); } taosMemoryFree(pResult); @@ -1239,7 +1232,16 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, } } -void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { +typedef struct SchedArg { + SRpcMsg msg; + SEpSet* pEpset; +} SchedArg; + +void doProcessMsgFromServer(SSchedMsg* schedMsg) { + SchedArg* arg = (SchedArg*)schedMsg->ahandle; + SRpcMsg* pMsg = &arg->msg; + SEpSet* pEpSet = arg->pEpset; + SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; assert(pMsg->info.ahandle != NULL); STscObj* pTscObj = NULL; @@ -1272,7 +1274,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); - SDataBuf buf = {.msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = 
pMsg->info.handle, .pEpSet = pEpSet}; + SDataBuf buf = { + .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = pEpSet}; if (pMsg->contLen > 0) { buf.pData = taosMemoryCalloc(1, pMsg->contLen); @@ -1287,6 +1290,25 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { pSendInfo->fp(pSendInfo->param, &buf, pMsg->code); rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); + + taosMemoryFree(arg); +} + +void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { + SSchedMsg schedMsg = {0}; + + SEpSet* tEpSet = pEpSet != NULL ? taosMemoryCalloc(1, sizeof(SEpSet)) : NULL; + if (tEpSet != NULL) { + *tEpSet = *pEpSet; + } + + SchedArg* arg = taosMemoryCalloc(1, sizeof(SchedArg)); + arg->msg = *pMsg; + arg->pEpset = tEpSet; + + schedMsg.fp = doProcessMsgFromServer; + schedMsg.ahandle = arg; + taosScheduleTask(tscQhandle, &schedMsg); } TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) { @@ -1415,7 +1437,7 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU pParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); tsem_init(&pParam->sem, 0, 0); } - + // convert ucs4 to native multi-bytes string pResultInfo->convertUcs4 = convertUcs4; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index d8a9ce581a..a30d60a589 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -47,11 +47,9 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) { atomic_store_32(&lock, 0); return ret; } - // this function may be called by user or system, or by both simultaneously. void taos_cleanup(void) { - tscInfo("start to cleanup client environment"); - + tscInfo("start to cleanup client environment"); if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) { return; } @@ -74,8 +72,8 @@ void taos_cleanup(void) { catalogDestroy(); schedulerDestroy(); + cleanupTscQhandle(); rpcCleanup(); - tscInfo("all local resources released"); taosCleanupCfg(); taosCloseLog(); @@ -108,7 +106,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha if (pObj) { int64_t *rid = taosMemoryCalloc(1, sizeof(int64_t)); *rid = pObj->id; - return (TAOS*)rid; + return (TAOS *)rid; } return NULL; @@ -196,10 +194,10 @@ void taos_kill_query(TAOS *taos) { if (NULL == taos) { return; } - int64_t rid = *(int64_t*)taos; - - STscObj* pTscObj = acquireTscObj(rid); - closeAllRequests(pTscObj->pRequests); + + int64_t rid = *(int64_t *)taos; + STscObj *pTscObj = acquireTscObj(rid); + stopAllRequests(pTscObj->pRequests); releaseTscObj(rid); } @@ -244,7 +242,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { #endif } else if (TD_RES_TMQ(res)) { - SMqRspObj *msg = ((SMqRspObj *)res); + SMqRspObj * msg = ((SMqRspObj *)res); SReqResultInfo *pResultInfo; if (msg->resIter == -1) { pResultInfo = tmqGetNextResInfo(res, true); @@ -420,7 +418,7 @@ int taos_affected_rows(TAOS_RES *res) { return 0; } - SRequestObj *pRequest = (SRequestObj *)res; + SRequestObj * pRequest = (SRequestObj *)res; SReqResultInfo *pResInfo = &pRequest->body.resInfo; return pResInfo->numOfRows; } @@ -480,9 +478,7 @@ void taos_stop_query(TAOS_RES *res) { return; } - if (pRequest->body.queryJob) { - schedulerFreeJob(pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED); - } + schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED); tscDebug("request %" PRIx64 " killed", 
pRequest->requestId); } @@ -606,7 +602,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) { } SReqResultInfo *pResInfo = tscGetCurResInfo(res); - TAOS_FIELD *pField = &pResInfo->userFields[columnIndex]; + TAOS_FIELD * pField = &pResInfo->userFields[columnIndex]; if (!IS_VAR_DATA_TYPE(pField->type)) { return 0; } @@ -650,8 +646,8 @@ const char *taos_get_server_info(TAOS *taos) { typedef struct SqlParseWrapper { SParseContext *pCtx; SCatalogReq catalogReq; - SRequestObj *pRequest; - SQuery *pQuery; + SRequestObj * pRequest; + SQuery * pQuery; } SqlParseWrapper; static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { @@ -669,11 +665,9 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { } void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { - tscDebug("enter meta callback, code %s", tstrerror(code)); - SqlParseWrapper *pWrapper = (SqlParseWrapper *)param; - SQuery *pQuery = pWrapper->pQuery; - SRequestObj *pRequest = pWrapper->pRequest; + SQuery * pQuery = pWrapper->pQuery; + SRequestObj * pRequest = pWrapper->pRequest; if (code == TSDB_CODE_SUCCESS) { code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery); @@ -690,10 +684,11 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { TSWAP(pRequest->tableList, (pQuery)->pTableList); destorySqlParseWrapper(pWrapper); + + tscDebug("0x%"PRIx64" analysis semantics completed, start async query, reqId:0x%"PRIx64, pRequest->self, pRequest->requestId); launchAsyncQuery(pRequest, pQuery, pResultMeta); } else { destorySqlParseWrapper(pWrapper); - tscDebug("error happens, code:%d", code); if (NEED_CLIENT_HANDLE_ERROR(code)) { tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId); @@ -722,31 +717,29 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) { return TSDB_CODE_OUT_OF_MEMORY; } - **pCxt = (SParseContext){ - .requestId = pRequest->requestId, - .requestRid = pRequest->self, - .acctId = pTscObj->acctId, - .db = pRequest->pDb, - .topicQuery = false, - .pSql = pRequest->sqlstr, - .sqlLen = pRequest->sqlLen, - .pMsg = pRequest->msgBuf, - .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, - .pTransporter = pTscObj->pAppInfo->pTransporter, - .pStmtCb = NULL, - .pUser = pTscObj->user, - .schemalessType = pTscObj->schemalessType, - .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), - .async = true, - .svrVer = pTscObj->sVer, - .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes) - }; + **pCxt = (SParseContext){.requestId = pRequest->requestId, + .requestRid = pRequest->self, + .acctId = pTscObj->acctId, + .db = pRequest->pDb, + .topicQuery = false, + .pSql = pRequest->sqlstr, + .sqlLen = pRequest->sqlLen, + .pMsg = pRequest->msgBuf, + .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, + .pTransporter = pTscObj->pAppInfo->pTransporter, + .pStmtCb = NULL, + .pUser = pTscObj->user, + .schemalessType = pTscObj->schemalessType, + .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), + .async = true, + .svrVer = pTscObj->sVer, + .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)}; return TSDB_CODE_SUCCESS; } void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { SParseContext *pCxt = NULL; - STscObj *pTscObj = pRequest->pTscObj; + STscObj * pTscObj = pRequest->pTscObj; int32_t code = 0; if (pRequest->retry++ > 
REQUEST_TOTAL_EXEC_TIMES) { @@ -911,10 +904,10 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { return terrno; } - int64_t rid = *(int64_t*)taos; + int64_t rid = *(int64_t *)taos; const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list int32_t code = 0; - SRequestObj *pRequest = NULL; + SRequestObj * pRequest = NULL; SCatalogReq catalogReq = {0}; if (NULL == tableNameList) { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index b72fd961f5..bca740e9ce 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -895,7 +895,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { SBlockOrderInfo* pOrder = taosArrayGet(pOrderInfo, 0); int64_t p0 = taosGetTimestampUs(); - + __compar_fn_t fn = getKeyComparFunc(pColInfoData->info.type, pOrder->order); taosSort(pColInfoData->pData, pDataBlock->info.rows, pColInfoData->info.bytes, fn); @@ -923,8 +923,9 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { pInfo->pColData = taosArrayGet(pDataBlock->pDataBlock, pInfo->slotId); } + terrno = 0; taosqsort(index, rows, sizeof(int32_t), &helper, dataBlockCompar); - if(terrno) return terrno; + if (terrno) return terrno; int64_t p1 = taosGetTimestampUs(); @@ -1438,21 +1439,21 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { } } -static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end){ +static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end) { int32_t dataOffset = -1; int32_t dataLen = 0; int32_t beigin = start; - while(beigin < end){ + while (beigin < end) { int32_t offset = pColInfoData->varmeta.offset[beigin]; - if(offset == -1) { + if (offset == -1) { beigin++; continue; } - if(start != 0) { + if (start != 0) { pColInfoData->varmeta.offset[beigin] = dataLen; } - char *data = pColInfoData->pData + offset; - if(dataOffset == -1) dataOffset = offset; // mark the begin of data + char* data = pColInfoData->pData + offset; + if (dataOffset == -1) dataOffset = offset; // mark the begin of data int32_t type = pColInfoData->info.type; if (type == TSDB_DATA_TYPE_JSON) { dataLen += getJsonValueLen(data); @@ -1461,7 +1462,7 @@ static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, s } beigin++; } - if(dataOffset > 0){ + if (dataOffset > 0) { memmove(pColInfoData->pData, pColInfoData->pData + dataOffset, dataLen); memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[start], (end - start) * sizeof(int32_t)); } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index aaf1e5414c..8157ba4d92 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -947,8 +947,10 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { if (tEncodeI64(&encoder, pReq->clusterId) < 0) return -1; if (tEncodeI64(&encoder, pReq->rebootTime) < 0) return -1; if (tEncodeI64(&encoder, pReq->updateTime) < 0) return -1; - if (tEncodeI32(&encoder, pReq->numOfCores) < 0) return -1; + if (tEncodeFloat(&encoder, pReq->numOfCores) < 0) return -1; if (tEncodeI32(&encoder, pReq->numOfSupportVnodes) < 0) return -1; + if (tEncodeI64(&encoder, pReq->memTotal) < 0) return -1; + if (tEncodeI64(&encoder, pReq->memAvail) < 0) return -1; if (tEncodeCStr(&encoder, pReq->dnodeEp) < 0) return -1; // cluster cfg @@ -1008,8 +1010,10 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { if (tDecodeI64(&decoder, &pReq->clusterId) < 0) return -1; 
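The SStatusReq hunks above change both directions of the wire format at once: numOfCores moves from tEncodeI32/tDecodeI32 to tEncodeFloat/tDecodeFloat, and memTotal/memAvail are written by the serializer and read back at the same position by the deserializer that continues below. As a hedged reminder of that lockstep pattern (the struct and field here are placeholders, and the sketch assumes the same SEncoder/SDecoder helpers tmsg.c already uses):

```c
/* Placeholder sketch: a field added to a message must be encoded and decoded
 * at the same position, or every later field is read shifted. */
typedef struct { int64_t exampleField; } SExampleReq;

static int32_t encodeExample(SEncoder *pEncoder, const SExampleReq *pReq) {
  if (tEncodeI64(pEncoder, pReq->exampleField) < 0) return -1;
  return 0;
}

static int32_t decodeExample(SDecoder *pDecoder, SExampleReq *pReq) {
  if (tDecodeI64(pDecoder, &pReq->exampleField) < 0) return -1;
  return 0;
}
```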
if (tDecodeI64(&decoder, &pReq->rebootTime) < 0) return -1; if (tDecodeI64(&decoder, &pReq->updateTime) < 0) return -1; - if (tDecodeI32(&decoder, &pReq->numOfCores) < 0) return -1; + if (tDecodeFloat(&decoder, &pReq->numOfCores) < 0) return -1; if (tDecodeI32(&decoder, &pReq->numOfSupportVnodes) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->memTotal) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->memAvail) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->dnodeEp) < 0) return -1; // cluster cfg @@ -1974,6 +1978,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) { if (tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if (tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -2015,6 +2020,7 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; @@ -2069,6 +2075,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) { if (tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if (tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -2094,6 +2101,7 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) { if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; @@ -2679,10 +2687,12 @@ int32_t tDeserializeSDbCfgRsp(void *buf, int32_t bufLen, SDbCfgRsp *pRsp) { if (tDecodeI8(&decoder, &pRsp->strict) < 0) return -1; if (tDecodeI8(&decoder, &pRsp->cacheLastRow) < 0) return -1; if (tDecodeI32(&decoder, &pRsp->numOfRetensions) < 0) return -1; - pRsp->pRetensions = taosArrayInit(pRsp->numOfRetensions, sizeof(SRetention)); - if (pRsp->pRetensions == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + if (pRsp->numOfRetensions > 0) { + pRsp->pRetensions = taosArrayInit(pRsp->numOfRetensions, sizeof(SRetention)); + if (pRsp->pRetensions == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } } for (int32_t i = 0; i < pRsp->numOfRetensions; ++i) { @@ -3586,6 +3596,7 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR if (tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if 
(tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -3643,6 +3654,7 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; @@ -3665,7 +3677,6 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * SReplica *pReplica = &pReq->replicas[i]; if (tDecodeSReplica(&decoder, pReplica) < 0) return -1; } - if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1; pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention)); if (pReq->pRetensions == NULL) { @@ -3768,6 +3779,7 @@ int32_t tSerializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pReq if (tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if (tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -3782,7 +3794,6 @@ int32_t tSerializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pReq SReplica *pReplica = &pReq->replicas[i]; if (tEncodeSReplica(&encoder, pReplica) < 0) return -1; } - tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -3799,6 +3810,7 @@ int32_t tDeserializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pR if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 234a133243..59b442881a 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -67,6 +67,8 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { req.updateTime = pMgmt->pData->updateTime; req.numOfCores = tsNumOfCores; req.numOfSupportVnodes = tsNumOfSupportVnodes; + req.memTotal = tsTotalMemoryKB * 1024; + req.memAvail = req.memTotal - tsRpcQueueMemoryAllowed - 16 * 1024 * 1024; tstrncpy(req.dnodeEp, tsLocalEp, TSDB_EP_LEN); req.clusterCfg.statusInterval = tsStatusInterval; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 613f3fb994..90f852eed1 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -32,8 +32,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { if (pVnode && num < size) { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); // dTrace("vgId:%d, acquire vnode, refCount:%d", pVnode->vgId, refCount); - pVnodes[num] 
= (*ppVnode); - num++; + pVnodes[num++] = (*ppVnode); pIter = taosHashIterate(pMgmt->hash, pIter); } else { taosHashCancelIterate(pMgmt->hash, pIter); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 681440dec4..dc1bcbd258 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -88,7 +88,7 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10); - dTrace("vgId:%d, vnode-fetch queue is empty", pVnode->vgId); + dTrace("vgId:%d, vnode queue is empty", pVnode->vgId); vmFreeQueue(pMgmt, pVnode); vnodeClose(pVnode->pImpl); @@ -140,7 +140,7 @@ static void *vmOpenVnodeInThread(void *param) { } static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { - pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (pMgmt->hash == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; dError("failed to init vnode hash since %s", terrstr()); @@ -156,7 +156,8 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { pMgmt->state.totalVnodes = numOfVnodes; - int32_t threadNum = 1; + int32_t threadNum = tsNumOfCores / 2; + if (threadNum < 1) threadNum = 1; int32_t vnodesPerThread = numOfVnodes / threadNum + 1; SVnodeThread *threads = taosMemoryCalloc(threadNum, sizeof(SVnodeThread)); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index ecd02ae8dc..beefc502e7 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -114,28 +114,6 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf } } -static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { - SVnodeObj *pVnode = pInfo->ahandle; - SRpcMsg *pMsg = NULL; - - for (int32_t i = 0; i < numOfMsgs; ++i) { - if (taosGetQitem(qall, (void **)&pMsg) == 0) continue; - const STraceId *trace = &pMsg->info.traceId; - dGTrace("vgId:%d, msg:%p get from vnode-merge queue", pVnode->vgId, pMsg); - - int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); - if (code != 0) { - if (terrno != 0) code = terrno; - dGError("vgId:%d, msg:%p failed to merge since %s", pVnode->vgId, pMsg, terrstr()); - vmSendRsp(pMsg, code); - } - - dGTrace("msg:%p, is freed, code:0x%x", pMsg, code); - rpcFreeCont(pMsg->pCont); - taosFreeQitem(pMsg); - } -} - static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) { const STraceId *trace = &pMsg->info.traceId; SMsgHead *pHead = pMsg->pCont; @@ -207,7 +185,11 @@ int32_t vmPutMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) { SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM); - if (pMsg == NULL) return -1; + if (pMsg == NULL) { + rpcFreeCont(pMsg->pCont); + pRpc->pCont = NULL; + return -1; + } SMsgHead *pHead = pRpc->pCont; dTrace("vgId:%d, msg:%p is created, type:%s", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType)); @@ -215,7 +197,16 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) { pHead->contLen = htonl(pHead->contLen); pHead->vgId = htonl(pHead->vgId); memcpy(pMsg, pRpc, sizeof(SRpcMsg)); - return 
vmPutMsgToQueue(pMgmt, pMsg, qtype); + + int32_t code = vmPutMsgToQueue(pMgmt, pMsg, qtype); + if (code != 0) { + dTrace("msg:%p, is freed", pMsg); + taosFreeQitem(pMsg); + rpcFreeCont(pMsg->pCont); + pRpc->pCont = NULL; + } + + return code; } int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) { diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index ee27f27f06..d70ed09920 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -212,6 +212,7 @@ void dmCleanupDnode(SDnode *pDnode) { dmCleanupClient(pDnode); dmCleanupServer(pDnode); dmClearVars(pDnode); + rpcCleanup(); dDebug("dnode is closed, ptr:%p", pDnode); } diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index df3c9c4e88..7043991525 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -71,9 +71,9 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg) { } static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { - SDnodeTrans * pTrans = &pDnode->trans; + SDnodeTrans *pTrans = &pDnode->trans; int32_t code = -1; - SRpcMsg * pMsg = NULL; + SRpcMsg *pMsg = NULL; SMgmtWrapper *pWrapper = NULL; SDnodeHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(pRpc->msgType)]; @@ -185,6 +185,7 @@ _OVER: taosFreeQitem(pMsg); } rpcFreeCont(pRpc->pCont); + pRpc->pCont = NULL; } dmReleaseWrapper(pWrapper); @@ -195,11 +196,11 @@ int32_t dmInitMsgHandle(SDnode *pDnode) { for (EDndNodeType ntype = DNODE; ntype < NODE_END; ++ntype) { SMgmtWrapper *pWrapper = &pDnode->wrappers[ntype]; - SArray * pArray = (*pWrapper->func.getHandlesFp)(); + SArray *pArray = (*pWrapper->func.getHandlesFp)(); if (pArray == NULL) return -1; for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { - SMgmtHandle * pMgmt = taosArrayGet(pArray, i); + SMgmtHandle *pMgmt = taosArrayGet(pArray, i); SDnodeHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(pMgmt->msgType)]; if (pMgmt->needCheckVgId) { pHandle->needCheckVgId = pMgmt->needCheckVgId; diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 096fc753b2..7fe7d44827 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -280,7 +280,7 @@ static void dmPrintEps(SDnodeData *pData) { dDebug("print dnode list, num:%d", numOfEps); for (int32_t i = 0; i < numOfEps; i++) { SDnodeEp *pEp = taosArrayGet(pData->dnodeEps, i); - dDebug("dnode:%d, fqdn:%s port:%u is_mnode:%d", pEp->id, pEp->ep.fqdn, pEp->ep.port, pEp->isMnode); + dDebug("dnode:%d, fqdn:%s port:%u isMnode:%d", pEp->id, pEp->ep.fqdn, pEp->ep.port, pEp->isMnode); } } diff --git a/source/dnode/mgmt/test/sut/src/sut.cpp b/source/dnode/mgmt/test/sut/src/sut.cpp index 21d9351ceb..a5e6128800 100644 --- a/source/dnode/mgmt/test/sut/src/sut.cpp +++ b/source/dnode/mgmt/test/sut/src/sut.cpp @@ -30,12 +30,14 @@ void Testbase::InitLog(const char* path) { tsdbDebugFlag = 0; tsLogEmbedded = 1; tsAsyncLog = 0; - tsRpcQueueMemoryAllowed = 1024 * 1024 * 64; - + taosRemoveDir(path); taosMkDir(path); tstrncpy(tsLogDir, path, PATH_MAX); - if (taosInitLog("taosdlog", 1) != 0) { + + taosGetSystemInfo(); + tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 0.1; +if (taosInitLog("taosdlog", 1) != 0) { printf("failed to init log file\n"); } } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 21002f5f9c..f39a848992 
100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -148,7 +148,10 @@ typedef struct { int32_t accessTimes; int32_t numOfVnodes; int32_t numOfSupportVnodes; - int32_t numOfCores; + float numOfCores; + int64_t memTotal; + int64_t memAvail; + int64_t memUsed; EDndReason offlineReason; uint16_t port; char fqdn[TSDB_FQDN_LEN]; @@ -243,6 +246,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -255,8 +259,8 @@ typedef struct { int8_t compression; int8_t replications; int8_t strict; - int8_t cacheLastRow; int8_t hashMethod; // default is 1 + int8_t cacheLastRow; int32_t numOfRetensions; SArray* pRetensions; int8_t schemaless; diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h index 6622b89b1d..c8237d17d8 100644 --- a/source/dnode/mnode/impl/inc/mndVgroup.h +++ b/source/dnode/mnode/impl/inc/mndVgroup.h @@ -30,6 +30,8 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup); SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup); int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId); void mndSortVnodeGid(SVgObj *pVgroup); +int64_t mndGetVnodesMemory(SMnode *pMnode, int32_t dnodeId); +int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup); SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId); int32_t mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup); diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 4f958de121..6770cd578a 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -93,6 +93,7 @@ static SSdbRaw *mndDbActionEncode(SDbObj *pDb) { SDB_SET_INT32(pRaw, dataPos, pDb->cfg.buffer, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.pageSize, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.pages, _OVER) + SDB_SET_INT32(pRaw, dataPos, pDb->cfg.lastRowMem, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.daysPerFile, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.daysToKeep0, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.daysToKeep1, _OVER) @@ -165,6 +166,7 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw) { SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.buffer, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.pageSize, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.pages, _OVER) + SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.lastRowMem, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.daysPerFile, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.daysToKeep0, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.daysToKeep1, _OVER) @@ -230,8 +232,9 @@ static int32_t mndDbActionUpdate(SSdb *pSdb, SDbObj *pOld, SDbObj *pNew) { pOld->cfgVersion = pNew->cfgVersion; pOld->vgVersion = pNew->vgVersion; pOld->cfg.buffer = pNew->cfg.buffer; - pOld->cfg.pages = pNew->cfg.pages; pOld->cfg.pageSize = pNew->cfg.pageSize; + pOld->cfg.pages = pNew->cfg.pages; + pOld->cfg.lastRowMem = pNew->cfg.lastRowMem; pOld->cfg.daysPerFile = pNew->cfg.daysPerFile; pOld->cfg.daysToKeep0 = pNew->cfg.daysToKeep0; pOld->cfg.daysToKeep1 = pNew->cfg.daysToKeep1; @@ -288,6 +291,7 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) { if (pCfg->buffer < TSDB_MIN_BUFFER_PER_VNODE || pCfg->buffer > TSDB_MAX_BUFFER_PER_VNODE) return -1; if (pCfg->pageSize < TSDB_MIN_PAGESIZE_PER_VNODE || pCfg->pageSize > TSDB_MAX_PAGESIZE_PER_VNODE) return -1; if (pCfg->pages < TSDB_MIN_PAGES_PER_VNODE || pCfg->pages > TSDB_MAX_PAGES_PER_VNODE) return -1; + if (pCfg->lastRowMem < 
TSDB_MIN_DB_LAST_ROW_MEM || pCfg->lastRowMem > TSDB_MAX_DB_LAST_ROW_MEM) return -1; if (pCfg->daysPerFile < TSDB_MIN_DAYS_PER_FILE || pCfg->daysPerFile > TSDB_MAX_DAYS_PER_FILE) return -1; if (pCfg->daysToKeep0 < TSDB_MIN_KEEP || pCfg->daysToKeep0 > TSDB_MAX_KEEP) return -1; if (pCfg->daysToKeep1 < TSDB_MIN_KEEP || pCfg->daysToKeep1 > TSDB_MAX_KEEP) return -1; @@ -336,6 +340,7 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) { if (pCfg->replications < 0) pCfg->replications = TSDB_DEFAULT_DB_REPLICA; if (pCfg->strict < 0) pCfg->strict = TSDB_DEFAULT_DB_STRICT; if (pCfg->cacheLastRow < 0) pCfg->cacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW; + if (pCfg->lastRowMem <= 0) pCfg->lastRowMem = TSDB_DEFAULT_LAST_ROW_MEM; if (pCfg->numOfRetensions < 0) pCfg->numOfRetensions = 0; if (pCfg->schemaless < 0) pCfg->schemaless = TSDB_DB_SCHEMALESS_OFF; } @@ -434,6 +439,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, .buffer = pCreate->buffer, .pageSize = pCreate->pageSize, .pages = pCreate->pages, + .lastRowMem = pCreate->lastRowMem, .daysPerFile = pCreate->daysPerFile, .daysToKeep0 = pCreate->daysToKeep0, .daysToKeep1 = pCreate->daysToKeep1, @@ -475,7 +481,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, int32_t code = -1; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq); if (pTrans == NULL) goto _OVER; - + // mndTransSetSerial(pTrans); mDebug("trans:%d, used to create db:%s", pTrans->id, pCreate->db); mndTransSetDbName(pTrans, dbObj.name, NULL); @@ -622,6 +628,11 @@ static int32_t mndSetDbCfgFromAlterDbReq(SDbObj *pDb, SAlterDbReq *pAlter) { terrno = 0; } + if (pAlter->lastRowMem > 0 && pAlter->lastRowMem != pDb->cfg.lastRowMem) { + pDb->cfg.lastRowMem = pAlter->lastRowMem; + terrno = 0; + } + if (pAlter->replications > 0 && pAlter->replications != pDb->cfg.replications) { #if 1 terrno = TSDB_CODE_OPS_NOT_SUPPORT; diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 8198445753..6ead922d95 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -15,8 +15,8 @@ #define _DEFAULT_SOURCE #include "mndDnode.h" -#include "mndPrivilege.h" #include "mndMnode.h" +#include "mndPrivilege.h" #include "mndQnode.h" #include "mndShow.h" #include "mndSnode.h" @@ -274,15 +274,14 @@ static void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeEps) { SDnodeEp dnodeEp = {0}; dnodeEp.id = pDnode->id; - dnodeEp.isMnode = 0; dnodeEp.ep.port = pDnode->port; memcpy(dnodeEp.ep.fqdn, pDnode->fqdn, TSDB_FQDN_LEN); + sdbRelease(pSdb, pDnode); + dnodeEp.isMnode = 0; if (mndIsMnode(pMnode, pDnode->id)) { dnodeEp.isMnode = 1; } - - sdbRelease(pSdb, pDnode); taosArrayPush(pDnodeEps, &dnodeEp); } } @@ -432,7 +431,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { } if (!online) { - mInfo("dnode:%d, from offline to online", pDnode->id); + mInfo("dnode:%d, from offline to online, memory avail:%" PRId64 " total:%" PRId64 " cores:%.2f", pDnode->id, + statusReq.memAvail, statusReq.memTotal, statusReq.numOfCores); } else { mDebug("dnode:%d, send dnode epset, online:%d dnodeVer:%" PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online, statusReq.dnodeVer, dnodeVer, reboot); @@ -441,6 +441,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { pDnode->rebootTime = statusReq.rebootTime; pDnode->numOfCores = statusReq.numOfCores; pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes; + pDnode->memAvail = statusReq.memAvail; + pDnode->memTotal = 
statusReq.memTotal; SStatusRsp statusRsp = {0}; statusRsp.dnodeVer = dnodeVer; @@ -580,7 +582,7 @@ static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq) { strcpy(info.name, "timezone"); snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%s", tsTimezoneStr); taosArrayPush(rsp.variables, &info); - + strcpy(info.name, "locale"); snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%s", tsLocale); taosArrayPush(rsp.variables, &info); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index c39c9847a9..5e708616fd 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -58,7 +58,7 @@ static void *mndBuildTimerMsg(int32_t *pContLen) { static void mndPullupTrans(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TRANS_TIMER, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); @@ -67,14 +67,14 @@ static void mndPullupTrans(SMnode *pMnode) { static void mndTtlTimer(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); SRpcMsg rpcMsg = {.msgType = TDMT_MND_TTL_TIMER, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); } static void mndCalMqRebalance(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_MQ_TIMER, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); @@ -83,7 +83,7 @@ static void mndCalMqRebalance(SMnode *pMnode) { static void mndPullupTelem(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TELEM_TIMER, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); @@ -395,7 +395,7 @@ void mndStop(SMnode *pMnode) { } int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { - SMnode * pMnode = pMsg->info.node; + SMnode *pMnode = pMsg->info.node; SSyncMgmt *pMgmt = &pMnode->syncMgmt; int32_t code = 0; @@ -413,7 +413,7 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { } do { - char * syncNodeStr = sync2SimpleStr(pMgmt->sync); + char *syncNodeStr = sync2SimpleStr(pMgmt->sync); static int64_t mndTick = 0; if (++mndTick % 10 == 1) { mTrace("vgId:%d, sync trace msg:%s, %s", syncGetVgId(pMgmt->sync), TMSG_INFO(pMsg->msgType), syncNodeStr); @@ -427,7 +427,7 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { } while (0); // ToDo: ugly! 
use function pointer - if (syncNodeSnapshotEnable(pSyncNode)) { + if (syncNodeStrategy(pSyncNode) == SYNC_STRATEGY_STANDARD_SNAPSHOT) { if (pMsg->msgType == TDMT_SYNC_TIMEOUT) { SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pMsg); code = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); @@ -519,6 +519,8 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { } } + syncNodeRelease(pSyncNode); + if (code != 0) { terrno = TSDB_CODE_SYN_INTERNAL_ERROR; } @@ -579,7 +581,7 @@ static int32_t mndCheckMsgContent(SRpcMsg *pMsg) { } int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { - SMnode * pMnode = pMsg->info.node; + SMnode *pMnode = pMsg->info.node; const STraceId *trace = &pMsg->info.traceId; MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)]; @@ -632,7 +634,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr SMonStbInfo *pStbInfo, SMonGrantInfo *pGrantInfo) { if (mndAcquireRpcRef(pMnode) != 0) return -1; - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; int64_t ms = taosGetTimestampMs(); pClusterInfo->dnodes = taosArrayInit(sdbGetSize(pSdb, SDB_DNODE), sizeof(SMonDnodeDesc)); @@ -713,7 +715,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pGrantInfo->timeseries_used += pVgroup->numOfTimeSeries; tstrncpy(desc.status, "unsynced", sizeof(desc.status)); for (int32_t i = 0; i < pVgroup->replica; ++i) { - SVnodeGid * pVgid = &pVgroup->vnodeGid[i]; + SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; SMonVnodeDesc *pVnDesc = &desc.vnodes[i]; pVnDesc->dnode_id = pVgid->dnodeId; tstrncpy(pVnDesc->vnode_role, syncStr(pVgid->role), sizeof(pVnDesc->vnode_role)); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 693eed5222..2053f3886c 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -134,7 +134,7 @@ int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, in return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len); } -int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) { +int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void *pParam, void **ppWriter) { mInfo("start to apply snapshot to sdb"); SMnode *pMnode = pFsm->data; return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter); @@ -178,7 +178,7 @@ int32_t mndInitSync(SMnode *pMnode) { syncInfo.pWal = pMnode->pWal; syncInfo.pFsm = mndSyncMakeFsm(pMnode); syncInfo.isStandBy = pMgmt->standby; - syncInfo.snapshotEnable = true; + syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT; mInfo("start to open mnode sync, standby:%d", pMgmt->standby); if (pMgmt->standby || pMgmt->replica.id > 0) { diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index c53e831e84..85f1ce6843 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -207,6 +207,7 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg createReq.buffer = pDb->cfg.buffer; createReq.pageSize = pDb->cfg.pageSize; createReq.pages = pDb->cfg.pages; + createReq.lastRowMem = pDb->cfg.lastRowMem; createReq.daysPerFile = pDb->cfg.daysPerFile; createReq.daysToKeep0 = pDb->cfg.daysToKeep0; createReq.daysToKeep1 = pDb->cfg.daysToKeep1; @@ -274,8 +275,9 @@ void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_ SAlterVnodeReq alterReq = {0}; alterReq.vgVersion = pVgroup->version; alterReq.buffer = pDb->cfg.buffer; - alterReq.pages = pDb->cfg.pages; alterReq.pageSize = pDb->cfg.pageSize; + 
alterReq.pages = pDb->cfg.pages; + alterReq.lastRowMem = pDb->cfg.lastRowMem; alterReq.daysPerFile = pDb->cfg.daysPerFile; alterReq.daysToKeep0 = pDb->cfg.daysToKeep0; alterReq.daysToKeep1 = pDb->cfg.daysToKeep1; @@ -392,9 +394,10 @@ static bool mndBuildDnodesArrayFp(SMnode *pMnode, void *pObj, void *p1, void *p2 bool online = mndIsDnodeOnline(pDnode, curMs); bool isMnode = mndIsMnode(pMnode, pDnode->id); pDnode->numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id); + pDnode->memUsed = mndGetVnodesMemory(pMnode, pDnode->id); - mDebug("dnode:%d, vnodes:%d support_vnodes:%d is_mnode:%d online:%d", pDnode->id, pDnode->numOfVnodes, - pDnode->numOfSupportVnodes, isMnode, online); + mDebug("dnode:%d, vnodes:%d supportVnodes:%d isMnode:%d online:%d memory avail:%" PRId64 " used:%" PRId64, pDnode->id, + pDnode->numOfVnodes, pDnode->numOfSupportVnodes, isMnode, online, pDnode->memAvail, pDnode->memUsed); if (isMnode) { pDnode->numOfVnodes++; @@ -426,15 +429,7 @@ static int32_t mndCompareDnodeId(int32_t *dnode1Id, int32_t *dnode2Id) { return static int32_t mndCompareDnodeVnodes(SDnodeObj *pDnode1, SDnodeObj *pDnode2) { float d1Score = (float)pDnode1->numOfVnodes / pDnode1->numOfSupportVnodes; float d2Score = (float)pDnode2->numOfVnodes / pDnode2->numOfSupportVnodes; -#if 0 - if (d1Score == d2Score) { - return pDnode2->id - pDnode1->id; - } else { - return d1Score >= d2Score ? 1 : 0; - } -#else return d1Score >= d2Score ? 1 : 0; -#endif } void mndSortVnodeGid(SVgObj *pVgroup) { @@ -447,7 +442,7 @@ void mndSortVnodeGid(SVgObj *pVgroup) { } } -static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) { +static int32_t mndGetAvailableDnode(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, SArray *pArray) { SSdb *pSdb = pMnode->pSdb; int32_t allocedVnodes = 0; void *pIter = NULL; @@ -470,6 +465,16 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pAr return -1; } + int64_t vgMem = mndGetVgroupMemory(pMnode, pDb, pVgroup); + if (pDnode->memAvail - vgMem - pDnode->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d, avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pDnode->id, pDnode->memAvail, pDnode->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pDnode->memUsed += vgMem; + } + pVgid->dnodeId = pDnode->id; if (pVgroup->replica == 1) { pVgid->role = TAOS_SYNC_STATE_LEADER; @@ -477,7 +482,8 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pAr pVgid->role = TAOS_SYNC_STATE_FOLLOWER; } - mInfo("db:%s, vgId:%d, vn:%d dnode:%d is alloced", pVgroup->dbName, pVgroup->vgId, v, pVgid->dnodeId); + mInfo("db:%s, vgId:%d, vn:%d is alloced, memory:%" PRId64 ", dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, v, vgMem, pVgid->dnodeId, pDnode->memAvail, pDnode->memUsed); pDnode->numOfVnodes++; } @@ -498,7 +504,7 @@ int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) { pVgroup->dbUid = pDb->uid; pVgroup->replica = 1; - if (mndGetAvailableDnode(pMnode, pVgroup, pArray) != 0) return -1; + if (mndGetAvailableDnode(pMnode, pDb, pVgroup, pArray) != 0) return -1; mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId); return 0; @@ -546,8 +552,7 @@ int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) { pVgroup->dbUid = pDb->uid; pVgroup->replica = pDb->cfg.replications; - if (mndGetAvailableDnode(pMnode, pVgroup, pArray) != 0) { - terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; + if 
(mndGetAvailableDnode(pMnode, pDb, pVgroup, pArray) != 0) { goto _OVER; } @@ -728,6 +733,46 @@ int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId) { return numOfVnodes; } +int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDbInput, SVgObj *pVgroup) { + SDbObj *pDb = pDbInput; + if (pDbInput == NULL) { + pDb = mndAcquireDb(pMnode, pVgroup->dbName); + } + + int64_t vgroupMemory = 0; + if (pDb != NULL) { + vgroupMemory = (int64_t)pDb->cfg.buffer * 1024 * 1024 + (int64_t)pDb->cfg.pages * pDb->cfg.pageSize * 1024; + if (pDb->cfg.cacheLastRow > 0) { + vgroupMemory += (int64_t)pDb->cfg.lastRowMem * 1024 * 1024; + } + } + + if (pDbInput == NULL) { + mndReleaseDb(pMnode, pDb); + } + return vgroupMemory; +} + +static bool mndGetVnodeMemoryFp(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3) { + SVgObj *pVgroup = pObj; + int32_t dnodeId = *(int32_t *)p1; + int64_t *pVnodeMemory = (int64_t *)p2; + + for (int32_t v = 0; v < pVgroup->replica; ++v) { + if (pVgroup->vnodeGid[v].dnodeId == dnodeId) { + *pVnodeMemory += mndGetVgroupMemory(pMnode, NULL, pVgroup); + } + } + + return true; +} + +int64_t mndGetVnodesMemory(SMnode *pMnode, int32_t dnodeId) { + int64_t vnodeMemory = 0; + sdbTraverse(pMnode->pSdb, SDB_VGROUP, mndGetVnodeMemoryFp, &dnodeId, &vnodeMemory, NULL); + return vnodeMemory; +} + static int32_t mndRetrieveVnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; @@ -807,9 +852,20 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) { return -1; } + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pDnode->memAvail - vgMem - pDnode->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pDnode->id, pDnode->memAvail, pDnode->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pDnode->memUsed += vgMem; + } + pVgid->dnodeId = pDnode->id; pVgid->role = TAOS_SYNC_STATE_ERROR; - mInfo("db:%s, vgId:%d, vn:%d dnode:%d, is added", pVgroup->dbName, pVgroup->vgId, pVgroup->replica, pVgid->dnodeId); + mInfo("db:%s, vgId:%d, vn:%d is added, memory:%" PRId64 ", dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, pVgroup->replica, vgMem, pVgid->dnodeId, pDnode->memAvail, pDnode->memUsed); pVgroup->replica++; pDnode->numOfVnodes++; @@ -835,7 +891,10 @@ int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray for (int32_t vn = 0; vn < pVgroup->replica; ++vn) { SVnodeGid *pVgid = &pVgroup->vnodeGid[vn]; if (pVgid->dnodeId == pDnode->id) { - mInfo("db:%s, vgId:%d, vn:%d dnode:%d, is removed", pVgroup->dbName, pVgroup->vgId, vn, pVgid->dnodeId); + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + pDnode->memUsed -= vgMem; + mInfo("db:%s, vgId:%d, vn:%d is removed, memory:%" PRId64 ", dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vn, vgMem, pVgid->dnodeId, pDnode->memAvail, pDnode->memUsed); pDnode->numOfVnodes--; pVgroup->replica--; *pDelVgid = *pVgid; @@ -1161,6 +1220,17 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; goto _OVER; } + + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pNew1->memAvail - vgMem - pNew1->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, 
vgMem, pNew1->id, pNew1->memAvail, pNew1->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pNew1->memUsed += vgMem; + } + if (mndAddIncVgroupReplicaToTrans(pMnode, pTrans, pDb, &newVg, pNew1->id) != 0) goto _OVER; if (mndAddDecVgroupReplicaFromTrans(pMnode, pTrans, pDb, &newVg, pOld1->id) != 0) goto _OVER; } @@ -1173,6 +1243,15 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; goto _OVER; } + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pNew2->memAvail - vgMem - pNew2->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pNew2->id, pNew2->memAvail, pNew2->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pNew2->memUsed += vgMem; + } if (mndAddIncVgroupReplicaToTrans(pMnode, pTrans, pDb, &newVg, pNew2->id) != 0) goto _OVER; if (mndAddDecVgroupReplicaFromTrans(pMnode, pTrans, pDb, &newVg, pOld2->id) != 0) goto _OVER; } @@ -1185,6 +1264,15 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; goto _OVER; } + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pNew3->memAvail - vgMem - pNew3->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pNew3->id, pNew3->memAvail, pNew3->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pNew3->memUsed += vgMem; + } if (mndAddIncVgroupReplicaToTrans(pMnode, pTrans, pDb, &newVg, pNew3->id) != 0) goto _OVER; if (mndAddDecVgroupReplicaFromTrans(pMnode, pTrans, pDb, &newVg, pOld3->id) != 0) goto _OVER; } @@ -1415,7 +1503,7 @@ _OVER: mndReleaseDb(pMnode, pDb); return code; -#endif +#endif } int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SArray *pArray) { @@ -1654,9 +1742,9 @@ static int32_t mndBalanceVgroupBetweenDnode(SMnode *pMnode, STrans *pTrans, SDno } static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) { - int32_t code = -1; - int32_t numOfVgroups = 0; - STrans *pTrans = NULL; + int32_t code = -1; + int32_t numOfVgroups = 0; + STrans *pTrans = NULL; SHashObj *pBalancedVgroups = NULL; pBalancedVgroups = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); @@ -1721,7 +1809,7 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; SArray *pArray = NULL; - void *pIter = NULL; + void *pIter = NULL; int64_t curMs = taosGetTimestampMs(); SBalanceVgroupReq req = {0}; @@ -1766,7 +1854,7 @@ _OVER: taosArrayDestroy(pArray); return code; -#endif +#endif } bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; } \ No newline at end of file diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index fbf66da632..3db0087334 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -131,7 +131,7 @@ int32_t sdbSetTable(SSdb *pSdb, SSdbTable table) { hashType = TSDB_DATA_TYPE_BINARY; } - SHashObj *hash = taosHashInit(64, taosGetDefaultHashFunction(hashType), true, HASH_NO_LOCK); + SHashObj *hash = taosHashInit(64, taosGetDefaultHashFunction(hashType), true, HASH_ENTRY_LOCK); if (hash == NULL) { terrno = 
TSDB_CODE_OUT_OF_MEMORY; return -1; diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c index 30299e8792..6e75176136 100644 --- a/source/dnode/vnode/src/sma/smaCommit.c +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -83,8 +83,8 @@ int32_t smaBegin(SSma *pSma) { /** * @brief pre-commit for rollup sma. * 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED. - * 2) perform persist task for qTaskInfo - * 3) wait all triggered fetch tasks finished + * 2) wait all triggered fetch tasks finished + * 3) perform persist task for qTaskInfo * * @param pSma * @return int32_t @@ -102,10 +102,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) { // step 1: set persistence task paused atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED); - // step 2: perform persist task for qTaskInfo - tdRSmaPersistExecImpl(pRSmaStat); - - // step 3: wait all triggered fetch tasks finished + // step 2: wait all triggered fetch tasks finished int32_t nLoops = 0; while (1) { if (T_REF_VAL_GET(pStat) == 0) { @@ -121,6 +118,9 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) { } } + // step 3: perform persist task for qTaskInfo + tdRSmaPersistExecImpl(pRSmaStat); + smaDebug("vgId:%d, rsma pre commit succeess", SMA_VID(pSma)); return TSDB_CODE_SUCCESS; diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 0b28d6bf10..c74baa6d7b 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -79,7 +79,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) { SJson *pNodeRetentions = tjsonCreateArray(); tjsonAddItemToObject(pJson, "retentions", pNodeRetentions); for (int32_t i = 0; i < nRetention; ++i) { - SJson *pNodeRetention = tjsonCreateObject(); + SJson * pNodeRetention = tjsonCreateObject(); const SRetention *pRetention = pCfg->tsdbCfg.retentions + i; tjsonAddIntegerToObject(pNodeRetention, "freq", pRetention->freq); tjsonAddIntegerToObject(pNodeRetention, "freqUnit", pRetention->freqUnit); @@ -118,45 +118,45 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { int32_t code; tjsonGetNumberValue(pJson, "vgId", pCfg->vgId, code); - if(code < 0) return -1; + if (code < 0) return -1; if (tjsonGetStringValue(pJson, "dbname", pCfg->dbname) < 0) return -1; tjsonGetNumberValue(pJson, "dbId", pCfg->dbId, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "szPage", pCfg->szPage, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "szCache", pCfg->szCache, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "szBuf", pCfg->szBuf, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "isHeap", pCfg->isHeap, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "isWeak", pCfg->isWeak, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "isTsma", pCfg->isTsma, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "isRsma", pCfg->isRsma, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "precision", pCfg->tsdbCfg.precision, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "update", pCfg->tsdbCfg.update, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "compression", pCfg->tsdbCfg.compression, code); - if(code < 0) return -1; + 
if (code < 0) return -1; tjsonGetNumberValue(pJson, "slLevel", pCfg->tsdbCfg.slLevel, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "daysPerFile", pCfg->tsdbCfg.days, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "minRows", pCfg->tsdbCfg.minRows, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "maxRows", pCfg->tsdbCfg.maxRows, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "keep0", pCfg->tsdbCfg.keep0, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "keep1", pCfg->tsdbCfg.keep1, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "keep2", pCfg->tsdbCfg.keep2, code); - if(code < 0) return -1; - SJson *pNodeRetentions = tjsonGetObjectItem(pJson, "retentions"); + if (code < 0) return -1; + SJson * pNodeRetentions = tjsonGetObjectItem(pJson, "retentions"); int32_t nRetention = tjsonGetArraySize(pNodeRetentions); if (nRetention > TSDB_RETENTION_MAX) { nRetention = TSDB_RETENTION_MAX; @@ -170,30 +170,30 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { tjsonGetNumberValue(pNodeRetention, "keepUnit", (pCfg->tsdbCfg.retentions)[i].keepUnit, code); } tjsonGetNumberValue(pJson, "wal.vgId", pCfg->walCfg.vgId, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "wal.fsyncPeriod", pCfg->walCfg.fsyncPeriod, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "wal.retentionPeriod", pCfg->walCfg.retentionPeriod, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "wal.rollPeriod", pCfg->walCfg.rollPeriod, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "wal.retentionSize", pCfg->walCfg.retentionSize, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "wal.segSize", pCfg->walCfg.segSize, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "wal.level", pCfg->walCfg.level, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "hashBegin", pCfg->hashBegin, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code); - if(code < 0) return -1; + if (code < 0) return -1; tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex, code); - if(code < 0) return -1; + if (code < 0) return -1; SJson *pNodeInfoArr = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo"); int arraySize = tjsonGetArraySize(pNodeInfoArr); diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 32d183ca0a..5aa6762e83 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -27,10 +27,10 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) { SMetaReader mer1 = {0}; SMetaReader mer2 = {0}; char tableFName[TSDB_TABLE_FNAME_LEN]; - SRpcMsg rpcMsg; + SRpcMsg rpcMsg = {0}; int32_t code = 0; int32_t rspLen = 0; - void *pRsp = NULL; + void * pRsp = NULL; SSchemaWrapper schema = {0}; SSchemaWrapper schemaTag = {0}; @@ -111,6 +111,7 @@ _exit: rpcMsg.pCont = pRsp; 
rpcMsg.contLen = rspLen; rpcMsg.code = code; + rpcMsg.msgType = pMsg->msgType; if (code) { qError("get table %s meta failed cause of %s", infoReq.tbName, tstrerror(code)); @@ -130,10 +131,10 @@ int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg) { SMetaReader mer1 = {0}; SMetaReader mer2 = {0}; char tableFName[TSDB_TABLE_FNAME_LEN]; - SRpcMsg rpcMsg; + SRpcMsg rpcMsg = {0}; int32_t code = 0; int32_t rspLen = 0; - void *pRsp = NULL; + void * pRsp = NULL; SSchemaWrapper schema = {0}; SSchemaWrapper schemaTag = {0}; @@ -220,6 +221,7 @@ _exit: rpcMsg.pCont = pRsp; rpcMsg.contLen = rspLen; rpcMsg.code = code; + rpcMsg.msgType = pMsg->msgType; if (code) { qError("get table %s cfg failed cause of %s", cfgReq.tbName, tstrerror(code)); @@ -260,7 +262,7 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId) { } // wrapper of tsdb read interface -tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo* tableList, uint64_t qId, +tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *tableList, uint64_t qId, void *pMemRef) { #if 0 return tsdbQueryCacheLastT(pVnode->pTsdb, pCond, groupList, qId, pMemRef); diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 80ba80e61a..0445eda7af 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -256,70 +256,133 @@ int32_t vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { SRpcMsg *pRpcMsg = pMsg; - if (pRpcMsg->msgType == TDMT_SYNC_TIMEOUT) { - SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + // ToDo: ugly! use function pointer + // use different strategy + if (syncNodeStrategy(pSyncNode) == SYNC_STRATEGY_NO_SNAPSHOT) { + if (pRpcMsg->msgType == TDMT_SYNC_TIMEOUT) { + SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); + syncTimeoutDestroy(pSyncMsg); - ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); - syncTimeoutDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == TDMT_SYNC_PING) { + SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingCb(pSyncNode, pSyncMsg); + syncPingDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_PING) { - SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_PING_REPLY) { + SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg); + syncPingReplyDestroy(pSyncMsg); - ret = syncNodeOnPingCb(pSyncNode, pSyncMsg); - syncPingDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { + SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg, NULL); + syncClientRequestDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_PING_REPLY) { - SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE) { + SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg); + syncRequestVoteDestroy(pSyncMsg); - ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg); - syncPingReplyDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == 
TDMT_SYNC_REQUEST_VOTE_REPLY) { + SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg); + syncRequestVoteReplyDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { - SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES) { + SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg); + syncAppendEntriesDestroy(pSyncMsg); - ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg, NULL); - syncClientRequestDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_REPLY) { + SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg); + syncAppendEntriesReplyDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE) { - SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_SET_VNODE_STANDBY) { + ret = vnodeSetStandBy(pVnode); + if (ret != 0 && terrno != 0) ret = terrno; + SRpcMsg rsp = {.code = ret, .info = pMsg->info}; + tmsgSendRsp(&rsp); + } else { + vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType); + ret = -1; + } - ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg); - syncRequestVoteDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE_REPLY) { - SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); - - ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg); - syncRequestVoteReplyDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES) { - SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); - - ret = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg); - syncAppendEntriesDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_REPLY) { - SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); - - ret = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg); - syncAppendEntriesReplyDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_SET_VNODE_STANDBY) { - ret = vnodeSetStandBy(pVnode); - if (ret != 0 && terrno != 0) ret = terrno; - SRpcMsg rsp = {.code = ret, .info = pMsg->info}; - tmsgSendRsp(&rsp); } else { - vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType); - ret = -1; + // use wal first strategy + + if (pRpcMsg->msgType == TDMT_SYNC_TIMEOUT) { + SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); + syncTimeoutDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_PING) { + SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingCb(pSyncNode, pSyncMsg); + syncPingDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_PING_REPLY) { + SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg); + syncPingReplyDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { + SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg); + 
ASSERT(pSyncMsg != NULL); + ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg, NULL); + syncClientRequestDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST_BATCH) { + SyncClientRequestBatch *pSyncMsg = syncClientRequestBatchFromRpcMsg(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnClientRequestBatchCb(pSyncNode, pSyncMsg); + syncClientRequestBatchDestroyDeep(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE) { + SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg); + syncRequestVoteDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE_REPLY) { + SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg); + syncRequestVoteReplyDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_BATCH) { + SyncAppendEntriesBatch *pSyncMsg = syncAppendEntriesBatchFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesSnapshot2Cb(pSyncNode, pSyncMsg); + syncAppendEntriesBatchDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_REPLY) { + SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesReplySnapshot2Cb(pSyncNode, pSyncMsg); + syncAppendEntriesReplyDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_SET_VNODE_STANDBY) { + ret = vnodeSetStandBy(pVnode); + if (ret != 0 && terrno != 0) ret = terrno; + SRpcMsg rsp = {.code = ret, .info = pMsg->info}; + tmsgSendRsp(&rsp); + } else { + vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType); + ret = -1; + } } syncNodeRelease(pSyncNode); @@ -415,7 +478,7 @@ static int32_t vnodeSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { ret static int32_t vnodeSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) { return 0; } -static int32_t vnodeSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) { return 0; } +static int32_t vnodeSnapshotStartWrite(struct SSyncFSM *pFsm, void *pParam, void **ppWriter) { return 0; } static int32_t vnodeSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) { return 0; } @@ -442,7 +505,8 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) { int32_t vnodeSyncOpen(SVnode *pVnode, char *path) { SSyncInfo syncInfo = { - .snapshotEnable = false, + .snapshotStrategy = SYNC_STRATEGY_NO_SNAPSHOT, + .batchSize = 10, .vgId = pVnode->config.vgId, .isStandBy = pVnode->config.standby, .syncCfg = pVnode->config.syncCfg, diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 06e9af7569..6aca581f45 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -29,13 +29,17 @@ extern "C" { #define EXPLAIN_TAG_SCAN_FORMAT "Tag Scan on %s" #define EXPLAIN_TBL_SCAN_FORMAT "Table Scan on %s" #define EXPLAIN_SYSTBL_SCAN_FORMAT "System Table Scan on %s" +#define EXPLAIN_DISTBLK_SCAN_FORMAT "Block Dist Scan on %s" +#define EXPLAIN_LASTROW_SCAN_FORMAT "Last Row Scan on %s" #define EXPLAIN_PROJECTION_FORMAT "Projection" #define EXPLAIN_JOIN_FORMAT "%s" #define EXPLAIN_AGG_FORMAT "Aggragate" #define EXPLAIN_INDEF_ROWS_FORMAT "Indefinite Rows Function" #define EXPLAIN_EXCHANGE_FORMAT "Data Exchange %d:1" #define EXPLAIN_SORT_FORMAT "Sort" +#define EXPLAIN_GROUP_SORT_FORMAT "Group 
Sort" #define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s" +#define EXPLAIN_MERGE_INTERVAL_FORMAT "Merge Interval on Column %s" #define EXPLAIN_FILL_FORMAT "Fill" #define EXPLAIN_SESSION_FORMAT "Session" #define EXPLAIN_STATE_WINDOW_FORMAT "StateWindow on Column %s" @@ -62,10 +66,12 @@ extern "C" { #define EXPLAIN_COST_FORMAT "cost=%.2f..%.2f" #define EXPLAIN_ROWS_FORMAT "rows=%" PRIu64 #define EXPLAIN_COLUMNS_FORMAT "columns=%d" +#define EXPLAIN_PSEUDO_COLUMNS_FORMAT "pseudo_columns=%d" #define EXPLAIN_WIDTH_FORMAT "width=%d" #define EXPLAIN_TABLE_SCAN_FORMAT "order=[asc|%d desc|%d]" #define EXPLAIN_GROUPS_FORMAT "groups=%d" #define EXPLAIN_WIDTH_FORMAT "width=%d" +#define EXPLAIN_INTERVAL_VALUE_FORMAT "interval=%" PRId64 "%c" #define EXPLAIN_FUNCTIONS_FORMAT "functions=%d" #define EXPLAIN_EXECINFO_FORMAT "cost=%.3f..%.3f rows=%" PRIu64 #define EXPLAIN_MODE_FORMAT "mode=%s" diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index cd454c075b..fc3e3cbc8a 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -548,19 +548,17 @@ static int32_t execShowLocalVariables(SRetrieveTableRsp** pRsp) { } static int32_t createSelectResultDataBlock(SNodeList* pProjects, SSDataBlock** pOutput) { - SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + SSDataBlock* pBlock = createDataBlock(); if (NULL == pBlock) { return TSDB_CODE_OUT_OF_MEMORY; } - pBlock->pDataBlock = taosArrayInit(LIST_LENGTH(pProjects), sizeof(SColumnInfoData)); - SNode* pProj = NULL; FOREACH(pProj, pProjects) { SColumnInfoData infoData = {0}; infoData.info.type = ((SExprNode*)pProj)->resType.type; infoData.info.bytes = ((SExprNode*)pProj)->resType.bytes; - taosArrayPush(pBlock->pDataBlock, &infoData); + blockDataAppendColInfo(pBlock, &infoData); } *pOutput = pBlock; return TSDB_CODE_SUCCESS; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 7af36a0842..fde53b7064 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -199,6 +199,31 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo pPhysiChildren = mergePhysiNode->scan.node.pChildren; break; } + case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: { + SBlockDistScanPhysiNode *distPhysiNode = (SBlockDistScanPhysiNode *)pNode; + pPhysiChildren = distPhysiNode->node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: { + SLastRowScanPhysiNode *lastRowPhysiNode = (SLastRowScanPhysiNode *)pNode; + pPhysiChildren = lastRowPhysiNode->node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: { + SGroupSortPhysiNode *groupSortPhysiNode = (SGroupSortPhysiNode *)pNode; + pPhysiChildren = groupSortPhysiNode->node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: { + SMergeIntervalPhysiNode *mergeIntPhysiNode = (SMergeIntervalPhysiNode *)pNode; + pPhysiChildren = mergeIntPhysiNode->window.node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: { + SInterpFuncPhysiNode *interpPhysiNode = (SInterpFuncPhysiNode *)pNode; + pPhysiChildren = interpPhysiNode->node.pChildren; + break; + } default: qError("not supported physical node type %d", pNode->type); QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR); @@ -378,6 +403,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTagScanNode->pScanCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pTagScanNode->pScanPseudoCols) { + 
EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); @@ -415,6 +444,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTblScanNode->scan.pScanCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pTblScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTblScanNode->scan.pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTblScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_TABLE_SCAN_FORMAT, pTblScanNode->scanSeq[0], pTblScanNode->scanSeq[1]); @@ -516,6 +549,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pSTblScanNode->scan.pScanCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pSTblScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pSTblScanNode->scan.pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSTblScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); @@ -1131,6 +1168,258 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } + case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: { + SBlockDistScanPhysiNode *pDistScanNode = (SBlockDistScanPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_DISTBLK_SCAN_FORMAT, pDistScanNode->tableName.tname); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pDistScanNode->pScanCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pDistScanNode->pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pDistScanNode->pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDistScanNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pDistScanNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDistScanNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pDistScanNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pDistScanNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: { + SLastRowScanPhysiNode 
*pLastRowNode = (SLastRowScanPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_LASTROW_SCAN_FORMAT, pLastRowNode->tableName.tname); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pLastRowNode->pScanCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pLastRowNode->pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pLastRowNode->pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pLastRowNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pLastRowNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pLastRowNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pLastRowNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pLastRowNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: { + SGroupSortPhysiNode *pSortNode = (SGroupSortPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_GROUP_SORT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + + SDataBlockDescNode *pDescNode = pSortNode->node.pOutputDataBlockDesc; + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (EXPLAIN_MODE_ANALYZE == ctx->mode) { + // sort key + EXPLAIN_ROW_NEW(level + 1, "Sort Key: "); + if (pResNode->pExecInfo) { + for (int32_t i = 0; i < LIST_LENGTH(pSortNode->pSortKeys); ++i) { + SOrderByExprNode *ptn = (SOrderByExprNode *)nodesListGetNode(pSortNode->pSortKeys, i); + EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr)); + } + } + + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + // sort method + EXPLAIN_ROW_NEW(level + 1, "Sort Method: "); + + int32_t nodeNum = taosArrayGetSize(pResNode->pExecInfo); + SExplainExecInfo *execInfo = taosArrayGet(pResNode->pExecInfo, 0); + SSortExecInfo *pExecInfo = (SSortExecInfo *)execInfo->verboseInfo; + EXPLAIN_ROW_APPEND("%s", pExecInfo->sortMethod == SORT_QSORT_T ? 
"quicksort" : "merge sort"); + if (pExecInfo->sortBuffer > 1024 * 1024) { + EXPLAIN_ROW_APPEND(" Buffers:%.2f Mb", pExecInfo->sortBuffer / (1024 * 1024.0)); + } else if (pExecInfo->sortBuffer > 1024) { + EXPLAIN_ROW_APPEND(" Buffers:%.2f Kb", pExecInfo->sortBuffer / (1024.0)); + } else { + EXPLAIN_ROW_APPEND(" Buffers:%d b", pExecInfo->sortBuffer); + } + + EXPLAIN_ROW_APPEND(" loops:%d", pExecInfo->loops); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + } + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pSortNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pSortNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pSortNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: { + SMergeIntervalPhysiNode *pIntNode = (SMergeIntervalPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_MERGE_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk)); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + uint8_t precision = getIntervalPrecision(pIntNode); + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIME_WINDOWS_FORMAT, + INVERAL_TIME_FROM_PRECISION_TO_UNIT(pIntNode->interval, pIntNode->intervalUnit, precision), + pIntNode->intervalUnit, pIntNode->offset, getPrecisionUnit(precision), + INVERAL_TIME_FROM_PRECISION_TO_UNIT(pIntNode->sliding, pIntNode->slidingUnit, precision), + pIntNode->slidingUnit); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pIntNode->window.node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pIntNode->window.node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: { + SInterpFuncPhysiNode *pInterpNode = (SInterpFuncPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + if 
(pInterpNode->pFuncs) { + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pInterpNode->pFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + + EXPLAIN_ROW_APPEND(EXPLAIN_MODE_FORMAT, nodesGetFillModeString(pInterpNode->fillMode)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pInterpNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pInterpNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + if (pInterpNode->pFillValues) { + SNodeListNode *pValues = (SNodeListNode *)pInterpNode->pFillValues; + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILL_VALUE_FORMAT); + SNode *tNode = NULL; + int32_t i = 0; + FOREACH(tNode, pValues->pNodeList) { + if (i) { + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + SValueNode *tValue = (SValueNode *)tNode; + char *value = nodesGetStrValueFromNode(tValue); + EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, value); + taosMemoryFree(value); + ++i; + } + + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_INTERVAL_VALUE_FORMAT, pInterpNode->interval, pInterpNode->intervalUnit); + EXPLAIN_ROW_END(); + + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIMERANGE_FORMAT, pInterpNode->timeRange.skey, pInterpNode->timeRange.ekey); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pInterpNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pInterpNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } default: qError("not supported physical node type %d", pNode->type); return TSDB_CODE_QRY_APP_ERROR; diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index b562b0631e..00f2e09e0c 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -51,6 +51,13 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int #define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0) +#define START_TS_COLUMN_INDEX 0 +#define END_TS_COLUMN_INDEX 1 +#define UID_COLUMN_INDEX 2 +#define GROUPID_COLUMN_INDEX UID_COLUMN_INDEX +#define DELETE_GROUPID_COLUMN_INDEX 2 + + enum { // when this task starts to execute, this status will set TASK_NOT_COMPLETED = 0x1u, @@ -364,6 +371,8 @@ typedef struct SStreamBlockScanInfo { int32_t scanWinIndex; // for state operator int32_t pullDataResIndex; SSDataBlock* pPullDataRes; // pull data SSDataBlock + SSDataBlock* pDeleteDataRes; // delete data SSDataBlock + int32_t deleteDataIndex; } SStreamBlockScanInfo; typedef struct SSysTableScanInfo { @@ -429,6 +438,10 @@ typedef struct SIntervalAggOperatorInfo { bool invertible; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. 
bool ignoreExpiredData; + SArray* pRecycledPages; + SArray* pDelWins; // SWinRes + int32_t delIndex; + SSDataBlock* pDelRes; } SIntervalAggOperatorInfo; typedef struct SStreamFinalIntervalOperatorInfo { @@ -451,6 +464,10 @@ typedef struct SStreamFinalIntervalOperatorInfo { int32_t pullIndex; SSDataBlock* pPullDataRes; bool ignoreExpiredData; + SArray* pRecycledPages; + SArray* pDelWins; // SWinRes + int32_t delIndex; + SSDataBlock* pDelRes; } SStreamFinalIntervalOperatorInfo; typedef struct SAggOperatorInfo { @@ -680,7 +697,7 @@ typedef struct SJoinOperatorInfo { SSDataBlock *pRight; int32_t rightPos; SColumnInfo rightCol; - SNode *pOnCondition; + SNode *pCondAfterMerge; } SJoinOperatorInfo; #define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 20e507b040..25b61e15c3 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -2843,11 +2843,18 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { int32_t type = pOperator->operatorType; + + pOperator->status = OP_OPENED; + if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { SStreamBlockScanInfo* pScanInfo = pOperator->info; pScanInfo->blockType = STREAM_INPUT__DATA_SCAN; + pScanInfo->pSnapshotReadOp->status = OP_OPENED; + STableScanInfo* pInfo = pScanInfo->pSnapshotReadOp->info; + ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER); + if (uid == 0) { pInfo->noTable = 1; return TSDB_CODE_SUCCESS; @@ -2861,14 +2868,6 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { pInfo->noTable = 0; if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) { - tsdbSetTableId(pInfo->dataReader, uid); - int64_t oldSkey = pInfo->cond.twindows[0].skey; - pInfo->cond.twindows[0].skey = ts + 1; - tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0); - pInfo->cond.twindows[0].skey = oldSkey; - pInfo->scanTimes = 0; - pInfo->curTWinIdx = 0; - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); @@ -2880,8 +2879,17 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { pInfo->currentTable = i; } } - // TODO after processing drop, + // TODO after processing drop, found can be false ASSERT(found); + + tsdbSetTableId(pInfo->dataReader, uid); + int64_t oldSkey = pInfo->cond.twindows[0].skey; + pInfo->cond.twindows[0].skey = ts + 1; + tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0); + pInfo->cond.twindows[0].skey = oldSkey; + pInfo->scanTimes = 0; + pInfo->curTWinIdx = 0; + qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts, pInfo->currentTable, tableSz); } diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 6fbda77808..e9995ed77a 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -53,13 +53,28 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; - SNode* pOnCondition = pJoinNode->pOnConditions; - if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) { - SOperatorNode* pNode = (SOperatorNode*)pOnCondition; + SNode* pMergeCondition = pJoinNode->pMergeCondition; + if (nodeType(pMergeCondition) == QUERY_NODE_OPERATOR) { + SOperatorNode* 
pNode = (SOperatorNode*)pMergeCondition; setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft); setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight); - } else if (nodeType(pOnCondition) == QUERY_NODE_LOGIC_CONDITION) { - extractTimeCondition(pInfo, (SLogicConditionNode*)pOnCondition); + } else { + ASSERT(false); + } + + if (pJoinNode->pOnConditions != NULL && pJoinNode->node.pConditions != NULL) { + pInfo->pCondAfterMerge = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION); + SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pInfo->pCondAfterMerge); + pLogicCond->pParameterList = nodesMakeList(); + nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->pOnConditions)); + nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->node.pConditions)); + pLogicCond->condType = LOGIC_COND_TYPE_AND; + } else if (pJoinNode->pOnConditions != NULL) { + pInfo->pCondAfterMerge = nodesCloneNode(pJoinNode->pOnConditions); + } else if (pJoinNode->node.pConditions != NULL) { + pInfo->pCondAfterMerge = nodesCloneNode(pJoinNode->node.pConditions); + } else { + pInfo->pCondAfterMerge = NULL; } pOperator->fpSet = @@ -88,15 +103,12 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) { void destroyMergeJoinOperator(void* param, int32_t numOfOutput) { SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param; + nodesDestroyNode(pJoinOperator->pCondAfterMerge); } -SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { +static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes) { SJoinOperatorInfo* pJoinInfo = pOperator->info; - SSDataBlock* pRes = pJoinInfo->pRes; - blockDataCleanup(pRes); - blockDataEnsureCapacity(pRes, 4096); - int32_t nrows = 0; while (1) { @@ -181,7 +193,28 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { break; } } +} +SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { + SJoinOperatorInfo* pJoinInfo = pOperator->info; + + SSDataBlock* pRes = pJoinInfo->pRes; + blockDataCleanup(pRes); + blockDataEnsureCapacity(pRes, 4096); + while (true) { + int32_t numOfRowsBefore = pRes->info.rows; + doMergeJoinImpl(pOperator, pRes); + int32_t numOfNewRows = pRes->info.rows - numOfRowsBefore; + if (numOfNewRows == 0) { + break; + } + if (pJoinInfo->pCondAfterMerge != NULL) { + doFilter(pJoinInfo->pCondAfterMerge, pRes); + } + if (pRes->info.rows >= pOperator->resultInfo.threshold) { + break; + } + } return (pRes->info.rows > 0) ? 
pRes : NULL; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 061b4ab3c5..f1965d4e68 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -807,18 +807,38 @@ static bool isStateWindow(SStreamBlockScanInfo* pInfo) { return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE; } +static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) { + uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t)); + if (groupId) { + return *groupId; + } + return 0; + /* Todo(liuyao) for partition by column + recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, rowId); + int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals); + uint64_t resId = 0; + uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len); + if (groupId) { + return *groupId; + } else if (len != 0) { + resId = calcGroupId(pTableScanInfo->keyBuf, len); + taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &resId, sizeof(uint64_t)); + } + return resId; + */ +} + static void setGroupId(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) { ASSERT(rowIndex < pBlock->info.rows); switch (pBlock->info.type) { + case STREAM_DELETE_DATA: case STREAM_RETRIEVE: { SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex); uint64_t* groupCol = (uint64_t*)pColInfo->pData; pInfo->groupId = groupCol[rowIndex]; } break; - case STREAM_DELETE_DATA: - break; default: break; } @@ -840,14 +860,14 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int3 int64_t gap = pInfo->sessionSup.gap; int32_t winIndex = 0; SResultWindowInfo* pCurWin = - getSessionTimeWindow(pAggSup, tsCols[(*pRowIndex)], INT64_MIN, pSDB->info.groupId, gap, &winIndex); + getSessionTimeWindow(pAggSup, tsCols[*pRowIndex], INT64_MIN, pSDB->info.groupId, gap, &winIndex); win = pCurWin->win; - (*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, (*pRowIndex), gap, NULL); + (*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, *pRowIndex, gap, NULL); } else { win = - getActiveTimeWindow(NULL, &dumyInfo, tsCols[(*pRowIndex)], &pInfo->interval, pInfo->interval.precision, NULL); - setGroupId(pInfo, pSDB, 2, *pRowIndex); - (*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, (*pRowIndex), win.ekey, binarySearchForKey, NULL, + getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, pInfo->interval.precision, NULL); + setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex); + (*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); } needRead = true; @@ -891,27 +911,6 @@ static void copyOneRow(SSDataBlock* dest, SSDataBlock* source, int32_t sourceRow dest->info.rows++; } -static uint64_t getGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t rowId) { - uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t)); - if (groupId) { - return *groupId; - } - return 0; - /* Todo(liuyao) for partition by column - recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, rowId); - int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals); - uint64_t resId = 0; - uint64_t* groupId = 
taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len); - if (groupId) { - return *groupId; - } else if (len != 0) { - resId = calcGroupId(pTableScanInfo->keyBuf, len); - taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &resId, sizeof(uint64_t)); - } - return resId; - */ -} - static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) { while (1) { SSDataBlock* pResult = NULL; @@ -935,7 +934,7 @@ static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, i SSDataBlock* pBlock = createOneDataBlock(pResult, true); blockDataCleanup(pResult); for (int32_t i = 0; i < pBlock->info.rows; i++) { - uint64_t id = getGroupId(pInfo->pOperatorDumy, pBlock, i); + uint64_t id = getGroupId(pInfo->pOperatorDumy, pBlock->info.uid); if (id == pInfo->groupId) { copyOneRow(pResult, pBlock, i); } @@ -944,6 +943,40 @@ static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, i */ } +static void copyDeleteDataBlock(SStreamBlockScanInfo* pInfo, SSDataBlock* pDelBlock, SOperatorInfo* pOperator, SSDataBlock* pUpdateRes) { + if (pDelBlock->info.rows == 0) { + return; + } + blockDataCleanup(pUpdateRes); + blockDataEnsureCapacity(pUpdateRes, 64); + ASSERT(taosArrayGetSize(pDelBlock->pDataBlock) >= 3); + SColumnInfoData* pStartTsCol = taosArrayGet(pDelBlock->pDataBlock, START_TS_COLUMN_INDEX); + TSKEY* startData = (TSKEY*)pStartTsCol->pData; + SColumnInfoData* pEndTsCol = taosArrayGet(pDelBlock->pDataBlock, END_TS_COLUMN_INDEX); + TSKEY* endData = (TSKEY*)pEndTsCol->pData; + SColumnInfoData* pGpCol = taosArrayGet(pDelBlock->pDataBlock, UID_COLUMN_INDEX); + uint64_t* uidCol = (uint64_t*)pGpCol->pData; + + SColumnInfoData* pDestTsCol = taosArrayGet(pUpdateRes->pDataBlock, START_TS_COLUMN_INDEX); + SColumnInfoData* pDestGpCol = taosArrayGet(pUpdateRes->pDataBlock, DELETE_GROUPID_COLUMN_INDEX); + for (int32_t i = pInfo->deleteDataIndex ; i < pDelBlock->info.rows && + i < pDelBlock->info.capacity - (endData[i] - startData[i])/pInfo->interval.interval - 1; i++) { + uint64_t groupId = getGroupId(pOperator, uidCol[i]); + for (TSKEY startTs = startData[i]; startTs <= endData[i]; ) { + colDataAppend(pDestTsCol, pUpdateRes->info.rows, (const char*)&startTs, false); + colDataAppend(pDestGpCol, pUpdateRes->info.rows, (const char*)&groupId, false); + pUpdateRes->info.rows++; + startTs = taosTimeAdd(startTs, pInfo->interval.interval, pInfo->interval.intervalUnit, pInfo->interval.precision); + } + pInfo->deleteDataIndex++; + } + + if (pInfo->deleteDataIndex > 0 && pInfo->deleteDataIndex == pDelBlock->info.rows) { + blockDataCleanup(pDelBlock); + pInfo->deleteDataIndex = 0; + } +} + static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDataBlock* pUpdateBlock) { blockDataCleanup(pUpdateBlock); int32_t size = taosArrayGetSize(pInfo->tsArray); @@ -953,11 +986,11 @@ static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDa blockDataEnsureCapacity(pUpdateBlock, size); int32_t rowId = *(int32_t*)taosArrayGet(pInfo->tsArray, pInfo->tsArrayIndex); - pInfo->groupId = getGroupId(pInfo->pSnapshotReadOp, pBlock, rowId); + pInfo->groupId = getGroupId(pInfo->pSnapshotReadOp, pBlock->info.uid); int32_t i = 0; for (; i < size; i++) { rowId = *(int32_t*)taosArrayGet(pInfo->tsArray, i + pInfo->tsArrayIndex); - uint64_t id = getGroupId(pInfo->pSnapshotReadOp, pBlock, rowId); + uint64_t id = getGroupId(pInfo->pSnapshotReadOp, pBlock->info.uid); if (pInfo->groupId != id) { break; 
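`copyDeleteDataBlock` above unrolls each deleted `[start, end]` range into one output row per interval step, so every window key that the range touches can be re-examined downstream, and attaches the group id looked up from the table uid. A minimal standalone sketch of that expansion, assuming a plain fixed-length interval (the real loop goes through `taosTimeAdd`, which also handles calendar units and precision); `expandDeleteRange` is a hypothetical helper, not part of the codebase:

```c
/* Standalone sketch: unroll one delete range into per-interval rows. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

static int expandDeleteRange(TSKEY start, TSKEY end, TSKEY interval,
                             TSKEY* out, int cap) {
  int n = 0;
  for (TSKEY ts = start; ts <= end && n < cap; ts += interval) {
    out[n++] = ts; /* one candidate window key per step */
  }
  return n;
}

int main(void) {
  TSKEY keys[16];
  int   n = expandDeleteRange(1000, 3500, 1000, keys, 16);
  for (int i = 0; i < n; i++) {
    printf("window key %lld\n", (long long)keys[i]); /* 1000 2000 3000 */
  }
  return 0;
}
```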
} @@ -974,28 +1007,32 @@ static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDa if (size > 0 && pInfo->tsArrayIndex == size) { taosArrayClear(pInfo->tsArray); } + + if (size == 0) { + copyDeleteDataBlock(pInfo, pInfo->pDeleteDataRes, pInfo->pSnapshotReadOp, pUpdateBlock); + } } -static void getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool invertible, SSDataBlock* pBlock, - SSDataBlock* pUpdateBlock) { +static void checkUpdateData(SStreamBlockScanInfo* pInfo, bool invertible, SSDataBlock* pBlock, + bool out) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP); TSKEY* ts = (TSKEY*)pColDataInfo->pData; for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) { - if (updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, ts[rowId])) { + if (updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, ts[rowId]) && out) { taosArrayPush(pInfo->tsArray, &rowId); } } - if (!pUpdateBlock) { - taosArrayClear(pInfo->tsArray); - return; +} + +static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t uidColIndex) { + ASSERT(taosArrayGetSize(pBlock->pDataBlock) >= 3); + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, uidColIndex); + uint64_t* uidCol = (uint64_t*)pColDataInfo->pData; + ASSERT(pBlock->info.rows > 0); + for (int32_t i = 0 ; i < pBlock->info.rows; i++) { + uidCol[i] = getGroupId(pOperator, uidCol[i]); } - setUpdateData(pInfo, pBlock, pUpdateBlock); - // Todo(liuyao) get from tsdb - // SSDataBlock* p = createOneDataBlock(pBlock, true); - // p->info.type = STREAM_INVERT; - // taosArrayClear(pInfo->tsArray); - // return p; } static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { @@ -1020,13 +1057,29 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { int32_t current = pInfo->validBlockIndex++; SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current); blockDataUpdateTsWindow(pBlock, 0); - if (pBlock->info.type == STREAM_RETRIEVE) { + switch (pBlock->info.type) { + case STREAM_RETRIEVE:{ pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE; copyDataBlock(pInfo->pPullDataRes, pBlock); pInfo->pullDataResIndex = 0; - prepareDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex); + prepareDataScan(pInfo, pInfo->pPullDataRes, START_TS_COLUMN_INDEX, &pInfo->pullDataResIndex); updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo); + } + break; + case STREAM_DELETE_DATA: { + pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; + pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; + copyDataBlock(pInfo->pDeleteDataRes, pBlock); + copyDeleteDataBlock(pInfo, pInfo->pDeleteDataRes, pInfo->pSnapshotReadOp, pInfo->pUpdateRes); + pInfo->updateResIndex = 0; + prepareDataScan(pInfo, pInfo->pUpdateRes, START_TS_COLUMN_INDEX, &pInfo->updateResIndex); + pInfo->pUpdateRes->info.type = STREAM_DELETE_DATA; + return pInfo->pUpdateRes; + } + break; + default: + break; } return pBlock; } else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) { @@ -1043,39 +1096,33 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RETRIEVE) { SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex); if (pSDB != NULL) { - getUpdateDataBlock(pInfo, true, pSDB, NULL); + checkUpdateData(pInfo, true, pSDB, false); pSDB->info.type = STREAM_PULL_DATA; return pSDB; } pInfo->scanMode = 
STREAM_SCAN_FROM_DATAREADER; - } else { - if (isStateWindow(pInfo)) { - pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; - pInfo->updateResIndex = pInfo->pUpdateRes->info.rows; - if (!prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex)) { - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - } + } else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) { + SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); + if (pSDB) { + pSDB->info.type = STREAM_NORMAL; + checkUpdateData(pInfo, true, pSDB, false); + return pSDB; } - if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) { - SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); - if (pSDB == NULL) { - setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes); - if (pInfo->pUpdateRes->info.rows > 0) { - if (!isStateWindow(pInfo)) { - // Todo(liuyao) mybe can delete this. - bool test = prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); - ASSERT(test == false); - } - return pInfo->pUpdateRes; - } else { - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - } - } else { - pSDB->info.type = STREAM_NORMAL; - getUpdateDataBlock(pInfo, true, pSDB, NULL); - return pSDB; - } + setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes); + if (pInfo->pUpdateRes->info.rows > 0) { + prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); + return pInfo->pUpdateRes; } + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + } else if (isStateWindow(pInfo)) { + pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; + pInfo->updateResIndex = pInfo->pUpdateRes->info.rows; + if (prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex)) { + ASSERT(pInfo->pUpdateRes->info.rows == 0); + // return empty data blcok + return pInfo->pUpdateRes; + } + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; } SDataBlockInfo* pBlockInfo = &pInfo->pRes->info; @@ -1169,7 +1216,8 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pOperator->status = OP_EXEC_DONE; } else if (pInfo->pUpdateInfo) { pInfo->tsArrayIndex = 0; - getUpdateDataBlock(pInfo, true, pInfo->pRes, pInfo->pUpdateRes); + checkUpdateData(pInfo, true, pInfo->pRes, true); + setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes); if (pInfo->pUpdateRes->info.rows > 0) { if (pInfo->pUpdateRes->info.type == STREAM_CLEAR) { pInfo->updateResIndex = 0; @@ -1180,9 +1228,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } } } - return (pBlockInfo->rows == 0) ? 
NULL : pInfo->pRes; - } else if (pInfo->blockType == STREAM_INPUT__DATA_SCAN) { // check reader last status // if not match, reset status @@ -1295,6 +1341,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pInfo->groupId = 0; pInfo->pPullDataRes = createPullDataBlock(); pInfo->pStreamScanOp = pOperator; + pInfo->deleteDataIndex = 0; + pInfo->pDeleteDataRes = createPullDataBlock(); pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; @@ -1748,8 +1796,8 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { getPerfDbMeta(&pSysDbTableMeta, &size); p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); - relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); pInfo->pRes->info.rows = p->info.rows; + relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); blockDataDestroy(p); return pInfo->pRes->info.rows; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 03c939cc95..fdef432d95 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -808,11 +808,31 @@ static void removeResult(SArray* pUpdated, TSKEY key) { static void removeResults(SArray* pWins, SArray* pUpdated) { int32_t size = taosArrayGetSize(pWins); for (int32_t i = 0; i < size; i++) { - STimeWindow* pW = taosArrayGet(pWins, i); - removeResult(pUpdated, pW->skey); + SWinRes* pW = taosArrayGet(pWins, i); + removeResult(pUpdated, pW->ts); } } +int64_t getWinReskey(void* data, int32_t index) { + SArray* res = (SArray*)data; + SWinRes* pos = taosArrayGet(res, index); + return pos->ts; +} + +static void removeDeleteResults(SArray* pUpdated, SArray* pDelWins) { + int32_t upSize = taosArrayGetSize(pUpdated); + int32_t delSize = taosArrayGetSize(pDelWins); + for (int32_t i = 0; i < upSize; i++) { + SResKeyPos* pResKey = taosArrayGetP(pUpdated, i); + int64_t key = *(int64_t*)pResKey->key; + int32_t index = binarySearch(pDelWins, delSize, key, TSDB_ORDER_DESC, getWinReskey); + if (index >= 0 && key == getWinReskey(pDelWins, index)) { + taosArrayRemove(pDelWins, index); + } + } +} + + bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup) { ASSERT(pSup->maxTs == INT64_MIN || pSup->maxTs > 0); return pSup->maxTs != INT64_MIN && ts < pSup->maxTs - pSup->waterMark; @@ -1264,6 +1284,38 @@ bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t return true; } +bool doDeleteIntervalWindow(SAggSupporter* pAggSup, TSKEY ts, uint64_t groupId) { + size_t bytes = sizeof(TSKEY); + SET_RES_WINDOW_KEY(pAggSup->keyBuf, &ts, bytes, groupId); + SResultRowPosition* p1 = + (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + if (!p1) { + // window has been closed + return false; + } + SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, p1->pageId); + // dBufSetBufPageRecycled(pAggSup->pResultBuf, bufPage); + taosHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + return true; +} + +void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* pUpWins, SInterval* pInterval) { + SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); + TSKEY* tsStarts = (TSKEY*)pStartCol->pData; + SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); + uint64_t* groupIds = 
(uint64_t*)pGroupCol->pData; + for (int32_t i = 0; i < pBlock->info.rows; i++) { + SResultRowInfo dumyInfo; + dumyInfo.cur.pageId = -1; + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsStarts[i], pInterval, pInterval->precision, NULL); + doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]); + if (pUpWins) { + SWinRes winRes = {.ts = win.skey, .groupId = groupIds[i]}; + taosArrayPush(pUpWins, &winRes); + } + } +} + static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* pInterval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, SArray* pUpWins) { SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, tsIndex); @@ -1279,13 +1331,11 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* dumyInfo.cur.pageId = -1; STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, pInterval->precision, NULL); step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); - uint64_t groupId = pBlock->info.groupId; - if (pGpDatas) { - groupId = pGpDatas[i]; - } - bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), groupId, numOfOutput); + uint64_t winGpId = pGpDatas ? pGpDatas[i] : pBlock->info.groupId; + bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), winGpId, numOfOutput); if (pUpWins && res) { - taosArrayPush(pUpWins, &win); + SWinRes winRes = {.ts = win.skey, .groupId = winGpId}; + taosArrayPush(pUpWins, &winRes); } } } @@ -1307,8 +1357,9 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) { return TSDB_CODE_SUCCESS; } -static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval, - SHashObj* pPullDataMap, SArray* closeWins) { +static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, + SInterval* pInterval, SHashObj* pPullDataMap, SArray* closeWins, + SArray* pRecyPages, SDiskbasedBuf* pDiscBuf) { void* pIte = NULL; size_t keyLen = 0; while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { @@ -1342,6 +1393,11 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, if (code != TSDB_CODE_SUCCESS) { return code; } + ASSERT(pRecyPages != NULL); + taosArrayPush(pRecyPages, &pPos->pageId); + } else { + SFilePage* bufPage = getBufPage(pDiscBuf, pPos->pageId); + // dBufSetBufPageRecycled(pDiscBuf, bufPage); } char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); @@ -1358,7 +1414,38 @@ static void closeChildIntervalWindow(SArray* pChildren, TSKEY maxTs) { SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info; ASSERT(pChInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE); pChInfo->twAggSup.maxTs = TMAX(pChInfo->twAggSup.maxTs, maxTs); - closeIntervalWindow(pChInfo->aggSup.pResultRowHashTable, &pChInfo->twAggSup, &pChInfo->interval, NULL, NULL); + closeIntervalWindow(pChInfo->aggSup.pResultRowHashTable, &pChInfo->twAggSup, + &pChInfo->interval, NULL, NULL, NULL, pChInfo->aggSup.pResultBuf); + } +} + +static void freeAllPages(SArray* pageIds, SDiskbasedBuf* pDiskBuf) { + int32_t size = taosArrayGetSize(pageIds); + for (int32_t i = 0; i < size; i++) { + int32_t pageId = *(int32_t*)taosArrayGet(pageIds, i); + SFilePage* bufPage = getBufPage(pDiskBuf, pageId); + // dBufSetBufPageRecycled(pDiskBuf, bufPage); + } + taosArrayClear(pageIds); +} + +static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlock) { + 
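`doDeleteSpecifyIntervalWindow` / `doDeleteIntervalWindow` above map each delete timestamp to the start key of the interval window it falls in (via `getActiveTimeWindow`) and then drop the matching `(skey, groupId)` entry from the result-row hash, recording the dropped window in `pUpWins`. A standalone sketch of just the key computation, assuming a zero-origin, fixed-length interval; `WinKey` and `windowKeyFor` are illustrative names only, and the real code also handles offsets, precision and calendar units:

```c
/* Standalone sketch: delete timestamp -> (window start, group id) key. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

typedef struct {
  TSKEY    skey;    /* window start: first half of the hash key */
  uint64_t groupId; /* second half of the hash key              */
} WinKey;

static WinKey windowKeyFor(TSKEY ts, TSKEY interval, uint64_t groupId) {
  WinKey k;
  k.skey    = ts - (ts % interval); /* align down to the window start */
  k.groupId = groupId;
  return k;
}

int main(void) {
  WinKey k = windowKeyFor(2750, 1000, 7);
  printf("drop window skey=%lld groupId=%llu\n",
         (long long)k.skey, (unsigned long long)k.groupId); /* 2000, 7 */
  return 0;
}
```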
blockDataCleanup(pBlock); + int32_t size = taosArrayGetSize(pWins); + if (*index == size) { + *index = 0; + taosArrayClear(pWins); + return; + } + blockDataEnsureCapacity(pBlock, size - *index); + SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); + SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, DELETE_GROUPID_COLUMN_INDEX); + for (int32_t i = *index; i < size; i++) { + SWinRes* pWin = taosArrayGet(pWins, i); + colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false); + colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&pWin->groupId, false); + pBlock->info.rows++; + (*index)++; } } @@ -1374,27 +1461,37 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { } if (pOperator->status == OP_RES_TO_RETURN) { + doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex ,pInfo->pDelRes); + if (pInfo->pDelRes->info.rows > 0) { + return pInfo->pDelRes; + } + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pInfo->binfo.pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { pOperator->status = OP_EXEC_DONE; + freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); } return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes; } SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { break; } + printDataBlock(pBlock, "single interval recv"); if (pBlock->info.type == STREAM_CLEAR) { - doClearWindows(&pInfo->aggSup, &pOperator->exprSupp, &pInfo->interval, 0, pOperator->exprSupp.numOfExprs, pBlock, - NULL); + doClearWindows(&pInfo->aggSup, &pOperator->exprSupp, &pInfo->interval, 0, + pOperator->exprSupp.numOfExprs, pBlock, NULL); qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo)); continue; + } if (pBlock->info.type == STREAM_DELETE_DATA) { + doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval); + continue; } else if (pBlock->info.type == STREAM_GET_ALL) { getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated); continue; @@ -1416,14 +1513,19 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated); } - closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdated); + pOperator->status = OP_RES_TO_RETURN; + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, + &pInfo->interval, NULL, pUpdated, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); + removeDeleteResults(pUpdated, pInfo->pDelWins); + doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); + if (pInfo->pDelRes->info.rows > 0) { + return pInfo->pDelRes; + } doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); - - pOperator->status = OP_RES_TO_RETURN; printDataBlock(pInfo->binfo.pRes, "single interval"); return pInfo->binfo.pRes->info.rows 
== 0 ? NULL : pInfo->binfo.pRes; } @@ -1438,6 +1540,7 @@ void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); cleanupAggSup(&pInfo->aggSup); + taosArrayDestroy(pInfo->pRecycledPages); } void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { @@ -1448,12 +1551,13 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { taosHashCleanup(pInfo->pPullDataMap); taosArrayDestroy(pInfo->pPullWins); blockDataDestroy(pInfo->pPullDataRes); + taosArrayDestroy(pInfo->pRecycledPages); if (pInfo->pChildren) { int32_t size = taosArrayGetSize(pInfo->pChildren); for (int32_t i = 0; i < size; i++) { SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i); - destroyIntervalOperatorInfo(pChildOp->info, numOfOutput); + destroyStreamFinalIntervalOperatorInfo(pChildOp->info, numOfOutput); taosMemoryFreeClear(pChildOp->info); taosMemoryFreeClear(pChildOp); } @@ -1520,6 +1624,28 @@ void increaseTs(SqlFunctionCtx* pCtx) { } } +SSDataBlock* createDeleteBlock() { + SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + pBlock->info.hasVarCol = false; + pBlock->info.groupId = 0; + pBlock->info.rows = 0; + pBlock->info.type = STREAM_DELETE_RESULT; + pBlock->info.rowSize = sizeof(TSKEY) + sizeof(uint64_t); + + pBlock->pDataBlock = taosArrayInit(2, sizeof(SColumnInfoData)); + SColumnInfoData infoData = {0}; + infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP; + infoData.info.bytes = sizeof(TSKEY); + // window start ts + taosArrayPush(pBlock->pDataBlock, &infoData); + + infoData.info.type = TSDB_DATA_TYPE_UBIGINT; + infoData.info.bytes = sizeof(uint64_t); + taosArrayPush(pBlock->pDataBlock, &infoData); + + return pBlock; +} + SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode, @@ -1573,6 +1699,12 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* goto _error; } } + pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); + pInfo->delIndex = 0; + // pInfo->pDelRes = createDeleteBlock(); todo(liuyao) for delete + pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false);// todo(liuyao) for delete + pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;// todo(liuyao) for delete initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -2219,28 +2351,44 @@ void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int3 } } -static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExprSupp* pSup, SArray* pWinArray, - int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { +bool hasIntervalWindow(SAggSupporter* pSup, TSKEY ts, uint64_t groupId) { + int32_t bytes = sizeof(TSKEY); + SET_RES_WINDOW_KEY(pSup->keyBuf, &ts, bytes, groupId); + SResultRowPosition* p1 = + (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + return p1 != NULL; +} + +static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExprSupp* pSup, + SArray* pWinArray, int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SArray* pUpdated) { int32_t size = taosArrayGetSize(pWinArray); if (!pInfo->pChildren) { return; } for (int32_t i = 0; i < size; i++) { - STimeWindow* pParentWin = 
taosArrayGet(pWinArray, i); + SWinRes* pWinRes = taosArrayGet(pWinArray, i); SResultRow* pCurResult = NULL; - setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, pParentWin, true, &pCurResult, 0, pSup->pCtx, numOfOutput, + STimeWindow ParentWin = {.skey = pWinRes->ts, .ekey = pWinRes->ts+1}; + setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &ParentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren); + bool find = true; for (int32_t j = 0; j < numOfChildren; j++) { SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, j); SIntervalAggOperatorInfo* pChInfo = pChildOp->info; SExprSupp* pChildSup = &pChildOp->exprSupp; - + if (!hasIntervalWindow(&pChInfo->aggSup, pWinRes->ts, pWinRes->groupId)) { + continue; + } + find = true; SResultRow* pChResult = NULL; - setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, pParentWin, true, &pChResult, 0, pChildSup->pCtx, + setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, &ParentWin, true, &pChResult, pWinRes->groupId, pChildSup->pCtx, pChildSup->numOfExprs, pChildSup->rowEntryInfoOffset, &pChInfo->aggSup, pTaskInfo); compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo); } + if (find && pUpdated) { + saveResultRow(pCurResult, pWinRes->groupId, pUpdated); + } } } @@ -2472,6 +2620,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { if (!IS_FINAL_OP(pInfo)) { // semi interval operator clear disk buffer clearStreamIntervalOperator(pInfo); + } else { + freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); } return NULL; } @@ -2497,12 +2647,19 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); return pInfo->pPullDataRes; } + doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); + if (pInfo->pDelRes->info.rows != 0) { + // process the rest of the data + printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? 
"interval Final" : "interval Semi"); + return pInfo->pDelRes; + } } while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { clearSpecialDataBlock(pInfo->pUpdateRes); + removeDeleteResults(pUpdated, pInfo->pDelWins); pOperator->status = OP_RES_TO_RETURN; qInfo("Stream Final Interval return data"); break; @@ -2514,7 +2671,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { pBlock->info.type == STREAM_INVALID) { pInfo->binfo.pRes->info.type = pBlock->info.type; } else if (pBlock->info.type == STREAM_CLEAR) { - SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pInfo->primaryTsIndex, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); if (IS_FINAL_OP(pInfo)) { @@ -2525,8 +2682,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { doClearWindows(&pChildInfo->aggSup, pChildSup, &pChildInfo->interval, pChildInfo->primaryTsIndex, pChildSup->numOfExprs, pBlock, NULL); - rebuildIntervalWindow(pInfo, pSup, pUpWins, pInfo->binfo.pRes->info.groupId, pOperator->exprSupp.numOfExprs, - pOperator->pTaskInfo); + rebuildIntervalWindow(pInfo, pSup, pUpWins, pInfo->binfo.pRes->info.groupId, + pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, NULL); taosArrayDestroy(pUpWins); continue; } @@ -2535,11 +2692,25 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { pInfo->returnUpdate = true; taosArrayDestroy(pUpWins); break; + } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) { + doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval); + if (IS_FINAL_OP(pInfo)) { + int32_t childIndex = getChildIndex(pBlock); + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); + SStreamFinalIntervalOperatorInfo* pChildInfo = pChildOp->info; + SExprSupp* pChildSup = &pChildOp->exprSupp; + doDeleteSpecifyIntervalWindow(&pChildInfo->aggSup, pBlock, NULL, &pChildInfo->interval); + rebuildIntervalWindow(pInfo, pSup, pInfo->pDelWins, pInfo->binfo.pRes->info.groupId, + pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdated); + continue; + } + removeResults(pInfo->pDelWins, pUpdated); + break; } else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) { getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated); continue; } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { - SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, 0, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); removeResults(pUpWins, pUpdated); taosArrayDestroy(pUpWins); @@ -2563,6 +2734,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { if (!pChildOp) { longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } + SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info; + pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; taosArrayPush(pInfo->pChildren, &pChildOp); } SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex); @@ -2578,8 +2751,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); if (IS_FINAL_OP(pInfo)) { - closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pInfo->pPullDataMap, - pUpdated); + 
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, + &pInfo->interval, pInfo->pPullDataMap, pUpdated, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); closeChildIntervalWindow(pInfo->pChildren, pInfo->twAggSup.maxTs); } @@ -2607,6 +2780,13 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); return pInfo->pPullDataRes; } + + doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); + if (pInfo->pDelRes->info.rows != 0) { + // process the rest of the data + printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + return pInfo->pDelRes; + } // ASSERT(false); return NULL; } @@ -2680,6 +2860,8 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, for (int32_t i = 0; i < numOfChild; i++) { SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pPhyNode, pTaskInfo, 0); if (pChildOp) { + SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info; + pChInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; taosArrayPush(pInfo->pChildren, &pChildOp); continue; } @@ -2711,6 +2893,11 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK); pInfo->pPullDataRes = createPullDataBlock(); pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired; + // pInfo->pDelRes = createDeleteBlock(); // todo(liuyao) for delete + pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false);// todo(liuyao) for delete + pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;// todo(liuyao) for delete + pInfo->delIndex = 0; + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); pOperator->operatorType = pPhyNode->type; pOperator->blocking = true; @@ -2851,9 +3038,9 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pStDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK); pInfo->pDelIterator = NULL; - pInfo->pDelRes = createOneDataBlock(pResBlock, false); - pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; - blockDataEnsureCapacity(pInfo->pDelRes, 64); + // pInfo->pDelRes = createDeleteBlock(); // todo(liuyao) for delete + pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false);// todo(liuyao) for delete + pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;// todo(liuyao) for delete pInfo->pChildren = NULL; pInfo->isFinal = false; pInfo->pPhyNode = pPhyNode; @@ -3205,6 +3392,11 @@ static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated) { void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** Ite) { blockDataCleanup(pBlock); + int32_t size = taosHashGetSize(pStDeleted); + if (size == 0) { + return; + } + blockDataEnsureCapacity(pBlock, size); size_t keyLen = 0; while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0); @@ -3979,9 +4171,9 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pSeDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK); pInfo->pDelIterator = NULL; - pInfo->pDelRes = createOneDataBlock(pResBlock, false); - pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; - blockDataEnsureCapacity(pInfo->pDelRes, 64); + // pInfo->pDelRes = createDeleteBlock(); // 
todo(liuyao) for delete + pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false);// todo(liuyao) for delete + pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;// todo(liuyao) for delete pInfo->pChildren = NULL; pInfo->ignoreExpiredData = pStateNode->window.igExpired; diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index bc004579ba..d9a05973ce 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -15,6 +15,7 @@ #include "builtins.h" #include "builtinsimpl.h" +#include "cJSON.h" #include "querynodes.h" #include "scalar.h" #include "taoserror.h" @@ -39,7 +40,7 @@ static int32_t invaildFuncParaValueErrMsg(char* pErrBuf, int32_t len, const char return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_PARA_VALUE, "Invalid parameter value : %s", pFuncName); } -#define TIME_UNIT_INVALID 1 +#define TIME_UNIT_INVALID 1 #define TIME_UNIT_TOO_SMALL 2 static int32_t validateTimeUnitParam(uint8_t dbPrec, const SValueNode* pVal) { @@ -47,14 +48,19 @@ static int32_t validateTimeUnitParam(uint8_t dbPrec, const SValueNode* pVal) { return TIME_UNIT_INVALID; } - if (TSDB_TIME_PRECISION_MILLI == dbPrec && 0 == strcasecmp(pVal->literal, "1u")) { + if (TSDB_TIME_PRECISION_MILLI == dbPrec && (0 == strcasecmp(pVal->literal, "1u") || + 0 == strcasecmp(pVal->literal, "1b"))) { + return TIME_UNIT_TOO_SMALL; + } + + if (TSDB_TIME_PRECISION_MICRO == dbPrec && 0 == strcasecmp(pVal->literal, "1b")) { return TIME_UNIT_TOO_SMALL; } if (pVal->literal[0] != '1' || (pVal->literal[1] != 'u' && pVal->literal[1] != 'a' && pVal->literal[1] != 's' && pVal->literal[1] != 'm' && pVal->literal[1] != 'h' && pVal->literal[1] != 'd' && - pVal->literal[1] != 'w')) { + pVal->literal[1] != 'w' && pVal->literal[1] != 'b')) { return TIME_UNIT_INVALID; } @@ -695,13 +701,13 @@ static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len uint8_t dbPrec = pFunc->node.resType.precision; - int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 1)); + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1)); if (ret == TIME_UNIT_TOO_SMALL) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "ELAPSED function time unit parameter should be greater than db precision"); } else if (ret == TIME_UNIT_INVALID) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "ELAPSED function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); + "ELAPSED function time unit parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); } } @@ -796,6 +802,165 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le return TSDB_CODE_SUCCESS; } +typedef enum { UNKNOWN_BIN = 0, USER_INPUT_BIN, LINEAR_BIN, LOG_BIN } EHistoBinType; + +static int8_t validateHistogramBinType(char* binTypeStr) { + int8_t binType; + if (strcasecmp(binTypeStr, "user_input") == 0) { + binType = USER_INPUT_BIN; + } else if (strcasecmp(binTypeStr, "linear_bin") == 0) { + binType = LINEAR_BIN; + } else if (strcasecmp(binTypeStr, "log_bin") == 0) { + binType = LOG_BIN; + } else { + binType = UNKNOWN_BIN; + } + + return binType; +} + +static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* errMsg, int32_t msgLen) { + const char* msg1 = "HISTOGRAM function requires four parameters"; + const char* msg3 = "HISTOGRAM function invalid format for binDesc parameter"; + const char* msg4 = 
"HISTOGRAM function binDesc parameter \"count\" should be in range [1, 1000]"; + const char* msg5 = "HISTOGRAM function bin/parameter should be in range [-DBL_MAX, DBL_MAX]"; + const char* msg6 = "HISTOGRAM function binDesc parameter \"width\" cannot be 0"; + const char* msg7 = "HISTOGRAM function binDesc parameter \"start\" cannot be 0 with \"log_bin\" type"; + const char* msg8 = "HISTOGRAM function binDesc parameter \"factor\" cannot be negative or equal to 0/1"; + + cJSON* binDesc = cJSON_Parse(binDescStr); + int32_t numOfBins; + double* intervals; + if (cJSON_IsObject(binDesc)) { /* linaer/log bins */ + int32_t numOfParams = cJSON_GetArraySize(binDesc); + int32_t startIndex; + if (numOfParams != 4) { + snprintf(errMsg, msgLen, "%s", msg1); + return false; + } + + cJSON* start = cJSON_GetObjectItem(binDesc, "start"); + cJSON* factor = cJSON_GetObjectItem(binDesc, "factor"); + cJSON* width = cJSON_GetObjectItem(binDesc, "width"); + cJSON* count = cJSON_GetObjectItem(binDesc, "count"); + cJSON* infinity = cJSON_GetObjectItem(binDesc, "infinity"); + + if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) { + snprintf(errMsg, msgLen, "%s", msg3); + return false; + } + + if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000 + snprintf(errMsg, msgLen, "%s", msg4); + return false; + } + + if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) || + (factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) { + snprintf(errMsg, msgLen, "%s", msg5); + return false; + } + + int32_t counter = (int32_t)count->valueint; + if (infinity->valueint == false) { + startIndex = 0; + numOfBins = counter + 1; + } else { + startIndex = 1; + numOfBins = counter + 3; + } + + intervals = taosMemoryCalloc(numOfBins, sizeof(double)); + if (cJSON_IsNumber(width) && factor == NULL && binType == LINEAR_BIN) { + // linear bin process + if (width->valuedouble == 0) { + snprintf(errMsg, msgLen, "%s", msg6); + taosMemoryFree(intervals); + return false; + } + for (int i = 0; i < counter + 1; ++i) { + intervals[startIndex] = start->valuedouble + i * width->valuedouble; + if (isinf(intervals[startIndex])) { + snprintf(errMsg, msgLen, "%s", msg5); + taosMemoryFree(intervals); + return false; + } + startIndex++; + } + } else if (cJSON_IsNumber(factor) && width == NULL && binType == LOG_BIN) { + // log bin process + if (start->valuedouble == 0) { + snprintf(errMsg, msgLen, "%s", msg7); + taosMemoryFree(intervals); + return false; + } + if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) { + snprintf(errMsg, msgLen, "%s", msg8); + taosMemoryFree(intervals); + return false; + } + for (int i = 0; i < counter + 1; ++i) { + intervals[startIndex] = start->valuedouble * pow(factor->valuedouble, i * 1.0); + if (isinf(intervals[startIndex])) { + snprintf(errMsg, msgLen, "%s", msg5); + taosMemoryFree(intervals); + return false; + } + startIndex++; + } + } else { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + + if (infinity->valueint == true) { + intervals[0] = -INFINITY; + intervals[numOfBins - 1] = INFINITY; + // in case of desc bin orders, -inf/inf should be swapped + ASSERT(numOfBins >= 4); + if (intervals[1] > intervals[numOfBins - 2]) { + TSWAP(intervals[0], intervals[numOfBins - 1]); + } + } + } else if (cJSON_IsArray(binDesc)) { /* user input bins */ + if (binType != USER_INPUT_BIN) { + snprintf(errMsg, msgLen, "%s", msg3); + return false; + } + 
numOfBins = cJSON_GetArraySize(binDesc); + intervals = taosMemoryCalloc(numOfBins, sizeof(double)); + cJSON* bin = binDesc->child; + if (bin == NULL) { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + int i = 0; + while (bin) { + intervals[i] = bin->valuedouble; + if (!cJSON_IsNumber(bin)) { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + if (i != 0 && intervals[i] <= intervals[i - 1]) { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + bin = bin->next; + i++; + } + } else { + snprintf(errMsg, msgLen, "%s", msg3); + return false; + } + + taosMemoryFree(intervals); + return true; +} + static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); if (4 != numOfParams) { @@ -814,6 +979,8 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + int8_t binType; + char* binDesc; for (int32_t i = 1; i < numOfParams; ++i) { SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); if (QUERY_NODE_VALUE != nodeType(pParamNode)) { @@ -824,9 +991,26 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l pValue->notReserved = true; - if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { + if (i == 1) { + binType = validateHistogramBinType(varDataVal(pValue->datum.p)); + if (binType == UNKNOWN_BIN) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "HISTOGRAM function normalized parameter should be 0/1"); + "HISTOGRAM function binType parameter should be " + "\"user_input\", \"log_bin\" or \"linear_bin\""); + } + } + + if (i == 2) { + char errMsg[128] = {0}; + binDesc = varDataVal(pValue->datum.p); + if (!validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg); + } + } + + if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "HISTOGRAM function normalized parameter should be 0/1"); } } @@ -853,6 +1037,8 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + int8_t binType; + char* binDesc; for (int32_t i = 1; i < numOfParams; ++i) { SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); if (QUERY_NODE_VALUE != nodeType(pParamNode)) { @@ -863,9 +1049,26 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32 pValue->notReserved = true; - if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { + if (i == 1) { + binType = validateHistogramBinType(varDataVal(pValue->datum.p)); + if (binType == UNKNOWN_BIN) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "HISTOGRAM function normalized parameter should be 0/1"); + "HISTOGRAM function binType parameter should be " + "\"user_input\", \"log_bin\" or \"linear_bin\""); + } + } + + if (i == 2) { + char errMsg[128] = {0}; + binDesc = varDataVal(pValue->datum.p); + if (!validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg); + } + } + + if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { + return buildFuncErrMsg(pErrBuf, len, 
TSDB_CODE_FUNC_FUNTION_ERROR, + "HISTOGRAM function normalized parameter should be 0/1"); } } @@ -1020,13 +1223,13 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32 if (numOfParams == 4) { uint8_t dbPrec = pFunc->node.resType.precision; - int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 3)); + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 3)); if (ret == TIME_UNIT_TOO_SMALL) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "STATEDURATION function time unit parameter should be greater than db precision"); } else if (ret == TIME_UNIT_INVALID) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "STATEDURATION function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); + "STATEDURATION function time unit parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); } } @@ -1234,10 +1437,6 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l static int32_t translateFirstLastImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { // first(col_list) will be rewritten as first(col) - if (2 != LIST_LENGTH(pFunc->pParameterList)) { // input has two params c0,ts, is this a bug? - return TSDB_CODE_SUCCESS; - } - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); uint8_t paraType = ((SExprNode*)pPara)->resType.type; int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes; @@ -1535,13 +1734,13 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_ // add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; - int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 1)); + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1)); if (ret == TIME_UNIT_TOO_SMALL) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "TIMETRUNCATE function time unit parameter should be greater than db precision"); } else if (ret == TIME_UNIT_INVALID) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "TIMETRUNCATE function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); + "TIMETRUNCATE function time unit parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); } addDbPrecisonParam(&pFunc->pParameterList, dbPrec); @@ -1574,13 +1773,13 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le uint8_t dbPrec = pFunc->node.resType.precision; if (3 == numOfParams) { - int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 2)); + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 2)); if (ret == TIME_UNIT_TOO_SMALL) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "TIMEDIFF function time unit parameter should be greater than db precision"); } else if (ret == TIME_UNIT_INVALID) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "TIMEDIFF function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); + "TIMEDIFF function time unit parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); } } @@ -2112,7 +2311,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "histogram", .type = FUNCTION_TYPE_HISTOGRAM, - .classification 
= FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC, .translateFunc = translateHistogram, .getEnvFunc = getHistogramFuncEnv, .initFunc = histogramFunctionSetup, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 3f6afaf5fd..cf4a763423 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -719,8 +719,10 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) { ASSERT(numOfElem >= 0); pAvgRes->count += numOfElem; - if (IS_INTEGER_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { pAvgRes->sum.isum += pAgg->sum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + pAvgRes->sum.usum += pAgg->sum; } else if (IS_FLOAT_TYPE(type)) { pAvgRes->sum.dsum += GET_DOUBLE_VAL((const char*)&(pAgg->sum)); } @@ -784,6 +786,64 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) { break; } + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t* plist = (uint8_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + + break; + } + + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t* plist = (uint16_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + break; + } + + case TSDB_DATA_TYPE_UINT: { + uint32_t* plist = (uint32_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + + break; + } + + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t* plist = (uint64_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { float* plist = (float*)pCol->pData; for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { @@ -825,8 +885,10 @@ _avg_over: static void avgTransferInfo(SAvgRes* pInput, SAvgRes* pOutput) { pOutput->type = pInput->type; - if (IS_INTEGER_TYPE(pOutput->type)) { + if (IS_SIGNED_NUMERIC_TYPE(pOutput->type)) { pOutput->sum.isum += pInput->sum.isum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(pOutput->type)) { + pOutput->sum.usum += pInput->sum.usum; } else { pOutput->sum.dsum += pInput->sum.dsum; } @@ -900,6 +962,22 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) { LIST_AVG_N(pAvgRes->sum.isum, int64_t); break; } + case TSDB_DATA_TYPE_UTINYINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint8_t); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint16_t); + break; + } + case TSDB_DATA_TYPE_UINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint32_t); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint64_t); + break; + } case TSDB_DATA_TYPE_FLOAT: { LIST_AVG_N(pAvgRes->sum.dsum, float); break; @@ -925,8 +1003,10 @@ int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); SAvgRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); - if 
(IS_INTEGER_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { pDBuf->sum.isum += pSBuf->sum.isum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + pDBuf->sum.usum += pSBuf->sum.usum; } else { pDBuf->sum.dsum += pSBuf->sum.dsum; } @@ -941,8 +1021,10 @@ int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); int32_t type = pAvgRes->type; - if (IS_INTEGER_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count); + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + pAvgRes->result = pAvgRes->sum.usum / ((double)pAvgRes->count); } else { pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count); } diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 7ad7612e7e..262adc5d6f 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -260,7 +260,7 @@ bool fmIsSameInOutType(int32_t funcId) { } static int32_t getFuncInfo(SFunctionNode* pFunc) { - char msg[64] = {0}; + char msg[128] = {0}; return fmGetFuncInfo(pFunc, msg, sizeof(msg)); } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index da9474ede0..1bc759e833 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -1565,6 +1565,10 @@ void constructUdfService(void *argsThread) { //TODO return value of uv_run uv_run(&udfc->uvLoop, UV_RUN_DEFAULT); uv_loop_close(&udfc->uvLoop); + + uv_walk(&udfc->uvLoop, udfUdfdCloseWalkCb, NULL); + uv_run(&udfc->uvLoop, UV_RUN_DEFAULT); + uv_loop_close(&udfc->uvLoop); } int32_t udfcOpen() { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 9c5cec07df..25a41fb15c 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -368,6 +368,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) { static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); COPY_SCALAR_FIELD(joinType); + CLONE_NODE_FIELD(pMergeCondition); CLONE_NODE_FIELD(pOnConditions); COPY_SCALAR_FIELD(isSingleTableJoin); return TSDB_CODE_SUCCESS; diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 4179930033..34f92dac0b 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1254,6 +1254,7 @@ static int32_t jsonToLogicPlan(const SJson* pJson, void* pObj) { static const char* jkJoinLogicPlanJoinType = "JoinType"; static const char* jkJoinLogicPlanOnConditions = "OnConditions"; +static const char* jkJoinLogicPlanMergeCondition = "MergeConditions"; static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) { const SJoinLogicNode* pNode = (const SJoinLogicNode*)pObj; @@ -1262,6 +1263,9 @@ static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkJoinLogicPlanJoinType, pNode->joinType); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkJoinLogicPlanMergeCondition, nodeToJson, pNode->pMergeCondition); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkJoinLogicPlanOnConditions, nodeToJson, pNode->pOnConditions); } @@ -1617,6 +1621,7 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) { } static const char* jkJoinPhysiPlanJoinType = "JoinType"; +static const char* 
jkJoinPhysiPlanMergeCondition = "MergeCondition"; static const char* jkJoinPhysiPlanOnConditions = "OnConditions"; static const char* jkJoinPhysiPlanTargets = "Targets"; @@ -1627,6 +1632,9 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkJoinPhysiPlanMergeCondition, nodeToJson, pNode->pMergeCondition); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkJoinPhysiPlanOnConditions, nodeToJson, pNode->pOnConditions); } @@ -1648,6 +1656,9 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkJoinPhysiPlanMergeCondition, &pNode->pMergeCondition); + } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkJoinPhysiPlanTargets, &pNode->pTargets); } diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c index 3747dde9ed..b12e3b14c7 100644 --- a/source/libs/nodes/src/nodesTraverseFuncs.c +++ b/source/libs/nodes/src/nodesTraverseFuncs.c @@ -470,6 +470,9 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode; res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext); + if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { + res = walkPhysiPlan(pJoin->pMergeCondition, order, walker, pContext); + } if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { res = walkPhysiPlan(pJoin->pOnConditions, order, walker, pContext); } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index bf4eac3698..118cd80807 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -718,6 +718,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_LOGIC_PLAN_JOIN: { SJoinLogicNode* pLogicNode = (SJoinLogicNode*)pNode; destroyLogicNode((SLogicNode*)pLogicNode); + nodesDestroyNode(pLogicNode->pMergeCondition); nodesDestroyNode(pLogicNode->pOnConditions); break; } @@ -828,6 +829,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode; destroyPhysiNode((SPhysiNode*)pPhyNode); + nodesDestroyNode(pPhyNode->pMergeCondition); nodesDestroyNode(pPhyNode->pOnConditions); nodesDestroyList(pPhyNode->pTargets); break; @@ -1493,6 +1495,38 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* return TSDB_CODE_SUCCESS; } +int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols) { + if (NULL == pCols) { + return TSDB_CODE_FAILED; + } + SCollectColumnsCxt cxt = { + .errCode = TSDB_CODE_SUCCESS, + .pTableAlias = pTableAlias, + .collectType = type, + .pCols = (NULL == *pCols ? 
nodesMakeList() : *pCols), + .pColHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK)}; + if (NULL == cxt.pCols || NULL == cxt.pColHash) { + return TSDB_CODE_OUT_OF_MEMORY; + } + *pCols = NULL; + + nodesWalkExpr(node, collectColumns, &cxt); + + taosHashCleanup(cxt.pColHash); + if (TSDB_CODE_SUCCESS != cxt.errCode) { + nodesDestroyList(cxt.pCols); + return cxt.errCode; + } + if (LIST_LENGTH(cxt.pCols) > 0) { + *pCols = cxt.pCols; + } else { + nodesDestroyList(cxt.pCols); + } + + return TSDB_CODE_SUCCESS; + +} + typedef struct SCollectFuncsCxt { int32_t errCode; FFuncClassifier classifier; diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index c85c44f09b..1042411974 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -385,6 +385,15 @@ SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType typ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight) { CHECK_PARSER_STATUS(pCxt); + if (OP_TYPE_MINUS == type && QUERY_NODE_VALUE == nodeType(pLeft)) { + SValueNode* pVal = (SValueNode*)pLeft; + char* pNewLiteral = taosMemoryCalloc(1, strlen(pVal->literal) + 2); + CHECK_OUT_OF_MEM(pNewLiteral); + sprintf(pNewLiteral, "-%s", pVal->literal); + taosMemoryFree(pVal->literal); + pVal->literal = pNewLiteral; + return pLeft; + } SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR); CHECK_OUT_OF_MEM(op); op->opType = type; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 5651412566..4bccb75dcf 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -558,11 +558,11 @@ static void setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColum pCol->node.resType = pExpr->resType; } -static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, SNodeList* pList) { +static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, bool igTags, SNodeList* pList) { if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta; - int32_t nums = - pMeta->tableInfo.numOfColumns + ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0); + int32_t nums = pMeta->tableInfo.numOfColumns + + (igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? 
pMeta->tableInfo.numOfTags : 0)); for (int32_t i = 0; i < nums; ++i) { SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); if (NULL == pCol) { @@ -784,12 +784,147 @@ static int32_t parseBoolFromValueNode(STranslateContext* pCxt, SValueNode* pVal) } } -static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt) { - uint8_t precision = getPrecisionFromCurrStmt(pCxt->pCurrStmt, targetDt.precision); - pVal->node.resType.precision = precision; +static EDealRes translateDurationValue(STranslateContext* pCxt, SValueNode* pVal) { + if (parseNatualDuration(pVal->literal, strlen(pVal->literal), &pVal->datum.i, &pVal->unit, + pVal->node.resType.precision) != TSDB_CODE_SUCCESS) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int64_t*)&pVal->typeData = pVal->datum.i; + return DEAL_RES_CONTINUE; +} + +static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt, bool strict) { + int32_t code = TSDB_CODE_SUCCESS; + switch (targetDt.type) { + case TSDB_DATA_TYPE_BOOL: + if (TSDB_CODE_SUCCESS != parseBoolFromValueNode(pCxt, pVal)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(bool*)&pVal->typeData = pVal->datum.b; + break; + case TSDB_DATA_TYPE_TINYINT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_TINYINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int8_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_SMALLINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int16_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_INT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_INT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int32_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_BIGINT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_BIGINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int64_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UTINYINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint8_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_USMALLINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint16_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_UINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || 
!IS_VALID_UINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint32_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UBIGINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint64_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_FLOAT: { + pVal->datum.d = taosStr2Double(pVal->literal, NULL); + *(float*)&pVal->typeData = pVal->datum.d; + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + pVal->datum.d = taosStr2Double(pVal->literal, NULL); + *(double*)&pVal->typeData = pVal->datum.d; + break; + } + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_VARBINARY: { + if (strict && (pVal->node.resType.bytes > targetDt.bytes - VARSTR_HEADER_SIZE)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); + if (NULL == pVal->datum.p) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); + } + int32_t len = TMIN(targetDt.bytes - VARSTR_HEADER_SIZE, pVal->node.resType.bytes); + varDataSetLen(pVal->datum.p, len); + strncpy(varDataVal(pVal->datum.p), pVal->literal, len); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: { + if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pCxt, pVal)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int64_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_NCHAR: { + pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); + if (NULL == pVal->datum.p) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); + } + + int32_t len = 0; + if (!taosMbsToUcs4(pVal->literal, strlen(pVal->literal), (TdUcs4*)varDataVal(pVal->datum.p), + targetDt.bytes - VARSTR_HEADER_SIZE, &len)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + varDataSetLen(pVal->datum.p, len); + break; + } + case TSDB_DATA_TYPE_DECIMAL: + case TSDB_DATA_TYPE_BLOB: + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + default: + break; + } + return DEAL_RES_CONTINUE; +} + +static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt, bool strict) { if (pVal->placeholderNo > 0 || pVal->isNull) { return DEAL_RES_CONTINUE; } + if (TSDB_DATA_TYPE_NULL == pVal->node.resType.type) { // TODO // pVal->node.resType = targetDt; @@ -797,114 +932,18 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD pVal->isNull = true; return DEAL_RES_CONTINUE; } - if (pVal->isDuration) { - if (parseNatualDuration(pVal->literal, strlen(pVal->literal), &pVal->datum.i, &pVal->unit, precision) != - TSDB_CODE_SUCCESS) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - *(int64_t*)&pVal->typeData = pVal->datum.i; - } else { - switch (targetDt.type) { - case TSDB_DATA_TYPE_NULL: - break; - case TSDB_DATA_TYPE_BOOL: - if (TSDB_CODE_SUCCESS != parseBoolFromValueNode(pCxt, pVal)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - *(bool*)&pVal->typeData = pVal->datum.b; - break; - case TSDB_DATA_TYPE_TINYINT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int8_t*)&pVal->typeData = pVal->datum.i; - break; - } - case 
TSDB_DATA_TYPE_SMALLINT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int16_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_INT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int32_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_BIGINT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int64_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint8_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint16_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_UINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint32_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint64_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_FLOAT: { - pVal->datum.d = taosStr2Double(pVal->literal, NULL); - *(float*)&pVal->typeData = pVal->datum.d; - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - pVal->datum.d = taosStr2Double(pVal->literal, NULL); - *(double*)&pVal->typeData = pVal->datum.d; - break; - } - case TSDB_DATA_TYPE_VARCHAR: - case TSDB_DATA_TYPE_VARBINARY: { - pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); - if (NULL == pVal->datum.p) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); - } - int32_t len = TMIN(targetDt.bytes - VARSTR_HEADER_SIZE, pVal->node.resType.bytes); - varDataSetLen(pVal->datum.p, len); - strncpy(varDataVal(pVal->datum.p), pVal->literal, len); - break; - } - case TSDB_DATA_TYPE_TIMESTAMP: { - if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pCxt, pVal)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - *(int64_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_NCHAR: { - pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); - if (NULL == pVal->datum.p) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); - } - int32_t len = 0; - if (!taosMbsToUcs4(pVal->literal, strlen(pVal->literal), (TdUcs4*)varDataVal(pVal->datum.p), - targetDt.bytes - VARSTR_HEADER_SIZE, &len)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - varDataSetLen(pVal->datum.p, len); - break; - } - case TSDB_DATA_TYPE_DECIMAL: - case TSDB_DATA_TYPE_BLOB: - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - default: - break; - } + pVal->node.resType.precision = getPrecisionFromCurrStmt(pCxt->pCurrStmt, targetDt.precision); + + EDealRes res = DEAL_RES_CONTINUE; + if (pVal->isDuration) { + res = translateDurationValue(pCxt, pVal); + } else { + res = translateNormalValue(pCxt, pVal, targetDt, strict); } pVal->node.resType = targetDt; pVal->translate = true; - return DEAL_RES_CONTINUE; + return res; } static int32_t calcTypeBytes(SDataType dt) { @@ -920,7 +959,7 @@ static int32_t calcTypeBytes(SDataType dt) { static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) { SDataType dt = pVal->node.resType; dt.bytes = calcTypeBytes(dt); - return translateValueImpl(pCxt, pVal, dt); + return translateValueImpl(pCxt, pVal, dt, false); } static bool isMultiResFunc(SNode* pNode) { @@ -1297,6 +1336,9 @@ static int32_t rewriteSystemInfoFuncImpl(STranslateContext* pCxt, char* pLiteral pVal->isNull = true; } 
else { pVal->literal = pLiteral; + if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) { + pVal->node.resType.bytes = strlen(pLiteral); + } } if (DEAL_RES_ERROR != translateValue(pCxt, pVal)) { *pNode = (SNode*)pVal; @@ -1892,7 +1934,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { return code; } -static int32_t createAllColumns(STranslateContext* pCxt, SNodeList** pCols) { +static int32_t createAllColumns(STranslateContext* pCxt, bool igTags, SNodeList** pCols) { *pCols = nodesMakeList(); if (NULL == *pCols) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); @@ -1901,7 +1943,7 @@ static int32_t createAllColumns(STranslateContext* pCxt, SNodeList** pCols) { size_t nums = taosArrayGetSize(pTables); for (size_t i = 0; i < nums; ++i) { STableNode* pTable = taosArrayGetP(pTables, i); - int32_t code = createColumnsByTable(pCxt, pTable, *pCols); + int32_t code = createColumnsByTable(pCxt, pTable, igTags, *pCols); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -1938,7 +1980,7 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) { return (SNode*)pFunc; } -static int32_t createTableAllCols(STranslateContext* pCxt, SColumnNode* pCol, SNodeList** pOutput) { +static int32_t createTableAllCols(STranslateContext* pCxt, SColumnNode* pCol, bool igTags, SNodeList** pOutput) { STableNode* pTable = NULL; int32_t code = findTable(pCxt, pCol->tableAlias, &pTable); if (TSDB_CODE_SUCCESS == code && NULL == *pOutput) { @@ -1948,7 +1990,7 @@ static int32_t createTableAllCols(STranslateContext* pCxt, SColumnNode* pCol, SN } } if (TSDB_CODE_SUCCESS == code) { - code = createColumnsByTable(pCxt, pTable, *pOutput); + code = createColumnsByTable(pCxt, pTable, igTags, *pOutput); } return code; } @@ -1970,11 +2012,9 @@ static int32_t createMultiResFuncsParas(STranslateContext* pCxt, SNodeList* pSrc SNode* pPara = NULL; FOREACH(pPara, pSrcParas) { if (isStar(pPara)) { - code = createAllColumns(pCxt, &pExprs); - // The syntax definition ensures that * and other parameters do not appear at the same time - break; + code = createAllColumns(pCxt, true, &pExprs); } else if (isTableStar(pPara)) { - code = createTableAllCols(pCxt, (SColumnNode*)pPara, &pExprs); + code = createTableAllCols(pCxt, (SColumnNode*)pPara, true, &pExprs); } else { code = nodesListMakeStrictAppend(&pExprs, nodesCloneNode(pPara)); } @@ -2033,7 +2073,7 @@ static int32_t translateStar(STranslateContext* pCxt, SSelectStmt* pSelect) { int32_t code = TSDB_CODE_SUCCESS; if (isStar(pNode)) { SNodeList* pCols = NULL; - code = createAllColumns(pCxt, &pCols); + code = createAllColumns(pCxt, false, &pCols); if (TSDB_CODE_SUCCESS == code) { INSERT_LIST(pSelect->pProjectionList, pCols); ERASE_NODE(pSelect->pProjectionList); @@ -2049,7 +2089,7 @@ static int32_t translateStar(STranslateContext* pCxt, SSelectStmt* pSelect) { } } else if (isTableStar(pNode)) { SNodeList* pCols = NULL; - code = createTableAllCols(pCxt, (SColumnNode*)pNode, &pCols); + code = createTableAllCols(pCxt, (SColumnNode*)pNode, false, &pCols); if (TSDB_CODE_SUCCESS == code) { INSERT_LIST(pSelect->pProjectionList, pCols); ERASE_NODE(pSelect->pProjectionList); @@ -3138,6 +3178,7 @@ static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt, pReq->buffer = pStmt->pOptions->buffer; pReq->pageSize = -1; pReq->pages = pStmt->pOptions->pages; + pReq->lastRowMem = -1; pReq->daysPerFile = -1; pReq->daysToKeep0 = pStmt->pOptions->keep[0]; pReq->daysToKeep1 = pStmt->pOptions->keep[1]; @@ -3215,13 +3256,30 @@ static 
bool validRollupFunc(const char* pFunc) { return false; } -static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs) { +static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs, bool createStable, + SDbCfgInfo* pDbCfg) { if (NULL == pFuncs) { + if (NULL != pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "To create a super table in a database with the retensions parameter configured, " + "the 'ROLLUP' option must be present"); + } return TSDB_CODE_SUCCESS; } - if (1 != LIST_LENGTH(pFuncs) || !validRollupFunc(((SFunctionNode*)nodesListGetNode(pFuncs, 0))->functionName)) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION); + if (!createStable || NULL == pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "Invalid option rollup: Only supported for create super table in databases " + "configured with the 'RETENTIONS' option"); + } + if (1 != LIST_LENGTH(pFuncs)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION, + "Invalid option rollup: only one function is allowed"); + } + const char* pFunc = ((SFunctionNode*)nodesListGetNode(pFuncs, 0))->functionName; + if (!validRollupFunc(pFunc)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION, + "Invalid option rollup: %s function is not supported", pFunc); } return TSDB_CODE_SUCCESS; } @@ -3247,8 +3305,8 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); } if (TSDB_CODE_SUCCESS == code) { - if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_BINARY_LEN) || - (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_NCHAR_LEN)) { + if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_BINARY_LEN) || + (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_NCHAR_LEN)) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } } @@ -3269,11 +3327,11 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN return code; } -static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, SNodeList* pCols) { +static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, int32_t ntags, SNodeList* pCols) { int32_t ncols = LIST_LENGTH(pCols); if (ncols < TSDB_MIN_COLUMNS) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); - } else if (ncols > TSDB_MAX_COLUMNS) { + } else if (ncols + ntags > TSDB_MAX_COLUMNS) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS); } @@ -3329,7 +3387,7 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt int32_t code = checkTableTagsSchema(pCxt, pHash, pStmt->pTags); if (TSDB_CODE_SUCCESS == code) { - code = checkTableColsSchema(pCxt, pHash, pStmt->pCols); + code = checkTableColsSchema(pCxt, pHash, LIST_LENGTH(pStmt->pTags), pStmt->pCols); } taosHashCleanup(pHash); @@ -3358,11 +3416,18 @@ static int32_t getTableMaxDelayOption(STranslateContext* pCxt, SValueNode* pVal, pMaxDelay); } -static int32_t checkTableMaxDelayOption(STranslateContext* pCxt, STableOptions* pOptions) { +static int32_t checkTableMaxDelayOption(STranslateContext* pCxt, STableOptions* pOptions, bool 
createStable, + SDbCfgInfo* pDbCfg) { if (NULL == pOptions->pMaxDelay) { return TSDB_CODE_SUCCESS; } + if (!createStable || NULL == pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "Invalid option maxdelay: Only supported for create super table in databases " + "configured with the 'RETENTIONS' option"); + } + if (LIST_LENGTH(pOptions->pMaxDelay) > 2) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "maxdelay"); } @@ -3381,11 +3446,18 @@ static int32_t getTableWatermarkOption(STranslateContext* pCxt, SValueNode* pVal pMaxDelay); } -static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* pOptions) { +static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* pOptions, bool createStable, + SDbCfgInfo* pDbCfg) { if (NULL == pOptions->pWatermark) { return TSDB_CODE_SUCCESS; } + if (!createStable || NULL == pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "Invalid option watermark: Only supported for create super table in databases " + "configured with the 'RETENTIONS' option"); + } + if (LIST_LENGTH(pOptions->pWatermark) > 2) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "watermark"); } @@ -3399,13 +3471,20 @@ static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* return code; } -static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) { - int32_t code = checkTableMaxDelayOption(pCxt, pStmt->pOptions); - if (TSDB_CODE_SUCCESS == code) { - code = checkTableWatermarkOption(pCxt, pStmt->pOptions); +static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt, bool createStable) { + int32_t code = TSDB_CODE_SUCCESS; + SDbCfgInfo dbCfg = {0}; + if (createStable) { + code = getDBCfg(pCxt, pStmt->dbName, &dbCfg); } if (TSDB_CODE_SUCCESS == code) { - code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs); + code = checkTableMaxDelayOption(pCxt, pStmt->pOptions, createStable, &dbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = checkTableWatermarkOption(pCxt, pStmt->pOptions, createStable, &dbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs, createStable, &dbCfg); } if (TSDB_CODE_SUCCESS == code) { code = checkTableSmaOption(pCxt, pStmt); @@ -3684,7 +3763,7 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm static int32_t translateCreateSuperTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) { SMCreateStbReq createReq = {0}; - int32_t code = checkCreateTable(pCxt, pStmt); + int32_t code = checkCreateTable(pCxt, pStmt, true); if (TSDB_CODE_SUCCESS == code) { code = buildCreateStbReq(pCxt, pStmt, &createReq); } @@ -4264,6 +4343,39 @@ static void getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) { tNameGetFullDbName(&name, pDbFName); } +static int32_t addWstartTsToCreateStreamQuery(SNode* pStmt) { + SSelectStmt* pSelect = (SSelectStmt*)pStmt; + SNode* pProj = nodesListGetNode(pSelect->pProjectionList, 0); + if (NULL == pSelect->pWindow || + (QUERY_NODE_FUNCTION == nodeType(pProj) && 0 == strcmp("_wstartts", ((SFunctionNode*)pProj)->functionName))) { + return TSDB_CODE_SUCCESS; + } + SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION); + if (NULL == pFunc) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(pFunc->functionName, "_wstartts"); + 
strcpy(pFunc->node.aliasName, pFunc->functionName); + int32_t code = nodesListPushFront(pSelect->pProjectionList, (SNode*)pFunc); + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode((SNode*)pFunc); + } + return code; +} + +static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SCMCreateStreamReq* pReq) { + pCxt->createStream = true; + int32_t code = addWstartTsToCreateStreamQuery(pStmt); + if (TSDB_CODE_SUCCESS == code) { + code = translateQuery(pCxt, pStmt); + } + if (TSDB_CODE_SUCCESS == code) { + getSourceDatabase(pStmt, pCxt->pParseCxt->acctId, pReq->sourceDB); + code = nodesNodeToString(pStmt, false, &pReq->ast, NULL); + } + return code; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -4277,13 +4389,7 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* tNameExtractFullName(&name, pReq->targetStbFullName); } - pCxt->createStream = true; - int32_t code = translateQuery(pCxt, pStmt->pQuery); - if (TSDB_CODE_SUCCESS == code) { - getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB); - code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); - } - + int32_t code = buildCreateStreamQuery(pCxt, pStmt->pQuery, pReq); if (TSDB_CODE_SUCCESS == code) { pReq->sql = strdup(pCxt->pParseCxt->pSql); if (NULL == pReq->sql) { @@ -5232,7 +5338,7 @@ static int32_t buildCreateTableDataBlock(int32_t acctId, const SCreateTableStmt* static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) { SCreateTableStmt* pStmt = (SCreateTableStmt*)pQuery->pRoot; - int32_t code = checkCreateTable(pCxt, pStmt); + int32_t code = checkCreateTable(pCxt, pStmt, false); SVgroupInfo info = {0}; if (TSDB_CODE_SUCCESS == code) { code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info); @@ -5336,7 +5442,7 @@ static int32_t createTagValFromVal(STranslateContext* pCxt, SDataType targetDt, if (NULL == *pVal) { return TSDB_CODE_OUT_OF_MEMORY; } - return DEAL_RES_ERROR == translateValueImpl(pCxt, *pVal, targetDt) ? pCxt->errCode : TSDB_CODE_SUCCESS; + return DEAL_RES_ERROR == translateValueImpl(pCxt, *pVal, targetDt, true) ? 
pCxt->errCode : TSDB_CODE_SUCCESS; } static int32_t createTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode, @@ -5720,14 +5826,10 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS } SDataType targetDt = schemaToDataType(pTableMeta->tableInfo.precision, pSchema); - if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt)) { + if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt, true)) { return pCxt->errCode; } - if (IS_VAR_DATA_TYPE(pSchema->type) && strlen(pStmt->pVal->literal) > pSchema->bytes) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pStmt->pVal->literal); - } - pReq->isNull = (TSDB_DATA_TYPE_NULL == pStmt->pVal->node.resType.type); if (targetDt.type == TSDB_DATA_TYPE_JSON) { pReq->isNull = 0; diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 69917ad7f9..27a9102422 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -236,7 +236,6 @@ int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* const char* prefix = "syntax error"; if (sourceStr == NULL) { - assert(additionalInfo != NULL); snprintf(pBuf->buf, pBuf->len, msgFormat1, additionalInfo); return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; } @@ -254,40 +253,25 @@ int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; } -SSchema* getTableColumnSchema(const STableMeta* pTableMeta) { - assert(pTableMeta != NULL); - return (SSchema*)pTableMeta->schema; -} +SSchema* getTableColumnSchema(const STableMeta* pTableMeta) { return (SSchema*)pTableMeta->schema; } static SSchema* getOneColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) { - assert(pTableMeta != NULL && pTableMeta->schema != NULL && colIndex >= 0 && - colIndex < (getNumOfColumns(pTableMeta) + getNumOfTags(pTableMeta))); - SSchema* pSchema = (SSchema*)pTableMeta->schema; return &pSchema[colIndex]; } SSchema* getTableTagSchema(const STableMeta* pTableMeta) { - assert(pTableMeta != NULL && - (pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE)); return getOneColumnSchema(pTableMeta, getTableInfo(pTableMeta).numOfColumns); } int32_t getNumOfColumns(const STableMeta* pTableMeta) { - assert(pTableMeta != NULL); // table created according to super table, use data from super table return getTableInfo(pTableMeta).numOfColumns; } -int32_t getNumOfTags(const STableMeta* pTableMeta) { - assert(pTableMeta != NULL); - return getTableInfo(pTableMeta).numOfTags; -} +int32_t getNumOfTags(const STableMeta* pTableMeta) { return getTableInfo(pTableMeta).numOfTags; } -STableComInfo getTableInfo(const STableMeta* pTableMeta) { - assert(pTableMeta != NULL); - return pTableMeta->tableInfo; -} +STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; } STableMeta* tableMetaDup(const STableMeta* pTableMeta) { size_t size = TABLE_META_SIZE(pTableMeta); diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index 36a2eb3808..6eafa0555b 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -159,7 +159,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) { * c4 | column | DOUBLE | 8 | * c5 | column | DOUBLE | 8 | */ -void generateTestT1(MockCatalogService* mcs) { +void generateTestTables(MockCatalogService* mcs) { ITableBuilder& builder = 
mcs->createTableBuilder("test", "t1", TSDB_NORMAL_TABLE, 6) .setPrecision(TSDB_TIME_PRECISION_MILLI) .setVgid(1) @@ -183,23 +183,7 @@ void generateTestT1(MockCatalogService* mcs) { * tag2 | tag | VARCHAR | 20 | * tag3 | tag | TIMESTAMP | 8 | * Child Table: st1s1, st1s2 - */ -void generateTestST1(MockCatalogService* mcs) { - ITableBuilder& builder = mcs->createTableBuilder("test", "st1", TSDB_SUPER_TABLE, 3, 3) - .setPrecision(TSDB_TIME_PRECISION_MILLI) - .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) - .addColumn("c1", TSDB_DATA_TYPE_INT) - .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) - .addTag("tag1", TSDB_DATA_TYPE_INT) - .addTag("tag2", TSDB_DATA_TYPE_BINARY, 20) - .addTag("tag3", TSDB_DATA_TYPE_TIMESTAMP); - builder.done(); - mcs->createSubTable("test", "st1", "st1s1", 1); - mcs->createSubTable("test", "st1", "st1s2", 2); - mcs->createSubTable("test", "st1", "st1s3", 1); -} - -/* + * * Super Table: st2 * Field | Type | DataType | Bytes | * ========================================================================== @@ -209,16 +193,32 @@ void generateTestST1(MockCatalogService* mcs) { * jtag | tag | json | -- | * Child Table: st2s1, st2s2 */ -void generateTestST2(MockCatalogService* mcs) { - ITableBuilder& builder = mcs->createTableBuilder("test", "st2", TSDB_SUPER_TABLE, 3, 1) - .setPrecision(TSDB_TIME_PRECISION_MILLI) - .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) - .addColumn("c1", TSDB_DATA_TYPE_INT) - .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) - .addTag("jtag", TSDB_DATA_TYPE_JSON); - builder.done(); - mcs->createSubTable("test", "st2", "st2s1", 1); - mcs->createSubTable("test", "st2", "st2s2", 2); +void generateTestStables(MockCatalogService* mcs) { + { + ITableBuilder& builder = mcs->createTableBuilder("test", "st1", TSDB_SUPER_TABLE, 3, 3) + .setPrecision(TSDB_TIME_PRECISION_MILLI) + .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) + .addColumn("c1", TSDB_DATA_TYPE_INT) + .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) + .addTag("tag1", TSDB_DATA_TYPE_INT) + .addTag("tag2", TSDB_DATA_TYPE_BINARY, 20) + .addTag("tag3", TSDB_DATA_TYPE_TIMESTAMP); + builder.done(); + mcs->createSubTable("test", "st1", "st1s1", 1); + mcs->createSubTable("test", "st1", "st1s2", 2); + mcs->createSubTable("test", "st1", "st1s3", 1); + } + { + ITableBuilder& builder = mcs->createTableBuilder("test", "st2", TSDB_SUPER_TABLE, 3, 1) + .setPrecision(TSDB_TIME_PRECISION_MILLI) + .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) + .addColumn("c1", TSDB_DATA_TYPE_INT) + .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) + .addTag("jtag", TSDB_DATA_TYPE_JSON); + builder.done(); + mcs->createSubTable("test", "st2", "st2s1", 1); + mcs->createSubTable("test", "st2", "st2s2", 2); + } } void generateFunctions(MockCatalogService* mcs) { @@ -233,6 +233,11 @@ void generateDnodes(MockCatalogService* mcs) { mcs->createDnode(3, "host3", 7030); } +void generateDatabases(MockCatalogService* mcs) { + mcs->createDatabase("test"); + mcs->createDatabase("rollup_db", true); +} + } // namespace int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandle) { return 0; } @@ -262,7 +267,7 @@ int32_t __catalogGetDBVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char } int32_t __catalogGetDBCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const char* dbFName, SDbCfgInfo* pDbCfg) { - return 0; + return g_mockCatalogService->catalogGetDBCfg(dbFName, pDbCfg); } int32_t __catalogChkAuth(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type, @@ -359,11 +364,11 @@ void initMetaDataEnv() { } void 
generateMetaData() { + generateDatabases(g_mockCatalogService.get()); generateInformationSchema(g_mockCatalogService.get()); generatePerformanceSchema(g_mockCatalogService.get()); - generateTestT1(g_mockCatalogService.get()); - generateTestST1(g_mockCatalogService.get()); - generateTestST2(g_mockCatalogService.get()); + generateTestTables(g_mockCatalogService.get()); + generateTestStables(g_mockCatalogService.get()); generateFunctions(g_mockCatalogService.get()); generateDnodes(g_mockCatalogService.get()); g_mockCatalogService->showTables(); diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp index 8830bc7cb3..0f759018d9 100644 --- a/source/libs/parser/test/mockCatalogService.cpp +++ b/source/libs/parser/test/mockCatalogService.cpp @@ -140,6 +140,17 @@ class MockCatalogServiceImpl { return TSDB_CODE_SUCCESS; } + int32_t catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const { + std::string dbFName(pDbFName); + DbCfgCache::const_iterator it = dbCfg_.find(dbFName.substr(std::string(pDbFName).find_last_of('.') + 1)); + if (dbCfg_.end() == it) { + return TSDB_CODE_FAILED; + } + + memcpy(pDbCfg, &(it->second), sizeof(SDbCfgInfo)); + return TSDB_CODE_SUCCESS; + } + int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const { auto it = udf_.find(funcName); if (udf_.end() == it) { @@ -323,12 +334,21 @@ class MockCatalogServiceImpl { dnode_.insert(std::make_pair(dnodeId, epSet)); } + void createDatabase(const std::string& db, bool rollup) { + SDbCfgInfo cfg = {0}; + if (rollup) { + cfg.pRetensions = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SRetention)); + } + dbCfg_.insert(std::make_pair(db, cfg)); + } + private: typedef std::map> TableMetaCache; typedef std::map DbMetaCache; typedef std::map> UdfMetaCache; typedef std::map> IndexMetaCache; typedef std::map DnodeCache; + typedef std::map DbCfgCache; uint64_t getNextId() { return id_++; } @@ -486,6 +506,7 @@ class MockCatalogServiceImpl { for (int32_t i = 0; i < ndbs; ++i) { SMetaRes res = {0}; res.pRes = taosMemoryCalloc(1, sizeof(SDbCfgInfo)); + res.code = catalogGetDBCfg((const char*)taosArrayGet(pDbCfgReq, i), (SDbCfgInfo*)res.pRes); taosArrayPush(*pDbCfgData, &res); } } @@ -576,6 +597,7 @@ class MockCatalogServiceImpl { UdfMetaCache udf_; IndexMetaCache index_; DnodeCache dnode_; + DbCfgCache dbCfg_; }; MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {} @@ -605,6 +627,8 @@ void MockCatalogService::createDnode(int32_t dnodeId, const std::string& host, i impl_->createDnode(dnodeId, host, port); } +void MockCatalogService::createDatabase(const std::string& db, bool rollup) { impl_->createDatabase(db, rollup); } + int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const { return impl_->catalogGetTableMeta(pTableName, pTableMeta); } @@ -621,6 +645,10 @@ int32_t MockCatalogService::catalogGetDBVgInfo(const char* pDbFName, SArray** pV return impl_->catalogGetDBVgInfo(pDbFName, pVgList); } +int32_t MockCatalogService::catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const { + return impl_->catalogGetDBCfg(pDbFName, pDbCfg); +} + int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const { return impl_->catalogGetUdfInfo(funcName, pInfo); } diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h index 932424823c..5c8a8acad1 100644 --- a/source/libs/parser/test/mockCatalogService.h +++ 
b/source/libs/parser/test/mockCatalogService.h @@ -63,11 +63,13 @@ class MockCatalogService { void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize); void createSmaIndex(const SMCreateSmaReq* pReq); void createDnode(int32_t dnodeId, const std::string& host, int16_t port); + void createDatabase(const std::string& db, bool rollup = false); int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const; int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const; int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const; int32_t catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const; + int32_t catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const; int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const; int32_t catalogGetTableIndex(const SName* pTableName, SArray** pIndexes) const; int32_t catalogGetDnodeList(SArray** pDnodes) const; diff --git a/source/libs/parser/test/parInitialATest.cpp b/source/libs/parser/test/parInitialATest.cpp index 9148f8f28d..f4d0ba1cc8 100644 --- a/source/libs/parser/test/parInitialATest.cpp +++ b/source/libs/parser/test/parInitialATest.cpp @@ -38,9 +38,9 @@ TEST_F(ParserInitialATest, alterDnode) { TEST_F(ParserInitialATest, alterDatabase) { useDb("root", "test"); - run("ALTER DATABASE wxy_db CACHELAST 1 FSYNC 200 WAL 1"); + run("ALTER DATABASE test CACHELAST 1 FSYNC 200 WAL 1"); - run("ALTER DATABASE wxy_db KEEP 2400"); + run("ALTER DATABASE test KEEP 2400"); } TEST_F(ParserInitialATest, alterLocal) { diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 0a980fa889..10921a2082 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -359,11 +359,11 @@ TEST_F(ParserInitialCTest, createStable) { memset(&expect, 0, sizeof(SMCreateStbReq)); }; - auto setCreateStbReqFunc = [&](const char* pTbname, int8_t igExists = 0, int64_t delay1 = -1, int64_t delay2 = -1, - int64_t watermark1 = TSDB_DEFAULT_ROLLUP_WATERMARK, + auto setCreateStbReqFunc = [&](const char* pDbName, const char* pTbName, int8_t igExists = 0, int64_t delay1 = -1, + int64_t delay2 = -1, int64_t watermark1 = TSDB_DEFAULT_ROLLUP_WATERMARK, int64_t watermark2 = TSDB_DEFAULT_ROLLUP_WATERMARK, int32_t ttl = TSDB_DEFAULT_TABLE_TTL, const char* pComment = nullptr) { - int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname); + int32_t len = snprintf(expect.name, sizeof(expect.name), "0.%s.%s", pDbName, pTbName); expect.name[len] = '\0'; expect.igExists = igExists; expect.delay1 = delay1; @@ -454,14 +454,14 @@ TEST_F(ParserInitialCTest, createStable) { tFreeSMCreateStbReq(&req); }); - setCreateStbReqFunc("t1"); + setCreateStbReqFunc("test", "t1"); addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP); addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT); addFieldToCreateStbReqFunc(false, "id", TSDB_DATA_TYPE_INT); run("CREATE STABLE t1(ts TIMESTAMP, c1 INT) TAGS(id INT)"); clearCreateStbReq(); - setCreateStbReqFunc("t1", 1, 100 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_MINUTE, 10, + setCreateStbReqFunc("rollup_db", "t1", 1, 100 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_MINUTE, 10, 1 * MILLISECOND_PER_MINUTE, 100, "test create table"); addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0); addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT); @@ -493,7 +493,7 @@ 
TEST_F(ParserInitialCTest, createStable) { addFieldToCreateStbReqFunc(false, "a13", TSDB_DATA_TYPE_BOOL); addFieldToCreateStbReqFunc(false, "a14", TSDB_DATA_TYPE_NCHAR, 30 * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); addFieldToCreateStbReqFunc(false, "a15", TSDB_DATA_TYPE_VARCHAR, 50 + VARSTR_HEADER_SIZE); - run("CREATE STABLE IF NOT EXISTS test.t1(" + run("CREATE STABLE IF NOT EXISTS rollup_db.t1(" "ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), " "c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, " "c13 NCHAR(30), c14 VARCHAR(50)) " @@ -507,12 +507,13 @@ TEST_F(ParserInitialCTest, createStable) { TEST_F(ParserInitialCTest, createStableSemanticCheck) { useDb("root", "test"); - run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(CEIL)", TSDB_CODE_PAR_INVALID_ROLLUP_OPTION); + run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(CEIL)", + TSDB_CODE_PAR_INVALID_ROLLUP_OPTION); - run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 0s WATERMARK 1m", + run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 0s WATERMARK 1m", TSDB_CODE_PAR_INVALID_RANGE_OPTION); - run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 10s WATERMARK 18m", + run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 10s WATERMARK 18m", TSDB_CODE_PAR_INVALID_RANGE_OPTION); } @@ -561,30 +562,33 @@ TEST_F(ParserInitialCTest, createStream) { tFreeSCMCreateStreamReq(&req); }); - setCreateStreamReqFunc("s1", "test", "create stream s1 as select * from t1"); - run("CREATE STREAM s1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", "create stream s1 as select count(*) from t1 interval(10s)"); + run("CREATE STREAM s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); - setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select * from t1", nullptr, 1); - run("CREATE STREAM IF NOT EXISTS s1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select count(*) from t1 interval(10s)", + nullptr, 1); + run("CREATE STREAM IF NOT EXISTS s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); - setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select * from t1", "st1"); - run("CREATE STREAM s1 INTO st1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select count(*) from t1 interval(10s)", "st1"); + run("CREATE STREAM s1 INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); - setCreateStreamReqFunc( - "s1", "test", - "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired into st1 as select * from t1", - "st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND, 1); - run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED INTO st1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", + "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired into st1 " + "as select count(*) from t1 interval(10s)", + "st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND, + 1); + run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED INTO st1 AS SELECT COUNT(*) " + "FROM t1 
INTERVAL(10S)"); clearCreateStreamReq(); } TEST_F(ParserInitialCTest, createStreamSemanticCheck) { useDb("root", "test"); - run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC); + run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC); } TEST_F(ParserInitialCTest, createTable) { @@ -598,7 +602,7 @@ TEST_F(ParserInitialCTest, createTable) { "c13 NCHAR(30), c15 VARCHAR(50)) " "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3)"); - run("CREATE TABLE IF NOT EXISTS test.t1(" + run("CREATE TABLE IF NOT EXISTS rollup_db.t1(" "ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), " "c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, " "c13 NCHAR(30), c14 VARCHAR(50)) " @@ -617,6 +621,21 @@ TEST_F(ParserInitialCTest, createTable) { // run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy', NOW + 1S)"); } +TEST_F(ParserInitialCTest, createTableSemanticCheck) { + useDb("root", "test"); + + string sql = "CREATE TABLE st1(ts TIMESTAMP, "; + for (int32_t i = 1; i < 4096; ++i) { + if (i > 1) { + sql.append(", "); + } + sql.append("c" + to_string(i) + " INT"); + } + sql.append(") TAGS (t1 int)"); + + run(sql, TSDB_CODE_PAR_TOO_MANY_COLUMNS); +} + TEST_F(ParserInitialCTest, createTopic) { useDb("root", "test"); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 88930b6269..202e590955 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -75,6 +75,11 @@ static SLogicNode* optFindPossibleNode(SLogicNode* pNode, FMayBeOptimized func) return NULL; } +static void optResetParent(SLogicNode* pNode) { + SNode* pChild = NULL; + FOREACH(pChild, pNode->pChildren) { ((SLogicNode*)pChild)->pParent = pNode; } +} + EDealRes scanPathOptHaveNormalColImpl(SNode* pNode, void* pContext) { if (QUERY_NODE_COLUMN == nodeType(pNode)) { // *((bool*)pContext) = (COLUMN_TYPE_TAG != ((SColumnNode*)pNode)->colType); @@ -480,17 +485,12 @@ static int32_t pushDownCondOptPushCondToProject(SOptimizeContext* pCxt, SProject return pushDownCondOptAppendCond(&pProject->node.pConditions, pCond); } +static int32_t pushDownCondOptPushCondToJoin(SOptimizeContext* pCxt, SJoinLogicNode * pJoin, SNode** pCond) { + return pushDownCondOptAppendCond(&pJoin->node.pConditions, pCond); +} + static int32_t pushDownCondOptPushCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) { - switch (nodeType(pChild)) { - case QUERY_NODE_LOGIC_PLAN_SCAN: - return pushDownCondOptPushCondToScan(pCxt, (SScanLogicNode*)pChild, pCond); - case QUERY_NODE_LOGIC_PLAN_PROJECT: - return pushDownCondOptPushCondToProject(pCxt, (SProjectLogicNode*)pChild, pCond); - default: - break; - } - planError("pushDownCondOptPushCondToChild failed, invalid logic plan node %s", nodesNodeName(nodeType(pChild))); - return TSDB_CODE_PLAN_INTERNAL_ERROR; + return pushDownCondOptAppendCond(&pChild->pConditions, pCond); } static bool pushDownCondOptIsPriKey(SNode* pNode, SNodeList* pTableCols) { @@ -554,13 +554,83 @@ static int32_t pushDownCondOptCheckJoinOnCond(SOptimizeContext* pCxt, SJoinLogic return TSDB_CODE_SUCCESS; } +static int32_t pushDownCondOptPartJoinOnCondLogicCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) { + SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pJoin->pOnConditions); + + int32_t code = TSDB_CODE_SUCCESS; + 
SNodeList* pOnConds = NULL; + SNode* pCond = NULL; + FOREACH(pCond, pLogicCond->pParameterList) { + if (pushDownCondOptIsPriKeyEqualCond(pJoin, pCond)) { + *ppMergeCond = nodesCloneNode(pCond); + } else { + code = nodesListMakeAppend(&pOnConds, nodesCloneNode(pCond)); + } + } + + SNode* pTempOnCond = NULL; + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempOnCond, &pOnConds); + } + + if (TSDB_CODE_SUCCESS == code && NULL != *ppMergeCond) { + *ppOnCond = pTempOnCond; + nodesDestroyNode(pJoin->pOnConditions); + pJoin->pOnConditions = NULL; + return TSDB_CODE_SUCCESS; + } else { + nodesDestroyList(pOnConds); + nodesDestroyNode(pTempOnCond); + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } +} + +static int32_t pushDownCondOptPartJoinOnCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pOnConditions) && + LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)(pJoin->pOnConditions))->condType) { + return pushDownCondOptPartJoinOnCondLogicCond(pJoin, ppMergeCond, ppOnCond); + } + + if (pushDownCondOptIsPriKeyEqualCond(pJoin, pJoin->pOnConditions)) { + *ppMergeCond = nodesCloneNode(pJoin->pOnConditions); + *ppOnCond = NULL; + nodesDestroyNode(pJoin->pOnConditions); + pJoin->pOnConditions = NULL; + return TSDB_CODE_SUCCESS; + } else { + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } +} + +static int32_t pushDownCondOptJoinExtractMergeCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { + int32_t code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin); + SNode* pJoinMergeCond = NULL; + SNode* pJoinOnCond = NULL; + if (TSDB_CODE_SUCCESS == code) { + code = pushDownCondOptPartJoinOnCond(pJoin, &pJoinMergeCond, &pJoinOnCond); + } + if (TSDB_CODE_SUCCESS == code) { + pJoin->pMergeCondition = pJoinMergeCond; + pJoin->pOnConditions = pJoinOnCond; + } else { + nodesDestroyNode(pJoinMergeCond); + nodesDestroyNode(pJoinOnCond); + } + return code; +} + static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) { return TSDB_CODE_SUCCESS; } if (NULL == pJoin->node.pConditions) { - return pushDownCondOptCheckJoinOnCond(pCxt, pJoin); + int32_t code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin); + if (TSDB_CODE_SUCCESS == code) { + OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE); + pCxt->optimized = true; + } + return code; } SNode* pOnCond = NULL; @@ -579,10 +649,13 @@ static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* p pushDownCondOptPushCondToChild(pCxt, (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1), &pRightChildCond); } + if (TSDB_CODE_SUCCESS == code) { + code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin); + } + if (TSDB_CODE_SUCCESS == code) { OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE); pCxt->optimized = true; - code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin); } else { nodesDestroyNode(pOnCond); nodesDestroyNode(pLeftChildCond); @@ -718,9 +791,7 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg return TSDB_CODE_SUCCESS; } // TODO: remove it after full implementation of pushing down to child - if (1 != LIST_LENGTH(pAgg->node.pChildren) || - QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) && - QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) { + if (1 != LIST_LENGTH(pAgg->node.pChildren)) { return 
TSDB_CODE_SUCCESS; } @@ -747,6 +818,77 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg return code; } +typedef struct SRewriteProjCondContext { + SProjectLogicNode* pProj; + int32_t errCode; +}SRewriteProjCondContext; + +static EDealRes rewriteProjectCondForPushDownImpl(SNode** ppNode, void* pContext) { + SRewriteProjCondContext* pCxt = pContext; + SProjectLogicNode* pProj = pCxt->pProj; + if (QUERY_NODE_COLUMN == nodeType(*ppNode)) { + SNode* pTarget = NULL; + FOREACH(pTarget, pProj->node.pTargets) { + if (nodesEqualNode(pTarget, *ppNode)) { + SNode* pProjection = NULL; + FOREACH(pProjection, pProj->pProjections) { + if (0 == strcmp(((SExprNode*)pProjection)->aliasName, ((SColumnNode*)(*ppNode))->colName)) { + SNode* pExpr = nodesCloneNode(pProjection); + if (pExpr == NULL) { + pCxt->errCode = terrno; + return DEAL_RES_ERROR; + } + nodesDestroyNode(*ppNode); + *ppNode = pExpr; + } // end if expr alias name equal column name + } // end for each project + } // end if target node equals cond column node + } // end for each targets + return DEAL_RES_IGNORE_CHILD; + } + return DEAL_RES_CONTINUE; +} + +static int32_t rewriteProjectCondForPushDown(SOptimizeContext* pCxt, SProjectLogicNode* pProject, SNode** ppProjectCond) { + SRewriteProjCondContext cxt = {.pProj = pProject, .errCode = TSDB_CODE_SUCCESS}; + SNode* pProjectCond = pProject->node.pConditions; + nodesRewriteExpr(&pProjectCond, rewriteProjectCondForPushDownImpl, &cxt); + *ppProjectCond = pProjectCond; + pProject->node.pConditions = NULL; + return cxt.errCode; +} + +static int32_t pushDownCondOptDealProject(SOptimizeContext* pCxt, SProjectLogicNode* pProject) { + if (NULL == pProject->node.pConditions || + OPTIMIZE_FLAG_TEST_MASK(pProject->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) { + return TSDB_CODE_SUCCESS; + } + // TODO: remove it after full implementation of pushing down to child + if (1 != LIST_LENGTH(pProject->node.pChildren)) { + return TSDB_CODE_SUCCESS; + } + + if (NULL != pProject->node.pLimit || NULL != pProject->node.pSlimit) { + return TSDB_CODE_SUCCESS; + } + + int32_t code = TSDB_CODE_SUCCESS; + SNode* pProjCond = NULL; + code = rewriteProjectCondForPushDown(pCxt, pProject, &pProjCond); + if (TSDB_CODE_SUCCESS == code) { + SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pProject->node.pChildren, 0); + code = pushDownCondOptPushCondToChild(pCxt, pChild, &pProjCond); + } + + if (TSDB_CODE_SUCCESS == code) { + OPTIMIZE_FLAG_SET_MASK(pProject->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE); + pCxt->optimized = true; + } else { + nodesDestroyNode(pProjCond); + } + return code; +} + static int32_t pushDownCondOptimizeImpl(SOptimizeContext* pCxt, SLogicNode* pLogicNode) { int32_t code = TSDB_CODE_SUCCESS; switch (nodeType(pLogicNode)) { @@ -759,6 +901,9 @@ static int32_t pushDownCondOptimizeImpl(SOptimizeContext* pCxt, SLogicNode* pLog case QUERY_NODE_LOGIC_PLAN_AGG: code = pushDownCondOptDealAgg(pCxt, (SAggLogicNode*)pLogicNode); break; + case QUERY_NODE_LOGIC_PLAN_PROJECT: + code = pushDownCondOptDealProject(pCxt, (SProjectLogicNode*)pLogicNode); + break; default: break; } @@ -1460,6 +1605,7 @@ static int32_t rewriteTailOptCreateSort(SIndefRowsFuncLogicNode* pIndef, SLogicN pSort->groupSort = rewriteTailOptNeedGroupSort(pIndef); TSWAP(pSort->node.pChildren, pIndef->node.pChildren); + optResetParent((SLogicNode*)pSort); pSort->node.precision = pIndef->node.precision; SFunctionNode* pTail = NULL; @@ -1667,6 +1813,7 @@ static int32_t 
rewriteUniqueOptCreateAgg(SIndefRowsFuncLogicNode* pIndef, SLogic } TSWAP(pAgg->node.pChildren, pIndef->node.pChildren); + optResetParent((SLogicNode*)pAgg); pAgg->node.precision = pIndef->node.precision; int32_t code = TSDB_CODE_SUCCESS; diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index aac9c25f77..d10908c519 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -552,6 +552,9 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLES) || 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED)) { vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + } else { + pSubplan->execNode.nodeId = MNODE_HANDLE; + pSubplan->execNode.epSet = pCxt->pPlanCxt->mgmtEpSet; } SQueryNodeLoad node = {.addr = {.nodeId = MNODE_HANDLE, .epSet = pCxt->pPlanCxt->mgmtEpSet}, .load = 0}; taosArrayPush(pCxt->pExecNodeList, &node); @@ -609,10 +612,8 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren int32_t code = TSDB_CODE_SUCCESS; pJoin->joinType = pJoinLogicNode->joinType; - if (NULL != pJoinLogicNode->pOnConditions) { - code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pOnConditions, - &pJoin->pOnConditions); - } + setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pMergeCondition, + &pJoin->pMergeCondition); if (TSDB_CODE_SUCCESS == code) { code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets, &pJoin->pTargets); @@ -620,6 +621,21 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren if (TSDB_CODE_SUCCESS == code) { code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc); } + + SNodeList* condCols = nodesMakeList(); + if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) { + code = nodesCollectColumnsFromNode(pJoinLogicNode->pOnConditions, NULL, COLLECT_COL_TYPE_ALL, &condCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = addDataBlockSlots(pCxt, condCols, pJoin->node.pOutputDataBlockDesc); + nodesDestroyList(condCols); + } + + if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) { + code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pOnConditions, + &pJoin->pOnConditions); + } + if (TSDB_CODE_SUCCESS == code) { code = setConditionsSlotId(pCxt, (const SLogicNode*)pJoinLogicNode, (SPhysiNode*)pJoin); } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 38cca34c11..7644fc3b19 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -1018,14 +1018,14 @@ static int32_t unionMountSubplan(SLogicSubplan* pParent, SNodeList* pChildren) { return TSDB_CODE_SUCCESS; } -static SLogicSubplan* unionCreateSubplan(SSplitContext* pCxt, SLogicNode* pNode) { +static SLogicSubplan* unionCreateSubplan(SSplitContext* pCxt, SLogicNode* pNode, ESubplanType subplanType) { SLogicSubplan* pSubplan = (SLogicSubplan*)nodesMakeNode(QUERY_NODE_LOGIC_SUBPLAN); if (NULL == pSubplan) { return NULL; } pSubplan->id.queryId = pCxt->queryId; pSubplan->id.groupId = pCxt->groupId; - pSubplan->subplanType = SUBPLAN_TYPE_SCAN; + pSubplan->subplanType = subplanType; pSubplan->pNode = pNode; 
pNode->pParent = NULL; return pSubplan; @@ -1039,7 +1039,7 @@ static int32_t unionSplitSubplan(SSplitContext* pCxt, SLogicSubplan* pUnionSubpl SNode* pChild = NULL; FOREACH(pChild, pSplitNode->pChildren) { - SLogicSubplan* pNewSubplan = unionCreateSubplan(pCxt, (SLogicNode*)pChild); + SLogicSubplan* pNewSubplan = unionCreateSubplan(pCxt, (SLogicNode*)pChild, pUnionSubplan->subplanType); code = nodesListMakeStrictAppend(&pUnionSubplan->pChildren, (SNode*)pNewSubplan); if (TSDB_CODE_SUCCESS == code) { REPLACE_NODE(NULL); @@ -1203,7 +1203,8 @@ typedef struct SQnodeSplitInfo { static bool qndSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SLogicNode* pNode, SQnodeSplitInfo* pInfo) { - if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) && NULL != pNode->pParent) { + if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) && NULL != pNode->pParent && + ((SScanLogicNode*)pNode)->scanSeq[0] < 1 && ((SScanLogicNode*)pNode)->scanSeq[1] < 1) { pInfo->pSplitNode = pNode; pInfo->pSubplan = pSubplan; return true; @@ -1220,6 +1221,7 @@ static int32_t qnodeSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)qndSplFindSplitNode, &info)) { return TSDB_CODE_SUCCESS; } + ((SScanLogicNode*)info.pSplitNode)->dataRequired = FUNC_DATA_REQUIRED_DATA_LOAD; int32_t code = splCreateExchangeNodeForSubplan(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType); if (TSDB_CODE_SUCCESS == code) { SLogicSubplan* pScanSubplan = splCreateScanSubplan(pCxt, info.pSplitNode, 0); diff --git a/source/libs/planner/test/planBasicTest.cpp b/source/libs/planner/test/planBasicTest.cpp index bf841e6679..c99e4ea866 100644 --- a/source/libs/planner/test/planBasicTest.cpp +++ b/source/libs/planner/test/planBasicTest.cpp @@ -63,6 +63,10 @@ TEST_F(PlanBasicTest, uniqueFunc) { run("SELECT UNIQUE(c2 + 10), ts, c2 FROM t1 WHERE c1 > 10"); run("SELECT UNIQUE(c1) a FROM t1 ORDER BY a"); + + run("SELECT ts, UNIQUE(c1) FROM st1 PARTITION BY TBNAME"); + + run("SELECT TBNAME, UNIQUE(c1) FROM st1 PARTITION BY TBNAME"); } TEST_F(PlanBasicTest, tailFunc) { @@ -81,6 +85,8 @@ TEST_F(PlanBasicTest, tailFunc) { run("SELECT TAIL(c2 + 10, 10, 80) FROM t1 WHERE c1 > 10 PARTITION BY c1 LIMIT 5"); run("SELECT TAIL(c1, 2, 1) FROM st1s1 UNION ALL SELECT c1 FROM st1s2"); + + run("SELECT TAIL(c1, 1) FROM st2 WHERE jtag->'tag1' > 10"); } TEST_F(PlanBasicTest, interpFunc) { diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp index 3994db0902..e4019292d8 100644 --- a/source/libs/planner/test/planOptimizeTest.cpp +++ b/source/libs/planner/test/planOptimizeTest.cpp @@ -81,3 +81,8 @@ TEST_F(PlanOptimizeTest, eliminateProjection) { run("SELECT c1 FROM st1s3"); // run("select 1-abs(c1) from (select unique(c1) c1 from st1s3) order by 1 nulls first"); } + +TEST_F(PlanOptimizeTest, pushDownProjectCond) { + useDb("root", "test"); + run("select 1-abs(c1) from (select unique(c1) c1 from st1s3) where 1-c1>5 order by 1 nulls first"); +} \ No newline at end of file diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp index b8963c29f9..2c031aa3a8 100644 --- a/source/libs/planner/test/planOtherTest.cpp +++ b/source/libs/planner/test/planOtherTest.cpp @@ -91,10 +91,3 @@ TEST_F(PlanOtherTest, delete) { run("DELETE FROM st1 WHERE ts > now - 2d and ts < now - 1d AND tag1 = 10"); } - -TEST_F(PlanOtherTest, queryPolicy) { - useDb("root", "test"); - - tsQueryPolicy = QUERY_POLICY_QNODE; - run("SELECT COUNT(*) FROM 
st1"); -} diff --git a/source/libs/planner/test/planTestMain.cpp b/source/libs/planner/test/planTestMain.cpp index 46c2f33048..8f6fc832a2 100644 --- a/source/libs/planner/test/planTestMain.cpp +++ b/source/libs/planner/test/planTestMain.cpp @@ -78,6 +78,7 @@ static void parseArg(int argc, char* argv[]) { {"skipSql", required_argument, NULL, 's'}, {"limitSql", required_argument, NULL, 'i'}, {"log", required_argument, NULL, 'l'}, + {"queryPolicy", required_argument, NULL, 'q'}, {0, 0, 0, 0} }; // clang-format on @@ -95,6 +96,9 @@ static void parseArg(int argc, char* argv[]) { case 'l': setLogLevel(optarg); break; + case 'q': + setQueryPolicy(optarg); + break; default: break; } diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index d19e277a7d..4780249ec9 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -24,6 +24,7 @@ #include "mockCatalogService.h" #include "parser.h" #include "planInt.h" +#include "tglobal.h" using namespace std; using namespace testing; @@ -53,6 +54,7 @@ DumpModule g_dumpModule = DUMP_MODULE_NOTHING; int32_t g_skipSql = 0; int32_t g_limitSql = 0; int32_t g_logLevel = 131; +int32_t g_queryPolicy = QUERY_POLICY_VNODE; void setDumpModule(const char* pModule) { if (NULL == pModule) { @@ -79,6 +81,7 @@ void setDumpModule(const char* pModule) { void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } void setLimitSqlNum(const char* pNum) { g_limitSql = stoi(pNum); } void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); } +void setQueryPolicy(const char* pQueryPolicy) { g_queryPolicy = stoi(pQueryPolicy); } int32_t getLogLevel() { return g_logLevel; } @@ -105,7 +108,23 @@ class PlannerTestBaseImpl { } ++sqlNum_; + switch (g_queryPolicy) { + case QUERY_POLICY_VNODE: + case QUERY_POLICY_HYBRID: + case QUERY_POLICY_QNODE: + runImpl(sql, g_queryPolicy); + break; + default: + runImpl(sql, QUERY_POLICY_VNODE); + runImpl(sql, QUERY_POLICY_HYBRID); + runImpl(sql, QUERY_POLICY_QNODE); + break; + } + } + + void runImpl(const string& sql, int32_t queryPolicy) { reset(); + tsQueryPolicy = queryPolicy; try { SQuery* pQuery = nullptr; doParseSql(sql, &pQuery); diff --git a/source/libs/planner/test/planTestUtil.h b/source/libs/planner/test/planTestUtil.h index f9942c93a7..b0ddd726a6 100644 --- a/source/libs/planner/test/planTestUtil.h +++ b/source/libs/planner/test/planTestUtil.h @@ -45,6 +45,7 @@ extern void setDumpModule(const char* pModule); extern void setSkipSqlNum(const char* pNum); extern void setLimitSqlNum(const char* pNum); extern void setLogLevel(const char* pLogLevel); +extern void setQueryPolicy(const char* pQueryPolicy); extern int32_t getLogLevel(); #endif // PLAN_TEST_UTIL_H diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index ba6bd4bd5f..cbb1089d61 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -1035,3 +1035,72 @@ _return: sclFreeRes(ctx.pRes); return code; } + +int32_t scalarGetOperatorResultType(SDataType left, SDataType right, EOperatorType op, SDataType* pRes) { + switch (op) { + case OP_TYPE_ADD: + if (left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_TIMESTAMP) { + qError("invalid op %d, left type:%d, right type:%d", op, left.type, right.type); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + if ((left.type == TSDB_DATA_TYPE_TIMESTAMP && (IS_INTEGER_TYPE(right.type) || right.type == TSDB_DATA_TYPE_BOOL)) || + (right.type == TSDB_DATA_TYPE_TIMESTAMP && 
(IS_INTEGER_TYPE(left.type) || left.type == TSDB_DATA_TYPE_BOOL))) { + pRes->type = TSDB_DATA_TYPE_TIMESTAMP; + return TSDB_CODE_SUCCESS; + } + pRes->type = TSDB_DATA_TYPE_DOUBLE; + return TSDB_CODE_SUCCESS; + case OP_TYPE_SUB: + if ((left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_BIGINT) || + (right.type == TSDB_DATA_TYPE_TIMESTAMP && left.type == TSDB_DATA_TYPE_BIGINT)) { + pRes->type = TSDB_DATA_TYPE_TIMESTAMP; + return TSDB_CODE_SUCCESS; + } + pRes->type = TSDB_DATA_TYPE_DOUBLE; + return TSDB_CODE_SUCCESS; + case OP_TYPE_MULTI: + if (left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_TIMESTAMP) { + qError("invalid op %d, left type:%d, right type:%d", op, left.type, right.type); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + case OP_TYPE_DIV: + if (left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_TIMESTAMP) { + qError("invalid op %d, left type:%d, right type:%d", op, left.type, right.type); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + case OP_TYPE_REM: + case OP_TYPE_MINUS: + pRes->type = TSDB_DATA_TYPE_DOUBLE; + return TSDB_CODE_SUCCESS; + case OP_TYPE_GREATER_THAN: + case OP_TYPE_GREATER_EQUAL: + case OP_TYPE_LOWER_THAN: + case OP_TYPE_LOWER_EQUAL: + case OP_TYPE_EQUAL: + case OP_TYPE_NOT_EQUAL: + case OP_TYPE_IN: + case OP_TYPE_NOT_IN: + case OP_TYPE_LIKE: + case OP_TYPE_NOT_LIKE: + case OP_TYPE_MATCH: + case OP_TYPE_NMATCH: + case OP_TYPE_IS_NULL: + case OP_TYPE_IS_NOT_NULL: + case OP_TYPE_IS_TRUE: + case OP_TYPE_JSON_CONTAINS: + pRes->type = TSDB_DATA_TYPE_BOOL; + return TSDB_CODE_SUCCESS; + case OP_TYPE_BIT_AND: + case OP_TYPE_BIT_OR: + pRes->type = TSDB_DATA_TYPE_BIGINT; + return TSDB_CODE_SUCCESS; + case OP_TYPE_JSON_GET_VALUE: + pRes->type = TSDB_DATA_TYPE_JSON; + return TSDB_CODE_SUCCESS; + default: + ASSERT(0); + return TSDB_CODE_APP_ERROR; + } +} + + diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index f35f81892d..df5df127f0 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -360,9 +360,6 @@ static void trtrim(char *input, char *output, int32_t type, int32_t charLen) { static int32_t doLengthFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput, _len_fn lenFn) { int32_t type = GET_PARAM_TYPE(pInput); - if (inputNum != 1 || !IS_VAR_DATA_TYPE(type)) { - return TSDB_CODE_FAILED; - } SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; @@ -586,9 +583,6 @@ DONE: static int32_t doCaseConvFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput, _conv_fn convFn) { int32_t type = GET_PARAM_TYPE(pInput); - if (inputNum != 1 || !IS_VAR_DATA_TYPE(type)) { - return TSDB_CODE_FAILED; - } SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; @@ -628,9 +622,6 @@ static int32_t doCaseConvFunction(SScalarParam *pInput, int32_t inputNum, SScala static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput, _trim_fn trimFn) { int32_t type = GET_PARAM_TYPE(pInput); - if (inputNum != 1 || !IS_VAR_DATA_TYPE(type)) { - return TSDB_CODE_FAILED; - } SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; @@ -664,16 +655,10 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int32_t subPos = 0; GET_TYPED_DATA(subPos, int32_t, 
GET_PARAM_TYPE(&pInput[1]), pInput[1].columnData->pData); - if (subPos == 0) { //subPos needs to be positive or negative values; - return TSDB_CODE_FAILED; - } int32_t subLen = INT16_MAX; if (inputNum == 3) { GET_TYPED_DATA(subLen, int32_t, GET_PARAM_TYPE(&pInput[2]), pInput[2].columnData->pData); - if (subLen < 0 || subLen > INT16_MAX) { //subLen cannot be negative - return TSDB_CODE_FAILED; - } subLen = (GET_PARAM_TYPE(pInput) == TSDB_DATA_TYPE_VARCHAR) ? subLen : subLen * TSDB_NCHAR_SIZE; } @@ -1149,13 +1134,6 @@ int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarP int32_t toJsonFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int32_t type = GET_PARAM_TYPE(pInput); - if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR) { - return TSDB_CODE_FAILED; - } - - if (inputNum != 1) { - return TSDB_CODE_FAILED; - } char tmp[TSDB_MAX_JSON_TAG_LEN] = {0}; for (int32_t i = 0; i < pInput[0].numOfRows; ++i) { @@ -1196,6 +1174,8 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : (timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000); + int64_t unit = timeUnit * 1000 / factor; + for (int32_t i = 0; i < pInput[0].numOfRows; ++i) { if (colDataIsNull_s(pInput[0].columnData, i)) { colDataAppendNULL(pOutput->columnData, i); @@ -1228,14 +1208,15 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara char buf[20] = {0}; NUM_TO_STRING(TSDB_DATA_TYPE_BIGINT, &timeVal, sizeof(buf), buf); int32_t tsDigits = (int32_t)strlen(buf); - timeUnit = timeUnit * 1000 / factor; - switch (timeUnit) { - case 0: { /* 1u */ + switch (unit) { + case 0: { /* 1u or 1b */ if (tsDigits == TSDB_TIME_PRECISION_NANO_DIGITS) { - timeVal = timeVal / 1000 * 1000; - //} else if (tsDigits == TSDB_TIME_PRECISION_MICRO_DIGITS) { - // //timeVal = timeVal / 1000; + if (timePrec == TSDB_TIME_PRECISION_NANO && timeUnit == 1) { + timeVal = timeVal * 1; + } else { + timeVal = timeVal / 1000 * 1000; + } } else if (tsDigits <= TSDB_TIME_PRECISION_SEC_DIGITS) { timeVal = timeVal * factor; } else { @@ -1384,6 +1365,9 @@ int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[2]), pInput[2].columnData->pData); } + int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : + (timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000); + int32_t numOfRows = 0; for (int32_t i = 0; i < inputNum; ++i) { if (pInput[i].numOfRows > numOfRows) { @@ -1463,12 +1447,14 @@ int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p } } } else { - int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : - (timePrec == TSDB_TIME_PRECISION_MICRO ? 
1000000 : 1000000000); - timeUnit = timeUnit * 1000 / factor; - switch(timeUnit) { - case 0: { /* 1u */ - result = result / 1000; + int64_t unit = timeUnit * 1000 / factor; + switch(unit) { + case 0: { /* 1u or 1b */ + if (timePrec == TSDB_TIME_PRECISION_NANO && timeUnit == 1) { + result = result / 1; + } else { + result = result / 1000; + } break; } case 1: { /* 1a */ diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index 6e4838e50c..56ebc21568 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -184,9 +184,7 @@ FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) { } if ((*pJob->chkKillFp)(pJob->chkKillParam)) { - schUpdateJobStatus(pJob, JOB_TASK_STATUS_DROPPING); schUpdateJobErrCode(pJob, TSDB_CODE_TSC_QUERY_KILLED); - return true; } @@ -669,6 +667,11 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { return TSDB_CODE_SUCCESS; } + if (SCH_IS_DATA_SRC_QRY_TASK(pTask)) { + SCH_TASK_ELOG("no execNode specified for data src task, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps); + SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask)); /* @@ -811,14 +814,6 @@ int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) { */ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bool *needRetry) { - int8_t status = 0; - - if (schJobNeedToStop(pJob, &status)) { - *needRetry = false; - SCH_TASK_DLOG("task no more retry cause of job status, job status:%s", jobTaskStatusStr(status)); - return TSDB_CODE_SUCCESS; - } - if (TSDB_CODE_SCH_TIMEOUT_ERROR == errCode) { pTask->maxExecTimes++; if (pTask->timeoutUsec < SCH_MAX_TASK_TIMEOUT_USEC) { @@ -1277,7 +1272,7 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId* pEpId, SArray* pStatusList) { for (int32_t i = 0; i < taskNum; ++i) { STaskStatus *taskStatus = taosArrayGet(pStatusList, i); - qDebug("QID:%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", + qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", taskStatus->queryId, taskStatus->taskId, taskStatus->execId, jobTaskStatusStr(taskStatus->status)); SSchJob *pJob = schAcquireJob(taskStatus->refId); @@ -1495,6 +1490,8 @@ void schFreeJobImpl(void *job) { uint64_t queryId = pJob->queryId; int64_t refId = pJob->refId; + qDebug("QID:0x%" PRIx64 " begin to free sch job, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); + if (pJob->status == JOB_TASK_STATUS_EXECUTING) { schCancelJob(pJob); } @@ -1535,12 +1532,12 @@ void schFreeJobImpl(void *job) { taosMemoryFreeClear(pJob->resData); taosMemoryFree(pJob); - qDebug("QID:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); - int32_t jobNum = atomic_sub_fetch_32(&schMgmt.jobNum, 1); if (jobNum == 0) { schCloseJobRef(); } + + qDebug("QID:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); } int32_t schLaunchStaticExplainJob(SSchedulerReq *pReq, SSchJob *pJob, bool sync) { @@ -1687,11 +1684,6 @@ _return: int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf* pData, int32_t rspCode) { int32_t code = 0; - int8_t status = 0; - if (schJobNeedToStop(pJob, &status)) { - SCH_TASK_ELOG("redirect will no continue cause of job status %s", jobTaskStatusStr(status)); - SCH_RET(atomic_load_32(&pJob->errCode)); - } if ((pTask->execId + 1) >= pTask->maxExecTimes) { SCH_TASK_DLOG("task no more retry since reach max try times, execId:%d", pTask->execId); diff --git 
a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 02e7b085ae..32f151f8af 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -402,12 +402,16 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) { goto _return; } - code = schHandleResponseMsg(pJob, pTask, msgType, pMsg->pData, pMsg->len, rspCode); + schHandleResponseMsg(pJob, pTask, msgType, pMsg->pData, pMsg->len, rspCode); pMsg->pData = NULL; _return: if (pTask) { + if (code) { + schProcessOnTaskFailure(pJob, pTask, code); + } + SCH_UNLOCK_TASK(pTask); } diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c index 74ddc89b40..e2389c2a75 100644 --- a/source/libs/scheduler/src/scheduler.c +++ b/source/libs/scheduler/src/scheduler.c @@ -225,26 +225,33 @@ void schedulerStopQueryHb(void *pTrans) { schCleanClusterHb(pTrans); } -void schedulerFreeJob(int64_t job, int32_t errCode) { - SSchJob *pJob = schAcquireJob(job); +void schedulerFreeJob(int64_t* job, int32_t errCode) { + if (0 == *job) { + return; + } + + SSchJob *pJob = schAcquireJob(*job); if (NULL == pJob) { - qError("acquire job from jobRef list failed, may be dropped, jobId:0x%" PRIx64, job); + qError("acquire sch job failed, may be dropped, jobId:0x%" PRIx64, *job); + *job = 0; return; } int32_t code = schProcessOnJobDropped(pJob, errCode); if (TSDB_CODE_SCH_JOB_IS_DROPPING == code) { - SCH_JOB_DLOG("sch job is already dropping, refId:0x%" PRIx64, job); + SCH_JOB_DLOG("sch job is already dropping, refId:0x%" PRIx64, *job); + *job = 0; return; } - SCH_JOB_DLOG("start to remove job from jobRef list, refId:0x%" PRIx64, job); + SCH_JOB_DLOG("start to remove job from jobRef list, refId:0x%" PRIx64, *job); - if (taosRemoveRef(schMgmt.jobRef, job)) { - SCH_JOB_ELOG("remove job from job list failed, refId:0x%" PRIx64, job); + if (taosRemoveRef(schMgmt.jobRef, *job)) { + SCH_JOB_ELOG("remove job from job list failed, refId:0x%" PRIx64, *job); } - schReleaseJob(job); + schReleaseJob(*job); + *job = 0; } void schedulerDestroy(void) { diff --git a/source/libs/scheduler/test/schedulerTests.cpp b/source/libs/scheduler/test/schedulerTests.cpp index 098699744d..7fe6cc22bf 100644 --- a/source/libs/scheduler/test/schedulerTests.cpp +++ b/source/libs/scheduler/test/schedulerTests.cpp @@ -457,7 +457,7 @@ void schtFreeQueryJob(int32_t freeThread) { int64_t job = queryJobRefId; if (job && atomic_val_compare_exchange_64(&queryJobRefId, job, 0)) { - schedulerFreeJob(job, 0); + schedulerFreeJob(&job, 0); if (freeThread) { if (++freeNum % schtTestPrintNum == 0) { printf("FreeNum:%d\n", freeNum); @@ -724,7 +724,7 @@ TEST(queryTest, normalCase) { schReleaseJob(job); - schedulerFreeJob(job, 0); + schedulerFreeJob(&job, 0); schtFreeQueryDag(&dag); @@ -828,7 +828,7 @@ TEST(queryTest, readyFirstCase) { schReleaseJob(job); - schedulerFreeJob(job, 0); + schedulerFreeJob(&job, 0); schtFreeQueryDag(&dag); @@ -940,7 +940,7 @@ TEST(queryTest, flowCtrlCase) { schReleaseJob(job); - schedulerFreeJob(job, 0); + schedulerFreeJob(&job, 0); schtFreeQueryDag(&dag); @@ -994,7 +994,7 @@ TEST(insertTest, normalCase) { ASSERT_EQ(code, 0); ASSERT_EQ(res.numOfRows, 20); - schedulerFreeJob(insertJobRefId, 0); + schedulerFreeJob(&insertJobRefId, 0); schedulerDestroy(); } diff --git a/source/libs/sync/inc/syncAppendEntriesReply.h b/source/libs/sync/inc/syncAppendEntriesReply.h index 70ce5a72c6..03148252fb 100644 --- a/source/libs/sync/inc/syncAppendEntriesReply.h +++ 
b/source/libs/sync/inc/syncAppendEntriesReply.h @@ -44,11 +44,6 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); -typedef struct SReaderParam { - SyncIndex start; - SyncIndex end; -} SReaderParam; - #ifdef __cplusplus } #endif diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 66e5d28bdd..8936cd6ed9 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -174,8 +174,9 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak); int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize); // option -bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); -SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex); +bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode); +SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex); // ping -------------- int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg); diff --git a/source/libs/sync/inc/syncRaftEntry.h b/source/libs/sync/inc/syncRaftEntry.h index 37f18a6388..82d5c0a6ea 100644 --- a/source/libs/sync/inc/syncRaftEntry.h +++ b/source/libs/sync/inc/syncRaftEntry.h @@ -29,19 +29,20 @@ extern "C" { typedef struct SSyncRaftEntry { uint32_t bytes; - uint32_t msgType; // SyncClientRequest msgType - uint32_t originalRpcType; // user RpcMsg msgType + uint32_t msgType; // TDMT_SYNC_CLIENT_REQUEST + uint32_t originalRpcType; // origin RpcMsg msgType uint64_t seqNum; bool isWeak; SyncTerm term; SyncIndex index; - uint32_t dataLen; // user RpcMsg.contLen - char data[]; // user RpcMsg.pCont + uint32_t dataLen; // origin RpcMsg.contLen + char data[]; // origin RpcMsg.pCont } SSyncRaftEntry; SSyncRaftEntry* syncEntryBuild(uint32_t dataLen); SSyncRaftEntry* syncEntryBuild2(SyncClientRequest* pMsg, SyncTerm term, SyncIndex index); // step 4 SSyncRaftEntry* syncEntryBuild3(SyncClientRequest* pMsg, SyncTerm term, SyncIndex index); +SSyncRaftEntry* syncEntryBuild4(SRpcMsg* pOriginalMsg, SyncTerm term, SyncIndex index); SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId); void syncEntryDestory(SSyncRaftEntry* pEntry); char* syncEntrySerialize(const SSyncRaftEntry* pEntry, uint32_t* len); // step 5 diff --git a/source/libs/sync/inc/syncReplication.h b/source/libs/sync/inc/syncReplication.h index 07e12460da..dad7d9b5ec 100644 --- a/source/libs/sync/inc/syncReplication.h +++ b/source/libs/sync/inc/syncReplication.h @@ -54,6 +54,7 @@ extern "C" { int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode); + int32_t syncNodeReplicate(SSyncNode* pSyncNode); int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg); int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg); diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index 6801e7212a..3b1e4f4560 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -37,26 +37,28 @@ extern "C" { 
//--------------------------------------------------- typedef struct SSyncSnapshotSender { - bool start; - int32_t seq; - int32_t ack; - void *pReader; - void *pCurrentBlock; - int32_t blockLen; - SSnapshot snapshot; - SSyncCfg lastConfig; - int64_t sendingMS; - SSyncNode *pSyncNode; - int32_t replicaIndex; - SyncTerm term; - SyncTerm privateTerm; - bool finish; + bool start; + int32_t seq; + int32_t ack; + void *pReader; + void *pCurrentBlock; + int32_t blockLen; + SSnapshotParam snapshotParam; + SSnapshot snapshot; + SSyncCfg lastConfig; + int64_t sendingMS; + SSyncNode *pSyncNode; + int32_t replicaIndex; + SyncTerm term; + SyncTerm privateTerm; + bool finish; } SSyncSnapshotSender; SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaIndex); void snapshotSenderDestroy(SSyncSnapshotSender *pSender); bool snapshotSenderIsStart(SSyncSnapshotSender *pSender); -int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader); +int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapshotParam, SSnapshot snapshot, + void *pReader); int32_t snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish); int32_t snapshotSend(SSyncSnapshotSender *pSender); int32_t snapshotReSend(SSyncSnapshotSender *pSender); @@ -67,22 +69,23 @@ char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event); //--------------------------------------------------- typedef struct SSyncSnapshotReceiver { - bool start; - int32_t ack; - void *pWriter; - SyncTerm term; - SyncTerm privateTerm; - SSnapshot snapshot; - SRaftId fromId; - SSyncNode *pSyncNode; + bool start; + int32_t ack; + void *pWriter; + SyncTerm term; + SyncTerm privateTerm; + SSnapshotParam snapshotParam; + SSnapshot snapshot; + SRaftId fromId; + SSyncNode *pSyncNode; } SSyncSnapshotReceiver; SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId fromId); void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver); -int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg); -int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver); -bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver); +int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg); +int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver); +bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver); cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver); char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index be48227d40..c923ee3d1d 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -628,8 +628,6 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { #endif -static int32_t syncNodeMakeLogSame2(SSyncNode* ths, SyncAppendEntriesBatch* pMsg) { return 0; } - static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { int32_t code; @@ -675,6 +673,51 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { return code; } +static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) { + int32_t code; + + SyncIndex delBegin = FromIndex; + SyncIndex delEnd = ths->pLogStore->syncLogLastIndex(ths->pLogStore); + + // invert roll back! 
+ for (SyncIndex index = delEnd; index >= delBegin; --index) { + if (ths->pFsm->FpRollBackCb != NULL) { + SSyncRaftEntry* pRollBackEntry; + code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, index, &pRollBackEntry); + ASSERT(code == 0); + ASSERT(pRollBackEntry != NULL); + + if (syncUtilUserRollback(pRollBackEntry->msgType)) { + SRpcMsg rpcMsg; + syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg); + + SFsmCbMeta cbMeta = {0}; + cbMeta.index = pRollBackEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); + cbMeta.isWeak = pRollBackEntry->isWeak; + cbMeta.code = 0; + cbMeta.state = ths->state; + cbMeta.seqNum = pRollBackEntry->seqNum; + ths->pFsm->FpRollBackCb(ths->pFsm, &rpcMsg, cbMeta); + rpcFreeCont(rpcMsg.pCont); + } + + syncEntryDestory(pRollBackEntry); + } + } + + // delete conflicting entries + code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin); + ASSERT(code == 0); + + char eventLog[128]; + snprintf(eventLog, sizeof(eventLog), "log truncate, from %ld to %ld", delBegin, delEnd); + syncNodeEventLog(ths, eventLog); + logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore); + + return code; +} + static int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry) { SRpcMsg rpcMsg; syncEntry2OriginalRpc(pEntry, &rpcMsg); @@ -694,6 +737,31 @@ static int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry) { return 0; } +static bool syncNodeOnAppendEntriesBatchLogOK(SSyncNode* pSyncNode, SyncAppendEntriesBatch* pMsg) { + if (pMsg->prevLogIndex == SYNC_INDEX_INVALID) { + return true; + } + + SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode); + if (pMsg->prevLogIndex > myLastIndex) { + sDebug("vgId:%d sync log not ok, preindex:%ld", pSyncNode->vgId, pMsg->prevLogIndex); + return false; + } + + SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1); + if (myPreLogTerm == SYNC_TERM_INVALID) { + sDebug("vgId:%d sync log not ok2, preindex:%ld", pSyncNode->vgId, pMsg->prevLogIndex); + return false; + } + + if (pMsg->prevLogIndex <= myLastIndex && pMsg->prevLogTerm == myPreLogTerm) { + return true; + } + + sDebug("vgId:%d sync log not ok3, preindex:%ld", pSyncNode->vgId, pMsg->prevLogIndex); + return false; +} + // really pre log match // prevLogIndex == -1 static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries* pMsg) { @@ -767,7 +835,6 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc // operation: // if hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex, append entry // match my-commit-index or my-commit-index + 1 - // no operation on log do { bool condition = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) && (pMsg->prevLogIndex <= ths->commitIndex); @@ -780,14 +847,11 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncNodeEventLog(ths, logBuf); } while (0); - SyncIndex matchIndex = ths->commitIndex; - bool hasAppendEntries = pMsg->dataLen > 0; - if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) { - SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); - int32_t retArrSize = 0; - syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, SYNC_MAX_BATCH_SIZE, &retArrSize); + SyncIndex matchIndex = ths->commitIndex; + bool hasAppendEntries = pMsg->dataLen > 0; + SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg); + if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) { // make log same 
do { SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); @@ -795,15 +859,15 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc if (hasExtraEntries) { // make log same, rollback deleted entries - code = syncNodeMakeLogSame2(ths, pMsg); + code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); ASSERT(code == 0); } } while (0); // append entry batch - for (int32_t i = 0; i < retArrSize; ++i) { - SSyncRaftEntry* pAppendEntry = syncEntryBuild(1234); + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); if (code != 0) { return -1; @@ -821,7 +885,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc walFsync(pWal, true); // update match index - matchIndex = pMsg->prevLogIndex + retArrSize; + matchIndex = pMsg->prevLogIndex + pMsg->dataCount; } // prepare response msg @@ -839,13 +903,12 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg); syncAppendEntriesReplyDestroy(pReply); - return ret; + return 0; } } while (0); // calculate logOK here, before will coredump, due to fake match - // bool logOK = syncNodeOnAppendEntriesLogOK(ths, pMsg); - bool logOK = true; + bool logOK = syncNodeOnAppendEntriesBatchLogOK(ths, pMsg); // not match // @@ -866,8 +929,9 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc if (condition) { char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, not match, pre-index:%ld, pre-term:%lu, datalen:%d", - pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen); + snprintf(logBuf, sizeof(logBuf), + "recv sync-append-entries-batch, not match, pre-index:%ld, pre-term:%lu, datalen:%d", pMsg->prevLogIndex, + pMsg->prevLogTerm, pMsg->dataLen); syncNodeEventLog(ths, logBuf); // prepare response msg @@ -885,7 +949,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg); syncAppendEntriesReplyDestroy(pReply); - return ret; + return 0; } } while (0); @@ -905,28 +969,26 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc bool hasExtraEntries = myLastIndex > pMsg->prevLogIndex; // has entries in SyncAppendEntries msg - bool hasAppendEntries = pMsg->dataLen > 0; + bool hasAppendEntries = pMsg->dataLen > 0; + SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg); - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, match, pre-index:%ld, pre-term:%lu, datalen:%d", - pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen); - syncNodeEventLog(ths, logBuf); + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, match, pre-index:%ld, pre-term:%lu, datalen:%d", + pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen); + syncNodeEventLog(ths, logBuf); + } while (0); if (hasExtraEntries) { // make log same, rollback deleted entries - // code = syncNodeMakeLogSame(ths, pMsg); + code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); ASSERT(code == 0); } - int32_t retArrSize = 0; if (hasAppendEntries) { - SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); - syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, SYNC_MAX_BATCH_SIZE, &retArrSize); - // append entry batch - for (int32_t i = 0; i < 
retArrSize; ++i) { - SSyncRaftEntry* pAppendEntry = syncEntryBuild(1234); + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); if (code != 0) { return -1; @@ -951,7 +1013,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc pReply->term = ths->pRaftStore->currentTerm; pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = true; - pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + retArrSize : pMsg->prevLogIndex; + pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + pMsg->dataCount : pMsg->prevLogIndex; // send response SRpcMsg rpcMsg; @@ -991,11 +1053,11 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc ASSERT(code == 0); } } - return ret; + return 0; } } while (0); - return ret; + return 0; } int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMsg) { diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 629d83eb51..f3206e9ccc 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -118,12 +118,12 @@ static void syncNodeStartSnapshot(SSyncNode* ths, SyncIndex beginIndex, SyncInde SSnapshot snapshot = { .data = NULL, .lastApplyIndex = endIndex, .lastApplyTerm = lastApplyTerm, .lastConfigIndex = SYNC_INDEX_INVALID}; - void* pReader = NULL; - SReaderParam readerParam = {.start = beginIndex, .end = endIndex}; + void* pReader = NULL; + SSnapshotParam readerParam = {.start = beginIndex, .end = endIndex}; ths->pFsm->FpSnapshotStartRead(ths->pFsm, &readerParam, &pReader); if (!snapshotSenderIsStart(pSender) && pMsg->privateTerm < pSender->privateTerm) { ASSERT(pReader != NULL); - snapshotSenderStart(pSender, snapshot, pReader); + snapshotSenderStart(pSender, readerParam, snapshot, pReader); } else { if (pReader != NULL) { @@ -165,23 +165,22 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie if (ths->pLogStore->syncLogExist(ths->pLogStore, newNextIndex) && ths->pLogStore->syncLogExist(ths->pLogStore, newNextIndex - 1)) { - // nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1] + // update next-index, match-index syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), newNextIndex); - - // matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex] syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), newMatchIndex); // maybe commit if (ths->state == TAOS_SYNC_STATE_LEADER) { syncMaybeAdvanceCommitIndex(ths); } + } else { // start snapshot - SSnapshot snapshot; - ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot); - syncNodeStartSnapshot(ths, newMatchIndex + 1, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pMsg); + SSnapshot oldSnapshot; + ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &oldSnapshot); + syncNodeStartSnapshot(ths, newMatchIndex + 1, oldSnapshot.lastApplyIndex, oldSnapshot.lastApplyTerm, pMsg); - syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), snapshot.lastApplyIndex + 1); + syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), oldSnapshot.lastApplyIndex + 1); syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), newMatchIndex); } @@ -301,7 +300,8 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries !snapshotSenderIsStart(pSender) && pMsg->privateTerm < pSender->privateTerm) { // has snapshot ASSERT(pReader != 
NULL); - snapshotSenderStart(pSender, snapshot, pReader); + SSnapshotParam readerParam = {.start = 0, .end = snapshot.lastApplyIndex}; + snapshotSenderStart(pSender, readerParam, snapshot, pReader); } else { // no snapshot diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index ddda60bc18..cefd306f7d 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -65,6 +65,7 @@ int32_t syncInit() { syncCleanUp(); ret = -1; } else { + sDebug("sync rsetId:%" PRId64 " is open", tsNodeRefId); ret = syncEnvStart(); } } @@ -77,6 +78,7 @@ void syncCleanUp() { ASSERT(ret == 0); if (tsNodeRefId != -1) { + sDebug("sync rsetId:%" PRId64 " is closed", tsNodeRefId); taosCloseRef(tsNodeRefId); tsNodeRefId = -1; } @@ -96,6 +98,7 @@ int64_t syncOpen(const SSyncInfo* pSyncInfo) { return -1; } + sDebug("vgId:%d, rid:%" PRId64 " is added to rsetId:%" PRId64, pSyncInfo->vgId, pSyncNode->rid, tsNodeRefId); return pSyncNode->rid; } @@ -136,13 +139,14 @@ void syncStartStandBy(int64_t rid) { void syncStop(int64_t rid) { SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); - if (pSyncNode == NULL) { - return; - } + if (pSyncNode == NULL) return; + + int32_t vgId = pSyncNode->vgId; syncNodeClose(pSyncNode); taosReleaseRef(tsNodeRefId, pSyncNode->rid); taosRemoveRef(tsNodeRefId, rid); + sDebug("vgId:%d, rid:%" PRId64 " is removed from rsetId:%" PRId64, vgId, rid, tsNodeRefId); } int32_t syncSetStandby(int64_t rid) { @@ -154,13 +158,13 @@ int32_t syncSetStandby(int64_t rid) { } if (pSyncNode->state != TAOS_SYNC_STATE_FOLLOWER) { - taosReleaseRef(tsNodeRefId, pSyncNode->rid); if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { terrno = TSDB_CODE_SYN_IS_LEADER; } else { terrno = TSDB_CODE_SYN_STANDBY_NOT_READY; } sError("failed to set standby since it is not follower, state:%s rid:%" PRId64, syncStr(pSyncNode->state), rid); + taosReleaseRef(tsNodeRefId, pSyncNode->rid); return -1; } @@ -616,6 +620,7 @@ int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) { SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid); if (pSyncNode == NULL) { + taosReleaseRef(tsNodeRefId, rid); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; return -1; } @@ -815,7 +820,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) { // create a new raft config file SRaftCfgMeta meta; meta.isStandBy = pSyncInfo->isStandBy; - meta.snapshotEnable = pSyncInfo->snapshotEnable; + meta.snapshotEnable = pSyncInfo->snapshotStrategy; meta.lastConfigIndex = SYNC_INDEX_INVALID; ret = raftCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), meta, pSyncNode->configPath); ASSERT(ret == 0); @@ -1100,7 +1105,9 @@ void syncNodeClose(SSyncNode* pSyncNode) { } // option -bool syncNodeSnapshotEnable(SSyncNode* pSyncNode) { return pSyncNode->pRaftCfg->snapshotEnable; } +// bool syncNodeSnapshotEnable(SSyncNode* pSyncNode) { return pSyncNode->pRaftCfg->snapshotEnable; } + +ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode) { return pSyncNode->pRaftCfg->snapshotEnable; } // ping -------------- int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg) { diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index ad352df59f..e53b9d5e36 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -15,6 +15,7 @@ #include "syncMessage.h" #include "syncRaftCfg.h" +#include "syncRaftEntry.h" #include "syncUtil.h" #include "tcoding.h" @@ -996,7 +997,135 @@ SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, 
SRaftMet return pMsg; } -void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg) {} +void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg) { + memset(pRpcMsg, 0, sizeof(*pRpcMsg)); + pRpcMsg->msgType = pSyncMsg->msgType; + pRpcMsg->contLen = pSyncMsg->bytes; + pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen); + memcpy(pRpcMsg->pCont, pSyncMsg, pRpcMsg->contLen); +} + +void syncClientRequestBatchDestroy(SyncClientRequestBatch* pMsg) { + if (pMsg != NULL) { + taosMemoryFree(pMsg); + } +} + +void syncClientRequestBatchDestroyDeep(SyncClientRequestBatch* pMsg) { + if (pMsg != NULL) { + int32_t arrSize = pMsg->dataCount; + int32_t raftMetaArrayLen = sizeof(SRaftMeta) * arrSize; + SRpcMsg* msgArr = (SRpcMsg*)((char*)(pMsg->data) + raftMetaArrayLen); + for (int i = 0; i < arrSize; ++i) { + if (msgArr[i].pCont != NULL) { + rpcFreeCont(msgArr[i].pCont); + } + } + + taosMemoryFree(pMsg); + } +} + +SRaftMeta* syncClientRequestBatchMetaArr(const SyncClientRequestBatch* pSyncMsg) { + SRaftMeta* raftMetaArr = (SRaftMeta*)(pSyncMsg->data); + return raftMetaArr; +} + +SRpcMsg* syncClientRequestBatchRpcMsgArr(const SyncClientRequestBatch* pSyncMsg) { + int32_t arrSize = pSyncMsg->dataCount; + int32_t raftMetaArrayLen = sizeof(SRaftMeta) * arrSize; + SRpcMsg* msgArr = (SRpcMsg*)((char*)(pSyncMsg->data) + raftMetaArrayLen); + return msgArr; +} + +SyncClientRequestBatch* syncClientRequestBatchFromRpcMsg(const SRpcMsg* pRpcMsg) { + SyncClientRequestBatch* pSyncMsg = taosMemoryMalloc(pRpcMsg->contLen); + ASSERT(pSyncMsg != NULL); + memcpy(pSyncMsg, pRpcMsg->pCont, pRpcMsg->contLen); + ASSERT(pRpcMsg->contLen == pSyncMsg->bytes); + + return pSyncMsg; +} + +cJSON* syncClientRequestBatch2Json(const SyncClientRequestBatch* pMsg) { + char u64buf[128] = {0}; + cJSON* pRoot = cJSON_CreateObject(); + + if (pMsg != NULL) { + cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes); + cJSON_AddNumberToObject(pRoot, "vgId", pMsg->vgId); + cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType); + cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen); + cJSON_AddNumberToObject(pRoot, "dataCount", pMsg->dataCount); + + SRaftMeta* metaArr = syncClientRequestBatchMetaArr(pMsg); + SRpcMsg* msgArr = syncClientRequestBatchRpcMsgArr(pMsg); + + cJSON* pMetaArr = cJSON_CreateArray(); + cJSON_AddItemToObject(pRoot, "metaArr", pMetaArr); + for (int i = 0; i < pMsg->dataCount; ++i) { + cJSON* pMeta = cJSON_CreateObject(); + cJSON_AddNumberToObject(pMeta, "seqNum", metaArr[i].seqNum); + cJSON_AddNumberToObject(pMeta, "isWeak", metaArr[i].isWeak); + cJSON_AddItemToArray(pMetaArr, pMeta); + } + + cJSON* pMsgArr = cJSON_CreateArray(); + cJSON_AddItemToObject(pRoot, "msgArr", pMsgArr); + for (int i = 0; i < pMsg->dataCount; ++i) { + cJSON* pRpcMsgJson = syncRpcMsg2Json(&msgArr[i]); + cJSON_AddItemToArray(pMsgArr, pRpcMsgJson); + } + + char* s; + s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen); + cJSON_AddStringToObject(pRoot, "data", s); + taosMemoryFree(s); + s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen); + cJSON_AddStringToObject(pRoot, "data2", s); + taosMemoryFree(s); + } + + cJSON* pJson = cJSON_CreateObject(); + cJSON_AddItemToObject(pJson, "SyncClientRequestBatch", pRoot); + return pJson; +} + +char* syncClientRequestBatch2Str(const SyncClientRequestBatch* pMsg) { + cJSON* pJson = syncClientRequestBatch2Json(pMsg); + char* serialized = cJSON_Print(pJson); + cJSON_Delete(pJson); + return serialized; +} + +// for debug 
---------------------- +void syncClientRequestBatchPrint(const SyncClientRequestBatch* pMsg) { + char* serialized = syncClientRequestBatch2Str(pMsg); + printf("syncClientRequestBatchPrint | len:%lu | %s \n", strlen(serialized), serialized); + fflush(NULL); + taosMemoryFree(serialized); +} + +void syncClientRequestBatchPrint2(char* s, const SyncClientRequestBatch* pMsg) { + char* serialized = syncClientRequestBatch2Str(pMsg); + printf("syncClientRequestBatchPrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized); + fflush(NULL); + taosMemoryFree(serialized); +} + +void syncClientRequestBatchLog(const SyncClientRequestBatch* pMsg) { + char* serialized = syncClientRequestBatch2Str(pMsg); + sTrace("syncClientRequestBatchLog | len:%lu | %s", strlen(serialized), serialized); + taosMemoryFree(serialized); +} + +void syncClientRequestBatchLog2(char* s, const SyncClientRequestBatch* pMsg) { + if (gRaftDetailLog) { + char* serialized = syncClientRequestBatch2Str(pMsg); + sTraceLong("syncClientRequestBatchLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized); + taosMemoryFree(serialized); + } +} // ---- message process SyncRequestVote---- SyncRequestVote* syncRequestVoteBuild(int32_t vgId) { @@ -1472,21 +1601,20 @@ void syncAppendEntriesLog2(char* s, const SyncAppendEntries* pMsg) { // block1: SOffsetAndContLen // block2: SOffsetAndContLen Array -// block3: SRpcMsg Array -// block4: SRpcMsg pCont Array +// block3: entry Array -SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t arrSize, int32_t vgId) { - ASSERT(rpcMsgArr != NULL); +SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SSyncRaftEntry** entryPArr, int32_t arrSize, int32_t vgId) { + ASSERT(entryPArr != NULL); ASSERT(arrSize > 0); int32_t dataLen = 0; int32_t metaArrayLen = sizeof(SOffsetAndContLen) * arrSize; // - int32_t rpcArrayLen = sizeof(SRpcMsg) * arrSize; // SRpcMsg - int32_t contArrayLen = 0; + int32_t entryArrayLen = 0; for (int i = 0; i < arrSize; ++i) { // SRpcMsg pCont - contArrayLen += rpcMsgArr[i].contLen; + SSyncRaftEntry* pEntry = entryPArr[i]; + entryArrayLen += pEntry->bytes; } - dataLen += (metaArrayLen + rpcArrayLen + contArrayLen); + dataLen += (metaArrayLen + entryArrayLen); uint32_t bytes = sizeof(SyncAppendEntriesBatch) + dataLen; SyncAppendEntriesBatch* pMsg = taosMemoryMalloc(bytes); @@ -1498,30 +1626,30 @@ SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t pMsg->dataLen = dataLen; SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pMsg->data); - SRpcMsg* msgArr = (SRpcMsg*)((char*)(pMsg->data) + metaArrayLen); char* pData = pMsg->data; for (int i = 0; i < arrSize; ++i) { - // init + // init meta if (i == 0) { - metaArr[i].offset = metaArrayLen + rpcArrayLen; - metaArr[i].contLen = rpcMsgArr[i].contLen; + metaArr[i].offset = metaArrayLen; + metaArr[i].contLen = entryPArr[i]->bytes; } else { metaArr[i].offset = metaArr[i - 1].offset + metaArr[i - 1].contLen; - metaArr[i].contLen = rpcMsgArr[i].contLen; + metaArr[i].contLen = entryPArr[i]->bytes; } - // init msgArr - msgArr[i] = rpcMsgArr[i]; - - // init data - ASSERT(rpcMsgArr[i].contLen == metaArr[i].contLen); - memcpy(pData + metaArr[i].offset, rpcMsgArr[i].pCont, rpcMsgArr[i].contLen); + // init entry array + ASSERT(metaArr[i].contLen == entryPArr[i]->bytes); + memcpy(pData + metaArr[i].offset, entryPArr[i], metaArr[i].contLen); } return pMsg; } +SOffsetAndContLen* syncAppendEntriesBatchMetaTableArray(SyncAppendEntriesBatch* pMsg) { + return (SOffsetAndContLen*)(pMsg->data); +} + void 
syncAppendEntriesBatchDestroy(SyncAppendEntriesBatch* pMsg) { if (pMsg != NULL) { taosMemoryFree(pMsg); @@ -1634,16 +1762,12 @@ cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg) { cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen); int32_t metaArrayLen = sizeof(SOffsetAndContLen) * pMsg->dataCount; // - int32_t rpcArrayLen = sizeof(SRpcMsg) * pMsg->dataCount; // SRpcMsg - int32_t contArrayLen = pMsg->dataLen - metaArrayLen - rpcArrayLen; + int32_t entryArrayLen = pMsg->dataLen - metaArrayLen; cJSON_AddNumberToObject(pRoot, "metaArrayLen", metaArrayLen); - cJSON_AddNumberToObject(pRoot, "rpcArrayLen", rpcArrayLen); - cJSON_AddNumberToObject(pRoot, "contArrayLen", contArrayLen); + cJSON_AddNumberToObject(pRoot, "entryArrayLen", entryArrayLen); SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pMsg->data); - SRpcMsg* msgArr = (SRpcMsg*)(pMsg->data + metaArrayLen); - void* pData = (void*)(pMsg->data + metaArrayLen + rpcArrayLen); cJSON* pMetaArr = cJSON_CreateArray(); cJSON_AddItemToObject(pRoot, "metaArr", pMetaArr); @@ -1654,14 +1778,12 @@ cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg) { cJSON_AddItemToArray(pMetaArr, pMeta); } - cJSON* pMsgArr = cJSON_CreateArray(); - cJSON_AddItemToObject(pRoot, "msgArr", pMsgArr); + cJSON* pEntryArr = cJSON_CreateArray(); + cJSON_AddItemToObject(pRoot, "entryArr", pEntryArr); for (int i = 0; i < pMsg->dataCount; ++i) { - cJSON* pRpcMsgJson = cJSON_CreateObject(); - cJSON_AddNumberToObject(pRpcMsgJson, "code", msgArr[i].code); - cJSON_AddNumberToObject(pRpcMsgJson, "contLen", msgArr[i].contLen); - cJSON_AddNumberToObject(pRpcMsgJson, "msgType", msgArr[i].msgType); - cJSON_AddItemToArray(pMsgArr, pRpcMsgJson); + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)(pMsg->data + metaArr[i].offset); + cJSON* pEntryJson = syncEntry2Json(pEntry); + cJSON_AddItemToArray(pEntryArr, pEntryJson); } char* s; @@ -1685,33 +1807,6 @@ char* syncAppendEntriesBatch2Str(const SyncAppendEntriesBatch* pMsg) { return serialized; } -void syncAppendEntriesBatch2RpcMsgArray(SyncAppendEntriesBatch* pSyncMsg, SRpcMsg* rpcMsgArr, int32_t maxArrSize, - int32_t* pRetArrSize) { - if (pRetArrSize != NULL) { - *pRetArrSize = pSyncMsg->dataCount; - } - - int32_t arrSize = pSyncMsg->dataCount; - if (arrSize > maxArrSize) { - arrSize = maxArrSize; - } - - int32_t metaArrayLen = sizeof(SOffsetAndContLen) * pSyncMsg->dataCount; // - int32_t rpcArrayLen = sizeof(SRpcMsg) * pSyncMsg->dataCount; // SRpcMsg - int32_t contArrayLen = pSyncMsg->dataLen - metaArrayLen - rpcArrayLen; - - SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pSyncMsg->data); - SRpcMsg* msgArr = (SRpcMsg*)(pSyncMsg->data + metaArrayLen); - void* pData = pSyncMsg->data + metaArrayLen + rpcArrayLen; - - for (int i = 0; i < arrSize; ++i) { - rpcMsgArr[i] = msgArr[i]; - rpcMsgArr[i].pCont = rpcMallocCont(msgArr[i].contLen); - void* pRpcCont = pSyncMsg->data + metaArr[i].offset; - memcpy(rpcMsgArr[i].pCont, pRpcCont, rpcMsgArr[i].contLen); - } -} - // for debug ---------------------- void syncAppendEntriesBatchPrint(const SyncAppendEntriesBatch* pMsg) { char* serialized = syncAppendEntriesBatch2Str(pMsg); @@ -2159,6 +2254,9 @@ cJSON* syncSnapshotSend2Json(const SyncSnapshotSend* pMsg) { snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->privateTerm); cJSON_AddStringToObject(pRoot, "privateTerm", u64buf); + snprintf(u64buf, sizeof(u64buf), "%ld", pMsg->beginIndex); + cJSON_AddStringToObject(pRoot, "beginIndex", u64buf); + snprintf(u64buf, sizeof(u64buf), "%ld", pMsg->lastIndex); 
cJSON_AddStringToObject(pRoot, "lastIndex", u64buf); diff --git a/source/libs/sync/src/syncRaftEntry.c b/source/libs/sync/src/syncRaftEntry.c index 225360630c..0ee4684ea8 100644 --- a/source/libs/sync/src/syncRaftEntry.c +++ b/source/libs/sync/src/syncRaftEntry.c @@ -50,6 +50,22 @@ SSyncRaftEntry* syncEntryBuild3(SyncClientRequest* pMsg, SyncTerm term, SyncInde return pEntry; } +SSyncRaftEntry* syncEntryBuild4(SRpcMsg* pOriginalMsg, SyncTerm term, SyncIndex index) { + SSyncRaftEntry* pEntry = syncEntryBuild(pOriginalMsg->contLen); + ASSERT(pEntry != NULL); + + pEntry->msgType = TDMT_SYNC_CLIENT_REQUEST; + pEntry->originalRpcType = pOriginalMsg->msgType; + pEntry->seqNum = 0; + pEntry->isWeak = 0; + pEntry->term = term; + pEntry->index = index; + pEntry->dataLen = pOriginalMsg->contLen; + memcpy(pEntry->data, pOriginalMsg->pCont, pOriginalMsg->contLen); + + return pEntry; +} + SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId) { // init rpcMsg SMsgHead head; diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index a026892629..83495e7486 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -32,6 +32,7 @@ static SyncTerm raftLogLastTerm(struct SSyncLogStore* pLogStore); static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry); static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIndex); +static bool raftLogExist(struct SSyncLogStore* pLogStore, SyncIndex index); // private function static int32_t raftLogGetLastEntry(SSyncLogStore* pLogStore, SSyncRaftEntry** ppLastEntry); @@ -83,6 +84,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) { pLogStore->syncLogGetEntry = raftLogGetEntry; pLogStore->syncLogTruncate = raftLogTruncate; pLogStore->syncLogWriteIndex = raftLogWriteIndex; + pLogStore->syncLogExist = raftLogExist; return pLogStore; } @@ -168,6 +170,13 @@ static SyncIndex raftLogWriteIndex(struct SSyncLogStore* pLogStore) { return lastVer + 1; } +static bool raftLogExist(struct SSyncLogStore* pLogStore, SyncIndex index) { + SSyncLogStoreData* pData = pLogStore->data; + SWal* pWal = pData->pWal; + bool b = walLogExist(pWal, index); + return b; +} + // if success, return last term // if not log, return 0 // if error, return SYNC_TERM_INVALID diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 4908822a3a..bcca44130a 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -145,26 +145,34 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { return -1; } - SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); + SSyncRaftEntry* entryPArr[SYNC_MAX_BATCH_SIZE]; + memset(entryPArr, 0, sizeof(entryPArr)); - int32_t getCount = 0; + int32_t getCount = 0; + SyncIndex getEntryIndex = nextIndex; for (int32_t i = 0; i < pSyncNode->batchSize; ++i) { SSyncRaftEntry* pEntry; - int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, nextIndex, &pEntry); + int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, getEntryIndex, &pEntry); if (code == 0) { ASSERT(pEntry != NULL); - // get rpc msg [i] from entry - syncEntryDestory(pEntry); + entryPArr[i] = pEntry; getCount++; } else { break; } } - SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(rpcMsgArr, 
getCount, pSyncNode->vgId); + SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(entryPArr, getCount, pSyncNode->vgId); ASSERT(pMsg != NULL); + for (int32_t i = 0; i < pSyncNode->batchSize; ++i) { + SSyncRaftEntry* pEntry = entryPArr[i]; + if (pEntry != NULL) { + syncEntryDestory(pEntry); + entryPArr[i] = NULL; + } + } + // prepare msg pMsg->srcId = pSyncNode->myRaftId; pMsg->destId = *pDestId; diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c index d2cbabe226..990a92aad7 100644 --- a/source/libs/sync/src/syncRespMgr.c +++ b/source/libs/sync/src/syncRespMgr.c @@ -14,6 +14,7 @@ */ #include "syncRespMgr.h" +#include "syncRaftEntry.h" #include "syncRaftStore.h" SSyncRespMgr *syncRespMgrCreate(void *data, int64_t ttl) { @@ -116,4 +117,59 @@ void syncRespClean(SSyncRespMgr *pObj) { taosThreadMutexUnlock(&(pObj->mutex)); } -void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) {} +void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) { + SRespStub *pStub = (SRespStub *)taosHashIterate(pObj->pRespHash, NULL); + int cnt = 0; + SSyncNode *pSyncNode = pObj->data; + + SArray *delIndexArray = taosArrayInit(0, sizeof(SyncIndex)); + ASSERT(delIndexArray != NULL); + + while (pStub) { + size_t len; + void *key = taosHashGetKey(pStub, &len); + SyncIndex *pIndex = (SyncIndex *)key; + + int64_t nowMS = taosGetTimestampMs(); + if (nowMS - pStub->createTime > ttl) { + taosArrayPush(delIndexArray, pIndex); + cnt++; + + SSyncRaftEntry *pEntry = NULL; + int32_t code = 0; + if (pSyncNode->pLogStore != NULL) { + code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, *pIndex, &pEntry); + if (code == 0 && pEntry != NULL) { + SFsmCbMeta cbMeta = {0}; + cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(pSyncNode, cbMeta.index); + cbMeta.isWeak = pEntry->isWeak; + cbMeta.code = TSDB_CODE_SYN_TIMEOUT; + cbMeta.state = pSyncNode->state; + cbMeta.seqNum = pEntry->seqNum; + cbMeta.term = pEntry->term; + cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; + cbMeta.flag = 0; + + SRpcMsg rpcMsg = pStub->rpcMsg; + rpcMsg.pCont = rpcMallocCont(pEntry->dataLen); + memcpy(rpcMsg.pCont, pEntry->data, pEntry->dataLen); + pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &rpcMsg, cbMeta); + + syncEntryDestory(pEntry); + } + } + } + + pStub = (SRespStub *)taosHashIterate(pObj->pRespHash, pStub); + } + + int32_t arraySize = taosArrayGetSize(delIndexArray); + sDebug("vgId:%d, resp clean by ttl, cnt:%d, array-size:%d", pSyncNode->vgId, cnt, arraySize); + + for (int32_t i = 0; i < arraySize; ++i) { + SyncIndex *pIndex = taosArrayGet(delIndexArray, i); + taosHashRemove(pObj->pRespHash, pIndex, sizeof(SyncIndex)); + } + taosArrayDestroy(delIndexArray); +} diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 4466cccbbc..5cdfec72c5 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -22,9 +22,11 @@ #include "wal.h" //---------------------------------- -static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, - SyncSnapshotSend *pBeginMsg); -static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); +static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg); +static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg); +static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver); +static void 
snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); +static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); //---------------------------------- SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaIndex) { @@ -68,7 +70,9 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) { // close reader if (pSender->pReader != NULL) { int32_t ret = pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader); - ASSERT(ret == 0); + if (ret != 0) { + syncNodeErrorLog(pSender->pSyncNode, "stop reader error"); + } pSender->pReader = NULL; } @@ -79,14 +83,21 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) { bool snapshotSenderIsStart(SSyncSnapshotSender *pSender) { return pSender->start; } -// begin send snapshot by snapshot, pReader -int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader) { +// begin send snapshot by param, snapshot, pReader +// +// action: +// 1. assert reader not start +// 2. update state +// 3. send first snapshot block +int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapshotParam, SSnapshot snapshot, + void *pReader) { ASSERT(!snapshotSenderIsStart(pSender)); - // init snapshot and reader + // init snapshot, parm, reader ASSERT(pSender->pReader == NULL); pSender->pReader = pReader; pSender->snapshot = snapshot; + pSender->snapshotParam = snapshotParam; // init current block if (pSender->pCurrentBlock != NULL) { @@ -96,7 +107,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, vo // update term pSender->term = pSender->pSyncNode->pRaftStore->currentTerm; - ++(pSender->privateTerm); + ++(pSender->privateTerm); // increase private term // update state pSender->finish = false; @@ -112,9 +123,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, vo code = pSender->pSyncNode->pLogStore->syncLogGetEntry(pSender->pSyncNode->pLogStore, pSender->snapshot.lastConfigIndex, &pEntry); - if (code == 0) { - ASSERT(pEntry != NULL); - + if (code == 0 && pEntry != NULL) { SRpcMsg rpcMsg; syncEntry2OriginalRpc(pEntry, &rpcMsg); @@ -162,6 +171,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, vo pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex]; pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; @@ -204,6 +214,8 @@ int32_t snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) { pSender->start = false; pSender->finish = finish; + // do not update term, maybe print + // event log do { char *eventLog = snapshotSender2SimpleStr(pSender, "snapshot sender stop"); @@ -240,6 +252,7 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) { pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex]; pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; @@ -278,11 +291,13 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { pMsg->srcId = 
pSender->pSyncNode->myRaftId; pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex]; pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; pMsg->lastConfig = pSender->lastConfig; pMsg->seq = pSender->seq; + pMsg->privateTerm = pSender->privateTerm; memcpy(pMsg->data, pSender->pCurrentBlock, pSender->blockLen); // send msg @@ -302,6 +317,12 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { return 0; } +static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { + ASSERT(pMsg->ack == pSender->seq); + pSender->ack = pMsg->ack; + ++(pSender->seq); +} + cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { char u64buf[128]; cJSON *pRoot = cJSON_CreateObject(); @@ -368,10 +389,11 @@ char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) { syncUtilU642Addr(destId.addr, host, sizeof(host), &port); snprintf(s, len, - "%s {%p laindex:%ld laterm:%lu lcindex:%ld seq:%d ack:%d finish:%d pterm:%lu replica-index:%d %s:%d}", event, - pSender, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, - pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->privateTerm, - pSender->replicaIndex, host, port); + "%s {%p s-param:%ld e-param:%ld laindex:%ld laterm:%lu lcindex:%ld seq:%d ack:%d finish:%d pterm:%lu " + "replica-index:%d %s:%d}", + event, pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex, + pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, + pSender->finish, pSender->privateTerm, pSender->replicaIndex, host, port); return s; } @@ -426,11 +448,10 @@ bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver) { return pReceive // static do start by privateTerm, pBeginMsg // receive first snapshot data // write first block data -static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, - SyncSnapshotSend *pBeginMsg) { +static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) { // update state pReceiver->term = pReceiver->pSyncNode->pRaftStore->currentTerm; - pReceiver->privateTerm = privateTerm; + pReceiver->privateTerm = pBeginMsg->privateTerm; pReceiver->ack = SYNC_SNAPSHOT_SEQ_BEGIN; pReceiver->fromId = pBeginMsg->srcId; pReceiver->start = true; @@ -439,10 +460,13 @@ static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm p pReceiver->snapshot.lastApplyIndex = pBeginMsg->lastIndex; pReceiver->snapshot.lastApplyTerm = pBeginMsg->lastTerm; pReceiver->snapshot.lastConfigIndex = pBeginMsg->lastConfigIndex; + pReceiver->snapshotParam.start = pBeginMsg->beginIndex; + pReceiver->snapshotParam.end = pBeginMsg->lastIndex; - // write data + // start writer ASSERT(pReceiver->pWriter == NULL); - int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStartWrite(pReceiver->pSyncNode->pFsm, &(pReceiver->pWriter)); + int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStartWrite(pReceiver->pSyncNode->pFsm, + &(pReceiver->snapshotParam), &(pReceiver->pWriter)); ASSERT(ret == 0); // event log @@ -475,10 +499,10 @@ static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) { // if receiver receive msg from seq = SYNC_SNAPSHOT_SEQ_BEGIN, start receiver // if already start, 
force close, start again -int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg) { +int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) { if (!snapshotReceiverIsStart(pReceiver)) { // first start - snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg); + snapshotReceiverDoStart(pReceiver, pBeginMsg); } else { // already start @@ -488,12 +512,14 @@ int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm private snapshotReceiverForceStop(pReceiver); // start again - snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg); + snapshotReceiverDoStart(pReceiver, pBeginMsg); } return 0; } +// just set start = false +// FpSnapshotStopWrite should not be called, assert writer == NULL int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) { if (pReceiver->pWriter != NULL) { int32_t ret = @@ -516,6 +542,7 @@ int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) { return 0; } +// when recv last snapshot block, apply data into snapshot static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) { ASSERT(pMsg->seq == SYNC_SNAPSHOT_SEQ_END); @@ -544,7 +571,7 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap pReceiver->pSyncNode->commitIndex = pReceiver->snapshot.lastApplyIndex; } - // stop writer + // stop writer, apply data code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true); if (code != 0) { syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error"); @@ -573,15 +600,20 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap return 0; } +// apply data block +// update progress static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) { ASSERT(pMsg->seq == pReceiver->ack + 1); if (pReceiver->pWriter != NULL) { if (pMsg->dataLen > 0) { + // apply data block int32_t code = pReceiver->pSyncNode->pFsm->FpSnapshotDoWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, pMsg->data, pMsg->dataLen); ASSERT(code == 0); } + + // update progress pReceiver->ack = pMsg->seq; // event log @@ -659,14 +691,23 @@ char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event) uint16_t port; syncUtilU642Addr(fromId.addr, host, sizeof(host), &port); - snprintf(s, len, "%s {%p start:%d ack:%d term:%lu pterm:%lu from:%s:%d laindex:%ld laterm:%lu lcindex:%ld}", event, - pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->privateTerm, host, port, - pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex); + snprintf(s, len, + "%s {%p start:%d ack:%d term:%lu pterm:%lu from:%s:%d s-param:%ld e-param:%ld laindex:%ld laterm:%lu " + "lcindex:%ld}", + event, pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->privateTerm, host, port, + pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex, + pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex); return s; } -// receiver do something +// receiver on message +// +// condition 1, recv SYNC_SNAPSHOT_SEQ_BEGIN, start receiver, update privateTerm +// condition 2, recv SYNC_SNAPSHOT_SEQ_END, finish receiver(apply snapshot data, update commit index, maybe reconfig) +// condition 3, recv SYNC_SNAPSHOT_SEQ_FORCE_CLOSE, force close +// condition 4, got data, update ack +// int32_t 
syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { // get receiver SSyncSnapshotReceiver *pReceiver = pSyncNode->pNewNodeReceiver; @@ -677,11 +718,13 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) { if (pMsg->term == pSyncNode->pRaftStore->currentTerm) { if (pMsg->seq == SYNC_SNAPSHOT_SEQ_BEGIN) { + // condition 1 // begin, no data - snapshotReceiverStart(pReceiver, pMsg->privateTerm, pMsg); + snapshotReceiverStart(pReceiver, pMsg); needRsp = true; } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_END) { + // condition 2 // end, finish FSM code = snapshotReceiverFinish(pReceiver, pMsg); if (code == 0) { @@ -691,7 +734,6 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { // maybe update lastconfig if (pMsg->lastConfigIndex >= SYNC_INDEX_BEGIN) { - // int32_t oldReplicaNum = pSyncNode->replicaNum; SSyncCfg oldSyncCfg = pSyncNode->pRaftCfg->cfg; // update new config myIndex @@ -703,11 +745,13 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { } } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_FORCE_CLOSE) { + // condition 3 // force close snapshotReceiverForceStop(pReceiver); needRsp = false; } else if (pMsg->seq > SYNC_SNAPSHOT_SEQ_BEGIN && pMsg->seq < SYNC_SNAPSHOT_SEQ_END) { + // condition 4 // transfering if (pMsg->seq == pReceiver->ack + 1) { snapshotReceiverGotData(pReceiver, pMsg); @@ -746,6 +790,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { syncNodeSendMsgById(&(pRspMsg->destId), pSyncNode, &rpcMsg); syncSnapshotRspDestroy(pRspMsg); } + } else { // error log do { @@ -753,6 +798,8 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { syncNodeErrorLog(pSyncNode, eventLog); taosMemoryFree(eventLog); } while (0); + + return -1; } } else { // error log @@ -761,19 +808,19 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { syncNodeErrorLog(pSyncNode, eventLog); taosMemoryFree(eventLog); } while (0); + + return -1; } return 0; } -static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) { - ASSERT(pMsg->ack == pSender->seq); - pSender->ack = pMsg->ack; - ++(pSender->seq); -} - -// sender receives ack, set seq = ack + 1, send msg from seq -// if ack == SYNC_SNAPSHOT_SEQ_END, stop sender +// sender on message +// +// condition 1 sender receives SYNC_SNAPSHOT_SEQ_END, close sender +// condition 2 sender receives ack, set seq = ack + 1, send msg from seq +// condition 3 sender receives error msg, just print error log +// int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) { // if already drop replica, do not process if (!syncNodeInRaftGroup(pSyncNode, &(pMsg->srcId)) && pSyncNode->state == TAOS_SYNC_STATE_LEADER) { @@ -788,12 +835,14 @@ int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) { // state, term, seq/ack if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { if (pMsg->term == pSyncNode->pRaftStore->currentTerm) { - // receiver ack is finish, close sender + // condition 1 + // receive ack is finish, close sender if (pMsg->ack == SYNC_SNAPSHOT_SEQ_END) { snapshotSenderStop(pSender, true); return 0; } + // condition 2 // send next msg if (pMsg->ack == pSender->seq) { // update sender ack @@ -801,6 +850,7 @@ int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) { snapshotSend(pSender); } else if (pMsg->ack == pSender->seq - 1) { + 
// maybe resend snapshotReSend(pSender); } else { diff --git a/source/libs/sync/test/CMakeLists.txt b/source/libs/sync/test/CMakeLists.txt index c549b78399..37d9707cfd 100644 --- a/source/libs/sync/test/CMakeLists.txt +++ b/source/libs/sync/test/CMakeLists.txt @@ -24,6 +24,7 @@ add_executable(syncAppendEntriesTest "") add_executable(syncAppendEntriesBatchTest "") add_executable(syncAppendEntriesReplyTest "") add_executable(syncClientRequestTest "") +add_executable(syncClientRequestBatchTest "") add_executable(syncTimeoutTest "") add_executable(syncPingTest "") add_executable(syncPingReplyTest "") @@ -159,6 +160,10 @@ target_sources(syncClientRequestTest PRIVATE "syncClientRequestTest.cpp" ) +target_sources(syncClientRequestBatchTest + PRIVATE + "syncClientRequestBatchTest.cpp" +) target_sources(syncTimeoutTest PRIVATE "syncTimeoutTest.cpp" @@ -407,6 +412,11 @@ target_include_directories(syncClientRequestTest "${TD_SOURCE_DIR}/include/libs/sync" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) +target_include_directories(syncClientRequestBatchTest + PUBLIC + "${TD_SOURCE_DIR}/include/libs/sync" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" +) target_include_directories(syncTimeoutTest PUBLIC "${TD_SOURCE_DIR}/include/libs/sync" @@ -658,6 +668,10 @@ target_link_libraries(syncClientRequestTest sync gtest_main ) +target_link_libraries(syncClientRequestBatchTest + sync + gtest_main +) target_link_libraries(syncTimeoutTest sync gtest_main diff --git a/source/libs/sync/test/syncAppendEntriesBatchTest.cpp b/source/libs/sync/test/syncAppendEntriesBatchTest.cpp index 8784ddd637..515d580b35 100644 --- a/source/libs/sync/test/syncAppendEntriesBatchTest.cpp +++ b/source/libs/sync/test/syncAppendEntriesBatchTest.cpp @@ -3,6 +3,7 @@ #include "syncIO.h" #include "syncInt.h" #include "syncMessage.h" +#include "syncRaftEntry.h" #include "syncUtil.h" #include "trpc.h" @@ -15,30 +16,29 @@ void logTest() { sFatal("--- sync log test: fatal"); } -SRpcMsg *createRpcMsg(int32_t i, int32_t dataLen) { - SRpcMsg *pRpcMsg = (SRpcMsg *)taosMemoryMalloc(sizeof(SRpcMsg)); - memset(pRpcMsg, 0, sizeof(SRpcMsg)); - - pRpcMsg->msgType = TDMT_SYNC_PING; - pRpcMsg->contLen = dataLen; - pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen); - pRpcMsg->code = 10 * i; - snprintf((char *)pRpcMsg->pCont, pRpcMsg->contLen, "value_%d", i); - - return pRpcMsg; +SSyncRaftEntry *createEntry(int i) { + SSyncRaftEntry *pEntry = syncEntryBuild(20); + assert(pEntry != NULL); + pEntry->msgType = 1; + pEntry->originalRpcType = 2; + pEntry->seqNum = 3; + pEntry->isWeak = true; + pEntry->term = 100; + pEntry->index = 200; + snprintf(pEntry->data, pEntry->dataLen, "value_%d", i); + return pEntry; } SyncAppendEntriesBatch *createMsg() { - SRpcMsg rpcMsgArr[5]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); + SSyncRaftEntry *entryPArr[5]; + memset(entryPArr, 0, sizeof(entryPArr)); for (int32_t i = 0; i < 5; ++i) { - SRpcMsg *pRpcMsg = createRpcMsg(i, 20); - rpcMsgArr[i] = *pRpcMsg; - taosMemoryFree(pRpcMsg); + SSyncRaftEntry *pEntry = createEntry(i); + entryPArr[i] = pEntry; } - SyncAppendEntriesBatch *pMsg = syncAppendEntriesBatchBuild(rpcMsgArr, 5, 1234); + SyncAppendEntriesBatch *pMsg = syncAppendEntriesBatchBuild(entryPArr, 5, 1234); pMsg->srcId.addr = syncUtilAddr2U64("127.0.0.1", 1234); pMsg->srcId.vgId = 100; pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678); @@ -52,17 +52,17 @@ SyncAppendEntriesBatch *createMsg() { void test1() { SyncAppendEntriesBatch *pMsg = createMsg(); - syncAppendEntriesBatchLog2((char *)"test1:", pMsg); + 
syncAppendEntriesBatchLog2((char *)"==test1==", pMsg); - SRpcMsg rpcMsgArr[5]; - int32_t retArrSize; - syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, 5, &retArrSize); +/* + SOffsetAndContLen *metaArr = syncAppendEntriesBatchMetaTableArray(pMsg); + int32_t retArrSize = pMsg->dataCount; for (int i = 0; i < retArrSize; ++i) { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "==test1 decode rpc msg %d: msgType:%d, code:%d, contLen:%d, pCont:%s \n", i, - rpcMsgArr[i].msgType, rpcMsgArr[i].code, rpcMsgArr[i].contLen, (char *)rpcMsgArr[i].pCont); - sTrace("%s", logBuf); + SSyncRaftEntry *pEntry = (SSyncRaftEntry*)(pMsg->data + metaArr[i].offset); + ASSERT(pEntry->bytes == metaArr[i].contLen); + syncEntryPrint(pEntry); } +*/ syncAppendEntriesBatchDestroy(pMsg); } diff --git a/source/libs/sync/test/syncClientRequestBatchTest.cpp b/source/libs/sync/test/syncClientRequestBatchTest.cpp new file mode 100644 index 0000000000..ae74baeda4 --- /dev/null +++ b/source/libs/sync/test/syncClientRequestBatchTest.cpp @@ -0,0 +1,125 @@ +#include +#include +#include "syncIO.h" +#include "syncInt.h" +#include "syncMessage.h" +#include "syncUtil.h" + +void logTest() { + sTrace("--- sync log test: trace"); + sDebug("--- sync log test: debug"); + sInfo("--- sync log test: info"); + sWarn("--- sync log test: warn"); + sError("--- sync log test: error"); + sFatal("--- sync log test: fatal"); +} + +SRpcMsg *createRpcMsg(int32_t i, int32_t dataLen) { + SyncPing *pSyncMsg = syncPingBuild(20); + snprintf(pSyncMsg->data, pSyncMsg->dataLen, "value_%d", i); + + SRpcMsg *pRpcMsg = (SRpcMsg *)taosMemoryMalloc(sizeof(SRpcMsg)); + memset(pRpcMsg, 0, sizeof(SRpcMsg)); + pRpcMsg->code = 10 * i; + syncPing2RpcMsg(pSyncMsg, pRpcMsg); + + syncPingDestroy(pSyncMsg); + return pRpcMsg; +} + +SyncClientRequestBatch *createMsg() { + SRpcMsg rpcMsgArr[5]; + memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); + for (int32_t i = 0; i < 5; ++i) { + SRpcMsg *pRpcMsg = createRpcMsg(i, 20); + rpcMsgArr[i] = *pRpcMsg; + taosMemoryFree(pRpcMsg); + } + + SRaftMeta raftArr[5]; + memset(raftArr, 0, sizeof(raftArr)); + for (int32_t i = 0; i < 5; ++i) { + raftArr[i].seqNum = i * 10; + raftArr[i].isWeak = i % 2; + } + + SyncClientRequestBatch *pMsg = syncClientRequestBatchBuild(rpcMsgArr, raftArr, 5, 1234); + return pMsg; +} + +void test1() { + SyncClientRequestBatch *pMsg = createMsg(); + syncClientRequestBatchLog2((char *)"==test1==", pMsg); + syncClientRequestBatchDestroyDeep(pMsg); +} + +/* +void test2() { + SyncClientRequest *pMsg = createMsg(); + uint32_t len = pMsg->bytes; + char * serialized = (char *)taosMemoryMalloc(len); + syncClientRequestSerialize(pMsg, serialized, len); + SyncClientRequest *pMsg2 = syncClientRequestBuild(pMsg->dataLen); + syncClientRequestDeserialize(serialized, len, pMsg2); + syncClientRequestLog2((char *)"test2: syncClientRequestSerialize -> syncClientRequestDeserialize ", pMsg2); + + taosMemoryFree(serialized); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} + +void test3() { + SyncClientRequest *pMsg = createMsg(); + uint32_t len; + char * serialized = syncClientRequestSerialize2(pMsg, &len); + SyncClientRequest *pMsg2 = syncClientRequestDeserialize2(serialized, len); + syncClientRequestLog2((char *)"test3: syncClientRequestSerialize3 -> syncClientRequestDeserialize2 ", pMsg2); + + taosMemoryFree(serialized); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} + +void test4() { + SyncClientRequest *pMsg = createMsg(); + SRpcMsg rpcMsg; + syncClientRequest2RpcMsg(pMsg, &rpcMsg); 
+ SyncClientRequest *pMsg2 = (SyncClientRequest *)taosMemoryMalloc(rpcMsg.contLen); + syncClientRequestFromRpcMsg(&rpcMsg, pMsg2); + syncClientRequestLog2((char *)"test4: syncClientRequest2RpcMsg -> syncClientRequestFromRpcMsg ", pMsg2); + + rpcFreeCont(rpcMsg.pCont); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} + +void test5() { + SyncClientRequest *pMsg = createMsg(); + SRpcMsg rpcMsg; + syncClientRequest2RpcMsg(pMsg, &rpcMsg); + SyncClientRequest *pMsg2 = syncClientRequestFromRpcMsg2(&rpcMsg); + syncClientRequestLog2((char *)"test5: syncClientRequest2RpcMsg -> syncClientRequestFromRpcMsg2 ", pMsg2); + + rpcFreeCont(rpcMsg.pCont); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} +*/ + +int main() { + gRaftDetailLog = true; + tsAsyncLog = 0; + sDebugFlag = DEBUG_DEBUG + DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE; + logTest(); + + test1(); + + /* +test2(); +test3(); +test4(); +test5(); +*/ + + return 0; +} diff --git a/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp b/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp index 2908dd5907..968baff952 100644 --- a/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp +++ b/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp @@ -77,7 +77,7 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; } -int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) { +int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void* pParam, void** ppReader) { *ppReader = (void*)0xABCD; char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartRead== pFsm:%p, *ppReader:%p", pFsm, *ppReader); @@ -114,7 +114,7 @@ int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32 return 0; } -int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void** ppWriter) { +int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void *pParam, void** ppWriter) { *ppWriter = (void*)0xCDEF; char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartWrite== pFsm:%p, *ppWriter:%p", pFsm, *ppWriter); @@ -198,7 +198,7 @@ int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex); syncInfo.pWal = pWal; syncInfo.isStandBy = isStandBy; - syncInfo.snapshotEnable = true; + syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT; SSyncCfg* pCfg = &syncInfo.syncCfg; diff --git a/source/libs/sync/test/syncIndexTest.cpp b/source/libs/sync/test/syncIndexTest.cpp index 1cf2847b5c..8627a6c174 100644 --- a/source/libs/sync/test/syncIndexTest.cpp +++ b/source/libs/sync/test/syncIndexTest.cpp @@ -8,7 +8,13 @@ void print(SHashObj *pNextIndex) { printf("----------------\n"); uint64_t *p = (uint64_t *)taosHashIterate(pNextIndex, NULL); while (p) { - printf("%lu \n", *p); + + size_t len; + void* key = taosHashGetKey(p, &len); + + SRaftId *pRaftId = (SRaftId*)key; + + printf("key:<%lu, %d>, value:%lu \n", pRaftId->addr, pRaftId->vgId, *p); p = (uint64_t *)taosHashIterate(pNextIndex, p); } } diff --git a/source/libs/sync/test/syncRespMgrTest.cpp b/source/libs/sync/test/syncRespMgrTest.cpp index 495b82bed7..fd18109280 100644 --- a/source/libs/sync/test/syncRespMgrTest.cpp +++ b/source/libs/sync/test/syncRespMgrTest.cpp @@ -73,9 +73,15 @@ void syncRespMgrGetAndDelTest(uint64_t i) { } } +SSyncNode *createSyncNode() { + SSyncNode *pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode)); + memset(pSyncNode, 0, 
sizeof(SSyncNode)); + return pSyncNode; +} + void test1() { printf("------- test1 ---------\n"); - pMgr = syncRespMgrCreate(NULL, 0); + pMgr = syncRespMgrCreate(createSyncNode(), 0); assert(pMgr != NULL); syncRespMgrInsert(10); @@ -100,7 +106,7 @@ void test1() { void test2() { printf("------- test2 ---------\n"); - pMgr = syncRespMgrCreate(NULL, 0); + pMgr = syncRespMgrCreate(createSyncNode(), 0); assert(pMgr != NULL); syncRespMgrInsert(10); @@ -117,7 +123,7 @@ void test2() { void test3() { printf("------- test3 ---------\n"); - pMgr = syncRespMgrCreate(NULL, 0); + pMgr = syncRespMgrCreate(createSyncNode(), 0); assert(pMgr != NULL); syncRespMgrInsert(10); @@ -132,13 +138,34 @@ void test3() { syncRespMgrDestroy(pMgr); } +void test4() { + printf("------- test4 ---------\n"); + pMgr = syncRespMgrCreate(createSyncNode(), 2); + assert(pMgr != NULL); + + syncRespMgrInsert(5); + syncRespMgrPrint(); + + taosMsleep(3000); + + syncRespMgrInsert(3); + syncRespMgrPrint(); + + printf("====== after clean ttl \n"); + syncRespClean(pMgr); + syncRespMgrPrint(); + + syncRespMgrDestroy(pMgr); +} + int main() { tsAsyncLog = 0; - sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE; + sDebugFlag = DEBUG_DEBUG + DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE; logTest(); test1(); test2(); test3(); + test4(); return 0; } diff --git a/source/libs/sync/test/syncSnapshotReceiverTest.cpp b/source/libs/sync/test/syncSnapshotReceiverTest.cpp index 208a96daa4..b4bf08dd40 100644 --- a/source/libs/sync/test/syncSnapshotReceiverTest.cpp +++ b/source/libs/sync/test/syncSnapshotReceiverTest.cpp @@ -29,7 +29,7 @@ int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void** ppReader) { return 0; } int32_t SnapshotStopRead(struct SSyncFSM* pFsm, void* pReader) { return 0; } int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len) { return 0; } -int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void** ppWriter) { return 0; } +int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void *pParam, void** ppWriter) { return 0; } int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply) { return 0; } int32_t SnapshotDoWrite(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len) { return 0; } diff --git a/source/libs/sync/test/syncSnapshotSenderTest.cpp b/source/libs/sync/test/syncSnapshotSenderTest.cpp index dc38cd3df5..8d1f83b3b1 100644 --- a/source/libs/sync/test/syncSnapshotSenderTest.cpp +++ b/source/libs/sync/test/syncSnapshotSenderTest.cpp @@ -25,7 +25,7 @@ void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) int32_t GetSnapshot(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; } -int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) { return 0; } +int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void* pParam, void** ppReader) { return 0; } int32_t SnapshotStopRead(struct SSyncFSM* pFsm, void* pReader) { return 0; } int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len) { return 0; } diff --git a/source/libs/sync/test/syncTestTool.cpp b/source/libs/sync/test/syncTestTool.cpp index 4e84ff0f7e..c6d3a3e4af 100644 --- a/source/libs/sync/test/syncTestTool.cpp +++ b/source/libs/sync/test/syncTestTool.cpp @@ -74,7 +74,7 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; } -int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) { +int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void* pParam, void** ppReader) { *ppReader = (void*)0xABCD; char 
logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartRead== pFsm:%p, *ppReader:%p", pFsm, *ppReader); @@ -111,7 +111,7 @@ int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32 return 0; } -int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void** ppWriter) { +int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void *pParam, void** ppWriter) { *ppWriter = (void*)0xCDEF; char logBuf[256] = {0}; @@ -203,7 +203,7 @@ SWal* createWal(char* path, int32_t vgId) { } int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path, bool isStandBy, - bool enableSnapshot) { + ESyncStrategy enableSnapshot) { SSyncInfo syncInfo; syncInfo.vgId = vgId; syncInfo.msgcb = &gSyncIO->msgcb; @@ -213,7 +213,7 @@ int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex); syncInfo.pWal = pWal; syncInfo.isStandBy = isStandBy; - syncInfo.snapshotEnable = enableSnapshot; + syncInfo.snapshotStrategy = enableSnapshot; SSyncCfg* pCfg = &syncInfo.syncCfg; @@ -316,7 +316,7 @@ int main(int argc, char** argv) { int32_t replicaNum = atoi(argv[1]); int32_t myIndex = atoi(argv[2]); - bool enableSnapshot = atoi(argv[3]); + ESyncStrategy enableSnapshot = (ESyncStrategy)atoi(argv[3]); int32_t lastApplyIndex = atoi(argv[4]); int32_t lastApplyTerm = atoi(argv[5]); int32_t writeRecordNum = atoi(argv[6]); diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 05fe62762a..5d51a031bf 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -40,8 +40,8 @@ struct SBTree { #define TDB_BTREE_PAGE_IS_ROOT(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_ROOT) #define TDB_BTREE_PAGE_IS_LEAF(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_LEAF) #define TDB_BTREE_PAGE_IS_OVFL(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_OVFL) -#define TDB_BTREE_ASSERT_FLAG(flags) \ - ASSERT(TDB_FLAG_IS(flags, TDB_BTREE_ROOT) || TDB_FLAG_IS(flags, TDB_BTREE_LEAF) || \ +#define TDB_BTREE_ASSERT_FLAG(flags) \ + ASSERT(TDB_FLAG_IS(flags, TDB_BTREE_ROOT) || TDB_FLAG_IS(flags, TDB_BTREE_LEAF) || \ TDB_FLAG_IS(flags, TDB_BTREE_ROOT | TDB_BTREE_LEAF) || TDB_FLAG_IS(flags, 0) || \ TDB_FLAG_IS(flags, TDB_BTREE_OVFL)) @@ -58,7 +58,7 @@ typedef struct { static int tdbDefaultKeyCmprFn(const void *pKey1, int keyLen1, const void *pKey2, int keyLen2); static int tdbBtreeOpenImpl(SBTree *pBt); -//static int tdbBtreeInitPage(SPage *pPage, void *arg, int init); +// static int tdbBtreeInitPage(SPage *pPage, void *arg, int init); static int tdbBtreeEncodeCell(SPage *pPage, const void *pKey, int kLen, const void *pVal, int vLen, SCell *pCell, int *szCell, TXN *pTxn, SBTree *pBt); static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pDecoder, TXN *pTxn, SBTree *pBt); @@ -321,7 +321,7 @@ static int tdbBtreeOpenImpl(SBTree *pBt) { { // 1. 
TODO: Search the main DB to check if the DB exists - ret = tdbPagerOpenDB(pBt->pPager, &pgno, true); + ret = tdbPagerOpenDB(pBt->pPager, &pgno, true, pBt); ASSERT(ret == 0); } @@ -721,7 +721,8 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx int szNewCell; SPgno pgno; pgno = TDB_PAGE_PGNO(pNews[iNew]); - tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn, pBt); + tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn, + pBt); tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0); tdbOsFree(pNewCell); } @@ -916,10 +917,10 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const int surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - sizeof(SPgno)); int nLocal = surplus <= maxLocal ? surplus : minLocal; - //int ofpCap = tdbPageCapacity(pBt->pageSize, sizeof(SIntHdr)); + // int ofpCap = tdbPageCapacity(pBt->pageSize, sizeof(SIntHdr)); // fetch a new ofp and make it dirty - SPgno pgno = 0; + SPgno pgno = 0; SPage *ofp, *nextOfp; ret = tdbFetchOvflPage(&pgno, &ofp, pTxn, pBt); @@ -942,8 +943,8 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const nLeft -= kLen; // pack partial val to local if any space left if (nLocal > kLen + 4) { - memcpy(pCell + nHeader + kLen, pVal, nLocal - kLen - sizeof(SPgno)); - nLeft -= nLocal - kLen - sizeof(SPgno); + memcpy(pCell + nHeader + kLen, pVal, nLocal - kLen - sizeof(SPgno)); + nLeft -= nLocal - kLen - sizeof(SPgno); } // pack nextPgno @@ -951,132 +952,132 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const // pack left val data to ovpages do { - lastPage = 0; - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + lastPage = 0; + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - // fetch next ofp if not last page - if (!lastPage) { - // fetch a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } - } else { - pgno = 0; - } + // fetch next ofp if not last page + if (!lastPage) { + // fetch a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } + } else { + pgno = 0; + } - memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); - memcpy(pBuf + bytes, &pgno, sizeof(pgno)); + memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); + memcpy(pBuf + bytes, &pgno, sizeof(pgno)); - ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } + ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } - ofp = nextOfp; - nLeft -= bytes; + ofp = nextOfp; + nLeft -= bytes; } while (nLeft > 0); } else { int nLeftKey = kLen; // pack partial key and nextPgno memcpy(pCell + nHeader, pKey, nLocal - 4); nLeft -= nLocal - 4; - nLeftKey -= nLocal -4; + nLeftKey -= nLocal - 4; memcpy(pCell + nHeader + nLocal - 4, &pgno, sizeof(pgno)); int lastKeyPageSpace = 0; // pack left key & val to ovpages do { - // cal key to cpy - int lastKeyPage = 0; - if (nLeftKey <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeftKey; - lastKeyPage = 1; - lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; - } else { - bytes = 
ofp->maxLocal - sizeof(SPgno); - } + // cal key to cpy + int lastKeyPage = 0; + if (nLeftKey <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeftKey; + lastKeyPage = 1; + lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - // cpy key - memcpy(pBuf, ((SCell *)pKey) + kLen - nLeftKey, bytes); + // cpy key + memcpy(pBuf, ((SCell *)pKey) + kLen - nLeftKey, bytes); - if (lastKeyPage) { - if (lastKeyPageSpace >= vLen) { - memcpy(pBuf + kLen -nLeftKey, pVal, vLen); + if (lastKeyPage) { + if (lastKeyPageSpace >= vLen) { + memcpy(pBuf + kLen - nLeftKey, pVal, vLen); - nLeft -= vLen; - pgno = 0; - } else { - memcpy(pBuf + kLen -nLeftKey, pVal, lastKeyPageSpace); - nLeft -= lastKeyPageSpace; + nLeft -= vLen; + pgno = 0; + } else { + memcpy(pBuf + kLen - nLeftKey, pVal, lastKeyPageSpace); + nLeft -= lastKeyPageSpace; - // fetch next ofp, a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - return -1; - } - } - } else { - // fetch next ofp, a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - return -1; - } - } + // fetch next ofp, a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + return -1; + } + } + } else { + // fetch next ofp, a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + return -1; + } + } - memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno)); + memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno)); - ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); - if (ret < 0) { - return -1; - } + ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); + if (ret < 0) { + return -1; + } - ofp = nextOfp; - nLeftKey -= bytes; - nLeft -= bytes; + ofp = nextOfp; + nLeftKey -= bytes; + nLeft -= bytes; } while (nLeftKey > 0); while (nLeft > 0) { - // pack left val data to ovpages - lastPage = 0; - if (nLeft <= maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = maxLocal - sizeof(SPgno); - } + // pack left val data to ovpages + lastPage = 0; + if (nLeft <= maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = maxLocal - sizeof(SPgno); + } - // fetch next ofp if not last page - if (!lastPage) { - // fetch a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } - } else { - pgno = 0; - } + // fetch next ofp if not last page + if (!lastPage) { + // fetch a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } + } else { + pgno = 0; + } - memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); - memcpy(pBuf + bytes, &pgno, sizeof(pgno)); + memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); + memcpy(pBuf + bytes, &pgno, sizeof(pgno)); - ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } + ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } - ofp = nextOfp; - nLeft -= bytes; + ofp = nextOfp; + nLeft -= bytes; } } @@ -1142,7 +1143,8 @@ static int tdbBtreeEncodeCell(SPage *pPage, const void *pKey, int kLen, const vo return 0; } -static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn, SBTree *pBt) { +static int tdbBtreeDecodePayload(SPage *pPage, const SCell 
*pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn, + SBTree *pBt) { int ret = 0; int nPayload; int maxLocal = pPage->maxLocal; @@ -1171,149 +1173,149 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, int surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - sizeof(SPgno)); int nLocal = surplus <= maxLocal ? surplus : minLocal; - int nLeft = nPayload; - SPgno pgno = 0; + int nLeft = nPayload; + SPgno pgno = 0; SPage *ofp; SCell *ofpCell; - int bytes; - int lastPage = 0; + int bytes; + int lastPage = 0; if (nLocal >= pDecoder->kLen + 4) { pDecoder->pKey = (SCell *)pCell + nHeader; nLeft -= kLen; if (nLocal > kLen + 4) { - // read partial val to local - pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); - if (pDecoder->pVal == NULL) { - return -1; - } - TDB_CELLDECODER_SET_FREE_VAL(pDecoder); + // read partial val to local + pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); + if (pDecoder->pVal == NULL) { + return -1; + } + TDB_CELLDECODER_SET_FREE_VAL(pDecoder); - memcpy(pDecoder->pVal, pCell + nHeader + kLen, nLocal - kLen - sizeof(SPgno)); + memcpy(pDecoder->pVal, pCell + nHeader + kLen, nLocal - kLen - sizeof(SPgno)); - nLeft -= nLocal - kLen - sizeof(SPgno); + nLeft -= nLocal - kLen - sizeof(SPgno); } memcpy(&pgno, pCell + nHeader + nPayload - nLeft, sizeof(pgno)); // unpack left val data from ovpages - while (pgno != 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + while (pgno != 0) { + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - ofpCell = tdbPageGetCell(ofp, 0); + ofpCell = tdbPageGetCell(ofp, 0); - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - memcpy(pDecoder->pVal + vLen - nLeft, ofpCell, bytes); - nLeft -= bytes; + memcpy(pDecoder->pVal + vLen - nLeft, ofpCell, bytes); + nLeft -= bytes; - memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); } } else { int nLeftKey = kLen; // load partial key and nextPgno pDecoder->pKey = tdbRealloc(pDecoder->pKey, kLen); if (pDecoder->pKey == NULL) { - return -1; + return -1; } TDB_CELLDECODER_SET_FREE_KEY(pDecoder); memcpy(pDecoder->pKey, pCell + nHeader, nLocal - 4); nLeft -= nLocal - 4; - nLeftKey -= nLocal -4; + nLeftKey -= nLocal - 4; memcpy(&pgno, pCell + nHeader + nLocal - 4, sizeof(pgno)); int lastKeyPageSpace = 0; // load left key & val to ovpages while (pgno != 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - ofpCell = tdbPageGetCell(ofp, 0); + ofpCell = tdbPageGetCell(ofp, 0); - int lastKeyPage = 0; - if (nLeftKey <= maxLocal - sizeof(SPgno)) { - bytes = nLeftKey; - lastKeyPage = 1; - lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + int lastKeyPage = 0; + if (nLeftKey <= maxLocal - sizeof(SPgno)) { + bytes = nLeftKey; + lastKeyPage = 1; + lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - // cpy key - memcpy(pDecoder->pKey + kLen - nLeftKey, ofpCell, bytes); + // cpy key + memcpy(pDecoder->pKey + kLen - nLeftKey, ofpCell, bytes); - if (lastKeyPage) { - if (lastKeyPageSpace >= vLen) { - 
pDecoder->pVal = ofpCell + kLen -nLeftKey; + if (lastKeyPage) { + if (lastKeyPageSpace >= vLen) { + pDecoder->pVal = ofpCell + kLen - nLeftKey; - nLeft -= vLen; - pgno = 0; - } else { - // read partial val to local - pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); - if (pDecoder->pVal == NULL) { - return -1; - } - TDB_CELLDECODER_SET_FREE_VAL(pDecoder); + nLeft -= vLen; + pgno = 0; + } else { + // read partial val to local + pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); + if (pDecoder->pVal == NULL) { + return -1; + } + TDB_CELLDECODER_SET_FREE_VAL(pDecoder); - memcpy(pDecoder->pVal, ofpCell + kLen -nLeftKey, lastKeyPageSpace); - nLeft -= lastKeyPageSpace; - } - } + memcpy(pDecoder->pVal, ofpCell + kLen - nLeftKey, lastKeyPageSpace); + nLeft -= lastKeyPageSpace; + } + } - memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); - nLeftKey -= bytes; - nLeft -= bytes; + nLeftKey -= bytes; + nLeft -= bytes; } while (nLeft > 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - ofpCell = tdbPageGetCell(ofp, 0); + ofpCell = tdbPageGetCell(ofp, 0); - // load left val data to ovpages - lastPage = 0; - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + // load left val data to ovpages + lastPage = 0; + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - if (lastPage) { - pgno = 0; - } + if (lastPage) { + pgno = 0; + } - if (!pDecoder->pVal) { - pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); - if (pDecoder->pVal == NULL) { - return -1; - } - TDB_CELLDECODER_SET_FREE_VAL(pDecoder); - } + if (!pDecoder->pVal) { + pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); + if (pDecoder->pVal == NULL) { + return -1; + } + TDB_CELLDECODER_SET_FREE_VAL(pDecoder); + } - memcpy(pDecoder->pVal, ofpCell + vLen - nLeft, bytes); - nLeft -= bytes; + memcpy(pDecoder->pVal, ofpCell + vLen - nLeft, bytes); + nLeft -= bytes; - memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno)); - nLeft -= bytes; + nLeft -= bytes; } } } @@ -1404,31 +1406,31 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN * // free ofp pages' cells if (dropOfp) { - int ret = 0; - SPgno pgno = *(SPgno *) (pCell + nHeader + nLocal - sizeof(SPgno)); - int nLeft = nPayload - nLocal + sizeof(SPgno); + int ret = 0; + SPgno pgno = *(SPgno *)(pCell + nHeader + nLocal - sizeof(SPgno)); + int nLeft = nPayload - nLocal + sizeof(SPgno); SPage *ofp; - int bytes; + int bytes; while (pgno != 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - SCell *ofpCell = tdbPageGetCell(ofp, 0); + SCell *ofpCell = tdbPageGetCell(ofp, 0); - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); - tdbPagerReturnPage(pPage->pPager, ofp, pTxn); + tdbPagerReturnPage(pPage->pPager, ofp, pTxn); - nLeft -= bytes; + nLeft -= bytes; } } @@ -1932,7 +1934,7 @@ int 
tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int // alloc space szBuf = kLen + nData + 14; - pBuf = tdbRealloc(pBtc->pBt->pBuf, pBtc->pBt->pageSize > szBuf ? szBuf : pBtc->pBt->pageSize); + pBuf = tdbRealloc(pBtc->pBt->pBuf, pBtc->pBt->pageSize > szBuf ? szBuf : pBtc->pBt->pageSize); if (pBuf == NULL) { ASSERT(0); return -1; diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index dd3f09d5d2..892a913773 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -98,7 +98,7 @@ int tdbPagerClose(SPager *pPager) { return 0; } -int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) { +int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt) { SPgno pgno; SPage *pPage; int ret; @@ -110,25 +110,41 @@ int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) { } { - // TODO: try to search the main DB to get the page number - // pgno = 0; + // TODO: try to search the main DB to get the page number + // pgno = 0; } - // if (pgno == 0 && toCreate) { - // ret = tdbPagerAllocPage(pPager, &pPage, &pgno); - // if (ret < 0) { - // return -1; - // } + if (pgno == 0 && toCreate) { + // allocate a new child page + TXN txn; + tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0); - // // TODO: Need to zero the page + pPager->inTran = 1; - // ret = tdbPagerWrite(pPager, pPage); - // if (ret < 0) { - // return -1; - // } - // } + SBtreeInitPageArg zArg; + zArg.flags = 0x1 | 0x2; // root leaf node; + zArg.pBt = pBt; + ret = tdbPagerFetchPage(pPager, &pgno, &pPage, tdbBtreeInitPage, &zArg, &txn); + if (ret < 0) { + return -1; + } - *ppgno = pgno; + // ret = tdbPagerAllocPage(pPager, &pPage, &pgno); + // if (ret < 0) { + // return -1; + //} + + // TODO: Need to zero the page + + ret = tdbPagerWrite(pPager, pPage); + if (ret < 0) { + return -1; + } + + tdbTxnClose(&txn); + } + + *ppgno = pgno; return 0; } @@ -427,9 +443,9 @@ static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage) { } int tdbPagerRestore(SPager *pPager, SBTree *pBt) { - int ret = 0; + int ret = 0; SPgno journalSize = 0; - u8 *pageBuf = NULL; + u8 *pageBuf = NULL; tdb_fd_t jfd = tdbOsOpen(pPager->jFileName, TDB_O_RDWR, 0755); if (jfd == NULL) { @@ -454,7 +470,7 @@ int tdbPagerRestore(SPager *pPager, SBTree *pBt) { for (int pgIndex = 0; pgIndex < journalSize; ++pgIndex) { // read pgno & the page from journal - SPgno pgno; + SPgno pgno; SPage *pPage; int ret = tdbOsRead(jfd, &pgno, sizeof(pgno)); diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h index c914e098f7..2713a12af3 100644 --- a/source/libs/tdb/src/inc/tdbInt.h +++ b/source/libs/tdb/src/inc/tdbInt.h @@ -128,13 +128,13 @@ typedef struct SBtInfo { #define TDB_CELLDECODER_FREE_VAL(pCellDecoder) ((pCellDecoder)->freeKV & TDB_CELLD_F_VAL) typedef struct { - int kLen; - u8 *pKey; - int vLen; - u8 *pVal; - SPgno pgno; - u8 *pBuf; - u8 freeKV; + int kLen; + u8 *pKey; + int vLen; + u8 *pVal; + SPgno pgno; + u8 *pBuf; + u8 freeKV; } SCellDecoder; struct SBTC { @@ -184,7 +184,7 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager); int tdbPagerClose(SPager *pPager); -int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate); +int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt); int tdbPagerWrite(SPager *pPager, SPage *pPage); int tdbPagerBegin(SPager *pPager, TXN *pTxn); int tdbPagerCommit(SPager 
*pPager, TXN *pTxn); @@ -192,7 +192,7 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initP TXN *pTxn); void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn); int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno); -int tdbPagerRestore(SPager *pPager, SBTree *pBt); +int tdbPagerRestore(SPager *pPager, SBTree *pBt); // tdbPCache.c ==================================== #define TDB_PCACHE_PAGE \ @@ -314,19 +314,18 @@ static inline int tdbTryLockPage(tdb_spinlock_t *pLock) { #define TDB_TRY_LOCK_PAGE(pPage) tdbTryLockPage(&((pPage)->lock)) // APIs -#define TDB_PAGE_TOTAL_CELLS(pPage) ((pPage)->nOverflow + (pPage)->pPageMethods->getCellNum(pPage)) -#define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx) -#define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage) -#define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno) -#define TDB_BYTES_CELL_TAKEN(pPage, pCell) ((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset) -#define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset) +#define TDB_PAGE_TOTAL_CELLS(pPage) ((pPage)->nOverflow + (pPage)->pPageMethods->getCellNum(pPage)) +#define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx) +#define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage) +#define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno) +#define TDB_BYTES_CELL_TAKEN(pPage, pCell) \ + ((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset) +#define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset) int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t), void *arg); int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg); -void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, - TXN *, SBTree *pBt)); -void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, - TXN *, SBTree *pBt)); +void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)); +void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)); int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl); int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt); int tdbPageUpdateCell(SPage *pPage, int idx, SCell *pCell, int szCell, TXN *pTxn, SBTree *pBt); diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 2d3d41de64..62aec219df 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -291,7 +291,7 @@ int32_t tfsRmdir(STfs *pTfs, const char *rname) { for (int32_t id = 0; id < pTier->ndisk; id++) { STfsDisk *pDisk = pTier->disks[id]; snprintf(aname, TMPNAME_LEN, "%s%s%s", pDisk->path, TD_DIRSEP, rname); - uInfo("====> tfs remove dir : path:%s aname:%s rname:[%s]", pDisk->path, aname, rname); + uInfo("tfs remove dir : path:%s aname:%s rname:[%s]", pDisk->path, aname, rname); taosRemoveDir(aname); } } diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index b06d541b75..f699df6883 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -253,7 +253,7 @@ int transAsyncSend(SAsyncPool* pool, queue* mq); do { \ if (id > 0) { \ tTrace("handle step1"); \ - SExHandle* exh2 = transAcquireExHandle(id); \ + SExHandle* exh2 = transAcquireExHandle(transGetRefMgt(), id); \ if (exh2 == 
NULL || id != exh2->refId) { \ tTrace("handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, \ exh2 ? exh2->refId : 0, id); \ @@ -261,7 +261,7 @@ int transAsyncSend(SAsyncPool* pool, queue* mq); } \ } else if (id == 0) { \ tTrace("handle step2"); \ - SExHandle* exh2 = transAcquireExHandle(id); \ + SExHandle* exh2 = transAcquireExHandle(transGetRefMgt(), id); \ if (exh2 == NULL || id == exh2->refId) { \ tTrace("handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, id, \ exh2 ? exh2->refId : 0); \ @@ -391,13 +391,16 @@ void transThreadOnce(); void transInit(); void transCleanup(); -int32_t transOpenExHandleMgt(int size); -void transCloseExHandleMgt(); -int64_t transAddExHandle(void* p); -int32_t transRemoveExHandle(int64_t refId); -SExHandle* transAcquireExHandle(int64_t refId); -int32_t transReleaseExHandle(int64_t refId); -void transDestoryExHandle(void* handle); +int32_t transOpenRefMgt(int size, void (*func)(void*)); +void transCloseRefMgt(int32_t refMgt); +int64_t transAddExHandle(int32_t refMgt, void* p); +int32_t transRemoveExHandle(int32_t refMgt, int64_t refId); +void* transAcquireExHandle(int32_t refMgt, int64_t refId); +int32_t transReleaseExHandle(int32_t refMgt, int64_t refId); +void transDestoryExHandle(void* handle); + +int32_t transGetRefMgt(); +int32_t transGetInstMgt(); #ifdef __cplusplus } diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 462debb247..5fb2980ceb 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -57,7 +57,7 @@ typedef struct { void* parent; void* tcphandle; // returned handle from TCP initialization - int32_t refMgt; + int64_t refId; TdThreadMutex mutex; } SRpcInfo; diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 936cfe870d..48e7d7c91d 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -76,16 +76,23 @@ void* rpcOpen(const SRpcInit* pInit) { if (pInit->user) { memcpy(pRpc->user, pInit->user, strlen(pInit->user)); } - return pRpc; + + int64_t refId = transAddExHandle(transGetInstMgt(), pRpc); + transAcquireExHandle(transGetInstMgt(), refId); + pRpc->refId = refId; + return (void*)refId; } void rpcClose(void* arg) { tInfo("start to close rpc"); + transRemoveExHandle(transGetInstMgt(), (int64_t)arg); + transReleaseExHandle(transGetInstMgt(), (int64_t)arg); + tInfo("finish to close rpc"); + return; +} +void rpcCloseImpl(void* arg) { SRpcInfo* pRpc = (SRpcInfo*)arg; (*taosCloseHandle[pRpc->connType])(pRpc->tcphandle); taosMemoryFree(pRpc); - tInfo("finish to close rpc"); - - return; } void* rpcMallocCont(int32_t contLen) { @@ -140,11 +147,10 @@ void rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp) { transSendRecv(shandle, pEpSet, pMsg, pRsp); } -void rpcSendResponse(const SRpcMsg* pMsg) { transSendResponse(pMsg); } +void rpcSendResponse(const SRpcMsg* pMsg) { transSendResponse(pMsg); } int32_t rpcGetConnInfo(void* thandle, SRpcConnInfo* pInfo) { return 0; } - void rpcRefHandle(void* handle, int8_t type) { assert(type == TAOS_CONN_SERVER || type == TAOS_CONN_CLIENT); (*taosRefHandle[type])(handle); diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index a21e97d049..8b771f6f8a 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -47,6 +47,7 @@ typedef struct SCliMsg { queue q; STransMsgType type; + 
int64_t refId; uint64_t st; int sent; //(0: no send, 1: alread sent) } SCliMsg; @@ -505,13 +506,13 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) { } static void allocConnRef(SCliConn* conn, bool update) { if (update) { - transRemoveExHandle(conn->refId); + transRemoveExHandle(transGetRefMgt(), conn->refId); conn->refId = -1; } SExHandle* exh = taosMemoryCalloc(1, sizeof(SExHandle)); exh->handle = conn; exh->pThrd = conn->hostThrd; - exh->refId = transAddExHandle(exh); + exh->refId = transAddExHandle(transGetRefMgt(), exh); conn->refId = exh->refId; } static void addConnToPool(void* pool, SCliConn* conn) { @@ -604,16 +605,14 @@ static void cliDestroyConn(SCliConn* conn, bool clear) { tTrace("%s conn %p remove from conn pool", CONN_GET_INST_LABEL(conn), conn); QUEUE_REMOVE(&conn->conn); QUEUE_INIT(&conn->conn); - transRemoveExHandle(conn->refId); + transRemoveExHandle(transGetRefMgt(), conn->refId); conn->refId = -1; if (clear) { if (!uv_is_closing((uv_handle_t*)conn->stream)) { + uv_read_stop(conn->stream); uv_close((uv_handle_t*)conn->stream, cliDestroy); } - //} else { - // cliDestroy((uv_handle_t*)conn->stream); - //} } } static void cliDestroy(uv_handle_t* handle) { @@ -622,7 +621,7 @@ static void cliDestroy(uv_handle_t* handle) { } SCliConn* conn = handle->data; - transRemoveExHandle(conn->refId); + transRemoveExHandle(transGetRefMgt(), conn->refId); taosMemoryFree(conn->ip); conn->stream->data = NULL; taosMemoryFree(conn->stream); @@ -638,7 +637,6 @@ static bool cliHandleNoResp(SCliConn* conn) { SCliMsg* pMsg = transQueueGet(&conn->cliMsgs, 0); if (REQUEST_NO_RESP(&pMsg->msg)) { transQueuePop(&conn->cliMsgs); - // taosArrayRemove(msgs, 0); destroyCmsg(pMsg); res = true; } @@ -749,7 +747,7 @@ static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd) { } static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) { int64_t refId = (int64_t)(pMsg->msg.info.handle); - SExHandle* exh = transAcquireExHandle(refId); + SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId); if (exh == NULL) { tDebug("%" PRId64 " already release", refId); } @@ -775,14 +773,14 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) { SCliConn* conn = NULL; int64_t refId = (int64_t)(pMsg->msg.info.handle); if (refId != 0) { - SExHandle* exh = transAcquireExHandle(refId); + SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId); if (exh == NULL) { *ignore = true; destroyCmsg(pMsg); return NULL; } else { conn = exh->handle; - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); } return conn; }; @@ -984,6 +982,7 @@ void cliSendQuit(SCliThrd* thrd) { } void cliWalkCb(uv_handle_t* handle, void* arg) { if (!uv_is_closing(handle)) { + uv_read_stop((uv_stream_t*)handle); uv_close(handle, cliDestroy); } } @@ -1073,7 +1072,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { if (retry) { pMsg->sent = 0; pCtx->retryCnt += 1; - if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_RPC_BROKEN_LINK) { cliCompareAndSwap(&pCtx->retryLimit, TRANS_RETRY_COUNT_LIMIT, EPSET_GET_SIZE(&pCtx->epSet) * 3); if (pCtx->retryCnt < pCtx->retryLimit) { transUnrefCliHandle(pConn); @@ -1161,12 +1160,12 @@ void transUnrefCliHandle(void* handle) { } SCliThrd* transGetWorkThrdFromHandle(int64_t handle) { SCliThrd* pThrd = NULL; - SExHandle* exh = transAcquireExHandle(handle); + SExHandle* exh = transAcquireExHandle(transGetRefMgt(), handle); if (exh == NULL) { return NULL; } pThrd = 
exh->pThrd; - transReleaseExHandle(handle); + transReleaseExHandle(transGetRefMgt(), handle); return pThrd; } SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle) { @@ -1193,10 +1192,13 @@ void transReleaseCliHandle(void* handle) { } void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx) { - STrans* pTransInst = (STrans*)shandle; + STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle); + if (pTransInst == NULL) return; + SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle); if (pThrd == NULL) { transFreeMsg(pReq->pCont); + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return; } @@ -1217,19 +1219,24 @@ void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STra cliMsg->msg = *pReq; cliMsg->st = taosGetTimestampUs(); cliMsg->type = Normal; + cliMsg->refId = (int64_t)shandle; STraceId* trace = &pReq->info.traceId; tGTrace("%s send request at thread:%08" PRId64 ", dst: %s:%d, app:%p", transLabel(pTransInst), pThrd->pid, EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->info.ahandle); ASSERT(transAsyncSend(pThrd->asyncPool, &(cliMsg->q)) == 0); + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return; } void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMsg* pRsp) { - STrans* pTransInst = (STrans*)shandle; + STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle); + if (pTransInst == NULL) return; + SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle); if (pThrd == NULL) { transFreeMsg(pReq->pCont); + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return; } tsem_t* sem = taosMemoryCalloc(1, sizeof(tsem_t)); @@ -1250,6 +1257,7 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM cliMsg->msg = *pReq; cliMsg->st = taosGetTimestampUs(); cliMsg->type = Normal; + cliMsg->refId = (int64_t)shandle; STraceId* trace = &pReq->info.traceId; tGTrace("%s send request at thread:%08" PRId64 ", dst: %s:%d, app:%p", transLabel(pTransInst), pThrd->pid, @@ -1259,13 +1267,16 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM tsem_wait(sem); tsem_destroy(sem); taosMemoryFree(sem); + + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return; } /* * **/ void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) { - STrans* pTransInst = shandle; + STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle); + if (pTransInst == NULL) return; SCvtAddr cvtAddr = {0}; if (ip != NULL && fqdn != NULL) { @@ -1280,11 +1291,13 @@ void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) { SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg)); cliMsg->ctx = pCtx; cliMsg->type = Update; + cliMsg->refId = (int64_t)shandle; SCliThrd* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[i]; tDebug("%s update epset at thread:%08" PRId64 "", pTransInst->label, thrd->pid); transAsyncSend(thrd->asyncPool, &(cliMsg->q)); } + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); } #endif diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 57549c59bb..5f6e3db615 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -19,6 +19,7 @@ static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; static int32_t refMgt; +static int32_t instMgt; int 
transAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey) { T_MD5_CTX context; @@ -479,53 +480,54 @@ bool transEpSetIsEqual(SEpSet* a, SEpSet* b) { } return true; } -static int32_t transGetRefMgt() { - // - return refMgt; -} static void transInitEnv() { - refMgt = transOpenExHandleMgt(50000); + refMgt = transOpenRefMgt(50000, transDestoryExHandle); + instMgt = taosOpenRef(50, rpcCloseImpl); uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1"); } static void transDestroyEnv() { - // close ref - transCloseExHandleMgt(); + transCloseRefMgt(refMgt); + transCloseRefMgt(instMgt); } void transInit() { // init env taosThreadOnce(&transModuleInit, transInitEnv); } + +int32_t transGetRefMgt() { return refMgt; } +int32_t transGetInstMgt() { return instMgt; } + void transCleanup() { // clean env transDestroyEnv(); } -int32_t transOpenExHandleMgt(int size) { +int32_t transOpenRefMgt(int size, void (*func)(void*)) { // added into once later - return taosOpenRef(size, transDestoryExHandle); + return taosOpenRef(size, func); } -void transCloseExHandleMgt() { +void transCloseRefMgt(int32_t mgt) { // close ref - taosCloseRef(transGetRefMgt()); + taosCloseRef(mgt); } -int64_t transAddExHandle(void* p) { +int64_t transAddExHandle(int32_t refMgt, void* p) { // acquire extern handle - return taosAddRef(transGetRefMgt(), p); + return taosAddRef(refMgt, p); } -int32_t transRemoveExHandle(int64_t refId) { +int32_t transRemoveExHandle(int32_t refMgt, int64_t refId) { // acquire extern handle - return taosRemoveRef(transGetRefMgt(), refId); + return taosRemoveRef(refMgt, refId); } -SExHandle* transAcquireExHandle(int64_t refId) { +void* transAcquireExHandle(int32_t refMgt, int64_t refId) { // acquire extern handle - return (SExHandle*)taosAcquireRef(transGetRefMgt(), refId); + return (void*)taosAcquireRef(refMgt, refId); } -int32_t transReleaseExHandle(int64_t refId) { +int32_t transReleaseExHandle(int32_t refMgt, int64_t refId) { // release extern handle - return taosReleaseRef(transGetRefMgt(), refId); + return taosReleaseRef(refMgt, refId); } void transDestoryExHandle(void* handle) { if (handle == NULL) { diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 58d6d53684..3f4a5628f9 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -240,18 +240,19 @@ static void uvHandleReq(SSvrConn* pConn) { tDebug("conn %p acquired by server app", pConn); } } + STrans* pTransInst = pConn->pTransInst; STraceId* trace = &pHead->traceId; if (pConn->status == ConnNormal && pHead->noResp == 0) { transRefSrvHandle(pConn); - tGTrace("%s conn %p %s received from %s:%d, local info: %s:%d, msg size: %d", transLabel(pConn), pConn, + tGTrace("%s conn %p %s received from %s:%d, local info: %s:%d, msg size: %d", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), taosInetNtoa(pConn->addr.sin_addr), ntohs(pConn->addr.sin_port), taosInetNtoa(pConn->localAddr.sin_addr), ntohs(pConn->localAddr.sin_port), transMsg.contLen); } else { - tGTrace("%s conn %p %s received from %s:%d, local info: %s:%d, msg size: %d, resp:%d, code: %d", transLabel(pConn), - pConn, TMSG_INFO(transMsg.msgType), taosInetNtoa(pConn->addr.sin_addr), ntohs(pConn->addr.sin_port), - taosInetNtoa(pConn->localAddr.sin_addr), ntohs(pConn->localAddr.sin_port), transMsg.contLen, pHead->noResp, - transMsg.code); + tGTrace("%s conn %p %s received from %s:%d, local info: %s:%d, msg size: %d, resp:%d, code: %d", + transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), 
taosInetNtoa(pConn->addr.sin_addr), + ntohs(pConn->addr.sin_port), taosInetNtoa(pConn->localAddr.sin_addr), ntohs(pConn->localAddr.sin_port), + transMsg.contLen, pHead->noResp, transMsg.code); // no ref here } @@ -260,12 +261,12 @@ static void uvHandleReq(SSvrConn* pConn) { // 2. once send out data, cli conn released to conn pool immediately // 3. not mixed with persist transMsg.info.ahandle = (void*)pHead->ahandle; - transMsg.info.handle = (void*)transAcquireExHandle(pConn->refId); + transMsg.info.handle = (void*)transAcquireExHandle(transGetRefMgt(), pConn->refId); transMsg.info.refId = pConn->refId; transMsg.info.traceId = pHead->traceId; - tGTrace("%s handle %p conn: %p translated to app, refId: %" PRIu64 "", transLabel(pConn), transMsg.info.handle, pConn, - pConn->refId); + tGTrace("%s handle %p conn: %p translated to app, refId: %" PRIu64 "", transLabel(pTransInst), transMsg.info.handle, + pConn, pConn->refId); assert(transMsg.info.handle != NULL); if (pHead->noResp == 1) { @@ -278,9 +279,8 @@ static void uvHandleReq(SSvrConn* pConn) { pConnInfo->clientPort = ntohs(pConn->addr.sin_port); tstrncpy(pConnInfo->user, pConn->user, sizeof(pConnInfo->user)); - transReleaseExHandle(pConn->refId); + transReleaseExHandle(transGetRefMgt(), pConn->refId); - STrans* pTransInst = pConn->pTransInst; (*pTransInst->cfp)(pTransInst->parent, &transMsg, NULL); // uv_timer_start(&pConn->pTimer, uvHandleActivityTimeout, pRpc->idleTime * 10000, 0); } @@ -289,14 +289,15 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { // opt SSvrConn* conn = cli->data; SConnBuffer* pBuf = &conn->readBuf; + STrans* pTransInst = conn->pTransInst; if (nread > 0) { pBuf->len += nread; - tTrace("%s conn %p total read: %d, current read: %d", transLabel(conn->pTransInst), conn, pBuf->len, (int)nread); + tTrace("%s conn %p total read: %d, current read: %d", transLabel(pTransInst), conn, pBuf->len, (int)nread); if (transReadComplete(pBuf)) { - tTrace("%s conn %p alread read complete packet", transLabel(conn->pTransInst), conn); + tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); uvHandleReq(conn); } else { - tTrace("%s conn %p read partial packet, continue to read", transLabel(conn->pTransInst), conn); + tTrace("%s conn %p read partial packet, continue to read", transLabel(pTransInst), conn); } return; } @@ -304,12 +305,12 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { return; } - tError("%s conn %p read error: %s", transLabel(conn->pTransInst), conn, uv_err_name(nread)); + tError("%s conn %p read error: %s", transLabel(pTransInst), conn, uv_err_name(nread)); if (nread < 0) { conn->broken = true; if (conn->status == ConnAcquire) { if (conn->regArg.init) { - tTrace("%s conn %p broken, notify server app", transLabel(conn->pTransInst), conn); + tTrace("%s conn %p broken, notify server app", transLabel(pTransInst), conn); STrans* pTransInst = conn->pTransInst; (*pTransInst->cfp)(pTransInst->parent, &(conn->regArg.msg), NULL); memset(&conn->regArg, 0, sizeof(conn->regArg)); @@ -401,8 +402,9 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { } else { pHead->msgType = pMsg->msgType; // set up resp msg type - if (pHead->msgType == 0 && transMsgLenFromCont(pMsg->contLen) == sizeof(STransMsgHead)) + if (pHead->msgType == 0 && transMsgLenFromCont(pMsg->contLen) == sizeof(STransMsgHead)) { pHead->msgType = pConn->inType + 1; + } } } @@ -412,8 +414,9 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { char* msg = (char*)pHead; int32_t len 
= transMsgLenFromCont(pMsg->contLen); + STrans* pTransInst = pConn->pTransInst; STraceId* trace = &pMsg->info.traceId; - tGTrace("%s conn %p %s is sent to %s:%d, local info: %s:%d, msglen:%d", transLabel(pConn->pTransInst), pConn, + tGTrace("%s conn %p %s is sent to %s:%d, local info: %s:%d, msglen:%d", transLabel(pTransInst), pConn, TMSG_INFO(pHead->msgType), taosInetNtoa(pConn->addr.sin_addr), ntohs(pConn->addr.sin_port), taosInetNtoa(pConn->localAddr.sin_addr), ntohs(pConn->localAddr.sin_port), len); pHead->msgLen = htonl(len); @@ -506,15 +509,15 @@ void uvWorkerAsyncCb(uv_async_t* handle) { SExHandle* exh1 = transMsg.info.handle; int64_t refId = transMsg.info.refId; - SExHandle* exh2 = transAcquireExHandle(refId); + SExHandle* exh2 = transAcquireExHandle(transGetRefMgt(), refId); if (exh2 == NULL || exh1 != exh2) { tTrace("handle except msg %p, ignore it", exh1); - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); destroySmsg(msg); continue; } msg->pConn = exh1->handle; - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); (*transAsyncHandle[msg->type])(msg, pThrd); } } @@ -756,12 +759,13 @@ static SSvrConn* createConn(void* hThrd) { SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle)); exh->handle = pConn; exh->pThrd = pThrd; - exh->refId = transAddExHandle(exh); - transAcquireExHandle(exh->refId); + exh->refId = transAddExHandle(transGetRefMgt(), exh); + transAcquireExHandle(transGetRefMgt(), exh->refId); + STrans* pTransInst = pThrd->pTransInst; pConn->refId = exh->refId; transRefSrvHandle(pConn); - tTrace("%s handle %p, conn %p created, refId: %" PRId64 "", transLabel(pThrd->pTransInst), exh, pConn, pConn->refId); + tTrace("%s handle %p, conn %p created, refId: %" PRId64 "", transLabel(pTransInst), exh, pConn, pConn->refId); return pConn; } @@ -788,14 +792,14 @@ static void destroyConnRegArg(SSvrConn* conn) { } } static int reallocConnRef(SSvrConn* conn) { - transReleaseExHandle(conn->refId); - transRemoveExHandle(conn->refId); + transReleaseExHandle(transGetRefMgt(), conn->refId); + transRemoveExHandle(transGetRefMgt(), conn->refId); // avoid app continue to send msg on invalid handle SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle)); exh->handle = conn; exh->pThrd = conn->hostThrd; - exh->refId = transAddExHandle(exh); - transAcquireExHandle(exh->refId); + exh->refId = transAddExHandle(transGetRefMgt(), exh); + transAcquireExHandle(transGetRefMgt(), exh->refId); conn->refId = exh->refId; return 0; @@ -807,10 +811,16 @@ static void uvDestroyConn(uv_handle_t* handle) { } SWorkThrd* thrd = conn->hostThrd; - transReleaseExHandle(conn->refId); - transRemoveExHandle(conn->refId); + transReleaseExHandle(transGetRefMgt(), conn->refId); + transRemoveExHandle(transGetRefMgt(), conn->refId); - tDebug("%s conn %p destroy", transLabel(thrd->pTransInst), conn); + STrans* pTransInst = thrd->pTransInst; + tDebug("%s conn %p destroy", transLabel(pTransInst), conn); + + for (int i = 0; i < transQueueSize(&conn->srvMsgs); i++) { + SSvrMsg* msg = transQueueGet(&conn->srvMsgs, i); + destroySmsg(msg); + } transQueueDestroy(&conn->srvMsgs); QUEUE_REMOVE(&conn->queue); @@ -1045,11 +1055,11 @@ void transReleaseSrvHandle(void* handle) { tTrace("%s conn %p start to release", transLabel(pThrd->pTransInst), exh->handle); transAsyncSend(pThrd->asyncPool, &m->q); - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); return; _return1: tTrace("handle %p failed to send to release handle", exh); - transReleaseExHandle(refId); + 
transReleaseExHandle(transGetRefMgt(), refId); return; _return2: tTrace("handle %p failed to send to release handle", exh); @@ -1074,12 +1084,12 @@ void transSendResponse(const STransMsg* msg) { STraceId* trace = (STraceId*)&msg->info.traceId; tGTrace("conn %p start to send resp (1/2)", exh->handle); transAsyncSend(pThrd->asyncPool, &m->q); - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); return; _return1: tTrace("handle %p failed to send resp", exh); rpcFreeCont(msg->pCont); - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); return; _return2: tTrace("handle %p failed to send resp", exh); @@ -1101,15 +1111,16 @@ void transRegisterMsg(const STransMsg* msg) { m->msg = tmsg; m->type = Register; - tTrace("%s conn %p start to register brokenlink callback", transLabel(pThrd->pTransInst), exh->handle); + STrans* pTransInst = pThrd->pTransInst; + tTrace("%s conn %p start to register brokenlink callback", transLabel(pTransInst), exh->handle); transAsyncSend(pThrd->asyncPool, &m->q); - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); return; _return1: tTrace("handle %p failed to register brokenlink", exh); rpcFreeCont(msg->pCont); - transReleaseExHandle(refId); + transReleaseExHandle(transGetRefMgt(), refId); return; _return2: tTrace("handle %p failed to register brokenlink", exh); diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 9245c03826..27f12259bc 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -332,21 +332,25 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog terrno = TSDB_CODE_WAL_SIZE_LIMIT; return -1; } + taosThreadMutexLock(&pWal->mutex); if (index == pWal->vers.lastVer + 1) { if (taosArrayGetSize(pWal->fileInfoSet) == 0) { pWal->vers.firstVer = index; if (walRoll(pWal) < 0) { + taosThreadMutexUnlock(&pWal->mutex); return -1; } } else { int64_t passed = walGetSeq() - pWal->lastRollSeq; if (pWal->cfg.rollPeriod != -1 && pWal->cfg.rollPeriod != 0 && passed > pWal->cfg.rollPeriod) { if (walRoll(pWal) < 0) { + taosThreadMutexUnlock(&pWal->mutex); return -1; } } else if (pWal->cfg.segSize != -1 && pWal->cfg.segSize != 0 && walGetLastFileSize(pWal) > pWal->cfg.segSize) { if (walRoll(pWal) < 0) { + taosThreadMutexUnlock(&pWal->mutex); return -1; } } @@ -355,6 +359,7 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog // reject skip log or rewrite log // must truncate explicitly first terrno = TSDB_CODE_WAL_INVALID_VER; + taosThreadMutexUnlock(&pWal->mutex); return -1; } @@ -362,8 +367,6 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog ASSERT(pWal->writeCur >= 0); - taosThreadMutexLock(&pWal->mutex); - if (pWal->pWriteIdxTFile == NULL || pWal->pWriteLogTFile == NULL) { walSetWrite(pWal); taosLSeekFile(pWal->pWriteLogTFile, 0, SEEK_END); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 923ff1878c..3ee2d83f6f 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -177,6 +177,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, "Dnode already exists" TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, "Dnode does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DNODES, "Too many dnodes") TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, "Out of dnodes") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE, "No enough memory in dnode") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_CFG, "Cluster cfg 
inconsistent") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_ID, "Cluster id not match") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG, "Invalid dnode cfg") @@ -433,6 +434,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_RECONFIG_NOT_READY, "Sync not ready for re TAOS_DEFINE_ERROR(TSDB_CODE_SYN_PROPOSE_NOT_READY, "Sync not ready for propose") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_STANDBY_NOT_READY, "Sync not ready for standby") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_BATCH_ERROR, "Sync batch error") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_TIMEOUT, "Sync timeout") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INTERNAL_ERROR, "Sync internal error") // wal diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index d70c45b0f2..f50fd22ba6 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -637,6 +637,7 @@ void dBufSetBufPageRecycled(SDiskbasedBuf* pBuf, void* pPage) { SListNode* pNode = tdListPopNode(pBuf->lruList, ppi->pn); taosMemoryFreeClear(ppi->pData); taosMemoryFreeClear(pNode); + ppi->pn = NULL; tdListAppend(pBuf->freePgList, &ppi); } diff --git a/source/util/src/ttimer.c b/source/util/src/ttimer.c index 36b9437b66..7256331c85 100644 --- a/source/util/src/ttimer.c +++ b/source/util/src/ttimer.c @@ -18,6 +18,7 @@ #include "taoserror.h" #include "tlog.h" #include "tsched.h" +#include "tdef.h" #define tmrFatal(...) \ { \ @@ -110,7 +111,7 @@ typedef struct time_wheel_t { tmr_obj_t** slots; } time_wheel_t; -static int32_t tsMaxTmrCtrl = 512; +static int32_t tsMaxTmrCtrl = TSDB_MAX_VNODES_PER_DB + 100; static TdThreadOnce tmrModuleInit = PTHREAD_ONCE_INIT; static TdThreadMutex tmrCtrlMutex; diff --git a/tests/script/api/stopquery.c b/tests/script/api/stopquery.c index 4c7964c983..0f27fdf9f9 100644 --- a/tests/script/api/stopquery.c +++ b/tests/script/api/stopquery.c @@ -52,6 +52,7 @@ typedef struct { typedef struct SSP_CB_PARAM { TAOS *taos; bool fetch; + bool free; int32_t *end; } SSP_CB_PARAM; @@ -156,7 +157,58 @@ void sqCloseQueryCb(void *param, TAOS_RES *pRes, int code) { } } -int sqSyncStopQuery(bool fetch) { +void sqKillFetchCb(void *param, TAOS_RES *pRes, int numOfRows) { + SSP_CB_PARAM *qParam = (SSP_CB_PARAM *)param; + taos_kill_query(qParam->taos); + + *qParam->end = 1; +} + +void sqKillQueryCb(void *param, TAOS_RES *pRes, int code) { + SSP_CB_PARAM *qParam = (SSP_CB_PARAM *)param; + if (code == 0 && pRes) { + if (qParam->fetch) { + taos_fetch_rows_a(pRes, sqKillFetchCb, param); + } else { + taos_kill_query(qParam->taos); + *qParam->end = 1; + } + } else { + sqExit("select", taos_errstr(pRes)); + } +} + +void sqAsyncFetchCb(void *param, TAOS_RES *pRes, int numOfRows) { + SSP_CB_PARAM *qParam = (SSP_CB_PARAM *)param; + if (numOfRows > 0) { + taos_fetch_rows_a(pRes, sqAsyncFetchCb, param); + } else { + *qParam->end = 1; + if (qParam->free) { + taos_free_result(pRes); + } + } +} + + +void sqAsyncQueryCb(void *param, TAOS_RES *pRes, int code) { + SSP_CB_PARAM *qParam = (SSP_CB_PARAM *)param; + if (code == 0 && pRes) { + if (qParam->fetch) { + taos_fetch_rows_a(pRes, sqAsyncFetchCb, param); + } else { + if (qParam->free) { + taos_free_result(pRes); + } + *qParam->end = 1; + } + } else { + sqExit("select", taos_errstr(pRes)); + } +} + + +int sqStopSyncQuery(bool fetch) { CASE_ENTER(); for (int32_t i = 0; i < runTimes; ++i) { char sql[1024] = {0}; @@ -189,7 +241,7 @@ int sqSyncStopQuery(bool fetch) { CASE_LEAVE(); } -int sqAsyncStopQuery(bool fetch) { +int sqStopAsyncQuery(bool fetch) { CASE_ENTER(); for (int32_t i = 0; i < runTimes; ++i) { char sql[1024] = {0}; @@ -219,7 +271,7 @@ int 
sqAsyncStopQuery(bool fetch) { CASE_LEAVE(); } -int sqSyncFreeQuery(bool fetch) { +int sqFreeSyncQuery(bool fetch) { CASE_ENTER(); for (int32_t i = 0; i < runTimes; ++i) { char sql[1024] = {0}; @@ -250,7 +302,7 @@ int sqSyncFreeQuery(bool fetch) { CASE_LEAVE(); } -int sqAsyncFreeQuery(bool fetch) { +int sqFreeAsyncQuery(bool fetch) { CASE_ENTER(); for (int32_t i = 0; i < runTimes; ++i) { char sql[1024] = {0}; @@ -280,7 +332,7 @@ int sqAsyncFreeQuery(bool fetch) { CASE_LEAVE(); } -int sqSyncCloseQuery(bool fetch) { +int sqCloseSyncQuery(bool fetch) { CASE_ENTER(); for (int32_t i = 0; i < runTimes; ++i) { char sql[1024] = {0}; @@ -310,7 +362,7 @@ int sqSyncCloseQuery(bool fetch) { CASE_LEAVE(); } -int sqAsyncCloseQuery(bool fetch) { +int sqCloseAsyncQuery(bool fetch) { CASE_ENTER(); for (int32_t i = 0; i < runTimes; ++i) { char sql[1024] = {0}; @@ -360,9 +412,39 @@ void *syncQueryThreadFp(void *arg) { taos_fetch_row(pRes); } - taos_free_result(pRes); + if (qParam->free) { + taos_free_result(pRes); + } } +void *asyncQueryThreadFp(void *arg) { + SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)arg; + char sql[1024] = {0}; + int32_t code = 0; + TAOS *taos = taos_connect(hostName, "root", "taosdata", NULL, 0); + if (taos == NULL) sqExit("taos_connect", taos_errstr(NULL)); + + qParam->taos = taos; + + sprintf(sql, "reset query cache"); + sqExecSQLE(taos, sql); + + sprintf(sql, "use %s", dbName); + sqExecSQLE(taos, sql); + + sprintf(sql, "select * from %s", tbName); + + int32_t qEnd = 0; + SSP_CB_PARAM param = {0}; + param.fetch = qParam->fetch; + param.end = &qEnd; + taos_query_a(taos, sql, sqAsyncQueryCb, ¶m); + while (0 == qEnd) { + usleep(5000); + } +} + + void *closeThreadFp(void *arg) { SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)arg; while (true) { @@ -376,7 +458,22 @@ void *closeThreadFp(void *arg) { } -int sqConSyncCloseQuery(bool fetch) { + +void *killThreadFp(void *arg) { + SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)arg; + while (true) { + if (qParam->taos) { + usleep(rand() % 10000); + taos_kill_query(qParam->taos); + break; + } + usleep(1); + } +} + + + +int sqConCloseSyncQuery(bool fetch) { CASE_ENTER(); pthread_t qid, cid; for (int32_t i = 0; i < runTimes; ++i) { @@ -391,26 +488,160 @@ int sqConSyncCloseQuery(bool fetch) { CASE_LEAVE(); } +int sqConCloseAsyncQuery(bool fetch) { + CASE_ENTER(); + pthread_t qid, cid; + for (int32_t i = 0; i < runTimes; ++i) { + SSP_CB_PARAM param = {0}; + param.fetch = fetch; + pthread_create(&qid, NULL, asyncQueryThreadFp, (void*)¶m); + pthread_create(&cid, NULL, closeThreadFp, (void*)¶m); + + pthread_join(qid, NULL); + pthread_join(cid, NULL); + } + CASE_LEAVE(); +} + + +int sqKillSyncQuery(bool fetch) { + CASE_ENTER(); + for (int32_t i = 0; i < runTimes; ++i) { + char sql[1024] = {0}; + int32_t code = 0; + TAOS *taos = taos_connect(hostName, "root", "taosdata", NULL, 0); + if (taos == NULL) sqExit("taos_connect", taos_errstr(NULL)); + + sprintf(sql, "reset query cache"); + sqExecSQL(taos, sql); + + sprintf(sql, "use %s", dbName); + sqExecSQL(taos, sql); + + sprintf(sql, "select * from %s", tbName); + TAOS_RES* pRes = taos_query(taos, sql); + code = taos_errno(pRes); + if (code) { + sqExit("taos_query", taos_errstr(pRes)); + } + + if (fetch) { + taos_fetch_row(pRes); + } + + taos_kill_query(taos); + + taos_close(taos); + } + CASE_LEAVE(); +} + +int sqKillAsyncQuery(bool fetch) { + CASE_ENTER(); + for (int32_t i = 0; i < runTimes; ++i) { + char sql[1024] = {0}; + int32_t code = 0; + TAOS *taos = taos_connect(hostName, "root", "taosdata", NULL, 0); + if (taos == NULL) 
sqExit("taos_connect", taos_errstr(NULL)); + + sprintf(sql, "reset query cache"); + sqExecSQL(taos, sql); + + sprintf(sql, "use %s", dbName); + sqExecSQL(taos, sql); + + sprintf(sql, "select * from %s", tbName); + + int32_t qEnd = 0; + SSP_CB_PARAM param = {0}; + param.fetch = fetch; + param.end = &qEnd; + param.taos = taos; + taos_query_a(taos, sql, sqKillQueryCb, ¶m); + while (0 == qEnd) { + usleep(5000); + } + + taos_close(taos); + } + CASE_LEAVE(); +} + +int sqConKillSyncQuery(bool fetch) { + CASE_ENTER(); + pthread_t qid, cid; + for (int32_t i = 0; i < runTimes; ++i) { + SSP_CB_PARAM param = {0}; + param.fetch = fetch; + pthread_create(&qid, NULL, syncQueryThreadFp, (void*)¶m); + pthread_create(&cid, NULL, killThreadFp, (void*)¶m); + + pthread_join(qid, NULL); + pthread_join(cid, NULL); + } + CASE_LEAVE(); +} + +int sqConKillAsyncQuery(bool fetch) { + CASE_ENTER(); + pthread_t qid, cid; + for (int32_t i = 0; i < runTimes; ++i) { + SSP_CB_PARAM param = {0}; + param.fetch = fetch; + pthread_create(&qid, NULL, asyncQueryThreadFp, (void*)¶m); + pthread_create(&cid, NULL, killThreadFp, (void*)¶m); + + pthread_join(qid, NULL); + pthread_join(cid, NULL); + } + CASE_LEAVE(); +} + + + void sqRunAllCase(void) { /* - sqSyncStopQuery(false); - sqSyncStopQuery(true); - sqAsyncStopQuery(false); - sqAsyncStopQuery(true); + sqStopSyncQuery(false); + sqStopSyncQuery(true); + sqStopAsyncQuery(false); + sqStopAsyncQuery(true); - sqSyncFreeQuery(false); - sqSyncFreeQuery(true); - sqAsyncFreeQuery(false); - sqAsyncFreeQuery(true); + sqFreeSyncQuery(false); + sqFreeSyncQuery(true); + sqFreeAsyncQuery(false); + sqFreeAsyncQuery(true); - sqSyncCloseQuery(false); - sqSyncCloseQuery(true); - sqAsyncCloseQuery(false); - sqAsyncCloseQuery(true); -*/ - sqConSyncCloseQuery(false); - sqConSyncCloseQuery(true); + sqCloseSyncQuery(false); + sqCloseSyncQuery(true); + sqCloseAsyncQuery(false); + sqCloseAsyncQuery(true); + + sqConCloseSyncQuery(false); + sqConCloseSyncQuery(true); + sqConCloseAsyncQuery(false); + sqConCloseAsyncQuery(true); +*/ +#if 0 + + sqKillSyncQuery(false); + sqKillSyncQuery(true); + sqKillAsyncQuery(false); + sqKillAsyncQuery(true); +#endif + + //sqConKillSyncQuery(false); + sqConKillSyncQuery(true); +#if 0 + sqConKillAsyncQuery(false); + sqConKillAsyncQuery(true); +#endif + + int32_t l = 5; + while (l) { + printf("%d\n", l--); + sleep(1); + } } diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 2b33067381..cbcc2d86ef 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -164,6 +164,7 @@ ./test.sh -f tsim/sma/drop_sma.sim ./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim +./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim # --- valgrind ./test.sh -f tsim/valgrind/checkError.sim -v diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index d54624872f..1deea26337 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -121,7 +121,7 @@ echo "firstEp ${HOSTNAME}:7100" >> $TAOS_CFG echo "secondEp ${HOSTNAME}:7200" >> $TAOS_CFG echo "fqdn ${HOSTNAME}" >> $TAOS_CFG echo "serverPort ${NODE}" >> $TAOS_CFG -echo "supportVnodes 128" >> $TAOS_CFG +echo "supportVnodes 1024" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG echo "debugFlag 0" >> $TAOS_CFG @@ -135,7 +135,7 @@ echo "jniDebugFlag 143" >> $TAOS_CFG echo "qDebugFlag 143" >> $TAOS_CFG echo "rpcDebugFlag 143" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG -echo "uDebugFlag 
131" >> $TAOS_CFG +echo "uDebugFlag 143" >> $TAOS_CFG echo "sDebugFlag 143" >> $TAOS_CFG echo "wDebugFlag 143" >> $TAOS_CFG echo "numOfLogLines 20000000" >> $TAOS_CFG diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh index b431c0627c..d30c75022a 100755 --- a/tests/script/sh/stop_dnodes.sh +++ b/tests/script/sh/stop_dnodes.sh @@ -14,7 +14,7 @@ while [ -n "$PID" ]; do echo kill -9 $PID #pkill -9 taosd kill -9 $PID - echo "Killing processes locking on port 6030" + echo "Killing taosd processes" if [ "$OS_TYPE" != "Darwin" ]; then fuser -k -n tcp 6030 else diff --git a/tests/script/tmp/prepare.sim b/tests/script/tmp/prepare.sim index 3b43656a41..2e16ee7bda 100644 --- a/tests/script/tmp/prepare.sim +++ b/tests/script/tmp/prepare.sim @@ -7,39 +7,33 @@ system sh/deploy.sh -n dnode4 -i 4 return -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/deploy.sh -n dnode5 -i 5 -system sh/deploy.sh -n dnode6 -i 6 -system sh/deploy.sh -n dnode7 -i 7 -system sh/deploy.sh -n dnode8 -i 8 -system sh/deploy.sh -n dnode9 -i 9 system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode5 -s start -system sh/exec.sh -n dnode6 -s start -system sh/exec.sh -n dnode7 -s start -system sh/exec.sh -n dnode8 -s start -system sh/exec.sh -n dnode9 -s start +sql connect -sleep 2000 +$num = 200 +$i = 0 +while $i < $num + $port = $i + 8000 + $i = $i + 1 + sql create dnode $hostname port $port +endw -sql create dnode $hostname port 7200 -sql create dnode $hostname port 7300 -sql create dnode $hostname port 7400 -sql create dnode $hostname port 7500 -sql create dnode $hostname port 7600 -sql create dnode $hostname port 7700 -sql create dnode $hostname port 7800 -sql create dnode $hostname port 7900 +$i = 0 +while $i < $num + $port = $i + 8000 + $i = $i + 1 + $name = dnode . 
$port + system sh/deploy.sh -n $name -i 1 + system sh/cfg.sh -n $name -c serverPort -v $port + system sh/exec.sh -n $name -s start +endw -sql show dnodes; -print $data00 $data01 -print $data10 $data11 -print $data20 $data21 -print $data30 $data31 -print $data40 $data41 \ No newline at end of file +return + +sql create database db vgroups 1024 buffer 3; +sql use db; +sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned); +sql create table ct1 using stb tags(1000); +sql create table ct2 using stb tags(1000) ; +sql show db.tables; +sql insert into ct1 values(now+0s, 10, 2.0, 3.0); \ No newline at end of file diff --git a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim new file mode 100644 index 0000000000..1b54e5a47d --- /dev/null +++ b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim @@ -0,0 +1,237 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database with retentions +sql create database d0 retentions 5s:7d,5m:21d,15m:365d; +sql use d0 + +print =============== create super table and register rsma +sql create table if not exists stb (ts timestamp, c1 int, c2 float) tags (city binary(20),district binary(20)) rollup(max) max_delay 5s,5s watermark 2s,3s; + +sql show stables +if $rows != 1 then + return -1 +endi + +print =============== create child table +sql create table ct1 using stb tags("BeiJing", "ChaoYang"); + +sql show tables +if $rows != 1 then + return -1 +endi + +print =============== insert data and trigger rollup +sql insert into ct1 values(now, 10, 10.0); +sql insert into ct1 values(now+1s, 1, 1.0); +sql insert into ct1 values(now+2s, 100, 100.0); + +print =============== wait maxdelay 5+1 seconds for results +sleep 6000 + +print =============== select * from retention level 2 from memory +sql select * from ct1; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 2 file rows $rows > 2 + return -1 +endi + + +if $data01 != 100 then + if $data01 != 10 then + print retention level 2 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 1 from memory +sql select * from ct1 where ts > now-8d; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 1 file rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 1 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 0 from memory +sql select * from ct1 where ts > now-3d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 + +if $rows < 1 then + print retention level 0 file rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + +#=================================================================== + + +#==================== reboot to trigger commit data to file +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +print =============== select * from retention level 2 from file +sql select * from ct1; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 2 file rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 2 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * 
from retention level 1 from file +sql select * from ct1 where ts > now-8d; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 1 file rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 1 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 0 from file +sql select * from ct1 where ts > now-3d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 +if $rows < 1 then + print retention level 0 file rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + +print =============== insert after rsma qtaskinfo recovery +sql insert into ct1 values(now, 50, 500.0); +sql insert into ct1 values(now+1s, 40, 40.0); + +print =============== wait maxdelay 5+1 seconds for results +sleep 6000 + +print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery +sql select * from ct1; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 2 file/mem rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 2 file/mem result $data01 != 100 or 10 + return -1 + endi +endi + +if $data02 != 500.00000 then + if $data02 != 100.00000 then + print retention level 1 file/mem result $data02 != 500.00000 or 100.00000 + return -1 + endi +endi + +print =============== select * from retention level 1 from file and memory after rsma qtaskinfo recovery +sql select * from ct1 where ts > now-8d; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 1 file/mem rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 1 file/mem result $data01 != 100 or 10 + return -1 + endi +endi + +if $data02 != 500.00000 then + if $data02 != 100.00000 then + print retention level 1 file/mem result $data02 != 500.00000 or 100.00000 + return -1 + endi +endi + + +print =============== select * from retention level 0 from file and memory after rsma qtaskinfo recovery +sql select * from ct1 where ts > now-3d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 +print $data30 $data31 $data32 +print $data40 $data41 $data42 + +if $rows < 1 then + print retention level 0 file/mem rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file/mem result $data01 != 10 + return -1 +endi + +if $data11 != 1 then + print retention level 0 file/mem result $data11 != 1 + return -1 +endi + +if $data21 != 100 then + print retention level 0 file/mem result $data21 != 100 + return -1 +endi + +if $data31 != 50 then + print retention level 0 file/mem result $data31 != 50 + return -1 +endi + +if $data32 != 500.00000 then + print retention level 0 file/mem result $data32 != 500.00000 + return -1 +endi + + +if $data41 != 40 then + print retention level 0 file/mem result $data41 != 40 + return -1 +endi + +if $data42 != 40.00000 then + print retention level 0 file/mem result $data42 != 40.00000 + return -1 +endi + + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/tag_modify.sim b/tests/script/tsim/stable/tag_modify.sim index 62e4c7b282..909ed79359 100644 --- a/tests/script/tsim/stable/tag_modify.sim +++ b/tests/script/tsim/stable/tag_modify.sim @@ -14,7 +14,7 @@ sql_error alter table db.stb MODIFY tag ts int sql_error 
alter table db.stb MODIFY tag t2 binary(3) sql_error alter table db.stb MODIFY tag t2 int sql_error alter table db.stb MODIFY tag t1 int -sql create table db.ctb using db.stb tags(101, "12345") +sql create table db.ctb using db.stb tags(101, "123") sql insert into db.ctb values(now, 1, "1234") sql select * from db.stb @@ -32,7 +32,7 @@ endi if $data[0][3] != 101 then return -1 endi -if $data[0][4] != 1234 then +if $data[0][4] != 123 then return -1 endi diff --git a/tests/script/tsim/stable/tag_rename.sim b/tests/script/tsim/stable/tag_rename.sim index 2f67a3ab2c..5bdfa24990 100644 --- a/tests/script/tsim/stable/tag_rename.sim +++ b/tests/script/tsim/stable/tag_rename.sim @@ -14,7 +14,7 @@ sql_error alter table db.stb rename tag ts c3 sql_error alter table db.stb rename tag t2 t1 sql_error alter table db.stb rename tag t2 t2 sql_error alter table db.stb rename tag t1 t2 -sql create table db.ctb using db.stb tags(101, "12345") +sql create table db.ctb using db.stb tags(101, "123") sql insert into db.ctb values(now, 1, "1234") sql select * from db.stb @@ -32,7 +32,7 @@ endi if $data[0][3] != 101 then return -1 endi -if $data[0][4] != 1234 then +if $data[0][4] != 123 then return -1 endi @@ -56,7 +56,7 @@ endi if $data[0][3] != 101 then return -1 endi -if $data[0][4] != 1234 then +if $data[0][4] != 123 then return -1 endi diff --git a/tests/script/tsim/valgrind/basic.sim b/tests/script/tsim/valgrind/basic.sim index 0f11ae0313..fe7b6973d4 100644 --- a/tests/script/tsim/valgrind/basic.sim +++ b/tests/script/tsim/valgrind/basic.sim @@ -1,8 +1,33 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode1 -s start -v sql connect -sql create database d0 vgroups 1; +print =============== step1: create drop show dnodes +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +if $rows != 1 then + return -1 +endi +goto _OVER + +print =============== step2: create alter drop show user +sql create user u1 pass 'taosdata' +sql show users +sql alter user u1 sysinfo 1 +sql alter user u1 enable 1 +sql alter user u1 pass 'taosdata' +sql drop user u1 +sql_error alter user u2 sysinfo 0 + +_OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/valgrind/basic2.sim b/tests/script/tsim/valgrind/basic2.sim new file mode 100644 index 0000000000..440873b89b --- /dev/null +++ b/tests/script/tsim/valgrind/basic2.sim @@ -0,0 +1,25 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start -v +sql connect + +print =============== step1: create drop show dnodes +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! 
+ return -1 + endi +sql show dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +if $rows != 1 then + return -1 +endi + +print =============== step2: create db +sql create database db vgroups 1 + +_OVER: +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py index 961a6446b5..90a1b8f343 100644 --- a/tests/system-test/2-query/abs.py +++ b/tests/system-test/2-query/abs.py @@ -139,7 +139,7 @@ class TDTestCase: ) for i in range(4): tdSql.execute( - f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py index 20ee6df7fc..2e30ac7ea7 100644 --- a/tests/system-test/2-query/avg.py +++ b/tests/system-test/2-query/avg.py @@ -14,7 +14,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), True) - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -22,7 +22,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -64,7 +64,7 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_avg(self ,origin_query , check_query): avg_result = tdSql.getResult(origin_query) origin_result = tdSql.getResult(check_query) @@ -73,13 +73,13 @@ class TDTestCase: for row_index , row in enumerate(avg_result): for col_index , elem in enumerate(row): if avg_result[row_index][col_index] != origin_result[row_index][col_index]: - check_status = False + check_status = False if not check_status: tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query ) sys.exit(1) else: tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query ) - + def test_errors(self): error_sql_lists = [ "select avg from t1", @@ -113,42 +113,42 @@ class TDTestCase: ] for error_sql in error_sql_lists: tdSql.error(error_sql) - + def support_types(self): type_error_sql_lists = [ - "select avg(ts) from t1" , + "select avg(ts) from t1" , "select avg(c7) from t1", "select avg(c8) from t1", "select avg(c9) from t1", - "select avg(ts) from ct1" , + "select avg(ts) from ct1" , "select avg(c7) from ct1", "select avg(c8) from ct1", "select avg(c9) from ct1", - "select avg(ts) from ct3" , + "select avg(ts) from ct3" , "select avg(c7) from ct3", "select avg(c8) from ct3", "select avg(c9) from ct3", - "select avg(ts) from ct4" , + "select avg(ts) from ct4" , "select avg(c7) from ct4", "select avg(c8) from ct4", "select avg(c9) from ct4", - "select avg(ts) from stb1" , + "select avg(ts) from stb1" , "select avg(c7) from stb1", "select avg(c8) from stb1", "select avg(c9) from stb1" , - "select avg(ts) from stbbb1" , + "select avg(ts) from stbbb1" , "select avg(c7) from stbbb1", "select avg(ts) from tbname", "select avg(c9) from tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ "select avg(c1) from t1", "select avg(c2) from t1", @@ -178,16 +178,16 @@ class TDTestCase: "select avg(c5) from stb1", "select avg(c6) from stb1", - "select avg(c6) as alisb from stb1", - "select avg(c6) alisb from stb1", + "select avg(c6) as alisb from stb1", + "select avg(c6) alisb from stb1", ] 
for type_sql in type_sql_lists: tdSql.query(type_sql) - + def basic_avg_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -207,18 +207,18 @@ class TDTestCase: tdSql.query("select avg(c5) from ct3") tdSql.checkRows(0) tdSql.query("select avg(c6) from ct3") - + # used for regular table tdSql.query("select avg(c1) from t1") tdSql.checkData(0, 0, 5.000000000) - - + + tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ") - + # used for sub table tdSql.query("select avg(c1) from ct1") tdSql.checkData(0, 0, 4.846153846) @@ -229,8 +229,8 @@ class TDTestCase: self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ") self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ") - # used for stable table - + # used for stable table + tdSql.query("select avg(c1) from stb1") tdSql.checkRows(1) @@ -241,10 +241,10 @@ class TDTestCase: tdSql.error("select avg(c1) from tbname") tdSql.error("select avg(c1) from ct5") - # mix with common col + # mix with common col tdSql.error("select c1, avg(c1) from ct1") tdSql.error("select c1, avg(c1) from ct4") - + # mix with common functions tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ") @@ -278,11 +278,11 @@ class TDTestCase: tdSql.query("select count(*) from stb1 ") tdSql.checkData(0,0,25) - # bug fix for compute + # bug fix for compute tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ") tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4") - # mix with nest query + # mix with nest query self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1") self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1") @@ -297,7 +297,7 @@ class TDTestCase: tdSql.query(" select avg(c1) from stb1 where c1 is null ") tdSql.checkRows(0) - + def avg_func_filter(self): tdSql.execute("use db") tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ") @@ -324,7 +324,7 @@ class TDTestCase: def avg_Arithmetic(self): pass - + def check_boundary_values(self): tdSql.execute("drop database if exists bound_test") @@ -344,11 +344,11 @@ class TDTestCase: tdSql.execute( f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.execute( f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.execute( f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" ) @@ -359,14 +359,14 @@ class TDTestCase: tdSql.execute( f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.error( f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 
1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ") - + # check basic elem for table per row tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ") tdSql.checkRows(1) @@ -376,8 +376,8 @@ class TDTestCase: tdSql.checkData(0,3,53.571428571) tdSql.checkData(0,4,5.828571332045761e+37) # tdSql.checkData(0,5,None) - - + + # check + - * / in functions tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ") tdSql.checkData(0,0,920350134.5714285) @@ -386,33 +386,33 @@ class TDTestCase: tdSql.checkData(0,3,26.785714286) tdSql.checkData(0,4,2.9142856660228804e+37) # tdSql.checkData(0,5,None) - - + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: avg basic query ============") + tdLog.printNoPrefix("==========step4: avg basic query ============") self.basic_avg_function() - tdLog.printNoPrefix("==========step5: avg boundary query ============") + tdLog.printNoPrefix("==========step5: avg boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step6: avg filter query ============") + tdLog.printNoPrefix("==========step6: avg filter query ============") self.avg_func_filter() diff --git a/tests/system-test/2-query/distribute_agg_apercentile.py b/tests/system-test/2-query/distribute_agg_apercentile.py index fd1455ce16..022d13c5ae 100644 --- a/tests/system-test/2-query/distribute_agg_apercentile.py +++ b/tests/system-test/2-query/distribute_agg_apercentile.py @@ -36,7 +36,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_avg.py b/tests/system-test/2-query/distribute_agg_avg.py index 647a262558..c690a17b4a 100644 --- a/tests/system-test/2-query/distribute_agg_avg.py +++ b/tests/system-test/2-query/distribute_agg_avg.py @@ -53,7 +53,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_count.py b/tests/system-test/2-query/distribute_agg_count.py index 2ac9c86df0..b3638dac4b 100644 --- a/tests/system-test/2-query/distribute_agg_count.py +++ 
b/tests/system-test/2-query/distribute_agg_count.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_max.py b/tests/system-test/2-query/distribute_agg_max.py index 5c9760cbcf..0924ea16ac 100644 --- a/tests/system-test/2-query/distribute_agg_max.py +++ b/tests/system-test/2-query/distribute_agg_max.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_min.py b/tests/system-test/2-query/distribute_agg_min.py index dd20d88229..8d077fd59b 100644 --- a/tests/system-test/2-query/distribute_agg_min.py +++ b/tests/system-test/2-query/distribute_agg_min.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py index 94f1a61d77..c91fd1d30b 100644 --- a/tests/system-test/2-query/distribute_agg_spread.py +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_stddev.py b/tests/system-test/2-query/distribute_agg_stddev.py index 5050e6e940..59ede38983 100644 --- a/tests/system-test/2-query/distribute_agg_stddev.py +++ b/tests/system-test/2-query/distribute_agg_stddev.py @@ -64,7 +64,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_sum.py b/tests/system-test/2-query/distribute_agg_sum.py index add4d75c61..8dcd902b3d 100644 --- a/tests/system-test/2-query/distribute_agg_sum.py +++ b/tests/system-test/2-query/distribute_agg_sum.py @@ -53,7 +53,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, 
{11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/function_null.py b/tests/system-test/2-query/function_null.py index 4de7a7f113..545872b39d 100644 --- a/tests/system-test/2-query/function_null.py +++ b/tests/system-test/2-query/function_null.py @@ -42,7 +42,7 @@ class TDTestCase: ) for i in range(4): tdSql.execute( - f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py index 238924d851..d0573a6bf4 100644 --- a/tests/system-test/2-query/irate.py +++ b/tests/system-test/2-query/irate.py @@ -97,7 +97,7 @@ class TDTestCase: ) for i in range(4): tdSql.execute( - f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py index 84aab3d624..9e48f7d45a 100644 --- a/tests/system-test/2-query/json_tag.py +++ b/tests/system-test/2-query/json_tag.py @@ -687,8 +687,8 @@ class TDTestCase: # to_json() tdSql.query("select to_json('{\"abc\":123}') from jsons1_1") tdSql.checkRows(2) - # tdSql.checkData(0, 0, '{"abc":123}') - # tdSql.checkData(1, 0, '{"abc":123}') + tdSql.checkData(0, 0, '{"abc":123}') + tdSql.checkData(1, 0, '{"abc":123}') tdSql.query("select to_json('null') from jsons1_1") tdSql.checkRows(2) tdSql.checkData(0, 0, 'null') diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index 46426a5a95..3cd023648e 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -109,7 +109,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py index 97e4622378..ebc8e6b6f9 100644 --- a/tests/system-test/2-query/timetruncate.py +++ b/tests/system-test/2-query/timetruncate.py @@ -3,7 +3,8 @@ from util.log import * from util.cases import * from util.sql import * import numpy as np - +import time +from datetime import datetime class TDTestCase: def init(self, conn, logSql): @@ -13,205 +14,210 @@ class TDTestCase: self.rowNum = 10 self.ts = 1537146000000 # 2018-9-17 09:00:00.000 + self.ts_str = [ + '2020-1-1', + '2020-2-1 00:00:01', + '2020-3-1 00:00:00.001', + '2020-4-1 00:00:00.001002', + '2020-5-1 00:00:00.001002001' + + ] + self.db_param_precision = ['ms','us','ns'] + self.time_unit = ['1w','1d','1h','1m','1s','1a','1u'] + #self.error_unit = ['1b','2w','2d','2h','2m','2s','2a','2u','1c','#1'] + self.error_unit = 
['2w','2d','2h','2m','2s','2a','2u','1c','#1'] + self.ntbname = 'ntb' + self.stbname = 'stb' + self.ctbname = 'ctb' + def get_ms_timestamp(self,ts_str): + _ts_str = ts_str + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15 : + _ts_str = ts_str[:-3] + if ':' in _ts_str and '.' in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S.%f") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + elif ':' in _ts_str and '.' not in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + else: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + return date_time + def get_us_timestamp(self,ts_str): + _ts = self.get_ms_timestamp(ts_str) * 1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 12: + us_ts = p[12:15] + _ts += int(us_ts) + return _ts + def get_ns_timestamp(self,ts_str): + _ts = self.get_us_timestamp(ts_str) *1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15: + us_ts = p[15:] + _ts += int(us_ts) + return _ts + def time_transform(self,ts_str,precision): + date_time = [] + if precision == 'ms': + for i in ts_str: + date_time.append(self.get_ms_timestamp(i)) + elif precision == 'us': + for i in ts_str: + date_time.append(self.get_us_timestamp(i)) + elif precision == 'ns': + for i in ts_str: + date_time.append(self.get_us_timestamp(i)) + return date_time + def check_ms_timestamp(self,unit,date_time): + if unit.lower() == '1a': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i])) + elif unit.lower() == '1s': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000)*1000) + elif unit.lower() == '1m': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60)*60*1000) + elif unit.lower() == '1h': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60)*60*60*1000 ) + elif unit.lower() == '1d': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000) + elif unit.lower() == '1w': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000) + def check_us_timestamp(self,unit,date_time): + if unit.lower() == '1u': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i])) + elif unit.lower() == '1a': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000)*1000) + elif unit.lower() == '1s': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000)*1000*1000) + elif unit.lower() == '1m': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + 
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60)*60*1000*1000) + elif unit.lower() == '1h': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60)*60*60*1000*1000 ) + elif unit.lower() == '1d': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 ) + elif unit.lower() == '1w': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000) + def check_ns_timestamp(self,unit,date_time): + if unit.lower() == '1u': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000)*1000) + elif unit.lower() == '1a': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000)*1000*1000) + elif unit.lower() == '1s': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000)*1000*1000*1000) + elif unit.lower() == '1m': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60)*60*1000*1000*1000) + elif unit.lower() == '1h': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 ) + elif unit.lower() == '1d': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) + elif unit.lower() == '1w': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000) + def data_check(self,date_time,precision,tb_type): + for unit in self.time_unit: + if (unit.lower() == '1u' and precision.lower() == 'ms') or () : + if tb_type.lower() == 'ntb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}') + elif precision.lower() == 'ms': + if tb_type.lower() == 'ntb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') + tdSql.checkRows(len(self.ts_str)) + self.check_ms_timestamp(unit,date_time) + elif precision.lower() == 'us': + if tb_type.lower() == 'ntb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') + tdSql.checkRows(len(self.ts_str)) + self.check_us_timestamp(unit,date_time) + elif precision.lower() == 'ns': + if tb_type.lower() == 'ntb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') 
+ tdSql.checkRows(len(self.ts_str)) + self.check_ns_timestamp(unit,date_time) + for unit in self.error_unit: + if tb_type.lower() == 'ntb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}') + def function_check_ntb(self): + for precision in self.db_param_precision: + tdSql.execute('drop database if exists db') + tdSql.execute(f'create database db precision "{precision}"') + tdSql.execute('use db') + tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)') + for ts in self.ts_str: + tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)') + date_time = self.time_transform(self.ts_str,precision) + self.data_check(date_time,precision,'ntb') + + def function_check_stb(self): + for precision in self.db_param_precision: + tdSql.execute('drop database if exists db') + tdSql.execute(f'create database db precision "{precision}"') + tdSql.execute('use db') + tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)') + tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)') + for ts in self.ts_str: + tdSql.execute(f'insert into {self.ctbname} values("{ts}",1)') + date_time = self.time_transform(self.ts_str,precision) + self.data_check(date_time,precision,'ctb') + self.data_check(date_time,precision,'stb') def run(self): - tdSql.prepare() - - intData = [] - floatData = [] - - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') - for i in range(self.rowNum): - tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - intData.append(i + 1) - floatData.append(i + 0.1) - for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - intData.append(i + 1) - floatData.append(i + 0.1) - - tdSql.query("select timetruncate(1,1d) from ntb") - tdSql.checkRows(10) - tdSql.error("select timetruncate(1,1u) from ntb") - #tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1a) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1m) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1h) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(ts,1d) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 08:00:00.000") - tdSql.query("select timetruncate(ts,1h) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1m) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - 
tdSql.query("select timetruncate(ts,1s) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1a) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.checkData(1,0,"2018-09-17 09:00:00.001") - tdSql.checkData(2,0,"2018-09-17 09:00:00.002") - tdSql.checkData(3,0,"2018-09-17 09:00:00.003") - tdSql.checkData(4,0,"2018-09-17 09:00:00.004") - tdSql.checkData(5,0,"2018-09-17 09:00:00.005") - tdSql.checkData(6,0,"2018-09-17 09:00:00.006") - tdSql.checkData(7,0,"2018-09-17 09:00:00.007") - tdSql.checkData(8,0,"2018-09-17 09:00:00.008") - tdSql.checkData(9,0,"2018-09-17 09:00:00.009") - # tdSql.query("select timetruncate(ts,1u) from ntb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000") - # tdSql.query("select timetruncate(ts,1b) from ntb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000") - - - tdSql.query("select timetruncate(1,1d) from stb") - tdSql.checkRows(10) - tdSql.error("select timetruncate(1,1u) from stb") - #tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1a) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1m) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1h) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(ts,1d) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 08:00:00.000") - tdSql.query("select timetruncate(ts,1h) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1m) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1s) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1a) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.checkData(1,0,"2018-09-17 09:00:00.001") - tdSql.checkData(2,0,"2018-09-17 09:00:00.002") - tdSql.checkData(3,0,"2018-09-17 09:00:00.003") - tdSql.checkData(4,0,"2018-09-17 09:00:00.004") - tdSql.checkData(5,0,"2018-09-17 09:00:00.005") - tdSql.checkData(6,0,"2018-09-17 09:00:00.006") - tdSql.checkData(7,0,"2018-09-17 09:00:00.007") - tdSql.checkData(8,0,"2018-09-17 09:00:00.008") - tdSql.checkData(9,0,"2018-09-17 09:00:00.009") - # tdSql.query("select timetruncate(ts,1u) from stb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000") - # 
tdSql.checkData(1,0,"2018-09-17 09:00:00.001000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000") - # tdSql.query("select timetruncate(ts,1b) from stb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000") - - tdSql.query("select timetruncate(1,1d) from stb_1") - tdSql.checkRows(10) - tdSql.error("select timetruncate(1,1u) from stb_1") - #tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1a) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1m) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1h) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(ts,1d) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 08:00:00.000") - tdSql.query("select timetruncate(ts,1h) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1m) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1s) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1a) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.checkData(1,0,"2018-09-17 09:00:00.001") - tdSql.checkData(2,0,"2018-09-17 09:00:00.002") - tdSql.checkData(3,0,"2018-09-17 09:00:00.003") - tdSql.checkData(4,0,"2018-09-17 09:00:00.004") - tdSql.checkData(5,0,"2018-09-17 09:00:00.005") - tdSql.checkData(6,0,"2018-09-17 09:00:00.006") - tdSql.checkData(7,0,"2018-09-17 09:00:00.007") - tdSql.checkData(8,0,"2018-09-17 09:00:00.008") - tdSql.checkData(9,0,"2018-09-17 09:00:00.009") - # tdSql.query("select timetruncate(ts,1u) from stb_1") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000") - # tdSql.query("select timetruncate(ts,1b) from stb_1") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000") - # tdSql.checkData(4,0,"2018-09-17 
09:00:00.004000000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000") + self.function_check_ntb() + self.function_check_stb() def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index b400e503d9..9f0e189a5f 100644 --- a/tests/system-test/2-query/twa.py +++ b/tests/system-test/2-query/twa.py @@ -34,7 +34,7 @@ class TDTestCase: ) for i in range(self.tb_nums): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') ts = self.ts for j in range(self.row_nums): ts+=j*self.time_step diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py new file mode 100644 index 0000000000..bd8531d7c8 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py @@ -0,0 +1,264 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 100000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + 
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + firstConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(totalRowsInserted/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + secondConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + 
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 3 + expectrowcnt = math.ceil(totalRowsInserted/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + thirdConsumeRows = resultList[0] + + if not (totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # total consume + actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py new file mode 100644 index 0000000000..1d2aaccda5 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py @@ -0,0 +1,263 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 100000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = 
self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic 
%s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + firstConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + secondConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], 
expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + thirdConsumeRows = resultList[0] + + if not (totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # total consume + actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py new file mode 100644 index 0000000000..2720b7cdce --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py @@ -0,0 +1,245 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 3000 + self.rowsPerTbl = 150 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 200, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + 
paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + # tdDnodes.start(1) + tdDnodes.starttaosd(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + 
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 20, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + tdLog.info("select result rows: %d"%totalRowsInserted) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + firstConsumeRows = resultList[0] + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(totalRowsInserted*2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + 
tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = firstConsumeRows + resultList[0] + + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py new file mode 100644 index 0000000000..0b2dddd24a --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py @@ -0,0 +1,242 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 3000 + self.rowsPerTbl = 70 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 100, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + 
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + # tdDnodes.start(1) + tdDnodes.starttaosd(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + firstConsumeRows = resultList[0] + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = firstConsumeRows + resultList[0] + + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("and sum of two consume rows: %d 
should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py new file mode 100644 index 0000000000..540c9dbbe1 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py @@ -0,0 +1,243 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 1000000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 1000, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 
'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 15, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(totalRowsInserted * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 25, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(totalRowsInserted) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:500, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic 
%s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py new file mode 100644 index 0000000000..5cb373092b --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py @@ -0,0 +1,241 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 
'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 15, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py new file mode 100644 index 0000000000..fc2552d6f2 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py @@ -0,0 +1,247 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 4000 + self.rowsPerTbl = 150 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 200, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 
'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + tdLog.info("select result rows: %d"%totalRowsInserted) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(totalRowsInserted*2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + tdLog.info("select result rows: %d"%totalRowsInserted) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(totalRowsInserted) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:300, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + # tdLog.info("wait start consume notify") + # tmqCom.getStartConsumeNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and 
%d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py new file mode 100644 index 0000000000..c1b327e5f1 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py @@ -0,0 +1,241 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 3000 + self.rowsPerTbl = 70 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 100, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 
'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 8f1137510e..889b316568 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -21,7 +21,7 @@ python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py python3 ./test.py -f 1-insert/alter_stable.py -python3 ./test.py -f 1-insert/alter_table.py +#python3 ./test.py -f 1-insert/alter_table.py python3 ./test.py -f 1-insert/insertWithMoreVgroup.py python3 ./test.py -f 1-insert/table_comment.py python3 ./test.py -f 1-insert/time_range_wise.py @@ -118,7 +118,7 @@ python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 2-query/irate.py python3 ./test.py -f 2-query/function_null.py -python3 ./test.py -f 2-query/queryQnode.py +#python3 ./test.py -f 2-query/queryQnode.py #python3 ./test.py -f 6-cluster/5dnode1mnode.py #python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 @@ -163,3 +163,12 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py + diff --git a/tests/system-test/simpletest.bat b/tests/system-test/simpletest.bat index 656828aa1e..e33fe0d538 100644 --- a/tests/system-test/simpletest.bat +++ b/tests/system-test/simpletest.bat @@ -6,7 +6,7 @@ python3 .\test.py -f 0-others\telemetry.py python3 .\test.py -f 0-others\taosdMonitor.py python3 .\test.py -f 0-others\udfTest.py python3 .\test.py -f 0-others\udf_create.py -python3 .\test.py -f 0-others\udf_restart_taosd.py +@REM python3 .\test.py -f 0-others\udf_restart_taosd.py @REM python3 .\test.py -f 0-others\cachelast.py @REM python3 .\test.py -f 0-others\user_control.py diff --git a/tools/taos-tools b/tools/taos-tools index 7105027650..1163c0f60a 160000 --- a/tools/taos-tools +++ b/tools/taos-tools @@ -1 +1 @@ -Subproject commit 7105027650b51e701cfa1dac11b8fb42d447dd01 +Subproject commit 1163c0f60aa65d6cc58283247c8bf8c56ba43b92 diff --git a/tools/taosadapter b/tools/taosadapter index c885e967e4..c3815951fc 160000 --- a/tools/taosadapter +++ b/tools/taosadapter @@ -1 +1 @@ -Subproject commit c885e967e490105999b84d009a15168728dfafaf +Subproject commit c3815951fc80617ecd171f3743b8b4a4d0bc712e