diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 920eb0c01d..32e9b11520 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -127,6 +127,25 @@ def pre_test(){ ''' return 1 } +def pre_test_build_mac() { + sh ''' + hostname + date + ''' + sh ''' + cd ${WK} + rm -rf debug + mkdir debug + ''' + sh ''' + cd ${WK}/debug + cmake .. + make -j8 + ''' + sh ''' + date + ''' +} def pre_test_win(){ bat ''' hostname @@ -334,6 +353,17 @@ pipeline { } } } + stage('mac test') { + agent{label " Mac_catalina "} + steps { + catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { + timeout(time: 20, unit: 'MINUTES'){ + pre_test() + pre_test_build_mac() + } + } + } + } stage('linux test') { agent{label " worker03 || slave215 || slave217 || slave219 "} options { skipDefaultCheckout() } diff --git a/README-CN.md b/README-CN.md index a9bc814e8d..c78050c7bc 100644 --- a/README-CN.md +++ b/README-CN.md @@ -174,6 +174,8 @@ cmake .. -G "NMake Makefiles" nmake ``` +如果你使用的是 Visual Studio 2022 版本, 脚本 `vcvarsall.bat` 的默认安装路径是 `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`。 + ### Mac OS X 系统 安装 Xcode 命令行工具和 cmake. 在 Catalina 和 Big Sur 操作系统上,需要安装 XCode 11.4+ 版本。 diff --git a/README.md b/README.md index 2dea05f09d..48349e891e 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,7 @@ cmake .. -DCPUTYPE=mips64 && cmake --build . ### On Windows platform -If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". +If you use Visual Studio 2013, please open a command window by executing "cmd.exe". Please specify "amd64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat. ```cmd mkdir debug && cd debug @@ -145,7 +145,7 @@ cmake .. -G "NMake Makefiles" nmake ``` -If you use the Visual Studio 2019 or 2017: +If you use Visual Studio 2019 or 2017: please open a command window by executing "cmd.exe". Please specify "x64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat. @@ -164,6 +164,8 @@ cmake .. -G "NMake Makefiles" nmake ``` +If you use Visual Studio 2022, the only change is the default path of `vcvarsall.bat`, which is `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`. + ### On Mac OS X platform Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. 
diff --git a/cmake/cmake.define b/cmake/cmake.define index f58c1ad354..b5d5c6d957 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -98,12 +98,12 @@ ELSE () ENDIF () IF (${SANITIZER} MATCHES "true") - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") MESSAGE(STATUS "Will compile with Address Sanitizer!") ELSE () - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0") ENDIF () MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}") diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 31b9936f3e..e80e7e4110 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -118,6 +118,7 @@ execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
# ================================================================================================ # googletest if(${BUILD_TEST}) + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) add_subdirectory(googletest EXCLUDE_FROM_ALL) target_include_directories( gtest @@ -259,7 +260,7 @@ if(${BUILD_MSVCREGEX}) SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex) endif(${BUILD_MSVCREGEX}) -# msvcregex +# wcwidth if(${BUILD_WCWIDTH}) add_library(wcwidth STATIC "") target_sources(wcwidth diff --git a/include/common/tcommon.h b/include/common/tcommon.h index c7ba618b25..4a06d81c7b 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -56,7 +56,8 @@ typedef enum EStreamType { STREAM_CLEAR, STREAM_INVALID, STREAM_GET_ALL, - STREAM_DELETE, + STREAM_DELETE_RESULT, + STREAM_DELETE_DATA, STREAM_RETRIEVE, STREAM_PULL_DATA, STREAM_PULL_OVER, diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 454b940862..00b220da57 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -708,6 +708,7 @@ typedef struct { int32_t buffer; // MB int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -736,6 +737,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -1023,8 +1025,10 @@ typedef struct { int64_t clusterId; int64_t rebootTime; int64_t updateTime; - int32_t numOfCores; + float numOfCores; int32_t numOfSupportVnodes; + int64_t memTotal; + int64_t memAvail; char dnodeEp[TSDB_EP_LEN]; SMnodeLoad mload; SQnodeLoad qload; @@ -1079,6 +1083,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -1131,6 +1136,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 2153f59bf8..a8637228e4 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -22,8 +22,8 @@ extern "C" { #include "tdef.h" -#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type) -#define setNodeType(nodeptr, type) (((SNode*)(nodeptr))->type = (type)) +#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type) +#define setNodeType(nodeptr, nodetype) (((SNode*)(nodeptr))->type = (nodetype)) #define LIST_LENGTH(l) (NULL != (l) ? 
(l)->length : 0) @@ -118,6 +118,7 @@ typedef enum ENodeType { QUERY_NODE_DROP_TABLE_STMT, QUERY_NODE_DROP_SUPER_TABLE_STMT, QUERY_NODE_ALTER_TABLE_STMT, + QUERY_NODE_ALTER_SUPER_TABLE_STMT, QUERY_NODE_CREATE_USER_STMT, QUERY_NODE_ALTER_USER_STMT, QUERY_NODE_DROP_USER_STMT, diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 3558e04bbf..debfce5f2d 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -82,6 +82,7 @@ typedef struct SScanLogicNode { typedef struct SJoinLogicNode { SLogicNode node; EJoinType joinType; + SNode* pMergeCondition; SNode* pOnConditions; bool isSingleTableJoin; } SJoinLogicNode; @@ -320,6 +321,7 @@ typedef struct SInterpFuncPhysiNode { SNodeList* pFuncs; STimeWindow timeRange; int64_t interval; + int8_t intervalUnit; EFillMode fillMode; SNode* pFillValues; // SNodeListNode SNode* pTimeSeries; // SColumnNode @@ -328,6 +330,7 @@ typedef struct SInterpFuncPhysiNode { typedef struct SJoinPhysiNode { SPhysiNode node; EJoinType joinType; + SNode* pMergeCondition; SNode* pOnConditions; SNodeList* pTargets; } SJoinPhysiNode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 3478926fef..dd337bcee0 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -376,6 +376,7 @@ void nodesRewriteSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeRewrit typedef enum ECollectColType { COLLECT_COL_TYPE_COL = 1, COLLECT_COL_TYPE_TAG, COLLECT_COL_TYPE_ALL } ECollectColType; int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type, SNodeList** pCols); +int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols); typedef bool (*FFuncClassifier)(int32_t funcId); int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs); diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 670e21fc4a..617b50aacc 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -62,7 +62,7 @@ typedef struct STableComInfo { typedef struct SIndexMeta { -#ifdef WINDOWS +#if defined(WINDOWS) || defined(_TD_DARWIN_64) size_t avoidCompilationErrors; #endif diff --git a/include/libs/scalar/scalar.h b/include/libs/scalar/scalar.h index 555274599a..aaebffa118 100644 --- a/include/libs/scalar/scalar.h +++ b/include/libs/scalar/scalar.h @@ -25,6 +25,8 @@ extern "C" { typedef struct SFilterInfo SFilterInfo; +int32_t scalarGetOperatorResultType(SDataType left, SDataType right, EOperatorType op, SDataType* pRes); + /* pNode will be freed in API; *pRes need to freed in caller diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index d07cf0adf1..a93b359ef3 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -31,6 +31,12 @@ extern bool gRaftDetailLog; #define SYNC_INDEX_INVALID -1 #define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF +typedef enum { + SYNC_STRATEGY_NO_SNAPSHOT = 0, + SYNC_STRATEGY_STANDARD_SNAPSHOT = 1, + SYNC_STRATEGY_WAL_FIRST = 2, +} ESyncStrategy; + typedef uint64_t SyncNodeId; typedef int32_t SyncGroupId; typedef int64_t SyncIndex; @@ -48,11 +54,6 @@ typedef enum { TAOS_SYNC_STATE_ERROR = 103, } ESyncState; -typedef enum { - TAOS_SYNC_FSM_CB_SUCCESS = 0, - TAOS_SYNC_FSM_CB_OTHER_ERROR = 1, -} ESyncFsmCbCode; - typedef struct SNodeInfo { uint16_t nodePort; char nodeFqdn[TSDB_FQDN_LEN]; @@ -96,6 +97,11 @@ typedef struct SReConfigCbMeta { } SReConfigCbMeta; 
+typedef struct SSnapshotParam { + SyncIndex start; + SyncIndex end; +} SSnapshotParam; + typedef struct SSnapshot { void* data; SyncIndex lastApplyIndex; @@ -125,7 +131,7 @@ typedef struct SSyncFSM { int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader); int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len); - int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter); + int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void* pWriterParam, void** ppWriter); int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply); int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len); @@ -178,15 +184,15 @@ typedef struct SSyncLogStore { } SSyncLogStore; typedef struct SSyncInfo { - bool isStandBy; - bool snapshotEnable; - SyncGroupId vgId; - int32_t batchSize; - SSyncCfg syncCfg; - char path[TSDB_FILENAME_LEN]; - SWal* pWal; - SSyncFSM* pFsm; - SMsgCb* msgcb; + bool isStandBy; + ESyncStrategy snapshotStrategy; + SyncGroupId vgId; + int32_t batchSize; + SSyncCfg syncCfg; + char path[TSDB_FILENAME_LEN]; + SWal* pWal; + SSyncFSM* pFsm; + SMsgCb* msgcb; int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg); int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg); } SSyncInfo; @@ -205,7 +211,7 @@ SyncGroupId syncGetVgId(int64_t rid); void syncGetEpSet(int64_t rid, SEpSet* pEpSet); void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet); int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak); -// int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize); +int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize); bool syncEnvIsStart(); const char* syncStr(ESyncState state); bool syncIsRestoreFinish(int64_t rid); diff --git a/include/libs/sync/syncTools.h b/include/libs/sync/syncTools.h index 745c63d5b3..7e95623740 100644 --- a/include/libs/sync/syncTools.h +++ b/include/libs/sync/syncTools.h @@ -191,12 +191,12 @@ void syncTimeoutLog2(char* s, const SyncTimeout* pMsg); typedef struct SyncClientRequest { uint32_t bytes; int32_t vgId; - uint32_t msgType; // SyncClientRequest msgType - uint32_t originalRpcType; // user RpcMsg msgType + uint32_t msgType; // TDMT_SYNC_CLIENT_REQUEST + uint32_t originalRpcType; // origin RpcMsg msgType uint64_t seqNum; bool isWeak; - uint32_t dataLen; // user RpcMsg.contLen - char data[]; // user RpcMsg.pCont + uint32_t dataLen; // origin RpcMsg.contLen + char data[]; // origin RpcMsg.pCont } SyncClientRequest; SyncClientRequest* syncClientRequestBuild(uint32_t dataLen); @@ -220,11 +220,6 @@ void syncClientRequestLog(const SyncClientRequest* pMsg); void syncClientRequestLog2(char* s, const SyncClientRequest* pMsg); // --------------------------------------------- -typedef struct SOffsetAndContLen { - int32_t offset; - int32_t contLen; -} SOffsetAndContLen; - typedef struct SRaftMeta { uint64_t seqNum; bool isWeak; @@ -232,20 +227,33 @@ typedef struct SRaftMeta { // block1: // block2: SRaftMeta array -// block3: rpc msg array (with pCont) +// block3: rpc msg array (with pCont pointer) typedef struct SyncClientRequestBatch { uint32_t bytes; int32_t vgId; - uint32_t msgType; // SyncClientRequestBatch msgType + uint32_t msgType; // TDMT_SYNC_CLIENT_REQUEST_BATCH uint32_t dataCount; - uint32_t dataLen; // user RpcMsg.contLen - char data[]; // user RpcMsg.pCont + uint32_t dataLen; + char data[]; // block2, block3 } SyncClientRequestBatch; SyncClientRequestBatch* 
syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize, int32_t vgId); void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg); +void syncClientRequestBatchDestroy(SyncClientRequestBatch* pMsg); +void syncClientRequestBatchDestroyDeep(SyncClientRequestBatch* pMsg); +SRaftMeta* syncClientRequestBatchMetaArr(const SyncClientRequestBatch* pSyncMsg); +SRpcMsg* syncClientRequestBatchRpcMsgArr(const SyncClientRequestBatch* pSyncMsg); +SyncClientRequestBatch* syncClientRequestBatchFromRpcMsg(const SRpcMsg* pRpcMsg); +cJSON* syncClientRequestBatch2Json(const SyncClientRequestBatch* pMsg); +char* syncClientRequestBatch2Str(const SyncClientRequestBatch* pMsg); + +// for debug ---------------------- +void syncClientRequestBatchPrint(const SyncClientRequestBatch* pMsg); +void syncClientRequestBatchPrint2(char* s, const SyncClientRequestBatch* pMsg); +void syncClientRequestBatchLog(const SyncClientRequestBatch* pMsg); +void syncClientRequestBatchLog2(char* s, const SyncClientRequestBatch* pMsg); // --------------------------------------------- typedef struct SyncClientRequestReply { @@ -318,12 +326,15 @@ void syncRequestVoteReplyLog(const SyncRequestVoteReply* pMsg); void syncRequestVoteReplyLog2(char* s, const SyncRequestVoteReply* pMsg); // --------------------------------------------- +// data: entry + typedef struct SyncAppendEntries { uint32_t bytes; int32_t vgId; uint32_t msgType; SRaftId srcId; SRaftId destId; + // private data SyncTerm term; SyncIndex prevLogIndex; @@ -354,18 +365,14 @@ void syncAppendEntriesLog2(char* s, const SyncAppendEntries* pMsg); // --------------------------------------------- -// define ahead -/* typedef struct SOffsetAndContLen { int32_t offset; int32_t contLen; } SOffsetAndContLen; -*/ -// block1: SOffsetAndContLen -// block2: SOffsetAndContLen Array -// block3: SRpcMsg Array -// block4: SRpcMsg pCont Array +// data: +// block1: SOffsetAndContLen Array +// block2: entry Array typedef struct SyncAppendEntriesBatch { uint32_t bytes; @@ -382,10 +389,11 @@ typedef struct SyncAppendEntriesBatch { SyncTerm privateTerm; int32_t dataCount; uint32_t dataLen; - char data[]; + char data[]; // block1, block2 } SyncAppendEntriesBatch; -SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t arrSize, int32_t vgId); +SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SSyncRaftEntry** entryPArr, int32_t arrSize, int32_t vgId); +SOffsetAndContLen* syncAppendEntriesBatchMetaTableArray(SyncAppendEntriesBatch* pMsg); void syncAppendEntriesBatchDestroy(SyncAppendEntriesBatch* pMsg); void syncAppendEntriesBatchSerialize(const SyncAppendEntriesBatch* pMsg, char* buf, uint32_t bufLen); void syncAppendEntriesBatchDeserialize(const char* buf, uint32_t len, SyncAppendEntriesBatch* pMsg); @@ -396,8 +404,6 @@ void syncAppendEntriesBatchFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntriesBatch* syncAppendEntriesBatchFromRpcMsg2(const SRpcMsg* pRpcMsg); cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg); char* syncAppendEntriesBatch2Str(const SyncAppendEntriesBatch* pMsg); -void syncAppendEntriesBatch2RpcMsgArray(SyncAppendEntriesBatch* pSyncMsg, SRpcMsg* rpcMsgArr, int32_t maxArrSize, - int32_t* pRetArrSize); // for debug ---------------------- void syncAppendEntriesBatchPrint(const SyncAppendEntriesBatch* pMsg); @@ -477,9 +483,10 @@ typedef struct SyncSnapshotSend { SRaftId destId; SyncTerm term; - SyncIndex lastIndex; // lastIndex of snapshot - SyncTerm lastTerm; // lastTerm of 
snapshot - SyncIndex lastConfigIndex; + SyncIndex beginIndex; // snapshot.beginIndex + SyncIndex lastIndex; // snapshot.lastIndex + SyncTerm lastTerm; // snapshot.lastTerm + SyncIndex lastConfigIndex; // snapshot.lastConfigIndex SSyncCfg lastConfig; SyncTerm privateTerm; int32_t seq; @@ -617,6 +624,9 @@ int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteRepl int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMsg); int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); +int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatch* pMsg); +int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); + int32_t syncNodeOnSnapshotSendCb(SSyncNode* ths, SyncSnapshotSend* pMsg); int32_t syncNodeOnSnapshotRspCb(SSyncNode* ths, SyncSnapshotRsp* pMsg); @@ -633,7 +643,8 @@ typedef int32_t (*FpOnSnapshotSendCb)(SSyncNode* ths, SyncSnapshotSend* pMsg); typedef int32_t (*FpOnSnapshotRspCb)(SSyncNode* ths, SyncSnapshotRsp* pMsg); // option ---------------------------------- -bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode); // --------------------------------------------- diff --git a/include/os/os.h b/include/os/os.h index 3e72a618a0..b036002f8a 100644 --- a/include/os/os.h +++ b/include/os/os.h @@ -41,7 +41,6 @@ extern "C" { #include #include #include -#include #include #include diff --git a/include/os/osAtomic.h b/include/os/osAtomic.h index 8600992d68..9fd00cefb4 100644 --- a/include/os/osAtomic.h +++ b/include/os/osAtomic.h @@ -63,7 +63,7 @@ int8_t atomic_add_fetch_8(int8_t volatile *ptr, int8_t val); int16_t atomic_add_fetch_16(int16_t volatile *ptr, int16_t val); int32_t atomic_add_fetch_32(int32_t volatile *ptr, int32_t val); int64_t atomic_add_fetch_64(int64_t volatile *ptr, int64_t val); -void *atomic_add_fetch_ptr(void *ptr, void *val); +void *atomic_add_fetch_ptr(void *ptr, int64_t val); int8_t atomic_fetch_add_8(int8_t volatile *ptr, int8_t val); int16_t atomic_fetch_add_16(int16_t volatile *ptr, int16_t val); int32_t atomic_fetch_add_32(int32_t volatile *ptr, int32_t val); @@ -73,7 +73,7 @@ int8_t atomic_sub_fetch_8(int8_t volatile *ptr, int8_t val); int16_t atomic_sub_fetch_16(int16_t volatile *ptr, int16_t val); int32_t atomic_sub_fetch_32(int32_t volatile *ptr, int32_t val); int64_t atomic_sub_fetch_64(int64_t volatile *ptr, int64_t val); -void *atomic_sub_fetch_ptr(void *ptr, void *val); +void *atomic_sub_fetch_ptr(void *ptr, int64_t val); int8_t atomic_fetch_sub_8(int8_t volatile *ptr, int8_t val); int16_t atomic_fetch_sub_16(int16_t volatile *ptr, int16_t val); int32_t atomic_fetch_sub_32(int32_t volatile *ptr, int32_t val); diff --git a/include/os/osMath.h b/include/os/osMath.h index b0c75f4dd7..74973d9bb2 100644 --- a/include/os/osMath.h +++ b/include/os/osMath.h @@ -20,6 +20,12 @@ extern "C" { #endif +// If the error is in a third-party library, place this header file under the third-party library header file. +// When you want to use this feature, you should find or add the same function in the following section +#ifndef ALLOW_FORBID_FUNC +#define qsort QSORT_FUNC_TAOS_FORBID +#endif + #define TPOW2(x) ((x) * (x)) #define TABS(x) ((x) > 0 ?
(x) : -(x)) diff --git a/include/os/osSemaphore.h b/include/os/osSemaphore.h index 21c88c9976..7fca20d75e 100644 --- a/include/os/osSemaphore.h +++ b/include/os/osSemaphore.h @@ -24,7 +24,9 @@ extern "C" { #if defined(_TD_DARWIN_64) -typedef struct tsem_s *tsem_t; +// typedef struct tsem_s *tsem_t; +typedef struct bosal_sem_t *tsem_t; + int tsem_init(tsem_t *sem, int pshared, unsigned int value); int tsem_wait(tsem_t *sem); @@ -51,11 +53,11 @@ int tsem_timewait(tsem_t *sim, int64_t nanosecs); // #define taosThreadRwlockRdlock(lock) taosThreadMutexLock(lock) // #define taosThreadRwlockUnlock(lock) taosThreadMutexUnlock(lock) -#define TdThreadSpinlock TdThreadMutex -#define taosThreadSpinInit(lock, NULL) taosThreadMutexInit(lock, NULL) -#define taosThreadSpinDestroy(lock) taosThreadMutexDestroy(lock) -#define taosThreadSpinLock(lock) taosThreadMutexLock(lock) -#define taosThreadSpinUnlock(lock) taosThreadMutexUnlock(lock) +// #define TdThreadSpinlock TdThreadMutex +// #define taosThreadSpinInit(lock, NULL) taosThreadMutexInit(lock, NULL) +// #define taosThreadSpinDestroy(lock) taosThreadMutexDestroy(lock) +// #define taosThreadSpinLock(lock) taosThreadMutexLock(lock) +// #define taosThreadSpinUnlock(lock) taosThreadMutexUnlock(lock) #endif bool taosCheckPthreadValid(TdThread thread); diff --git a/include/os/osSocket.h b/include/os/osSocket.h index 9dd5b972fa..4bad51e263 100644 --- a/include/os/osSocket.h +++ b/include/os/osSocket.h @@ -64,7 +64,6 @@ #include #else #include -#include #endif #endif @@ -77,15 +76,12 @@ typedef int socklen_t; #define TAOS_EPOLL_WAIT_TIME 100 typedef SOCKET eventfd_t; #define eventfd(a, b) -1 -#define EpollClose(pollFd) epoll_close(pollFd) #ifndef EPOLLWAKEUP #define EPOLLWAKEUP (1u << 29) #endif #elif defined(_TD_DARWIN_64) #define TAOS_EPOLL_WAIT_TIME 500 typedef int32_t SOCKET; -typedef SOCKET EpollFd; -#define EpollClose(pollFd) epoll_close(pollFd) #else #define TAOS_EPOLL_WAIT_TIME 500 typedef int32_t SOCKET; @@ -122,14 +118,6 @@ typedef SOCKET EpollFd; typedef int32_t SocketFd; typedef SocketFd EpollFd; -typedef struct TdSocket { -#if SOCKET_WITH_LOCK - TdThreadRwlock rwlock; -#endif - int refId; - SocketFd fd; -} * TdSocketPtr, TdSocket; - typedef struct TdSocketServer *TdSocketServerPtr; typedef struct TdSocket * TdSocketPtr; typedef struct TdEpoll * TdEpollPtr; @@ -181,11 +169,6 @@ void taosSetMaskSIGPIPE(); uint32_t taosInetAddr(const char *ipAddr); const char *taosInetNtoa(struct in_addr ipInt); -TdEpollPtr taosCreateEpoll(int32_t size); -int32_t taosCtlEpoll(TdEpollPtr pEpoll, int32_t epollOperate, TdSocketPtr pSocket, struct epoll_event *event); -int32_t taosWaitEpoll(TdEpollPtr pEpoll, struct epoll_event *event, int32_t maxEvents, int32_t timeout); -int32_t taosCloseEpoll(TdEpollPtr *ppEpoll); - #ifdef __cplusplus } #endif diff --git a/include/os/osString.h b/include/os/osString.h index 1b518f9b81..3a4ff18694 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -67,7 +67,7 @@ bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes); TdUcs4* tasoUcs4Copy(TdUcs4 *target_ucs4, TdUcs4 *source_ucs4, int32_t len_ucs4); bool taosValidateEncodec(const char *encodec); -int32_t taosHexEncode(const char *src, char *dst, int32_t len); +int32_t taosHexEncode(const unsigned char *src, char *dst, int32_t len); int32_t taosHexDecode(const char *src, char *dst, int32_t len); int32_t taosWcharWidth(TdWchar wchar); diff --git a/include/os/osThread.h 
b/include/os/osThread.h index 6252a0cb60..b1ea8277f0 100644 --- a/include/os/osThread.h +++ b/include/os/osThread.h @@ -188,27 +188,27 @@ int32_t taosThreadJoin(TdThread thread, void **valuePtr); int32_t taosThreadKeyCreate(TdThreadKey * key, void(*destructor)(void *)); int32_t taosThreadKeyDelete(TdThreadKey key); int32_t taosThreadKill(TdThread thread, int32_t sig); -int32_t taosThreadMutexConsistent(TdThreadMutex* mutex); +// int32_t taosThreadMutexConsistent(TdThreadMutex* mutex); int32_t taosThreadMutexDestroy(TdThreadMutex * mutex); int32_t taosThreadMutexInit(TdThreadMutex * mutex, const TdThreadMutexAttr * attr); int32_t taosThreadMutexLock(TdThreadMutex * mutex); -int32_t taosThreadMutexTimedLock(TdThreadMutex * mutex, const struct timespec *abstime); +// int32_t taosThreadMutexTimedLock(TdThreadMutex * mutex, const struct timespec *abstime); int32_t taosThreadMutexTryLock(TdThreadMutex * mutex); int32_t taosThreadMutexUnlock(TdThreadMutex * mutex); int32_t taosThreadMutexAttrDestroy(TdThreadMutexAttr * attr); int32_t taosThreadMutexAttrGetPshared(const TdThreadMutexAttr * attr, int32_t *pshared); -int32_t taosThreadMutexAttrGetRobust(const TdThreadMutexAttr * attr, int32_t * robust); +// int32_t taosThreadMutexAttrGetRobust(const TdThreadMutexAttr * attr, int32_t * robust); int32_t taosThreadMutexAttrGetType(const TdThreadMutexAttr * attr, int32_t *kind); int32_t taosThreadMutexAttrInit(TdThreadMutexAttr * attr); int32_t taosThreadMutexAttrSetPshared(TdThreadMutexAttr * attr, int32_t pshared); -int32_t taosThreadMutexAttrSetRobust(TdThreadMutexAttr * attr, int32_t robust); +// int32_t taosThreadMutexAttrSetRobust(TdThreadMutexAttr * attr, int32_t robust); int32_t taosThreadMutexAttrSetType(TdThreadMutexAttr * attr, int32_t kind); int32_t taosThreadOnce(TdThreadOnce * onceControl, void(*initRoutine)(void)); int32_t taosThreadRwlockDestroy(TdThreadRwlock * rwlock); int32_t taosThreadRwlockInit(TdThreadRwlock * rwlock, const TdThreadRwlockAttr * attr); int32_t taosThreadRwlockRdlock(TdThreadRwlock * rwlock); -int32_t taosThreadRwlockTimedRdlock(TdThreadRwlock * rwlock, const struct timespec *abstime); -int32_t taosThreadRwlockTimedWrlock(TdThreadRwlock * rwlock, const struct timespec *abstime); +// int32_t taosThreadRwlockTimedRdlock(TdThreadRwlock * rwlock, const struct timespec *abstime); +// int32_t taosThreadRwlockTimedWrlock(TdThreadRwlock * rwlock, const struct timespec *abstime); int32_t taosThreadRwlockTryRdlock(TdThreadRwlock * rwlock); int32_t taosThreadRwlockTryWrlock(TdThreadRwlock * rwlock); int32_t taosThreadRwlockUnlock(TdThreadRwlock * rwlock); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index d93fb92ee5..7a4bcd85b8 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -173,11 +173,12 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_DNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0341) #define TSDB_CODE_MND_TOO_MANY_DNODES TAOS_DEF_ERROR_CODE(0, 0x0342) #define TSDB_CODE_MND_NO_ENOUGH_DNODES TAOS_DEF_ERROR_CODE(0, 0x0343) -#define TSDB_CODE_MND_INVALID_CLUSTER_CFG TAOS_DEF_ERROR_CODE(0, 0x0344) -#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x0345) -#define TSDB_CODE_MND_INVALID_DNODE_CFG TAOS_DEF_ERROR_CODE(0, 0x0346) -#define TSDB_CODE_MND_INVALID_DNODE_EP TAOS_DEF_ERROR_CODE(0, 0x0347) -#define TSDB_CODE_MND_INVALID_DNODE_ID TAOS_DEF_ERROR_CODE(0, 0x0348) +#define TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0344) +#define TSDB_CODE_MND_INVALID_CLUSTER_CFG TAOS_DEF_ERROR_CODE(0, 0x0345) +#define 
TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x0346) +#define TSDB_CODE_MND_INVALID_DNODE_CFG TAOS_DEF_ERROR_CODE(0, 0x0347) +#define TSDB_CODE_MND_INVALID_DNODE_EP TAOS_DEF_ERROR_CODE(0, 0x0348) +#define TSDB_CODE_MND_INVALID_DNODE_ID TAOS_DEF_ERROR_CODE(0, 0x0349) // mnode-node #define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0350) @@ -428,6 +429,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SYN_PROPOSE_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0911) #define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912) #define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913) +#define TSDB_CODE_SYN_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0914) #define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF) // tq @@ -578,6 +580,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_TABLE_OPTION TAOS_DEF_ERROR_CODE(0, 0x265C) #define TSDB_CODE_PAR_INVALID_INTERP_CLAUSE TAOS_DEF_ERROR_CODE(0, 0x265D) #define TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN TAOS_DEF_ERROR_CODE(0, 0x265E) +#define TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE TAOS_DEF_ERROR_CODE(0, 0x265F) //planner #define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700) @@ -623,9 +626,12 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3150) #define TSDB_CODE_RSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3151) #define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152) +#define TSDB_CODE_RSMA_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x3153) //index #define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200) +#define TSDB_CODE_INDEX_INVALID_FILE TAOS_DEF_ERROR_CODE(0, 0x3201) + //tmq #define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000) diff --git a/include/util/tdef.h b/include/util/tdef.h index 5befa6a67f..84bc30b9e7 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -287,7 +287,7 @@ typedef enum ELogicConditionType { #define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta #define TSDB_MIN_VNODES_PER_DB 1 -#define TSDB_MAX_VNODES_PER_DB 4096 +#define TSDB_MAX_VNODES_PER_DB 1024 #define TSDB_DEFAULT_VN_PER_DB 2 #define TSDB_MIN_BUFFER_PER_VNODE 3 // unit MB #define TSDB_MAX_BUFFER_PER_VNODE 16384 // unit MB @@ -334,6 +334,9 @@ typedef enum ELogicConditionType { #define TSDB_MIN_DB_CACHE_LAST_ROW 0 #define TSDB_MAX_DB_CACHE_LAST_ROW 3 #define TSDB_DEFAULT_CACHE_LAST_ROW 0 +#define TSDB_MIN_DB_LAST_ROW_MEM 1 // MB +#define TSDB_MAX_DB_LAST_ROW_MEM 65536 +#define TSDB_DEFAULT_LAST_ROW_MEM 1 #define TSDB_DB_STREAM_MODE_OFF 0 #define TSDB_DB_STREAM_MODE_ON 1 #define TSDB_DEFAULT_DB_STREAM_MODE 0 diff --git a/include/util/types.h b/include/util/types.h index ded9dc37d7..1360307156 100644 --- a/include/util/types.h +++ b/include/util/types.h @@ -83,6 +83,7 @@ typedef uint16_t VarDataLenT; // maxVarDataLen: 32767 #define varDataLen(v) ((VarDataLenT *)(v))[0] #define varDataVal(v) ((char *)(v) + VARSTR_HEADER_SIZE) +#define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v)) #define NCHAR_WIDTH_TO_BYTES(n) ((n) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 9d2886d242..9380b73d2d 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -65,7 +65,7 @@ enum { typedef struct SAppInstInfo SAppInstInfo; typedef struct { - char* key; + char* key; // statistics int32_t reportCnt; int32_t connKeyCnt; @@ -177,14 +177,14 @@ typedef struct SReqResultInfo { } SReqResultInfo; typedef struct SRequestSendRecvBody { - 
tsem_t rspSem; // not used now - __taos_async_fn_t queryFp; - __taos_async_fn_t fetchFp; - void* param; - SDataBuf requestMsg; - int64_t queryJob; // query job, created according to sql query DAG. - int32_t subplanNum; - SReqResultInfo resInfo; + tsem_t rspSem; // not used now + __taos_async_fn_t queryFp; + __taos_async_fn_t fetchFp; + void* param; + SDataBuf requestMsg; + int64_t queryJob; // query job, created according to sql query DAG. + int32_t subplanNum; + SReqResultInfo resInfo; } SRequestSendRecvBody; typedef struct { @@ -284,6 +284,7 @@ static FORCE_INLINE SReqResultInfo* tscGetCurResInfo(TAOS_RES* res) { extern SAppInfo appInfo; extern int32_t clientReqRefPool; extern int32_t clientConnRefPool; +extern void* tscQhandle; __async_send_cb_fn_t getMsgRspHandle(int32_t msgType); @@ -301,7 +302,7 @@ void destroyRequest(SRequestObj* pRequest); SRequestObj* acquireRequest(int64_t rid); int32_t releaseRequest(int64_t rid); int32_t removeRequest(int64_t rid); -void doDestroyRequest(void *p); +void doDestroyRequest(void* p); char* getDbOfConnection(STscObj* pObj); void setConnectionDB(STscObj* pTscObj, const char* db); @@ -336,9 +337,9 @@ int hbHandleRsp(SClientHbBatchRsp* hbRsp); // cluster level SAppHbMgr* appHbMgrInit(SAppInstInfo* pAppInstInfo, char* key); void appHbMgrCleanup(void); -void hbRemoveAppHbMrg(SAppHbMgr **pAppHbMgr); -void destroyAllRequests(SHashObj *pRequests); -void stopAllRequests(SHashObj *pRequests); +void hbRemoveAppHbMrg(SAppHbMgr** pAppHbMgr); +void destroyAllRequests(SHashObj* pRequests); +void stopAllRequests(SHashObj* pRequests); // conn level int hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType); @@ -357,6 +358,9 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clie int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx bool qnodeRequired(SRequestObj* pRequest); +void initTscQhandle(); +void cleanupTscQhandle(); + #ifdef __cplusplus } #endif diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 9e72e5fe35..797d58e6ef 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -25,6 +25,7 @@ #include "tmsg.h" #include "tref.h" #include "trpc.h" +#include "tsched.h" #include "ttime.h" #define TSC_VAR_NOT_RELEASE 1 @@ -34,9 +35,20 @@ SAppInfo appInfo; int32_t clientReqRefPool = -1; int32_t clientConnRefPool = -1; +void *tscQhandle = NULL; + static TdThreadOnce tscinit = PTHREAD_ONCE_INIT; volatile int32_t tscInitRes = 0; +void initTscQhandle() { + // init handle + tscQhandle = taosInitScheduler(4096, 5, "tsc"); +} + +void cleanupTscQhandle() { + // destroy handle + taosCleanUpScheduler(tscQhandle); +} static int32_t registerRequest(SRequestObj *pRequest) { STscObj *pTscObj = acquireTscObj(pRequest->pTscObj->id); if (NULL == pTscObj) { @@ -151,7 +163,6 @@ void stopAllRequests(SHashObj *pRequests) { } } - void destroyAppInst(SAppInstInfo *pAppInfo) { tscDebug("destroy app inst mgr %p", pAppInfo); @@ -176,9 +187,9 @@ void destroyTscObj(void *pObj) { if (NULL == pObj) { return; } - + STscObj *pTscObj = pObj; - int64_t tscId = pTscObj->id; + int64_t tscId = pTscObj->id; tscTrace("begin to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj); SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType}; @@ -292,11 +303,12 @@ void doDestroyRequest(void *p) { if (NULL == p) { return; } - + SRequestObj *pRequest = (SRequestObj *)p; + int64_t reqId = pRequest->self; tscTrace("begin to destroy request %" PRIx64 " 
p:%p", reqId, pRequest); - + taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self)); schedulerFreeJob(&pRequest->body.queryJob, 0); @@ -334,7 +346,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. atexit(taos_cleanup); - + initTscQhandle(); errno = TSDB_CODE_SUCCESS; taosSeedRand(taosGetTimestampSec()); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 423e7982ab..3d43b3a9a1 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -25,6 +25,7 @@ #include "tmsgtype.h" #include "tpagedbuf.h" #include "tref.h" +#include "tsched.h" static int32_t initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet); static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest); @@ -56,14 +57,14 @@ static char* getClusterKey(const char* user, const char* auth, const char* ip, i } bool chkRequestKilled(void* param) { - bool killed = false; + bool killed = false; SRequestObj* pRequest = acquireRequest((int64_t)param); if (NULL == pRequest || pRequest->killed) { killed = true; } releaseRequest((int64_t)param); - + return killed; } @@ -769,7 +770,7 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) { code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset); break; } - case TDMT_SCH_QUERY: + case TDMT_SCH_QUERY: case TDMT_SCH_MERGE_QUERY: { code = handleQueryExecRes(pRequest, pRes->res, pCatalog, &epset); break; @@ -1240,7 +1241,16 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, } } -void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { +typedef struct SchedArg { + SRpcMsg msg; + SEpSet* pEpset; +} SchedArg; + +void doProcessMsgFromServer(SSchedMsg* schedMsg) { + SchedArg* arg = (SchedArg*)schedMsg->ahandle; + SRpcMsg* pMsg = &arg->msg; + SEpSet* pEpSet = arg->pEpset; + SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; assert(pMsg->info.ahandle != NULL); STscObj* pTscObj = NULL; @@ -1273,7 +1283,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); - SDataBuf buf = {.msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = pEpSet}; + SDataBuf buf = { + .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = pEpSet}; if (pMsg->contLen > 0) { buf.pData = taosMemoryCalloc(1, pMsg->contLen); @@ -1288,6 +1299,25 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { pSendInfo->fp(pSendInfo->param, &buf, pMsg->code); rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); + + taosMemoryFree(arg); +} + +void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { + SSchedMsg schedMsg = {0}; + + SEpSet* tEpSet = pEpSet != NULL ? 
taosMemoryCalloc(1, sizeof(SEpSet)) : NULL; + if (tEpSet != NULL) { + *tEpSet = *pEpSet; + } + + SchedArg* arg = taosMemoryCalloc(1, sizeof(SchedArg)); + arg->msg = *pMsg; + arg->pEpset = tEpSet; + + schedMsg.fp = doProcessMsgFromServer; + schedMsg.ahandle = arg; + taosScheduleTask(tscQhandle, &schedMsg); } TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) { @@ -1420,7 +1450,7 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU pParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); tsem_init(&pParam->sem, 0, 0); } - + // convert ucs4 to native multi-bytes string pResultInfo->convertUcs4 = convertUcs4; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 4e24fb4f48..2550a7a47b 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -47,11 +47,9 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) { atomic_store_32(&lock, 0); return ret; } - // this function may be called by user or system, or by both simultaneously. void taos_cleanup(void) { - tscInfo("start to cleanup client environment"); - + tscInfo("start to cleanup client environment"); if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) { return; } @@ -74,8 +72,8 @@ void taos_cleanup(void) { catalogDestroy(); schedulerDestroy(); + cleanupTscQhandle(); rpcCleanup(); - tscInfo("all local resources released"); taosCleanupCfg(); taosCloseLog(); @@ -108,7 +106,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha if (pObj) { int64_t *rid = taosMemoryCalloc(1, sizeof(int64_t)); *rid = pObj->id; - return (TAOS*)rid; + return (TAOS *)rid; } return NULL; @@ -196,9 +194,9 @@ void taos_kill_query(TAOS *taos) { if (NULL == taos) { return; } - - int64_t rid = *(int64_t*)taos; - STscObj* pTscObj = acquireTscObj(rid); + + int64_t rid = *(int64_t *)taos; + STscObj *pTscObj = acquireTscObj(rid); stopAllRequests(pTscObj->pRequests); releaseTscObj(rid); } @@ -244,7 +242,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { #endif } else if (TD_RES_TMQ(res)) { - SMqRspObj *msg = ((SMqRspObj *)res); + SMqRspObj * msg = ((SMqRspObj *)res); SReqResultInfo *pResultInfo; if (msg->resIter == -1) { pResultInfo = tmqGetNextResInfo(res, true); @@ -420,7 +418,7 @@ int taos_affected_rows(TAOS_RES *res) { return 0; } - SRequestObj *pRequest = (SRequestObj *)res; + SRequestObj * pRequest = (SRequestObj *)res; SReqResultInfo *pResInfo = &pRequest->body.resInfo; return pResInfo->numOfRows; } @@ -604,7 +602,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) { } SReqResultInfo *pResInfo = tscGetCurResInfo(res); - TAOS_FIELD *pField = &pResInfo->userFields[columnIndex]; + TAOS_FIELD * pField = &pResInfo->userFields[columnIndex]; if (!IS_VAR_DATA_TYPE(pField->type)) { return 0; } @@ -648,8 +646,8 @@ const char *taos_get_server_info(TAOS *taos) { typedef struct SqlParseWrapper { SParseContext *pCtx; SCatalogReq catalogReq; - SRequestObj *pRequest; - SQuery *pQuery; + SRequestObj * pRequest; + SQuery * pQuery; } SqlParseWrapper; static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { @@ -670,8 +668,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { tscDebug("enter meta callback, code %s", tstrerror(code)); SqlParseWrapper *pWrapper = (SqlParseWrapper *)param; - SQuery *pQuery = pWrapper->pQuery; - SRequestObj *pRequest = pWrapper->pRequest; + SQuery * pQuery = pWrapper->pQuery; + 
SRequestObj * pRequest = pWrapper->pRequest; if (code == TSDB_CODE_SUCCESS) { code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery); @@ -720,31 +718,29 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) { return TSDB_CODE_OUT_OF_MEMORY; } - **pCxt = (SParseContext){ - .requestId = pRequest->requestId, - .requestRid = pRequest->self, - .acctId = pTscObj->acctId, - .db = pRequest->pDb, - .topicQuery = false, - .pSql = pRequest->sqlstr, - .sqlLen = pRequest->sqlLen, - .pMsg = pRequest->msgBuf, - .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, - .pTransporter = pTscObj->pAppInfo->pTransporter, - .pStmtCb = NULL, - .pUser = pTscObj->user, - .schemalessType = pTscObj->schemalessType, - .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), - .async = true, - .svrVer = pTscObj->sVer, - .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes) - }; + **pCxt = (SParseContext){.requestId = pRequest->requestId, + .requestRid = pRequest->self, + .acctId = pTscObj->acctId, + .db = pRequest->pDb, + .topicQuery = false, + .pSql = pRequest->sqlstr, + .sqlLen = pRequest->sqlLen, + .pMsg = pRequest->msgBuf, + .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, + .pTransporter = pTscObj->pAppInfo->pTransporter, + .pStmtCb = NULL, + .pUser = pTscObj->user, + .schemalessType = pTscObj->schemalessType, + .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), + .async = true, + .svrVer = pTscObj->sVer, + .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)}; return TSDB_CODE_SUCCESS; } void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { SParseContext *pCxt = NULL; - STscObj *pTscObj = pRequest->pTscObj; + STscObj * pTscObj = pRequest->pTscObj; int32_t code = 0; if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) { @@ -914,10 +910,10 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { return terrno; } - int64_t rid = *(int64_t*)taos; + int64_t rid = *(int64_t *)taos; const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list int32_t code = 0; - SRequestObj *pRequest = NULL; + SRequestObj * pRequest = NULL; SCatalogReq catalogReq = {0}; if (NULL == tableNameList) { diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 455f204542..b8844390d2 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -108,13 +108,11 @@ static const SSysDbTableSchema userFuncSchema[] = { }; static const SSysDbTableSchema userIdxSchema[] = { + {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "index_database", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "column_name", .bytes = SYSTABLE_SCH_COL_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "index_type", .bytes = 10, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "index_extensions", .bytes = 256, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, }; static const SSysDbTableSchema userStbsSchema[] = { @@ -313,7 +311,7 @@ static const SSysDbTableSchema querySchema[] = { {.name = "query_id", .bytes = 
TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "req_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, {.name = "connId", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, - {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, @@ -329,17 +327,17 @@ static const SSysDbTableSchema appSchema[] = { {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "start_time", .bytes = 8 , .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "insert_req", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_row", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_time", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_bytes", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "fetch_bytes", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "query_time", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "show_query", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "total_req", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "current_req", .bytes = 8 , .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "show_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, + {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, }; @@ -353,8 +351,7 @@ static const SSysTableMeta perfsMeta[] = { {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)}, {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)}, {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)}, - {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)} -}; + {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}}; void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) { *pInfosTableMeta = infosMeta; diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 4881dd88aa..bca740e9ce 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -895,7 +895,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { SBlockOrderInfo* pOrder = taosArrayGet(pOrderInfo, 0); int64_t p0 = taosGetTimestampUs(); - + __compar_fn_t fn = getKeyComparFunc(pColInfoData->info.type, 
pOrder->order); taosSort(pColInfoData->pData, pDataBlock->info.rows, pColInfoData->info.bytes, fn); @@ -923,8 +923,9 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { pInfo->pColData = taosArrayGet(pDataBlock->pDataBlock, pInfo->slotId); } + terrno = 0; taosqsort(index, rows, sizeof(int32_t), &helper, dataBlockCompar); - if(terrno) return terrno; + if (terrno) return terrno; int64_t p1 = taosGetTimestampUs(); @@ -1259,6 +1260,7 @@ int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) { dst->info.rows = src->info.rows; dst->info.window = src->info.window; + dst->info.type = src->info.type; return TSDB_CODE_SUCCESS; } @@ -1437,21 +1439,21 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { } } -static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end){ +static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end) { int32_t dataOffset = -1; int32_t dataLen = 0; int32_t beigin = start; - while(beigin < end){ + while (beigin < end) { int32_t offset = pColInfoData->varmeta.offset[beigin]; - if(offset == -1) { + if (offset == -1) { beigin++; continue; } - if(start != 0) { + if (start != 0) { pColInfoData->varmeta.offset[beigin] = dataLen; } - char *data = pColInfoData->pData + offset; - if(dataOffset == -1) dataOffset = offset; // mark the begin of data + char* data = pColInfoData->pData + offset; + if (dataOffset == -1) dataOffset = offset; // mark the begin of data int32_t type = pColInfoData->info.type; if (type == TSDB_DATA_TYPE_JSON) { dataLen += getJsonValueLen(data); @@ -1460,7 +1462,7 @@ static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, s } beigin++; } - if(dataOffset > 0){ + if (dataOffset > 0) { memmove(pColInfoData->pData, pColInfoData->pData + dataOffset, dataLen); memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[start], (end - start) * sizeof(int32_t)); } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index aaf1e5414c..8157ba4d92 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -947,8 +947,10 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { if (tEncodeI64(&encoder, pReq->clusterId) < 0) return -1; if (tEncodeI64(&encoder, pReq->rebootTime) < 0) return -1; if (tEncodeI64(&encoder, pReq->updateTime) < 0) return -1; - if (tEncodeI32(&encoder, pReq->numOfCores) < 0) return -1; + if (tEncodeFloat(&encoder, pReq->numOfCores) < 0) return -1; if (tEncodeI32(&encoder, pReq->numOfSupportVnodes) < 0) return -1; + if (tEncodeI64(&encoder, pReq->memTotal) < 0) return -1; + if (tEncodeI64(&encoder, pReq->memAvail) < 0) return -1; if (tEncodeCStr(&encoder, pReq->dnodeEp) < 0) return -1; // cluster cfg @@ -1008,8 +1010,10 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { if (tDecodeI64(&decoder, &pReq->clusterId) < 0) return -1; if (tDecodeI64(&decoder, &pReq->rebootTime) < 0) return -1; if (tDecodeI64(&decoder, &pReq->updateTime) < 0) return -1; - if (tDecodeI32(&decoder, &pReq->numOfCores) < 0) return -1; + if (tDecodeFloat(&decoder, &pReq->numOfCores) < 0) return -1; if (tDecodeI32(&decoder, &pReq->numOfSupportVnodes) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->memTotal) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->memAvail) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->dnodeEp) < 0) return -1; // cluster cfg @@ -1974,6 +1978,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) { if 
(tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if (tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -2015,6 +2020,7 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; @@ -2069,6 +2075,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) { if (tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if (tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -2094,6 +2101,7 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) { if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; @@ -2679,10 +2687,12 @@ int32_t tDeserializeSDbCfgRsp(void *buf, int32_t bufLen, SDbCfgRsp *pRsp) { if (tDecodeI8(&decoder, &pRsp->strict) < 0) return -1; if (tDecodeI8(&decoder, &pRsp->cacheLastRow) < 0) return -1; if (tDecodeI32(&decoder, &pRsp->numOfRetensions) < 0) return -1; - pRsp->pRetensions = taosArrayInit(pRsp->numOfRetensions, sizeof(SRetention)); - if (pRsp->pRetensions == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + if (pRsp->numOfRetensions > 0) { + pRsp->pRetensions = taosArrayInit(pRsp->numOfRetensions, sizeof(SRetention)); + if (pRsp->pRetensions == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } } for (int32_t i = 0; i < pRsp->numOfRetensions; ++i) { @@ -3586,6 +3596,7 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR if (tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if (tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -3643,6 +3654,7 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if 
(tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; @@ -3665,7 +3677,6 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * SReplica *pReplica = &pReq->replicas[i]; if (tDecodeSReplica(&decoder, pReplica) < 0) return -1; } - if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1; pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention)); if (pReq->pRetensions == NULL) { @@ -3768,6 +3779,7 @@ int32_t tSerializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pReq if (tEncodeI32(&encoder, pReq->buffer) < 0) return -1; if (tEncodeI32(&encoder, pReq->pageSize) < 0) return -1; if (tEncodeI32(&encoder, pReq->pages) < 0) return -1; + if (tEncodeI32(&encoder, pReq->lastRowMem) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysPerFile) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep0) < 0) return -1; if (tEncodeI32(&encoder, pReq->daysToKeep1) < 0) return -1; @@ -3782,7 +3794,6 @@ int32_t tSerializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pReq SReplica *pReplica = &pReq->replicas[i]; if (tEncodeSReplica(&encoder, pReplica) < 0) return -1; } - tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -3799,6 +3810,7 @@ int32_t tDeserializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pR if (tDecodeI32(&decoder, &pReq->buffer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pageSize) < 0) return -1; if (tDecodeI32(&decoder, &pReq->pages) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->lastRowMem) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysPerFile) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep0) < 0) return -1; if (tDecodeI32(&decoder, &pReq->daysToKeep1) < 0) return -1; diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 8fbdeb0654..befb0abac8 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -589,7 +589,7 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec return TSDB_CODE_FAILED; } newColData[len] = 0; - int32_t ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, len, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); return ret; diff --git a/source/common/src/ttszip.c b/source/common/src/ttszip.c index 3160d64c12..03353b0de6 100644 --- a/source/common/src/ttszip.c +++ b/source/common/src/ttszip.c @@ -845,11 +845,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { int64_t offset = getDataStartOffset(); int32_t size = (int32_t)pSrcBuf->fileSize - (int32_t)offset; -#if defined(_TD_DARWIN_64) - int64_t written = taosFSendFile(pDestBuf->pFile->fp, pSrcBuf->pFile->fp, &offset, size); -#else int64_t written = taosFSendFile(pDestBuf->pFile, pSrcBuf->pFile, &offset, size); -#endif if (written == -1 || written != size) { return -1; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 234a133243..59b442881a 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -67,6 +67,8 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { req.updateTime = pMgmt->pData->updateTime; req.numOfCores = tsNumOfCores; req.numOfSupportVnodes = tsNumOfSupportVnodes; + req.memTotal = tsTotalMemoryKB * 1024; + req.memAvail = req.memTotal - tsRpcQueueMemoryAllowed - 16 * 1024 * 1024; tstrncpy(req.dnodeEp, tsLocalEp, TSDB_EP_LEN); 
req.clusterCfg.statusInterval = tsStatusInterval; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 3f053639aa..681440dec4 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -21,7 +21,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) { taosThreadRwlockRdlock(&pMgmt->lock); taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode); - if (pVnode == NULL) { + if (pVnode == NULL || pVnode->dropped) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; } else { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); @@ -81,16 +81,18 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { vmReleaseVnode(pMgmt, pVnode); while (pVnode->refCount > 0) taosMsleep(10); + dTrace("vgId:%d, wait for vnode queues to be empty", pVnode->vgId); + while (!taosQueueEmpty(pVnode->pWriteQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pSyncQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10); + dTrace("vgId:%d, vnode-fetch queue is empty", pVnode->vgId); vmFreeQueue(pMgmt, pVnode); vnodeClose(pVnode->pImpl); pVnode->pImpl = NULL; - dDebug("vgId:%d, vnode is closed", pVnode->vgId); if (pVnode->dropped) { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 91ef292360..ecd02ae8dc 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -107,7 +107,7 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf const STraceId *trace = &pMsg->info.traceId; dGTrace("vgId:%d, msg:%p get from vnode-sync queue", pVnode->vgId, pMsg); - int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL); // no response here + int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL); // no response here dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); @@ -146,8 +146,8 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId); if (pVnode == NULL) { - dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s", pHead->vgId, pMsg, terrstr(), - TMSG_INFO(pMsg->msgType)); + dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, msgtype:%s qtype:%d", pHead->vgId, pMsg, + terrstr(), TMSG_INFO(pMsg->msgType), qtype); return terrno != 0 ?
terrno : -1; } diff --git a/source/dnode/mgmt/node_mgmt/src/dmProc.c b/source/dnode/mgmt/node_mgmt/src/dmProc.c index 72878d0d85..cbf13924d7 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmProc.c +++ b/source/dnode/mgmt/node_mgmt/src/dmProc.c @@ -87,8 +87,8 @@ static SProcQueue *dmInitProcQueue(SProc *proc, char *ptr, int32_t size) { static void dmCleanupProcQueue(SProcQueue *queue) {} static inline int32_t dmPushToProcQueue(SProc *proc, SProcQueue *queue, SRpcMsg *pMsg, EProcFuncType ftype) { - const void *pHead = pMsg; - const void *pBody = pMsg->pCont; + const void * pHead = pMsg; + const void * pBody = pMsg->pCont; const int16_t rawHeadLen = sizeof(SRpcMsg); const int32_t rawBodyLen = pMsg->contLen; const int16_t headLen = CEIL8(rawHeadLen); @@ -257,7 +257,7 @@ int32_t dmInitProc(struct SMgmtWrapper *pWrapper) { proc->wrapper = pWrapper; proc->name = pWrapper->name; - SShm *shm = &proc->shm; + SShm * shm = &proc->shm; int32_t cstart = 0; int32_t csize = CEIL8(shm->size / 2); int32_t pstart = csize; @@ -281,13 +281,13 @@ int32_t dmInitProc(struct SMgmtWrapper *pWrapper) { } static void *dmConsumChildQueue(void *param) { - SProc *proc = param; + SProc * proc = param; SMgmtWrapper *pWrapper = proc->wrapper; - SProcQueue *queue = proc->cqueue; + SProcQueue * queue = proc->cqueue; int32_t numOfMsgs = 0; int32_t code = 0; EProcFuncType ftype = DND_FUNC_REQ; - SRpcMsg *pMsg = NULL; + SRpcMsg * pMsg = NULL; dDebug("node:%s, start to consume from cqueue", proc->name); do { @@ -324,13 +324,13 @@ static void *dmConsumChildQueue(void *param) { } static void *dmConsumParentQueue(void *param) { - SProc *proc = param; + SProc * proc = param; SMgmtWrapper *pWrapper = proc->wrapper; - SProcQueue *queue = proc->pqueue; + SProcQueue * queue = proc->pqueue; int32_t numOfMsgs = 0; int32_t code = 0; EProcFuncType ftype = DND_FUNC_REQ; - SRpcMsg *pMsg = NULL; + SRpcMsg * pMsg = NULL; dDebug("node:%s, start to consume from pqueue", proc->name); do { @@ -353,7 +353,7 @@ static void *dmConsumParentQueue(void *param) { rpcRegisterBrokenLinkArg(pMsg); } else if (ftype == DND_FUNC_RELEASE) { dmRemoveProcRpcHandle(proc, pMsg->info.handle); - rpcReleaseHandle(pMsg->info.handle, (int8_t)pMsg->code); + rpcReleaseHandle(&pMsg->info, TAOS_CONN_SERVER); } else { dError("node:%s, invalid ftype:%d from pqueue", proc->name, ftype); rpcFreeCont(pMsg->pCont); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 35d478177a..df3c9c4e88 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -245,7 +245,7 @@ static inline void dmReleaseHandle(SRpcHandleInfo *pHandle, int8_t type) { SRpcMsg msg = {.code = type, .info = *pHandle}; dmPutToProcPQueue(&pWrapper->proc, &msg, DND_FUNC_RELEASE); } else { - rpcReleaseHandle(pHandle->handle, type); + rpcReleaseHandle(pHandle, type); } } diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 096fc753b2..7fe7d44827 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -280,7 +280,7 @@ static void dmPrintEps(SDnodeData *pData) { dDebug("print dnode list, num:%d", numOfEps); for (int32_t i = 0; i < numOfEps; i++) { SDnodeEp *pEp = taosArrayGet(pData->dnodeEps, i); - dDebug("dnode:%d, fqdn:%s port:%u is_mnode:%d", pEp->id, pEp->ep.fqdn, pEp->ep.port, pEp->isMnode); + dDebug("dnode:%d, fqdn:%s port:%u isMnode:%d", pEp->id, pEp->ep.fqdn, pEp->ep.port, pEp->isMnode); } } diff --git 
a/source/dnode/mgmt/test/sut/src/sut.cpp b/source/dnode/mgmt/test/sut/src/sut.cpp index 21d9351ceb..a5e6128800 100644 --- a/source/dnode/mgmt/test/sut/src/sut.cpp +++ b/source/dnode/mgmt/test/sut/src/sut.cpp @@ -30,12 +30,14 @@ void Testbase::InitLog(const char* path) { tsdbDebugFlag = 0; tsLogEmbedded = 1; tsAsyncLog = 0; - tsRpcQueueMemoryAllowed = 1024 * 1024 * 64; - + taosRemoveDir(path); taosMkDir(path); tstrncpy(tsLogDir, path, PATH_MAX); - if (taosInitLog("taosdlog", 1) != 0) { + + taosGetSystemInfo(); + tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 0.1; +if (taosInitLog("taosdlog", 1) != 0) { printf("failed to init log file\n"); } } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 1fb7fa85b1..f39a848992 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -148,7 +148,10 @@ typedef struct { int32_t accessTimes; int32_t numOfVnodes; int32_t numOfSupportVnodes; - int32_t numOfCores; + float numOfCores; + int64_t memTotal; + int64_t memAvail; + int64_t memUsed; EDndReason offlineReason; uint16_t port; char fqdn[TSDB_FQDN_LEN]; @@ -243,6 +246,7 @@ typedef struct { int32_t buffer; int32_t pageSize; int32_t pages; + int32_t lastRowMem; int32_t daysPerFile; int32_t daysToKeep0; int32_t daysToKeep1; @@ -255,8 +259,8 @@ typedef struct { int8_t compression; int8_t replications; int8_t strict; - int8_t cacheLastRow; int8_t hashMethod; // default is 1 + int8_t cacheLastRow; int32_t numOfRetensions; SArray* pRetensions; int8_t schemaless; @@ -556,7 +560,7 @@ typedef struct { int64_t uid; int8_t status; // config - int8_t dropPolicy; + int8_t igExpired; int8_t trigger; int64_t triggerParam; int64_t watermark; diff --git a/source/dnode/mnode/impl/inc/mndInt.h b/source/dnode/mnode/impl/inc/mndInt.h index 58266724a5..14867ff693 100644 --- a/source/dnode/mnode/impl/inc/mndInt.h +++ b/source/dnode/mnode/impl/inc/mndInt.h @@ -87,7 +87,7 @@ typedef struct { } STelemMgmt; typedef struct { - sem_t syncSem; + tsem_t syncSem; int64_t sync; bool standby; SReplica replica; diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index 0901e77287..d873df621e 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -38,6 +38,8 @@ int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); +int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h index 6622b89b1d..c8237d17d8 100644 --- a/source/dnode/mnode/impl/inc/mndVgroup.h +++ b/source/dnode/mnode/impl/inc/mndVgroup.h @@ -30,6 +30,8 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup); SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup); int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId); void mndSortVnodeGid(SVgObj *pVgroup); +int64_t mndGetVnodesMemory(SMnode *pMnode, int32_t dnodeId); +int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup); SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId); int32_t mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup); diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index dabb938705..6770cd578a 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ 
b/source/dnode/mnode/impl/src/mndDb.c @@ -21,6 +21,7 @@ #include "mndShow.h" #include "mndSma.h" #include "mndStb.h" +#include "mndStream.h" #include "mndSubscribe.h" #include "mndTopic.h" #include "mndTrans.h" @@ -92,6 +93,7 @@ static SSdbRaw *mndDbActionEncode(SDbObj *pDb) { SDB_SET_INT32(pRaw, dataPos, pDb->cfg.buffer, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.pageSize, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.pages, _OVER) + SDB_SET_INT32(pRaw, dataPos, pDb->cfg.lastRowMem, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.daysPerFile, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.daysToKeep0, _OVER) SDB_SET_INT32(pRaw, dataPos, pDb->cfg.daysToKeep1, _OVER) @@ -164,6 +166,7 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw) { SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.buffer, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.pageSize, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.pages, _OVER) + SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.lastRowMem, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.daysPerFile, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.daysToKeep0, _OVER) SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.daysToKeep1, _OVER) @@ -229,8 +232,9 @@ static int32_t mndDbActionUpdate(SSdb *pSdb, SDbObj *pOld, SDbObj *pNew) { pOld->cfgVersion = pNew->cfgVersion; pOld->vgVersion = pNew->vgVersion; pOld->cfg.buffer = pNew->cfg.buffer; - pOld->cfg.pages = pNew->cfg.pages; pOld->cfg.pageSize = pNew->cfg.pageSize; + pOld->cfg.pages = pNew->cfg.pages; + pOld->cfg.lastRowMem = pNew->cfg.lastRowMem; pOld->cfg.daysPerFile = pNew->cfg.daysPerFile; pOld->cfg.daysToKeep0 = pNew->cfg.daysToKeep0; pOld->cfg.daysToKeep1 = pNew->cfg.daysToKeep1; @@ -287,6 +291,7 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) { if (pCfg->buffer < TSDB_MIN_BUFFER_PER_VNODE || pCfg->buffer > TSDB_MAX_BUFFER_PER_VNODE) return -1; if (pCfg->pageSize < TSDB_MIN_PAGESIZE_PER_VNODE || pCfg->pageSize > TSDB_MAX_PAGESIZE_PER_VNODE) return -1; if (pCfg->pages < TSDB_MIN_PAGES_PER_VNODE || pCfg->pages > TSDB_MAX_PAGES_PER_VNODE) return -1; + if (pCfg->lastRowMem < TSDB_MIN_DB_LAST_ROW_MEM || pCfg->lastRowMem > TSDB_MAX_DB_LAST_ROW_MEM) return -1; if (pCfg->daysPerFile < TSDB_MIN_DAYS_PER_FILE || pCfg->daysPerFile > TSDB_MAX_DAYS_PER_FILE) return -1; if (pCfg->daysToKeep0 < TSDB_MIN_KEEP || pCfg->daysToKeep0 > TSDB_MAX_KEEP) return -1; if (pCfg->daysToKeep1 < TSDB_MIN_KEEP || pCfg->daysToKeep1 > TSDB_MAX_KEEP) return -1; @@ -335,6 +340,7 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) { if (pCfg->replications < 0) pCfg->replications = TSDB_DEFAULT_DB_REPLICA; if (pCfg->strict < 0) pCfg->strict = TSDB_DEFAULT_DB_STRICT; if (pCfg->cacheLastRow < 0) pCfg->cacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW; + if (pCfg->lastRowMem <= 0) pCfg->lastRowMem = TSDB_DEFAULT_LAST_ROW_MEM; if (pCfg->numOfRetensions < 0) pCfg->numOfRetensions = 0; if (pCfg->schemaless < 0) pCfg->schemaless = TSDB_DB_SCHEMALESS_OFF; } @@ -433,6 +439,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, .buffer = pCreate->buffer, .pageSize = pCreate->pageSize, .pages = pCreate->pages, + .lastRowMem = pCreate->lastRowMem, .daysPerFile = pCreate->daysPerFile, .daysToKeep0 = pCreate->daysToKeep0, .daysToKeep1 = pCreate->daysToKeep1, @@ -474,7 +481,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, int32_t code = -1; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq); if (pTrans == NULL) goto _OVER; - + // mndTransSetSerial(pTrans); mDebug("trans:%d, used to create db:%s", 
pTrans->id, pCreate->db); mndTransSetDbName(pTrans, dbObj.name, NULL); @@ -548,18 +555,33 @@ static int32_t mndSetDbCfgFromAlterDbReq(SDbObj *pDb, SAlterDbReq *pAlter) { terrno = TSDB_CODE_MND_DB_OPTION_UNCHANGED; if (pAlter->buffer > 0 && pAlter->buffer != pDb->cfg.buffer) { +#if 1 + terrno = TSDB_CODE_OPS_NOT_SUPPORT; + return terrno; +#else pDb->cfg.buffer = pAlter->buffer; terrno = 0; +#endif } if (pAlter->pages > 0 && pAlter->pages != pDb->cfg.pages) { +#if 1 + terrno = TSDB_CODE_OPS_NOT_SUPPORT; + return terrno; +#else pDb->cfg.pages = pAlter->pages; terrno = 0; +#endif } if (pAlter->pageSize > 0 && pAlter->pageSize != pDb->cfg.pageSize) { +#if 1 + terrno = TSDB_CODE_OPS_NOT_SUPPORT; + return terrno; +#else pDb->cfg.pageSize = pAlter->pageSize; terrno = 0; +#endif } if (pAlter->daysPerFile > 0 && pAlter->daysPerFile != pDb->cfg.daysPerFile) { @@ -593,8 +615,12 @@ static int32_t mndSetDbCfgFromAlterDbReq(SDbObj *pDb, SAlterDbReq *pAlter) { } if (pAlter->strict >= 0 && pAlter->strict != pDb->cfg.strict) { +#if 1 + terrno = TSDB_CODE_OPS_NOT_SUPPORT; +#else pDb->cfg.strict = pAlter->strict; terrno = 0; +#endif } if (pAlter->cacheLastRow >= 0 && pAlter->cacheLastRow != pDb->cfg.cacheLastRow) { @@ -602,10 +628,19 @@ static int32_t mndSetDbCfgFromAlterDbReq(SDbObj *pDb, SAlterDbReq *pAlter) { terrno = 0; } + if (pAlter->lastRowMem > 0 && pAlter->lastRowMem != pDb->cfg.lastRowMem) { + pDb->cfg.lastRowMem = pAlter->lastRowMem; + terrno = 0; + } + if (pAlter->replications > 0 && pAlter->replications != pDb->cfg.replications) { +#if 1 + terrno = TSDB_CODE_OPS_NOT_SUPPORT; +#else pDb->cfg.replications = pAlter->replications; pDb->vgVersion++; terrno = 0; +#endif } return terrno; @@ -927,6 +962,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) { if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER; + if (mndDropStreamByDb(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndDropSmasByDb(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER; @@ -947,7 +983,6 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) { mndTransSetRpcRsp(pTrans, pRsp, rspLen); if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; - code = 0; _OVER: diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index dc7f08ebc2..727b20ef8a 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -28,7 +28,7 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) { if (tEncodeI64(pEncoder, pObj->uid) < 0) return -1; if (tEncodeI8(pEncoder, pObj->status) < 0) return -1; - if (tEncodeI8(pEncoder, pObj->dropPolicy) < 0) return -1; + if (tEncodeI8(pEncoder, pObj->igExpired) < 0) return -1; if (tEncodeI8(pEncoder, pObj->trigger) < 0) return -1; if (tEncodeI64(pEncoder, pObj->triggerParam) < 0) return -1; if (tEncodeI64(pEncoder, pObj->watermark) < 0) return -1; @@ -73,7 +73,7 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) { if (tDecodeI64(pDecoder, &pObj->uid) < 0) return -1; if (tDecodeI8(pDecoder, &pObj->status) < 0) return -1; - if (tDecodeI8(pDecoder, &pObj->dropPolicy) < 0) return -1; + if (tDecodeI8(pDecoder, &pObj->igExpired) < 0) return -1; if (tDecodeI8(pDecoder, &pObj->trigger) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->triggerParam) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->watermark) < 0) return 
-1; diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 5e78d0f434..6ead922d95 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -15,8 +15,8 @@ #define _DEFAULT_SOURCE #include "mndDnode.h" -#include "mndPrivilege.h" #include "mndMnode.h" +#include "mndPrivilege.h" #include "mndQnode.h" #include "mndShow.h" #include "mndSnode.h" @@ -274,15 +274,14 @@ static void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeEps) { SDnodeEp dnodeEp = {0}; dnodeEp.id = pDnode->id; - dnodeEp.isMnode = 0; dnodeEp.ep.port = pDnode->port; memcpy(dnodeEp.ep.fqdn, pDnode->fqdn, TSDB_FQDN_LEN); + sdbRelease(pSdb, pDnode); + dnodeEp.isMnode = 0; if (mndIsMnode(pMnode, pDnode->id)) { dnodeEp.isMnode = 1; } - - sdbRelease(pSdb, pDnode); taosArrayPush(pDnodeEps, &dnodeEp); } } @@ -432,7 +431,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { } if (!online) { - mInfo("dnode:%d, from offline to online", pDnode->id); + mInfo("dnode:%d, from offline to online, memory avail:%" PRId64 " total:%" PRId64 " cores:%.2f", pDnode->id, + statusReq.memAvail, statusReq.memTotal, statusReq.numOfCores); } else { mDebug("dnode:%d, send dnode epset, online:%d dnodeVer:%" PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online, statusReq.dnodeVer, dnodeVer, reboot); @@ -441,6 +441,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { pDnode->rebootTime = statusReq.rebootTime; pDnode->numOfCores = statusReq.numOfCores; pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes; + pDnode->memAvail = statusReq.memAvail; + pDnode->memTotal = statusReq.memTotal; SStatusRsp statusRsp = {0}; statusRsp.dnodeVer = dnodeVer; @@ -580,7 +582,7 @@ static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq) { strcpy(info.name, "timezone"); snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%s", tsTimezoneStr); taosArrayPush(rsp.variables, &info); - + strcpy(info.name, "locale"); snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%s", tsLocale); taosArrayPush(rsp.variables, &info); @@ -758,6 +760,11 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) { } } + if (numOfVnodes > 0) { + terrno = TSDB_CODE_OPS_NOT_SUPPORT; + goto _OVER; + } + code = mndDropDnode(pMnode, pReq, pDnode, pMObj, pQObj, pSObj, numOfVnodes); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index c39c9847a9..bc6830b8f3 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -58,7 +58,7 @@ static void *mndBuildTimerMsg(int32_t *pContLen) { static void mndPullupTrans(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TRANS_TIMER, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); @@ -67,14 +67,14 @@ static void mndPullupTrans(SMnode *pMnode) { static void mndTtlTimer(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); SRpcMsg rpcMsg = {.msgType = TDMT_MND_TTL_TIMER, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); } static void mndCalMqRebalance(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_MQ_TIMER, .pCont = pReq, .contLen = contLen}; 
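Setting aside the formatting-only changes in mndMain.c just above: the per-dnode memAvail and memTotal captured in mndProcessStatusReq earlier in this file feed a new placement check added by the mndVgroup.c hunks later in this patch. A simplified sketch of that check is shown below (illustrative only; the real code also bumps pDnode->memUsed and sets TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE on failure):

```c
// Simplified form of the capacity check added in mndGetAvailableDnode /
// mndAddVnodeToVgroup further down; not the literal patch code.
#include <stdbool.h>
#include <stdint.h>

static bool dnodeCanHostVgroup(int64_t memAvail, int64_t memUsed, int64_t vgMem) {
  // vgMem comes from mndGetVgroupMemory(): buffer (MB) + pages * pageSize (KB),
  // plus lastRowMem (MB) when the last-row cache is enabled.
  return memAvail - memUsed - vgMem > 0;
}
```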
tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); @@ -83,7 +83,7 @@ static void mndCalMqRebalance(SMnode *pMnode) { static void mndPullupTelem(SMnode *pMnode) { int32_t contLen = 0; - void * pReq = mndBuildTimerMsg(&contLen); + void *pReq = mndBuildTimerMsg(&contLen); if (pReq != NULL) { SRpcMsg rpcMsg = {.msgType = TDMT_MND_TELEM_TIMER, .pCont = pReq, .contLen = contLen}; tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); @@ -395,7 +395,7 @@ void mndStop(SMnode *pMnode) { } int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { - SMnode * pMnode = pMsg->info.node; + SMnode *pMnode = pMsg->info.node; SSyncMgmt *pMgmt = &pMnode->syncMgmt; int32_t code = 0; @@ -413,7 +413,7 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { } do { - char * syncNodeStr = sync2SimpleStr(pMgmt->sync); + char *syncNodeStr = sync2SimpleStr(pMgmt->sync); static int64_t mndTick = 0; if (++mndTick % 10 == 1) { mTrace("vgId:%d, sync trace msg:%s, %s", syncGetVgId(pMgmt->sync), TMSG_INFO(pMsg->msgType), syncNodeStr); @@ -427,7 +427,7 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { } while (0); // ToDo: ugly! use function pointer - if (syncNodeSnapshotEnable(pSyncNode)) { + if (syncNodeStrategy(pSyncNode) == SYNC_STRATEGY_STANDARD_SNAPSHOT) { if (pMsg->msgType == TDMT_SYNC_TIMEOUT) { SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pMsg); code = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); @@ -579,7 +579,7 @@ static int32_t mndCheckMsgContent(SRpcMsg *pMsg) { } int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { - SMnode * pMnode = pMsg->info.node; + SMnode *pMnode = pMsg->info.node; const STraceId *trace = &pMsg->info.traceId; MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)]; @@ -632,7 +632,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr SMonStbInfo *pStbInfo, SMonGrantInfo *pGrantInfo) { if (mndAcquireRpcRef(pMnode) != 0) return -1; - SSdb * pSdb = pMnode->pSdb; + SSdb *pSdb = pMnode->pSdb; int64_t ms = taosGetTimestampMs(); pClusterInfo->dnodes = taosArrayInit(sdbGetSize(pSdb, SDB_DNODE), sizeof(SMonDnodeDesc)); @@ -713,7 +713,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pGrantInfo->timeseries_used += pVgroup->numOfTimeSeries; tstrncpy(desc.status, "unsynced", sizeof(desc.status)); for (int32_t i = 0; i < pVgroup->replica; ++i) { - SVnodeGid * pVgid = &pVgroup->vnodeGid[i]; + SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; SMonVnodeDesc *pVnDesc = &desc.vnodes[i]; pVnDesc->dnode_id = pVgid->dnodeId; tstrncpy(pVnDesc->vnode_role, syncStr(pVgid->role), sizeof(pVnDesc->vnode_role)); diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 6014adbe95..2a8cbc4425 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -71,7 +71,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { } else if (strncasecmp(name, TSDB_INS_TABLE_USER_FUNCTIONS, len) == 0) { type = TSDB_MGMT_TABLE_FUNC; } else if (strncasecmp(name, TSDB_INS_TABLE_USER_INDEXES, len) == 0) { - // type = TSDB_MGMT_TABLE_INDEX; + type = TSDB_MGMT_TABLE_INDEX; } else if (strncasecmp(name, TSDB_INS_TABLE_USER_STABLES, len) == 0) { type = TSDB_MGMT_TABLE_STB; } else if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, len) == 0) { diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 530cc57390..da5b8cb48e 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -523,6 +523,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, 
SMCreateSmaReq *pCrea streamObj.updateTime = streamObj.createTime; streamObj.uid = mndGenerateUid(pCreate->name, strlen(pCreate->name)); streamObj.sourceDbUid = pDb->uid; + streamObj.targetDbUid = pDb->uid; streamObj.version = 1; streamObj.sql = pCreate->sql; streamObj.smaId = smaObj.uid; @@ -853,36 +854,26 @@ _OVER: } int32_t mndDropSmasByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { - SSdb *pSdb = pMnode->pSdb; - SSmaObj *pSma = NULL; - void *pIter = NULL; - SVgObj *pVgroup = NULL; - int32_t code = -1; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; while (1) { + SSmaObj *pSma = NULL; pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma); if (pIter == NULL) break; if (pSma->dbUid == pDb->uid) { - pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId); - if (pVgroup == NULL) goto _OVER; - if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; - if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER; - if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER; - mndReleaseVgroup(pMnode, pVgroup); - pVgroup = NULL; + if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) { + sdbRelease(pSdb, pSma); + sdbCancelFetch(pSdb, pSma); + return -1; + } } sdbRelease(pSdb, pSma); } - code = 0; - -_OVER: - sdbCancelFetch(pSdb, pIter); - sdbRelease(pSdb, pSma); - mndReleaseVgroup(pMnode, pVgroup); - return code; + return 0; } static int32_t mndProcessDropSmaReq(SRpcMsg *pReq) { @@ -1156,29 +1147,32 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc SName smaName = {0}; tNameFromString(&smaName, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + char n1[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(n1, (char *)tNameGetTableName(&smaName)); - char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(n, (char *)tNameGetTableName(&smaName)); - cols++; + char n2[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(n2, (char *)mndGetDbStr(pDb->name)); SName stbName = {0}; tNameFromString(&stbName, pSma->stb, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - - char n1[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(n1, (char *)tNameGetTableName(&stbName)); + char n3[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(n3, (char *)tNameGetTableName(&stbName)); SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, numOfRows, (const char *)n, false); - - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, numOfRows, (const char *)&pSma->createdTime, false); - - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)n1, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)n2, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)n3, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pSma->dstVgId, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&pSma->createdTime, false); + numOfRows++; sdbRelease(pSdb, pSma); } diff --git a/source/dnode/mnode/impl/src/mndSnode.c b/source/dnode/mnode/impl/src/mndSnode.c index 2dd8592bf8..d18a233d29 100644 --- a/source/dnode/mnode/impl/src/mndSnode.c +++ b/source/dnode/mnode/impl/src/mndSnode.c @@ -273,6 +273,9 @@ _OVER: } static int32_t 
mndProcessCreateSnodeReq(SRpcMsg *pReq) { +#if 1 + return TSDB_CODE_OPS_NOT_SUPPORT; +#else SMnode *pMnode = pReq->info.node; int32_t code = -1; SSnodeObj *pObj = NULL; @@ -315,6 +318,7 @@ _OVER: mndReleaseSnode(pMnode, pObj); mndReleaseDnode(pMnode, pDnode); return code; +#endif } static int32_t mndSetDropSnodeRedoLogs(STrans *pTrans, SSnodeObj *pObj) { @@ -386,9 +390,12 @@ _OVER: } static int32_t mndProcessDropSnodeReq(SRpcMsg *pReq) { - SMnode *pMnode = pReq->info.node; - int32_t code = -1; - SSnodeObj *pObj = NULL; +#if 1 + return TSDB_CODE_OPS_NOT_SUPPORT; +#else + SMnode *pMnode = pReq->info.node; + int32_t code = -1; + SSnodeObj *pObj = NULL; SMDropSnodeReq dropReq = {0}; if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) { @@ -422,6 +429,7 @@ _OVER: mndReleaseSnode(pMnode, pObj); return code; +#endif } static int32_t mndRetrieveSnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index cabbac14f1..c2125f75f8 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -247,8 +247,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, pObj->uid = mndGenerateUid(pObj->name, strlen(pObj->name)); pObj->status = 0; - // TODO - pObj->dropPolicy = 0; + pObj->igExpired = pCreate->igExpired; pObj->trigger = pCreate->triggerType; pObj->triggerParam = pCreate->maxDelay; pObj->watermark = pCreate->watermark; @@ -301,6 +300,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, .streamQuery = true, .triggerType = pObj->trigger == STREAM_TRIGGER_MAX_DELAY ? STREAM_TRIGGER_WINDOW_CLOSE : pObj->trigger, .watermark = pObj->watermark, + .igExpired = pObj->igExpired, }; // using ast and param to build physical plan @@ -673,27 +673,29 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; - void *pIter = NULL; - SStreamObj *pStream = NULL; while (1) { + SStreamObj *pStream = NULL; pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); if (pIter == NULL) break; if (pStream->sourceDbUid == pDb->uid || pStream->targetDbUid == pDb->uid) { if (pStream->sourceDbUid != pStream->targetDbUid) { sdbRelease(pSdb, pStream); + sdbCancelFetch(pSdb, pIter); + mError("db:%s, failed to drop stream:%s since sourceDbUid:%" PRId64 " not match with targetDbUid:%" PRId64, + pDb->name, pStream->name, pStream->sourceDbUid, pStream->targetDbUid); + terrno = TSDB_CODE_MND_STREAM_ALREADY_EXIST; return -1; } else { // TODO drop all task on snode if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) { sdbRelease(pSdb, pStream); + sdbCancelFetch(pSdb, pIter); return -1; } } - } else { - sdbRelease(pSdb, pStream); - continue; } #if 0 diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 693eed5222..2053f3886c 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -134,7 +134,7 @@ int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, in return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len); } -int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) { +int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void *pParam, void **ppWriter) { mInfo("start to apply snapshot to sdb"); SMnode *pMnode = pFsm->data; return 
sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter); @@ -178,7 +178,7 @@ int32_t mndInitSync(SMnode *pMnode) { syncInfo.pWal = pMnode->pWal; syncInfo.pFsm = mndSyncMakeFsm(pMnode); syncInfo.isStandBy = pMgmt->standby; - syncInfo.snapshotEnable = true; + syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT; mInfo("start to open mnode sync, standby:%d", pMgmt->standby); if (pMgmt->standby || pMgmt->replica.id > 0) { diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 534cba73c7..ea92b2f0e6 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -90,7 +90,7 @@ static int32_t mndTransGetActionsSize(SArray *pArray) { for (int32_t i = 0; i < actionNum; ++i) { STransAction *pAction = taosArrayGet(pArray, i); if (pAction->actionType == TRANS_ACTION_RAW) { - rawDataLen += (sdbGetRawTotalSize(pAction->pRaw) + sizeof(int32_t)); + rawDataLen += (sizeof(STransAction) + sdbGetRawTotalSize(pAction->pRaw)); } else if (pAction->actionType == TRANS_ACTION_MSG) { rawDataLen += (sizeof(STransAction) + pAction->contLen); } else { @@ -105,7 +105,7 @@ static int32_t mndTransGetActionsSize(SArray *pArray) { static SSdbRaw *mndTransActionEncode(STrans *pTrans) { terrno = TSDB_CODE_OUT_OF_MEMORY; - int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE; + int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE + pTrans->paramLen; rawDataLen += mndTransGetActionsSize(pTrans->redoActions); rawDataLen += mndTransGetActionsSize(pTrans->undoActions); rawDataLen += mndTransGetActionsSize(pTrans->commitActions); @@ -226,7 +226,8 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) { _OVER: if (terrno != 0) { - mError("trans:%d, failed to encode to raw:%p len:%d since %s", pTrans->id, pRaw, dataPos, terrstr()); + mError("trans:%d, failed to encode to raw:%p maxlen:%d len:%d since %s", pTrans->id, pRaw, sdbGetRawTotalSize(pRaw), + dataPos, terrstr()); sdbFreeRaw(pRaw); return NULL; } @@ -1025,7 +1026,7 @@ static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction pTrans->lastAction = pAction->id; pTrans->lastMsgType = pAction->msgType; pTrans->lastEpset = pAction->epSet; - pTrans->lastErrorNo == 0; + pTrans->lastErrorNo = 0; return 0; } diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 5ca945624d..3a3331a0b3 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -207,6 +207,7 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg createReq.buffer = pDb->cfg.buffer; createReq.pageSize = pDb->cfg.pageSize; createReq.pages = pDb->cfg.pages; + createReq.lastRowMem = pDb->cfg.lastRowMem; createReq.daysPerFile = pDb->cfg.daysPerFile; createReq.daysToKeep0 = pDb->cfg.daysToKeep0; createReq.daysToKeep1 = pDb->cfg.daysToKeep1; @@ -274,8 +275,9 @@ void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_ SAlterVnodeReq alterReq = {0}; alterReq.vgVersion = pVgroup->version; alterReq.buffer = pDb->cfg.buffer; - alterReq.pages = pDb->cfg.pages; alterReq.pageSize = pDb->cfg.pageSize; + alterReq.pages = pDb->cfg.pages; + alterReq.lastRowMem = pDb->cfg.lastRowMem; alterReq.daysPerFile = pDb->cfg.daysPerFile; alterReq.daysToKeep0 = pDb->cfg.daysToKeep0; alterReq.daysToKeep1 = pDb->cfg.daysToKeep1; @@ -392,9 +394,10 @@ static bool mndBuildDnodesArrayFp(SMnode *pMnode, void *pObj, void *p1, void *p2 bool online = mndIsDnodeOnline(pDnode, curMs); bool isMnode = 
mndIsMnode(pMnode, pDnode->id); pDnode->numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id); + pDnode->memUsed = mndGetVnodesMemory(pMnode, pDnode->id); - mDebug("dnode:%d, vnodes:%d support_vnodes:%d is_mnode:%d online:%d", pDnode->id, pDnode->numOfVnodes, - pDnode->numOfSupportVnodes, isMnode, online); + mDebug("dnode:%d, vnodes:%d supportVnodes:%d isMnode:%d online:%d memory avail:%" PRId64 " used:%" PRId64, pDnode->id, + pDnode->numOfVnodes, pDnode->numOfSupportVnodes, isMnode, online, pDnode->memAvail, pDnode->memUsed); if (isMnode) { pDnode->numOfVnodes++; @@ -426,15 +429,7 @@ static int32_t mndCompareDnodeId(int32_t *dnode1Id, int32_t *dnode2Id) { return static int32_t mndCompareDnodeVnodes(SDnodeObj *pDnode1, SDnodeObj *pDnode2) { float d1Score = (float)pDnode1->numOfVnodes / pDnode1->numOfSupportVnodes; float d2Score = (float)pDnode2->numOfVnodes / pDnode2->numOfSupportVnodes; -#if 0 - if (d1Score == d2Score) { - return pDnode2->id - pDnode1->id; - } else { - return d1Score >= d2Score ? 1 : 0; - } -#else return d1Score >= d2Score ? 1 : 0; -#endif } void mndSortVnodeGid(SVgObj *pVgroup) { @@ -447,7 +442,7 @@ void mndSortVnodeGid(SVgObj *pVgroup) { } } -static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) { +static int32_t mndGetAvailableDnode(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, SArray *pArray) { SSdb *pSdb = pMnode->pSdb; int32_t allocedVnodes = 0; void *pIter = NULL; @@ -470,6 +465,16 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pAr return -1; } + int64_t vgMem = mndGetVgroupMemory(pMnode, pDb, pVgroup); + if (pDnode->memAvail - vgMem - pDnode->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d, avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pDnode->id, pDnode->memAvail, pDnode->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pDnode->memUsed += vgMem; + } + pVgid->dnodeId = pDnode->id; if (pVgroup->replica == 1) { pVgid->role = TAOS_SYNC_STATE_LEADER; @@ -477,7 +482,8 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pAr pVgid->role = TAOS_SYNC_STATE_FOLLOWER; } - mInfo("db:%s, vgId:%d, vn:%d dnode:%d is alloced", pVgroup->dbName, pVgroup->vgId, v, pVgid->dnodeId); + mInfo("db:%s, vgId:%d, vn:%d is alloced, memory:%" PRId64 ", dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, v, vgMem, pVgid->dnodeId, pDnode->memAvail, pDnode->memUsed); pDnode->numOfVnodes++; } @@ -498,7 +504,7 @@ int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) { pVgroup->dbUid = pDb->uid; pVgroup->replica = 1; - if (mndGetAvailableDnode(pMnode, pVgroup, pArray) != 0) return -1; + if (mndGetAvailableDnode(pMnode, pDb, pVgroup, pArray) != 0) return -1; mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId); return 0; @@ -546,8 +552,7 @@ int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) { pVgroup->dbUid = pDb->uid; pVgroup->replica = pDb->cfg.replications; - if (mndGetAvailableDnode(pMnode, pVgroup, pArray) != 0) { - terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; + if (mndGetAvailableDnode(pMnode, pDb, pVgroup, pArray) != 0) { goto _OVER; } @@ -728,6 +733,43 @@ int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId) { return numOfVnodes; } +int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDbInput, SVgObj *pVgroup) { + SDbObj *pDb = pDbInput; + if (pDbInput == NULL) { + pDb = mndAcquireDb(pMnode, pVgroup->dbName); + } + + int64_t 
vgroupMemroy = (int64_t)pDb->cfg.buffer * 1024 * 1024 + (int64_t)pDb->cfg.pages * pDb->cfg.pageSize * 1024; + if (pDb->cfg.cacheLastRow > 0) { + vgroupMemroy += (int64_t)pDb->cfg.lastRowMem * 1024 * 1024; + } + + if (pDbInput == NULL) { + mndReleaseDb(pMnode, pDb); + } + return vgroupMemroy; +} + +static bool mndGetVnodeMemroyFp(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3) { + SVgObj *pVgroup = pObj; + int32_t dnodeId = *(int32_t *)p1; + int64_t *pVnodeMemory = (int64_t *)p2; + + for (int32_t v = 0; v < pVgroup->replica; ++v) { + if (pVgroup->vnodeGid[v].dnodeId == dnodeId) { + *pVnodeMemory += mndGetVgroupMemory(pMnode, NULL, pVgroup); + } + } + + return true; +} + +int64_t mndGetVnodesMemory(SMnode *pMnode, int32_t dnodeId) { + int64_t vnodeMemory = 0; + sdbTraverse(pMnode->pSdb, SDB_VGROUP, mndGetVnodeMemroyFp, &dnodeId, &vnodeMemory, NULL); + return vnodeMemory; +} + static int32_t mndRetrieveVnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; @@ -807,9 +849,20 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) { return -1; } + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pDnode->memAvail - vgMem - pDnode->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pDnode->id, pDnode->memAvail, pDnode->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pDnode->memUsed += vgMem; + } + pVgid->dnodeId = pDnode->id; pVgid->role = TAOS_SYNC_STATE_ERROR; - mInfo("db:%s, vgId:%d, vn:%d dnode:%d, is added", pVgroup->dbName, pVgroup->vgId, pVgroup->replica, pVgid->dnodeId); + mInfo("db:%s, vgId:%d, vn:%d is added, memory:%" PRId64 ", dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, pVgroup->replica, vgMem, pVgid->dnodeId, pDnode->memAvail, pDnode->memUsed); pVgroup->replica++; pDnode->numOfVnodes++; @@ -835,7 +888,10 @@ int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray for (int32_t vn = 0; vn < pVgroup->replica; ++vn) { SVnodeGid *pVgid = &pVgroup->vnodeGid[vn]; if (pVgid->dnodeId == pDnode->id) { - mInfo("db:%s, vgId:%d, vn:%d dnode:%d, is removed", pVgroup->dbName, pVgroup->vgId, vn, pVgid->dnodeId); + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + pDnode->memUsed -= vgMem; + mInfo("db:%s, vgId:%d, vn:%d is removed, memory:%" PRId64 ", dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vn, vgMem, pVgid->dnodeId, pDnode->memAvail, pDnode->memUsed); pDnode->numOfVnodes--; pVgroup->replica--; *pDelVgid = *pVgid; @@ -1161,6 +1217,17 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; goto _OVER; } + + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pNew1->memAvail - vgMem - pNew1->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pNew1->id, pNew1->memAvail, pNew1->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pNew1->memUsed += vgMem; + } + if (mndAddIncVgroupReplicaToTrans(pMnode, pTrans, pDb, &newVg, pNew1->id) != 0) goto _OVER; if (mndAddDecVgroupReplicaFromTrans(pMnode, pTrans, pDb, &newVg, pOld1->id) != 0) goto _OVER; } @@ -1173,6 +1240,15 @@ static int32_t mndRedistributeVgroup(SMnode 
*pMnode, SRpcMsg *pReq, SDbObj *pDb, terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; goto _OVER; } + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pNew2->memAvail - vgMem - pNew2->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pNew2->id, pNew2->memAvail, pNew2->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pNew2->memUsed += vgMem; + } if (mndAddIncVgroupReplicaToTrans(pMnode, pTrans, pDb, &newVg, pNew2->id) != 0) goto _OVER; if (mndAddDecVgroupReplicaFromTrans(pMnode, pTrans, pDb, &newVg, pOld2->id) != 0) goto _OVER; } @@ -1185,6 +1261,15 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; goto _OVER; } + int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup); + if (pNew3->memAvail - vgMem - pNew3->memUsed <= 0) { + mError("db:%s, vgId:%d, no enough memory:%" PRId64 " in dnode:%d avail:%" PRId64 " used:%" PRId64, + pVgroup->dbName, pVgroup->vgId, vgMem, pNew3->id, pNew3->memAvail, pNew3->memUsed); + terrno = TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE; + return -1; + } else { + pNew3->memUsed += vgMem; + } if (mndAddIncVgroupReplicaToTrans(pMnode, pTrans, pDb, &newVg, pNew3->id) != 0) goto _OVER; if (mndAddDecVgroupReplicaFromTrans(pMnode, pTrans, pDb, &newVg, pOld3->id) != 0) goto _OVER; } @@ -1219,6 +1304,9 @@ _OVER: } static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) { +#if 1 + return TSDB_CODE_OPS_NOT_SUPPORT; +#else SMnode *pMnode = pReq->info.node; SDnodeObj *pNew1 = NULL; SDnodeObj *pNew2 = NULL; @@ -1412,6 +1500,7 @@ _OVER: mndReleaseDb(pMnode, pDb); return code; +#endif } int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SArray *pArray) { @@ -1650,9 +1739,9 @@ static int32_t mndBalanceVgroupBetweenDnode(SMnode *pMnode, STrans *pTrans, SDno } static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) { - int32_t code = -1; - int32_t numOfVgroups = 0; - STrans *pTrans = NULL; + int32_t code = -1; + int32_t numOfVgroups = 0; + STrans *pTrans = NULL; SHashObj *pBalancedVgroups = NULL; pBalancedVgroups = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); @@ -1711,10 +1800,13 @@ _OVER: } static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) { +#if 1 + return TSDB_CODE_OPS_NOT_SUPPORT; +#else SMnode *pMnode = pReq->info.node; int32_t code = -1; SArray *pArray = NULL; - void *pIter = NULL; + void *pIter = NULL; int64_t curMs = taosGetTimestampMs(); SBalanceVgroupReq req = {0}; @@ -1759,6 +1851,7 @@ _OVER: taosArrayDestroy(pArray); return code; +#endif } bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; } \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/db/db.cpp b/source/dnode/mnode/impl/test/db/db.cpp index a1bab5d1d4..a3d129c7c4 100644 --- a/source/dnode/mnode/impl/test/db/db.cpp +++ b/source/dnode/mnode/impl/test/db/db.cpp @@ -93,7 +93,7 @@ TEST_F(MndTestDb, 02_Create_Alter_Drop_Db) { SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_DB, pReq, contLen); ASSERT_NE(pRsp, nullptr); - ASSERT_EQ(pRsp->code, 0); + ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT); } test.SendShowReq(TSDB_MGMT_TABLE_DB, "user_databases", ""); diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index fbf66da632..3db0087334 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ 
b/source/dnode/mnode/sdb/src/sdb.c @@ -131,7 +131,7 @@ int32_t sdbSetTable(SSdb *pSdb, SSdbTable table) { hashType = TSDB_DATA_TYPE_BINARY; } - SHashObj *hash = taosHashInit(64, taosGetDefaultHashFunction(hashType), true, HASH_NO_LOCK); + SHashObj *hash = taosHashInit(64, taosGetDefaultHashFunction(hashType), true, HASH_ENTRY_LOCK); if (hash == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index b09ab34224..174fe6dab5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -31,6 +31,8 @@ target_sources( "src/sma/smaEnv.c" "src/sma/smaUtil.c" "src/sma/smaOpen.c" + "src/sma/smaCommit.c" + "src/sma/smaSnapshot.c" "src/sma/smaRollup.c" "src/sma/smaTimeRange.c" diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index b455d779c1..7f7b3fa885 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -62,12 +62,10 @@ struct STSmaStat { struct SRSmaStat { SSma *pSma; - int64_t refId; // shared by persistence/fetch tasks - void *tmrHandle; // for persistence task - tmr_h tmrId; // for persistence task - int32_t tmrSeconds; // for persistence task - int8_t triggerStat; // for persistence task - int8_t runningStat; // for persistence task + int64_t refId; // shared by fetch tasks + void *tmrHandle; // shared by fetch tasks + int8_t triggerStat; // shared by fetch tasks + int8_t runningStat; // for persistence task SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo; }; @@ -82,7 +80,6 @@ struct SSmaStat { #define SMA_TSMA_STAT(s) (&(s)->tsmaStat) #define SMA_RSMA_STAT(s) (&(s)->rsmaStat) #define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash) -#define RSMA_TMR_ID(r) ((r)->tmrId) #define RSMA_TMR_HANDLE(r) ((r)->tmrHandle) #define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat) #define RSMA_RUNNING_STAT(r) (&(r)->runningStat) @@ -92,8 +89,9 @@ enum { TASK_TRIGGER_STAT_INIT = 0, TASK_TRIGGER_STAT_ACTIVE = 1, TASK_TRIGGER_STAT_INACTIVE = 2, - TASK_TRIGGER_STAT_CANCELLED = 3, - TASK_TRIGGER_STAT_FINISHED = 4, + TASK_TRIGGER_STAT_PAUSED = 3, + TASK_TRIGGER_STAT_CANCELLED = 4, + TASK_TRIGGER_STAT_FINISHED = 5, }; void tdDestroySmaEnv(SSmaEnv *pSmaEnv); void *tdFreeSmaEnv(SSmaEnv *pSmaEnv); @@ -184,9 +182,11 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) { static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeRSmaInfo(SRSmaInfo *pInfo); +int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat); int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName); int32_t tdProcessRSmaRestoreImpl(SSma *pSma); + int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg); int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg); int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days); @@ -214,25 +214,22 @@ struct STFInfo { }; struct STFile { - STFInfo info; - STfsFile f; - TdFilePtr pFile; uint8_t state; + STFInfo info; + char *fname; + TdFilePtr pFile; }; -#define TD_TFILE_F(tf) (&((tf)->f)) #define TD_TFILE_PFILE(tf) ((tf)->pFile) #define TD_TFILE_OPENED(tf) (TD_TFILE_PFILE(tf) != NULL) -#define TD_TFILE_FULL_NAME(tf) (TD_TFILE_F(tf)->aname) -#define TD_TFILE_REL_NAME(tf) (TD_TFILE_F(tf)->rname) +#define TD_TFILE_FULL_NAME(tf) ((tf)->fname) #define TD_TFILE_OPENED(tf) (TD_TFILE_PFILE(tf) != NULL) #define TD_TFILE_CLOSED(tf) 
(!TD_TFILE_OPENED(tf)) #define TD_TFILE_SET_CLOSED(f) (TD_TFILE_PFILE(f) = NULL) #define TD_TFILE_SET_STATE(tf, s) ((tf)->state = (s)) -#define TD_TFILE_DID(tf) (TD_TFILE_F(tf)->did) -int32_t tdInitTFile(STFile *pTFile, STfs *pTfs, const char *fname); -int32_t tdCreateTFile(STFile *pTFile, STfs *pTfs, bool updateHeader, int8_t fType); +int32_t tdInitTFile(STFile *pTFile, const char *dname, const char *fname); +int32_t tdCreateTFile(STFile *pTFile, bool updateHeader, int8_t fType); int32_t tdOpenTFile(STFile *pTFile, int flags); int64_t tdReadTFile(STFile *pTFile, void *buf, int64_t nbyte); int64_t tdSeekTFile(STFile *pTFile, int64_t offset, int whence); @@ -244,8 +241,10 @@ int32_t tdLoadTFileHeader(STFile *pTFile, STFInfo *pInfo); int32_t tdUpdateTFileHeader(STFile *pTFile); void tdUpdateTFileMagic(STFile *pTFile, void *pCksm); void tdCloseTFile(STFile *pTFile); +void tdDestroyTFile(STFile *pTFile); -void tdGetVndFileName(int32_t vgId, const char *dname, const char *fname, char *outputName); +void tdGetVndFileName(int32_t vgId, const char *pdname, const char *dname, const char *fname, int64_t version, char *outputName); +void tdGetVndDirName(int32_t vgId,const char *pdname, const char *dname, bool endWithSep, char *outputName); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 6bd7d4edd1..4f81e9d62a 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -163,8 +163,11 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool // sma int32_t smaOpen(SVnode* pVnode); -int32_t smaCloseEnv(SSma* pSma); -int32_t smaCloseEx(SSma* pSma); +int32_t smaClose(SSma* pSma); +int32_t smaBegin(SSma* pSma); +int32_t smaPreCommit(SSma* pSma); +int32_t smaCommit(SSma* pSma); +int32_t smaPostCommit(SSma* pSma); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); @@ -238,7 +241,7 @@ struct SVnode { tsem_t canCommit; int64_t sync; int32_t syncCount; - sem_t syncSem; + tsem_t syncSem; SQHandle* pQuery; }; diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c new file mode 100644 index 0000000000..6e75176136 --- /dev/null +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "sma.h" + +static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma); +static int32_t tdProcessRSmaCommitImpl(SSma *pSma); +static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma); + +/** + * @brief Only applicable to Rollup SMA + * + * @param pSma + * @return int32_t + */ +int32_t smaPreCommit(SSma *pSma) { return tdProcessRSmaPreCommitImpl(pSma); } + +/** + * @brief Only applicable to Rollup SMA + * + * @param pSma + * @return int32_t + */ +int32_t smaCommit(SSma *pSma) { return tdProcessRSmaCommitImpl(pSma); } + +/** + * @brief Only applicable to Rollup SMA + * + * @param pSma + * @return int32_t + */ +int32_t smaPostCommit(SSma *pSma) { return tdProcessRSmaPostCommitImpl(pSma); } + +/** + * @brief set rsma trigger stat active + * + * @param pSma + * @return int32_t + */ +int32_t smaBegin(SSma *pSma) { + SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); + if (!pSmaEnv) { + return TSDB_CODE_SUCCESS; + } + + SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); + SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + + int8_t rsmaTriggerStat = + atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED, TASK_TRIGGER_STAT_ACTIVE); + switch (rsmaTriggerStat) { + case TASK_TRIGGER_STAT_PAUSED: { + smaDebug("vgId:%d rsma trigger stat from paused to active", SMA_VID(pSma)); + break; + } + case TASK_TRIGGER_STAT_INIT: { + atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE); + smaDebug("vgId:%d rsma trigger stat from init to active", SMA_VID(pSma)); + break; + } + default: { + atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE); + smaWarn("vgId:%d rsma trigger stat %" PRIi8 " is unexpected", SMA_VID(pSma), rsmaTriggerStat); + ASSERT(0); + break; + } + } + return TSDB_CODE_SUCCESS; +} + +/** + * @brief pre-commit for rollup sma. + * 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED. 
+ * 2) wait all triggered fetch tasks finished + * 3) perform persist task for qTaskInfo + * + * @param pSma + * @return int32_t + */ +static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) { + SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); + if (!pSmaEnv) { + return TSDB_CODE_SUCCESS; + } + + SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); + SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + + + // step 1: set persistence task paused + atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED); + + // step 2: wait all triggered fetch tasks finished + int32_t nLoops = 0; + while (1) { + if (T_REF_VAL_GET(pStat) == 0) { + smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); + break; + } else { + smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); + } + ++nLoops; + if (nLoops > 1000) { + sched_yield(); + nLoops = 0; + } + } + + // step 3: perform persist task for qTaskInfo + tdRSmaPersistExecImpl(pRSmaStat); + + smaDebug("vgId:%d, rsma pre commit succeess", SMA_VID(pSma)); + + return TSDB_CODE_SUCCESS; +} + +/** + * @brief commit for rollup sma + * + * @param pSma + * @return int32_t + */ +static int32_t tdProcessRSmaCommitImpl(SSma *pSma) { + SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); + if (!pSmaEnv) { + return TSDB_CODE_SUCCESS; + } + return TSDB_CODE_SUCCESS; +} + +/** + * @brief post-commit for rollup sma + * 1) clean up the outdated qtaskinfo files + * + * @param pSma + * @return int32_t + */ +static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma) { + SVnode *pVnode = pSma->pVnode; + + if (!VND_IS_RSMA(pVnode)) { + return TSDB_CODE_SUCCESS; + } + + int64_t committed = pVnode->state.committed; + TdDirPtr pDir = NULL; + TdDirEntryPtr pDirEntry = NULL; + char dir[TSDB_FILENAME_LEN]; + const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$"; + regex_t regex; + int code = 0; + + tdGetVndDirName(TD_VID(pVnode), tfsGetPrimaryPath(pVnode->pTfs), VNODE_RSMA_DIR, true, dir); + + // Resource allocation and init + if ((code = regcomp(®ex, pattern, REG_EXTENDED)) != 0) { + char errbuf[128]; + regerror(code, ®ex, errbuf, sizeof(errbuf)); + smaWarn("vgId:%d, rsma post commit, regcomp for %s failed since %s", TD_VID(pVnode), dir, errbuf); + return TSDB_CODE_FAILED; + } + + if ((pDir = taosOpenDir(dir)) == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + smaWarn("vgId:%d, rsma post commit, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr()); + return TSDB_CODE_FAILED; + } + + int32_t dirLen = strlen(dir); + char *dirEnd = POINTER_SHIFT(dir, dirLen); + regmatch_t regMatch[2]; + while ((pDirEntry = taosReadDir(pDir)) != NULL) { + char *entryName = taosGetDirEntryName(pDirEntry); + if (!entryName) { + continue; + } + + code = regexec(®ex, entryName, 2, regMatch, 0); + + if (code == 0) { + // match + int64_t version = -1; + sscanf((const char *)POINTER_SHIFT(entryName, regMatch[1].rm_so), "%" PRIi64, &version); + if ((version < committed) && (version > -1)) { + strncpy(dirEnd, entryName, TSDB_FILENAME_LEN - dirLen); + if (taosRemoveFile(dir) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + smaWarn("vgId:%d, committed version:%" PRIi64 ", failed to remove %s since %s", TD_VID(pVnode), committed, + dir, terrstr()); + } else { + smaDebug("vgId:%d, committed version:%" PRIi64 ", success to remove %s", TD_VID(pVnode), committed, dir); + } + } + } else if (code == REG_NOMATCH) { + // not match + smaTrace("vgId:%d, rsma post commit, not match %s", TD_VID(pVnode), entryName); + continue; + } else { + // has other error + char errbuf[128]; + regerror(code, ®ex, errbuf, 
sizeof(errbuf)); + smaWarn("vgId:%d, rsma post commit, regexec failed since %s", TD_VID(pVnode), errbuf); + + taosCloseDir(&pDir); + regfree(&regex); + return TSDB_CODE_FAILED; + } + } + + taosCloseDir(&pDir); + regfree(&regex); + return TSDB_CODE_SUCCESS; +} diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index 120d6612a2..c7b938f884 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -132,6 +132,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS if (smaType == TSDB_SMA_TYPE_ROLLUP) { SRSmaStat *pRSmaStat = (SRSmaStat *)(*pSmaStat); pRSmaStat->pSma = (SSma *)pSma; + atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_INIT); // init smaMgmt smaMgmt.smaRef = taosOpenRef(SMA_MGMT_REF_NUM, tdDestroyRSmaStat); @@ -192,22 +193,20 @@ static void *tdFreeTSmaStat(STSmaStat *pStat) { static void tdDestroyRSmaStat(void *pRSmaStat) { if (pRSmaStat) { SRSmaStat *pStat = (SRSmaStat *)pRSmaStat; - smaDebug("vgId:%d %s:%d destroy rsma stat %p", SMA_VID(pStat->pSma), __func__, __LINE__, pRSmaStat); - // step 1: set persistence task cancelled + SSma *pSma = pStat->pSma; + smaDebug("vgId:%d, destroy rsma stat %p", SMA_VID(pSma), pRSmaStat); + // step 1: set rsma trigger stat cancelled atomic_store_8(RSMA_TRIGGER_STAT(pStat), TASK_TRIGGER_STAT_CANCELLED); - // step 2: stop the persistence timer - taosTmrStopA(&RSMA_TMR_ID(pStat)); - - // step 3: wait the persistence thread to finish + // step 2: wait the persistence thread to finish int32_t nLoops = 0; if (atomic_load_8(RSMA_RUNNING_STAT(pStat)) == 1) { while (1) { if (atomic_load_8(RSMA_TRIGGER_STAT(pStat)) == TASK_TRIGGER_STAT_FINISHED) { - smaDebug("rsma, persist task finished already"); + smaDebug("vgId:%d, rsma persist task finished already", SMA_VID(pSma)); break; } else { - smaDebug("rsma, persist task not finished yet since rsma stat in %" PRIi8, + smaDebug("vgId:%d, rsma persist task not finished yet since rsma stat in %" PRIi8, SMA_VID(pSma), atomic_load_8(RSMA_TRIGGER_STAT(pStat))); } ++nLoops; @@ -218,13 +217,15 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { } } - // step 4: destroy the rsma info and associated fetch tasks + // step 3: destroy the rsma info and associated fetch tasks // TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready. 
- void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL); - while (infoHash) { - SRSmaInfo *pSmaInfo = *(SRSmaInfo **)infoHash; - tdFreeRSmaInfo(pSmaInfo); - infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash); + if (taosHashGetSize(RSMA_INFO_HASH(pStat)) > 0) { + void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL); + while (infoHash) { + SRSmaInfo *pSmaInfo = *(SRSmaInfo **)infoHash; + tdFreeRSmaInfo(pSmaInfo); + infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash); + } } taosHashCleanup(RSMA_INFO_HASH(pStat)); @@ -232,10 +233,10 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { nLoops = 0; while (1) { if (T_REF_VAL_GET((SSmaStat *)pStat) == 0) { - smaDebug("rsma, all fetch task finished already"); + smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); break; } else { - smaDebug("rsma, fetch tasks not all finished yet"); + smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); } ++nLoops; if (nLoops > 1000) { @@ -275,7 +276,7 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) { } else if (smaType == TSDB_SMA_TYPE_ROLLUP) { SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSmaStat); if (taosRemoveRef(smaMgmt.smaRef, RSMA_REF_ID(pRSmaStat)) < 0) { - smaError("remove refId from smaRef failed, refId:0x%" PRIx64, RSMA_REF_ID(pRSmaStat)); + smaError("remove refId from rsmaRef:0x%" PRIx64 " failed since %s", RSMA_REF_ID(pRSmaStat), terrstr()); } } else { ASSERT(0); diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c index 641b8c7934..d73b03f4a2 100644 --- a/source/dnode/vnode/src/sma/smaOpen.c +++ b/source/dnode/vnode/src/sma/smaOpen.c @@ -135,17 +135,11 @@ _err: return -1; } -int32_t smaCloseEnv(SSma *pSma) { - if (pSma) { - SMA_TSMA_ENV(pSma) = tdFreeSmaEnv(SMA_TSMA_ENV(pSma)); - SMA_RSMA_ENV(pSma) = tdFreeSmaEnv(SMA_RSMA_ENV(pSma)); - } - return 0; -} - -int32_t smaCloseEx(SSma *pSma) { +int32_t smaClose(SSma *pSma) { if (pSma) { taosThreadMutexDestroy(&pSma->mutex); + SMA_TSMA_ENV(pSma) = tdFreeSmaEnv(SMA_TSMA_ENV(pSma)); + SMA_RSMA_ENV(pSma) = tdFreeSmaEnv(SMA_RSMA_ENV(pSma)); if SMA_RSMA_TSDB0 (pSma) tsdbClose(&SMA_RSMA_TSDB0(pSma)); if SMA_RSMA_TSDB1 (pSma) tsdbClose(&SMA_RSMA_TSDB1(pSma)); if SMA_RSMA_TSDB2 (pSma) tsdbClose(&SMA_RSMA_TSDB2(pSma)); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 41af641e9e..4e1b2db44a 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -15,16 +15,14 @@ #include "sma.h" -#define RSMA_QTASKINFO_PERSIST_MS 7200000 -#define RSMA_QTASKINFO_BUFSIZE 32768 -#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid +#define RSMA_QTASKINFO_BUFSIZE 32768 +#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid SSmaMgmt smaMgmt = { .smaRef = -1, }; -typedef enum { TD_QTASK_TMP_F = 0, TD_QTASK_CUR_F } TD_QTASK_FILE_T; -static const char *tdQTaskInfoFname[] = {"qtaskinfo.t", "qtaskinfo"}; +#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver" typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem; typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter; @@ -37,16 +35,16 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType static void tdRSmaFetchTrigger(void *param, void *tmrId); static void tdRSmaPersistTrigger(void *param, void *tmrId); static void *tdRSmaPersistExec(void *param); -static void tdRSmaQTaskInfoGetFName(int32_t vid, int8_t ftype, char 
*outputName); +static void tdRSmaQTaskInfoGetFName(int32_t vid, int64_t version, char *outputName); static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile); static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish); static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, SRSmaQTaskInfoIter *pIter); static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem); -static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma); -static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma); -static int32_t tdRSmaRestoreTSDataReload(SSma *pSma); +static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables); +static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int64_t *committed); +static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed); struct SRSmaInfoItem { SRSmaInfo *pRsmaInfo; @@ -88,8 +86,8 @@ struct SRSmaQTaskInfoIter { int32_t nBufPos; }; -static void tdRSmaQTaskInfoGetFName(int32_t vgId, int8_t ftype, char *outputName) { - tdGetVndFileName(vgId, VNODE_RSMA_DIR, tdQTaskInfoFname[ftype], outputName); +static void tdRSmaQTaskInfoGetFName(int32_t vgId, int64_t version, char *outputName) { + tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName); } static FORCE_INLINE int32_t tdRSmaQTaskInfoContLen(int32_t lenWithHead) { @@ -115,12 +113,14 @@ void *tdFreeRSmaInfo(SRSmaInfo *pInfo) { for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { SRSmaInfoItem *pItem = &pInfo->items[i]; if (pItem->taskInfo) { - smaDebug("vgId:%d, stb %" PRIi64 " stop fetch-timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId, - i + 1); - taosTmrStopA(&pItem->tmrId); + if (pItem->tmrId) { + smaDebug("vgId:%d, table %" PRIi64 " stop fetch timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId, + i + 1); + taosTmrStopA(&pItem->tmrId); + } tdFreeTaskHandle(&pItem->taskInfo, SMA_VID(pSma), i + 1); } else { - smaDebug("vgId:%d, stb %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma), + smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma), pInfo->suid, i + 1); } } @@ -359,13 +359,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con goto _err; } - smaDebug("vgId:%d, register rsma info succeed for suid:%" PRIi64, SMA_VID(pSma), suid); - - // start the persist timer - if (TASK_TRIGGER_STAT_INIT == - atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pStat), TASK_TRIGGER_STAT_INIT, TASK_TRIGGER_STAT_ACTIVE)) { - taosTmrStart(tdRSmaPersistTrigger, RSMA_QTASKINFO_PERSIST_MS, pStat, RSMA_TMR_HANDLE(pStat)); - } + smaDebug("vgId:%d, register rsma info succeed for table %" PRIi64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; _err: @@ -493,7 +487,6 @@ static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) { } static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { - ASSERT(pMsg != NULL); SSubmitMsgIter msgIter = {0}; SSubmitBlk *pBlock = NULL; SSubmitBlkIter blkIter = {0}; @@ -501,19 +494,26 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { terrno = TSDB_CODE_SUCCESS; - if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1; + if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) { + return -1; + } while (true) { - if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; + if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) { + return -1; + } if (!pBlock) break; tdUidStorePut(pStore, msgIter.suid, NULL); } - if 
(terrno != TSDB_CODE_SUCCESS) return -1; + if (terrno != TSDB_CODE_SUCCESS) { + return -1; + } return 0; } static void tdDestroySDataBlockArray(SArray *pArray) { + // TODO #if 0 for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { SSDataBlock *pDataBlock = taosArrayGet(pArray, i); @@ -598,33 +598,54 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { pSma = RSMA_INFO_SMA(pItem->pRsmaInfo); - // if rsma trigger stat in cancelled or finished, not start fetch task anymore + // if rsma trigger stat in paused, cancelled or finished, not start fetch task int8_t rsmaTriggerStat = atomic_load_8(RSMA_TRIGGER_STAT(pStat)); - if (rsmaTriggerStat == TASK_TRIGGER_STAT_CANCELLED || rsmaTriggerStat == TASK_TRIGGER_STAT_FINISHED) { - taosReleaseRef(smaMgmt.smaRef, pItem->refId); - smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is cancelled", - SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); - return; + switch (rsmaTriggerStat) { + case TASK_TRIGGER_STAT_PAUSED: + case TASK_TRIGGER_STAT_CANCELLED: + case TASK_TRIGGER_STAT_FINISHED: { + taosReleaseRef(smaMgmt.smaRef, pItem->refId); + smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is cancelled", + SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); + return; + } + default: + break; } int8_t fetchTriggerStat = atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE); - if (fetchTriggerStat == TASK_TRIGGER_STAT_ACTIVE) { - smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma), - pItem->level, pItem->pRsmaInfo->suid); + switch (fetchTriggerStat) { + case TASK_TRIGGER_STAT_ACTIVE: { + smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma), + pItem->level, pItem->pRsmaInfo->suid); - tdRefSmaStat(pSma, (SSmaStat *)pStat); + tdRefSmaStat(pSma, (SSmaStat *)pStat); - SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; - qSetStreamInput(pItem->taskInfo, &dataBlock, STREAM_INPUT__DATA_BLOCK, false); - tdFetchAndSubmitRSmaResult(pItem, STREAM_INPUT__DATA_BLOCK); + SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; + qSetStreamInput(pItem->taskInfo, &dataBlock, STREAM_INPUT__DATA_BLOCK, false); + tdFetchAndSubmitRSmaResult(pItem, STREAM_INPUT__DATA_BLOCK); - tdUnRefSmaStat(pSma, (SSmaStat *)pStat); - - } else { - smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is inactive", - SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); + tdUnRefSmaStat(pSma, (SSmaStat *)pStat); + } break; + case TASK_TRIGGER_STAT_PAUSED: { + smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused", + SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); + } break; + case TASK_TRIGGER_STAT_INACTIVE: { + smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is inactive", + SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); + } break; + case TASK_TRIGGER_STAT_INIT: { + smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is init", SMA_VID(pSma), + pItem->level, pItem->pRsmaInfo->suid); + } break; + default: { + smaWarn("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is unknown", + SMA_VID(pSma), pItem->level, pItem->pRsmaInfo->suid); + } break; } + _end: taosReleaseRef(smaMgmt.smaRef, pItem->refId); } @@ -722,7 +743,7 @@ int32_t 
tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { return TSDB_CODE_SUCCESS; } -static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma) { +static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) { SVnode *pVnode = pSma->pVnode; SArray *suidList = taosArrayInit(1, sizeof(tb_uid_t)); @@ -732,7 +753,12 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma) { return TSDB_CODE_FAILED; } - int32_t arrSize = taosArrayGetSize(suidList); + int64_t arrSize = taosArrayGetSize(suidList); + + if (nTables) { + *nTables = arrSize; + } + if (arrSize == 0) { taosArrayDestroy(suidList); smaDebug("vgId:%d, no need to restore rsma env since empty stb id list", TD_VID(pVnode)); @@ -741,9 +767,9 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma) { SMetaReader mr = {0}; metaReaderInit(&mr, SMA_META(pSma), 0); - for (int32_t i = 0; i < arrSize; ++i) { + for (int64_t i = 0; i < arrSize; ++i) { tb_uid_t suid = *(tb_uid_t *)taosArrayGet(suidList, i); - smaDebug("vgId:%d, rsma restore, suid[%d] is %" PRIi64, TD_VID(pVnode), i, suid); + smaDebug("vgId:%d, rsma restore, suid is %" PRIi64, TD_VID(pVnode), suid); if (metaGetTableEntryByUid(&mr, suid) < 0) { smaError("vgId:%d, rsma restore, failed to get table meta for %" PRIi64 " since %s", TD_VID(pVnode), suid, terrstr()); @@ -777,17 +803,24 @@ _err: return TSDB_CODE_FAILED; } -static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma) { +static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int64_t *committed) { SVnode *pVnode = pSma->pVnode; STFile tFile = {0}; - char qTaskInfoFName[TSDB_FILENAME_LEN]; + char qTaskInfoFName[TSDB_FILENAME_LEN] = {0}; - tdRSmaQTaskInfoGetFName(TD_VID(pVnode), TD_QTASK_TMP_F, qTaskInfoFName); - if (tdInitTFile(&tFile, pVnode->pTfs, qTaskInfoFName) < 0) { + tdRSmaQTaskInfoGetFName(TD_VID(pVnode), pVnode->state.committed, qTaskInfoFName); + if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) { goto _err; } if (!taosCheckExistFile(TD_TFILE_FULL_NAME(&tFile))) { + if (pVnode->state.committed > 0) { + smaWarn("vgId:%d, rsma restore for version %" PRIi64 ", not start as %s not exist", TD_VID(pVnode), + pVnode->state.committed, TD_TFILE_FULL_NAME(&tFile)); + } else { + smaDebug("vgId:%d, rsma restore for version %" PRIi64 ", no need as %s not exist", TD_VID(pVnode), + pVnode->state.committed, TD_TFILE_FULL_NAME(&tFile)); + } return TSDB_CODE_SUCCESS; } @@ -799,20 +832,28 @@ static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma) { if (tdRSmaQTaskInfoIterInit(&fIter, &tFile) < 0) { tdRSmaQTaskInfoIterDestroy(&fIter); tdCloseTFile(&tFile); + tdDestroyTFile(&tFile); goto _err; } if (tdRSmaQTaskInfoRestore(pSma, &fIter) < 0) { tdRSmaQTaskInfoIterDestroy(&fIter); tdCloseTFile(&tFile); + tdDestroyTFile(&tFile); goto _err; } tdRSmaQTaskInfoIterDestroy(&fIter); tdCloseTFile(&tFile); + tdDestroyTFile(&tFile); + + // restored successfully from committed + *committed = pVnode->state.committed; + return TSDB_CODE_SUCCESS; _err: - smaError("rsma restore, qtaskinfo reload failed since %s", terrstr()); + smaError("vgId:%d, rsma restore for version %" PRIi64 ", qtaskinfo reload failed since %s", TD_VID(pVnode), + pVnode->state.committed, terrstr()); return TSDB_CODE_FAILED; } @@ -820,35 +861,45 @@ _err: * @brief reload ts data from checkpoint * * @param pSma + * @param committed restore from committed version * @return int32_t */ -static int32_t tdRSmaRestoreTSDataReload(SSma *pSma) { +static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed) { // TODO + smaDebug("vgId:%d, rsma restore 
from %" PRIi64 ", ts data reload success", SMA_VID(pSma), committed); return TSDB_CODE_SUCCESS; _err: - smaError("rsma restore, ts data reload failed since %s", terrstr()); + smaError("vgId:%d, rsma restore from %" PRIi64 ", ts data reload failed since %s", SMA_VID(pSma), committed, + terrstr()); return TSDB_CODE_FAILED; } int32_t tdProcessRSmaRestoreImpl(SSma *pSma) { // step 1: iterate all stables to restore the rsma env - if (tdRSmaRestoreQTaskInfoInit(pSma) < 0) { + int64_t nTables = 0; + if (tdRSmaRestoreQTaskInfoInit(pSma, &nTables) < 0) { goto _err; } + if (nTables <= 0) { + smaDebug("vgId:%d, no need to restore rsma task since no tables", SMA_VID(pSma)); + return TSDB_CODE_SUCCESS; + } + // step 2: retrieve qtaskinfo items from the persistence file(rsma/qtaskinfo) and restore - if (tdRSmaRestoreQTaskInfoReload(pSma) < 0) { + int64_t committed = -1; + if (tdRSmaRestoreQTaskInfoReload(pSma, &committed) < 0) { goto _err; } // step 3: reload ts data from checkpoint - if (tdRSmaRestoreTSDataReload(pSma) < 0) { + if (tdRSmaRestoreTSDataReload(pSma, committed) < 0) { goto _err; } return TSDB_CODE_SUCCESS; _err: - smaError("failed to restore rsma task since %s", terrstr()); + smaError("vgId:%d failed to restore rsma task since %s", SMA_VID(pSma), terrstr()); return TSDB_CODE_FAILED; } @@ -864,9 +915,9 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem * return TSDB_CODE_SUCCESS; } - if (pItem->type == 1) { + if (pItem->type == TSDB_RETENTION_L1) { qTaskInfo = pRSmaInfo->items[0].taskInfo; - } else if (pItem->type == 2) { + } else if (pItem->type == TSDB_RETENTION_L2) { qTaskInfo = pRSmaInfo->items[1].taskInfo; } else { ASSERT(0); @@ -931,19 +982,21 @@ static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isF } if (tdSeekTFile(pTFile, pIter->offset, SEEK_SET) < 0) { - ASSERT(0); return TSDB_CODE_FAILED; } if (tdReadTFile(pTFile, pIter->qBuf, nBytes) != nBytes) { - ASSERT(0); return TSDB_CODE_FAILED; } int32_t infoLen = 0; taosDecodeFixedI32(pIter->qBuf, &infoLen); if (infoLen > nBytes) { - ASSERT(infoLen > RSMA_QTASKINFO_BUFSIZE); + if (infoLen <= RSMA_QTASKINFO_BUFSIZE) { + terrno = TSDB_CODE_RSMA_FILE_CORRUPTED; + smaError("iterate rsma qtaskinfo file %s failed since %s", TD_TFILE_FULL_NAME(pIter->pTFile), terrstr()); + return TSDB_CODE_FAILED; + } pIter->nAlloc = infoLen; void *pBuf = taosMemoryRealloc(pIter->pBuf, infoLen); if (!pBuf) { @@ -955,12 +1008,10 @@ static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isF nBytes = infoLen; if (tdSeekTFile(pTFile, pIter->offset, SEEK_SET)) { - ASSERT(0); return TSDB_CODE_FAILED; } if (tdReadTFile(pTFile, pIter->pBuf, nBytes) != nBytes) { - ASSERT(0); return TSDB_CODE_FAILED; } } @@ -977,7 +1028,6 @@ static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, SRSmaQTaskInfoIter *pIter) { // block iter bool isFinish = false; if (tdRSmaQTaskInfoIterNextBlock(pIter, &isFinish) < 0) { - ASSERT(0); return TSDB_CODE_FAILED; } if (isFinish) { @@ -989,6 +1039,8 @@ static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, SRSmaQTaskInfoIter *pIter) { pIter->qBuf = taosDecodeFixedI32(pIter->qBuf, &qTaskInfoLenWithHead); if (qTaskInfoLenWithHead < RSMA_QTASKINFO_HEAD_LEN) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; + smaError("vgId:%d, restore rsma qtaskinfo file %s failed since %s", SMA_VID(pSma), + TD_TFILE_FULL_NAME(pIter->pTFile), terrstr()); return TSDB_CODE_FAILED; } @@ -1025,22 +1077,20 @@ static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, SRSmaQTaskInfoIter *pIter) { return TSDB_CODE_SUCCESS; 
} -static void *tdRSmaPersistExec(void *param) { - setThreadName("rsma-task-persist"); - SRSmaStat *pRSmaStat = param; - SSma *pSma = pRSmaStat->pSma; - STfs *pTfs = pSma->pVnode->pTfs; - int32_t vid = SMA_VID(pSma); - int64_t toffset = 0; - bool isFileCreated = false; +int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) { + SSma *pSma = pRSmaStat->pSma; + SVnode *pVnode = pSma->pVnode; + int32_t vid = SMA_VID(pSma); + int64_t toffset = 0; + bool isFileCreated = false; - if (TASK_TRIGGER_STAT_CANCELLED == atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat))) { - goto _end; + if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) { + return TSDB_CODE_SUCCESS; } void *infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL); if (!infoHash) { - goto _end; + return TSDB_CODE_SUCCESS; } STFile tFile = {0}; @@ -1074,9 +1124,17 @@ static void tdRSmaPersistExec(void *param) { if (!isFileCreated) { char qTaskInfoFName[TSDB_FILENAME_LEN]; - tdRSmaQTaskInfoGetFName(vid, TD_QTASK_TMP_F, qTaskInfoFName); - tdInitTFile(&tFile, pTfs, qTaskInfoFName); - tdCreateTFile(&tFile, pTfs, true, -1); + tdRSmaQTaskInfoGetFName(vid, pSma->pVnode->state.applied, qTaskInfoFName); + if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) { + smaError("vgId:%d, rsma persist, init %s failed since %s", vid, qTaskInfoFName, terrstr()); + goto _err; + } + if (tdCreateTFile(&tFile, true, -1) < 0) { + smaError("vgId:%d, rsma persist, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr()); + goto _err; + } + smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d serialize qTaskInfo, file %s created", vid, pRSmaInfo->suid, + i + 1, TD_TFILE_FULL_NAME(&tFile)); isFileCreated = true; } @@ -1101,49 +1159,56 @@ static void *tdRSmaPersistExec(void *param) { infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), infoHash); } -_normal: + if (isFileCreated) { if (tdUpdateTFileHeader(&tFile) < 0) { smaError("vgId:%d, rsma, failed to update tfile %s header since %s", vid, TD_TFILE_FULL_NAME(&tFile), tstrerror(terrno)); - tdCloseTFile(&tFile); - tdRemoveTFile(&tFile); goto _err; } else { smaDebug("vgId:%d, rsma, succeed to update tfile %s header", vid, TD_TFILE_FULL_NAME(&tFile)); } tdCloseTFile(&tFile); - - char newFName[TSDB_FILENAME_LEN]; - strncpy(newFName, TD_TFILE_FULL_NAME(&tFile), TSDB_FILENAME_LEN); - char *pos = strstr(newFName, tdQTaskInfoFname[TD_QTASK_TMP_F]); - strncpy(pos, tdQTaskInfoFname[TD_QTASK_TMP_F], TSDB_FILENAME_LEN - POINTER_DISTANCE(pos, newFName)); - if (taosRenameFile(TD_TFILE_FULL_NAME(&tFile), newFName) != 0) { - smaError("vgId:%d, rsma, failed to rename %s to %s", vid, TD_TFILE_FULL_NAME(&tFile), newFName); - goto _err; - } else { - smaDebug("vgId:%d, rsma, succeed to rename %s to %s", vid, TD_TFILE_FULL_NAME(&tFile), newFName); - } + tdDestroyTFile(&tFile); } - goto _end; + return TSDB_CODE_SUCCESS; _err: + smaError("vgId:%d, rsma persist failed since %s", vid, terrstr()); if (isFileCreated) { tdRemoveTFile(&tFile); + tdDestroyTFile(&tFile); } + return TSDB_CODE_FAILED; +} + +static void *tdRSmaPersistExec(void *param) { + setThreadName("rsma-task-persist"); + SRSmaStat *pRSmaStat = param; + SSma *pSma = pRSmaStat->pSma; + + int8_t triggerStat = atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)); + + if (TASK_TRIGGER_STAT_CANCELLED == triggerStat || TASK_TRIGGER_STAT_PAUSED == triggerStat) { + goto _end; + } + + // execution + tdRSmaPersistExecImpl(pRSmaStat); + _end: if (TASK_TRIGGER_STAT_INACTIVE == atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_INACTIVE, 
TASK_TRIGGER_STAT_ACTIVE)) { - smaDebug("vgId:%d, rsma persist task is active again", vid); + smaDebug("vgId:%d, rsma persist task is active again", SMA_VID(pSma)); } else if (TASK_TRIGGER_STAT_CANCELLED == atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_CANCELLED, TASK_TRIGGER_STAT_FINISHED)) { - smaDebug("vgId:%d, rsma persist task is cancelled", vid); + smaDebug("vgId:%d, rsma persist task is cancelled", SMA_VID(pSma)); } else { - smaWarn("vgId:%d, rsma persist task in abnormal stat %" PRIi8, vid, atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat))); - ASSERT(0); + smaWarn("vgId:%d, rsma persist task in stat %" PRIi8, SMA_VID(pSma), atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat))); } + atomic_store_8(RSMA_RUNNING_STAT(pRSmaStat), 0); taosReleaseRef(smaMgmt.smaRef, pRSmaStat->refId); taosThreadExit(NULL); @@ -1166,9 +1231,8 @@ static void tdRSmaPersistTask(SRSmaStat *pRSmaStat) { TASK_TRIGGER_STAT_FINISHED)) { smaDebug("vgId:%d, persist task is cancelled and set finished", SMA_VID(pRSmaStat->pSma)); } else { - smaWarn("vgId:%d, persist task in abnormal stat %" PRIi8, atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)), - SMA_VID(pRSmaStat->pSma)); - ASSERT(0); + smaWarn("vgId:%d, persist task in abnormal stat %" PRIi8, SMA_VID(pRSmaStat->pSma), + atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat))); } atomic_store_8(RSMA_RUNNING_STAT(pRSmaStat), 0); taosReleaseRef(smaMgmt.smaRef, pRSmaStat->refId); @@ -1205,8 +1269,8 @@ static void tdRSmaPersistTrigger(void *param, void *tmrId) { // start persist task tdRSmaPersistTask(pRSmaStat); - taosTmrReset(tdRSmaPersistTrigger, RSMA_QTASKINFO_PERSIST_MS, pRSmaStat, pRSmaStat->tmrHandle, - &pRSmaStat->tmrId); + // taosTmrReset(tdRSmaPersistTrigger, 5000, pRSmaStat, pRSmaStat->tmrHandle, + // RSMA_TMR_ID(pRSmaStat)); } else { atomic_store_8(RSMA_RUNNING_STAT(pRSmaStat), 0); } @@ -1216,6 +1280,9 @@ static void tdRSmaPersistTrigger(void *param, void *tmrId) { atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_FINISHED); smaDebug("rsma persistence not start since cancelled and finished"); } break; + case TASK_TRIGGER_STAT_PAUSED: { + smaDebug("rsma persistence not start since paused"); + } break; case TASK_TRIGGER_STAT_INACTIVE: { smaDebug("rsma persistence not start since inactive"); } break; diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c new file mode 100644 index 0000000000..b2c85642b9 --- /dev/null +++ b/source/dnode/vnode/src/sma/smaSnapshot.c @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "sma.h" \ No newline at end of file diff --git a/source/dnode/vnode/src/sma/smaUtil.c b/source/dnode/vnode/src/sma/smaUtil.c index 17bc2cdaca..14caf4144e 100644 --- a/source/dnode/vnode/src/sma/smaUtil.c +++ b/source/dnode/vnode/src/sma/smaUtil.c @@ -140,7 +140,7 @@ int64_t tdAppendTFile(STFile *pTFile, void *buf, int64_t nbyte, int64_t *offset) return -1; } -#if 1 +#if 0 smaDebug("append to file %s, offset:%" PRIi64 " nbyte:%" PRIi64 " fsize:%" PRIi64, TD_TFILE_FULL_NAME(pTFile), toffset, nbyte, toffset + nbyte); #endif @@ -179,52 +179,85 @@ void tdCloseTFile(STFile *pTFile) { } } -void tdGetVndFileName(int32_t vgId, const char *dname, const char *fname, char *outputName) { - snprintf(outputName, TSDB_FILENAME_LEN, "vnode/vnode%d/%s/%s", vgId, dname, fname); +void tdDestroyTFile(STFile *pTFile) { taosMemoryFreeClear(TD_TFILE_FULL_NAME(pTFile)); } + +void tdGetVndFileName(int32_t vgId, const char *pdname, const char *dname, const char *fname, int64_t version, + char *outputName) { + if (version < 0) { + if (pdname) { + snprintf(outputName, TSDB_FILENAME_LEN, "%s%svnode%svnode%d%s%s%sv%d%s", pdname, TD_DIRSEP, TD_DIRSEP, vgId, + TD_DIRSEP, dname, TD_DIRSEP, vgId, fname); + } else { + snprintf(outputName, TSDB_FILENAME_LEN, "vnode%svnode%d%s%s%sv%d%s", TD_DIRSEP, vgId, TD_DIRSEP, dname, TD_DIRSEP, + vgId, fname); + } + } else { + if (pdname) { + snprintf(outputName, TSDB_FILENAME_LEN, "%s%svnode%svnode%d%s%s%sv%d%s%" PRIi64, pdname, TD_DIRSEP, TD_DIRSEP, + vgId, TD_DIRSEP, dname, TD_DIRSEP, vgId, fname, version); + } else { + snprintf(outputName, TSDB_FILENAME_LEN, "vnode%svnode%d%s%s%sv%d%s%" PRIi64, TD_DIRSEP, vgId, TD_DIRSEP, dname, + TD_DIRSEP, vgId, fname, version); + } + } } -int32_t tdInitTFile(STFile *pTFile, STfs *pTfs, const char *fname) { - char fullname[TSDB_FILENAME_LEN]; - SDiskID did = {0}; +void tdGetVndDirName(int32_t vgId, const char *pdname, const char *dname, bool endWithSep, char *outputName) { + if (pdname) { + if (endWithSep) { + snprintf(outputName, TSDB_FILENAME_LEN, "%s%svnode%svnode%d%s%s%s", pdname, TD_DIRSEP, TD_DIRSEP, vgId, TD_DIRSEP, + dname, TD_DIRSEP); + } else { + snprintf(outputName, TSDB_FILENAME_LEN, "%s%svnode%svnode%d%s%s", pdname, TD_DIRSEP, TD_DIRSEP, vgId, TD_DIRSEP, + dname); + } + } else { + if (endWithSep) { + snprintf(outputName, TSDB_FILENAME_LEN, "vnode%svnode%d%s%s%s", TD_DIRSEP, vgId, TD_DIRSEP, dname, TD_DIRSEP); + } else { + snprintf(outputName, TSDB_FILENAME_LEN, "vnode%svnode%d%s%s", TD_DIRSEP, vgId, TD_DIRSEP, dname); + } + } +} +int32_t tdInitTFile(STFile *pTFile, const char *dname, const char *fname) { TD_TFILE_SET_STATE(pTFile, TD_FILE_STATE_OK); TD_TFILE_SET_CLOSED(pTFile); memset(&(pTFile->info), 0, sizeof(pTFile->info)); pTFile->info.magic = TD_FILE_INIT_MAGIC; - if (tfsAllocDisk(pTfs, 0, &did) < 0) { - terrno = TSDB_CODE_NO_AVAIL_DISK; + char tmpName[TSDB_FILENAME_LEN * 2 + 32] = {0}; + snprintf(tmpName, TSDB_FILENAME_LEN * 2 + 32, "%s%s%s", dname, TD_DIRSEP, fname); + int32_t tmpNameLen = strlen(tmpName) + 1; + pTFile->fname = taosMemoryMalloc(tmpNameLen); + if (!pTFile->fname) { + terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - - tfsInitFile(pTfs, &(pTFile->f), did, fname); + tstrncpy(pTFile->fname, tmpName, tmpNameLen); return 0; } -int32_t tdCreateTFile(STFile *pTFile, STfs *pTfs, bool updateHeader, int8_t fType) { +int32_t tdCreateTFile(STFile *pTFile, bool updateHeader, int8_t fType) { ASSERT(pTFile->info.fsize == 0 && pTFile->info.magic == TD_FILE_INIT_MAGIC); - pTFile->pFile = 
taosOpenFile(TD_TFILE_FULL_NAME(pTFile), TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pTFile->pFile == NULL) { if (errno == ENOENT) { // Try to create directory recursively - char *s = strdup(TD_TFILE_REL_NAME(pTFile)); - if (tfsMkdirRecurAt(pTfs, taosDirName(s), TD_TFILE_DID(pTFile)) < 0) { - taosMemoryFreeClear(s); + char *s = strdup(TD_TFILE_FULL_NAME(pTFile)); + if (taosMulMkDir(taosDirName(s)) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(s); return -1; } - taosMemoryFreeClear(s); - + taosMemoryFree(s); pTFile->pFile = taosOpenFile(TD_TFILE_FULL_NAME(pTFile), TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pTFile->pFile == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; } - } else { - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; } } @@ -244,7 +277,13 @@ int32_t tdCreateTFile(STFile *pTFile, STfs *pTfs, bool updateHeader, int8_t fTyp return 0; } -int32_t tdRemoveTFile(STFile *pTFile) { return tfsRemoveFile(TD_TFILE_F(pTFile)); } +int32_t tdRemoveTFile(STFile *pTFile) { + if (taosRemoveFile(TD_TFILE_FULL_NAME(pTFile)) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + }; + return 0; +} // smaXXXUtil ================ // ... \ No newline at end of file diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 35486a5267..5ce3cfab45 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -183,13 +183,15 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen) { } else { ASSERT(0); } - STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey); - if (pOffset == NULL || pOffset->val.version < offset.val.version) { - if (tqOffsetWrite(pTq->pOffsetStore, &offset) < 0) { - ASSERT(0); - return -1; - } + /*STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey);*/ + /*if (pOffset != NULL) {*/ + /*if (pOffset->val.type == TMQ_OFFSET__LOG && pOffset->val.version < offset.val.version) {*/ + if (tqOffsetWrite(pTq->pOffsetStore, &offset) < 0) { + ASSERT(0); + return -1; } + /*}*/ + /*}*/ return 0; } @@ -375,8 +377,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { taosMemoryFree(pCkHead); } else if (fetchOffsetNew.type == TMQ_OFFSET__SNAPSHOT_DATA) { - tqInfo("retrieve using snapshot req offset: uid %ld ts %ld, actual offset: uid %ld ts %ld", dataRsp.reqOffset.uid, - dataRsp.reqOffset.ts, fetchOffsetNew.uid, fetchOffsetNew.ts); + tqInfo("retrieve using snapshot actual offset: uid %ld ts %ld", fetchOffsetNew.uid, fetchOffsetNew.ts); if (tqScanSnapshot(pTq, &pHandle->execHandle, &dataRsp, fetchOffsetNew, workerId) < 0) { ASSERT(0); } @@ -405,193 +406,6 @@ OVER: return code; } -#if 0 -int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { - SMqPollReq* pReq = pMsg->pCont; - int64_t consumerId = pReq->consumerId; - int64_t timeout = pReq->timeout; - int32_t reqEpoch = pReq->epoch; - int64_t fetchOffset; - int32_t code = 0; - - // get offset to fetch message - if (pReq->currentOffset >= 0) { - fetchOffset = pReq->currentOffset + 1; - } else { - STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pReq->subKey); - if (pOffset != NULL) { - ASSERT(pOffset->val.type == TMQ_OFFSET__LOG); - tqDebug("consumer %ld, restore offset of %s on vg %d, offset(type:log) version: %ld", consumerId, pReq->subKey, - TD_VID(pTq->pVnode), pOffset->val.version); - fetchOffset = pOffset->val.version + 1; - } else { - if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) { - fetchOffset = walGetFirstVer(pTq->pWal); - } else if (pReq->currentOffset == 
TMQ_CONF__RESET_OFFSET__LATEST) { - fetchOffset = walGetCommittedVer(pTq->pWal); - } else if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__NONE) { - tqError("tmq poll: no offset committed for consumer %ld in vg %d, subkey %s", consumerId, TD_VID(pTq->pVnode), - pReq->subKey); - terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET; - return -1; - } - tqDebug("consumer %ld, restore offset of %s on vg %d failed, config is %ld, set to %ld", consumerId, pReq->subKey, - TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset); - } - } - - tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req offset %ld fetch offset %ld", consumerId, - pReq->epoch, TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset); - - STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey)); - /*ASSERT(pHandle);*/ - if (pHandle == NULL) { - tqError("tmq poll: no consumer handle for consumer %ld in vg %d, subkey %s", consumerId, TD_VID(pTq->pVnode), - pReq->subKey); - return -1; - } - - if (pHandle->consumerId != consumerId) { - tqError("tmq poll: consumer handle mismatch for consumer %ld in vg %d, subkey %s, handle consumer id %ld", - consumerId, TD_VID(pTq->pVnode), pReq->subKey, pHandle->consumerId); - return -1; - } - - int32_t consumerEpoch = atomic_load_32(&pHandle->epoch); - while (consumerEpoch < reqEpoch) { - consumerEpoch = atomic_val_compare_exchange_32(&pHandle->epoch, consumerEpoch, reqEpoch); - } - - SMqDataBlkRsp rsp = {0}; - rsp.reqOffset = pReq->currentOffset; - - rsp.blockData = taosArrayInit(0, sizeof(void*)); - rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t)); - - if (rsp.blockData == NULL || rsp.blockDataLen == NULL) { - return -1; - } - - rsp.withTbName = pReq->withTbName; - if (rsp.withTbName) { - rsp.blockTbName = taosArrayInit(0, sizeof(void*)); - } - - if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - rsp.withSchema = false; - } else { - rsp.withSchema = true; - rsp.blockSchema = taosArrayInit(0, sizeof(void*)); - } - -#if 1 - if (pReq->useSnapshot) { - // TODO set ver into snapshot - int64_t lastVer = walGetCommittedVer(pTq->pWal); - if (rsp.reqOffset < lastVer) { - tqInfo("retrieve using snapshot req offset %ld last ver %ld", rsp.reqOffset, lastVer); - tqScanSnapshot(pTq, &pHandle->execHandle, &rsp, workerId); - - if (rsp.blockNum != 0) { - rsp.withTbName = false; - rsp.rspOffset = lastVer; - tqInfo("direct send by snapshot req offset %ld rsp offset %ld", rsp.reqOffset, rsp.rspOffset); - fetchOffset = lastVer; - goto SEND_RSP; - } - } - } -#endif - - SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048); - if (pHeadWithCkSum == NULL) { - return -1; - } - - walSetReaderCapacity(pHandle->pWalReader, 2048); - - while (1) { - consumerEpoch = atomic_load_32(&pHandle->epoch); - if (consumerEpoch > reqEpoch) { - tqWarn("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d, discard req epoch %d", - consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch); - break; - } - - if (tqFetchLog(pTq, pHandle, &fetchOffset, &pHeadWithCkSum) < 0) { - // TODO add push mgr - break; - } - - SWalCont* pHead = &pHeadWithCkSum->head; - - tqDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), fetchOffset, pHead->msgType); - - if (pHead->msgType == TDMT_VND_SUBMIT) { - SSubmitReq* pCont = (SSubmitReq*)&pHead->body; - - if (tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId) < 0) { - /*ASSERT(0);*/ - } - } else { - 
ASSERT(pHandle->fetchMeta); - ASSERT(IS_META_MSG(pHead->msgType)); - tqInfo("fetch meta msg, ver: %ld, type: %d", pHead->version, pHead->msgType); - SMqMetaRsp metaRsp = {0}; - metaRsp.reqOffset = pReq->currentOffset; - metaRsp.rspOffset = fetchOffset; - metaRsp.resMsgType = pHead->msgType; - metaRsp.metaRspLen = pHead->bodyLen; - metaRsp.metaRsp = pHead->body; - if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) { - code = -1; - goto OVER; - } - code = 0; - goto OVER; - } - - // TODO batch optimization: - // TODO continue scan until meeting batch requirement - if (rsp.blockNum > 0 /* threshold */) { - break; - } else { - fetchOffset++; - } - } - - taosMemoryFree(pHeadWithCkSum); - -SEND_RSP: - ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum); - ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum); - if (rsp.withSchema) { - ASSERT(taosArrayGetSize(rsp.blockSchema) == rsp.blockNum); - } - - rsp.rspOffset = fetchOffset; - - if (tqSendDataRsp(pTq, pMsg, pReq, &rsp) < 0) { - code = -1; - } -OVER: - // TODO wrap in destroy func - taosArrayDestroy(rsp.blockDataLen); - taosArrayDestroyP(rsp.blockData, (FDelete)taosMemoryFree); - - if (rsp.withSchema) { - taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); - } - - if (rsp.withTbName) { - taosArrayDestroyP(rsp.blockTbName, (FDelete)taosMemoryFree); - } - - return code; -} -#endif - int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) { SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg; diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9e5c67fed4..bc992b2211 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -120,7 +120,9 @@ bool tqNextDataBlock(SStreamReader* pHandle) { return true; } void* ret = taosHashGet(pHandle->tbIdHash, &pHandle->msgIter.uid, sizeof(int64_t)); + /*tqDebug("search uid %ld", pHandle->msgIter.uid);*/ if (ret != NULL) { + /*tqDebug("find uid %ld", pHandle->msgIter.uid);*/ return true; } } diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 0bb9918488..dbbb2b2661 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -185,5 +185,7 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { .contLen = ntohl(pReq->length), }; - ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0); + if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) { + tqDebug("failed to put into write-queue since %s", terrstr()); + } } diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 79e63d5abc..0b28d6bf10 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -28,12 +28,12 @@ const SVnodeCfg vnodeCfgDefault = { .update = 1, .compression = 2, .slLevel = 5, - .days = 10, + .days = 14400, .minRows = 100, .maxRows = 4096, - .keep2 = 3650, - .keep0 = 3650, - .keep1 = 3650}, + .keep2 = 5256000, + .keep0 = 5256000, + .keep1 = 5256000}, .walCfg = {.vgId = -1, .fsyncPeriod = 0, .retentionPeriod = 0, .rollPeriod = 0, .segSize = 0, .level = TAOS_WAL_WRITE}, .hashBegin = 0, diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 02a0783927..40112a1ee8 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -69,6 +69,9 @@ int vnodeBegin(SVnode *pVnode) { } } + // begin sma + smaBegin(pVnode->pSma); // TODO: refactor to include the rsma1/rsma2 tsdbBegin() after 
tsdb_refact branch merged + return 0; } @@ -229,6 +232,9 @@ int vnodeCommit(SVnode *pVnode) { return -1; } + // preCommit + smaPreCommit(pVnode->pSma); + // commit each sub-system if (metaCommit(pVnode->pMeta) < 0) { ASSERT(0); @@ -269,6 +275,9 @@ int vnodeCommit(SVnode *pVnode) { pVnode->state.committed = info.state.committed; + // postCommit + smaPostCommit(pVnode->pSma); + // apply the commit (TODO) vnodeBufPoolReset(pVnode->onCommit); pVnode->onCommit->next = pVnode->pPool; diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 57d7386667..0c654bee1f 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -152,12 +152,11 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { return pVnode; _err: - if (pVnode->pSma) smaCloseEnv(pVnode->pSma); if (pVnode->pQuery) vnodeQueryClose(pVnode); if (pVnode->pTq) tqClose(pVnode->pTq); if (pVnode->pWal) walClose(pVnode->pWal); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); - if (pVnode->pSma) smaCloseEx(pVnode->pSma); + if (pVnode->pSma) smaClose(pVnode->pSma); if (pVnode->pMeta) metaClose(pVnode->pMeta); tsem_destroy(&(pVnode->canCommit)); @@ -167,14 +166,13 @@ _err: void vnodeClose(SVnode *pVnode) { if (pVnode) { - smaCloseEnv(pVnode->pSma); vnodeCommit(pVnode); vnodeSyncClose(pVnode); vnodeQueryClose(pVnode); walClose(pVnode->pWal); tqClose(pVnode->pTq); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); - smaCloseEx(pVnode->pSma); + smaClose(pVnode->pSma); metaClose(pVnode->pMeta); vnodeCloseBufPool(pVnode); // destroy handle diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index b73eaa4288..649e8299f4 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -779,6 +779,7 @@ _exit: taosArrayDestroy(submitRsp.pArray); // TODO: the partial success scenario and the error case + // => If partial success, extract the success submitted rows and reconstruct a new submit msg, and push to level 1/level 2. // TODO: refactor if ((terrno == TSDB_CODE_SUCCESS) && (pRsp->code == TSDB_CODE_SUCCESS)) { tdProcessRSmaSubmit(pVnode->pSma, pReq, STREAM_INPUT__DATA_SUBMIT); diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 80ba80e61a..0445eda7af 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -256,70 +256,133 @@ int32_t vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { SRpcMsg *pRpcMsg = pMsg; - if (pRpcMsg->msgType == TDMT_SYNC_TIMEOUT) { - SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + // ToDo: ugly! 
use function pointer + // use different strategy + if (syncNodeStrategy(pSyncNode) == SYNC_STRATEGY_NO_SNAPSHOT) { + if (pRpcMsg->msgType == TDMT_SYNC_TIMEOUT) { + SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); + syncTimeoutDestroy(pSyncMsg); - ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); - syncTimeoutDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == TDMT_SYNC_PING) { + SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingCb(pSyncNode, pSyncMsg); + syncPingDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_PING) { - SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_PING_REPLY) { + SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg); + syncPingReplyDestroy(pSyncMsg); - ret = syncNodeOnPingCb(pSyncNode, pSyncMsg); - syncPingDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { + SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg, NULL); + syncClientRequestDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_PING_REPLY) { - SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE) { + SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg); + syncRequestVoteDestroy(pSyncMsg); - ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg); - syncPingReplyDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE_REPLY) { + SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg); + syncRequestVoteReplyDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { - SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES) { + SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg); + syncAppendEntriesDestroy(pSyncMsg); - ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg, NULL); - syncClientRequestDestroy(pSyncMsg); + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_REPLY) { + SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg); + syncAppendEntriesReplyDestroy(pSyncMsg); - } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE) { - SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); + } else if (pRpcMsg->msgType == TDMT_SYNC_SET_VNODE_STANDBY) { + ret = vnodeSetStandBy(pVnode); + if (ret != 0 && terrno != 0) ret = terrno; + SRpcMsg rsp = {.code = ret, .info = pMsg->info}; + tmsgSendRsp(&rsp); + } else { + vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType); + ret = -1; + } - ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg); - syncRequestVoteDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE_REPLY) { - SyncRequestVoteReply *pSyncMsg = 
syncRequestVoteReplyFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); - - ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg); - syncRequestVoteReplyDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES) { - SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); - - ret = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg); - syncAppendEntriesDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_REPLY) { - SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg); - assert(pSyncMsg != NULL); - - ret = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg); - syncAppendEntriesReplyDestroy(pSyncMsg); - - } else if (pRpcMsg->msgType == TDMT_SYNC_SET_VNODE_STANDBY) { - ret = vnodeSetStandBy(pVnode); - if (ret != 0 && terrno != 0) ret = terrno; - SRpcMsg rsp = {.code = ret, .info = pMsg->info}; - tmsgSendRsp(&rsp); } else { - vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType); - ret = -1; + // use wal first strategy + + if (pRpcMsg->msgType == TDMT_SYNC_TIMEOUT) { + SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); + syncTimeoutDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_PING) { + SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingCb(pSyncNode, pSyncMsg); + syncPingDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_PING_REPLY) { + SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg); + syncPingReplyDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { + SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg, NULL); + syncClientRequestDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_CLIENT_REQUEST_BATCH) { + SyncClientRequestBatch *pSyncMsg = syncClientRequestBatchFromRpcMsg(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnClientRequestBatchCb(pSyncNode, pSyncMsg); + syncClientRequestBatchDestroyDeep(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE) { + SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg); + syncRequestVoteDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_REQUEST_VOTE_REPLY) { + SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg); + syncRequestVoteReplyDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_BATCH) { + SyncAppendEntriesBatch *pSyncMsg = syncAppendEntriesBatchFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesSnapshot2Cb(pSyncNode, pSyncMsg); + syncAppendEntriesBatchDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_APPEND_ENTRIES_REPLY) { + SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg); + ASSERT(pSyncMsg != NULL); + ret = syncNodeOnAppendEntriesReplySnapshot2Cb(pSyncNode, pSyncMsg); + syncAppendEntriesReplyDestroy(pSyncMsg); + + } else if (pRpcMsg->msgType == TDMT_SYNC_SET_VNODE_STANDBY) { + ret = vnodeSetStandBy(pVnode); + if (ret != 0 && terrno != 0) ret = terrno; + SRpcMsg rsp = {.code = ret, .info = pMsg->info}; + 
tmsgSendRsp(&rsp); + } else { + vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType); + ret = -1; + } } syncNodeRelease(pSyncNode); @@ -415,7 +478,7 @@ static int32_t vnodeSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { ret static int32_t vnodeSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) { return 0; } -static int32_t vnodeSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) { return 0; } +static int32_t vnodeSnapshotStartWrite(struct SSyncFSM *pFsm, void *pParam, void **ppWriter) { return 0; } static int32_t vnodeSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) { return 0; } @@ -442,7 +505,8 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) { int32_t vnodeSyncOpen(SVnode *pVnode, char *path) { SSyncInfo syncInfo = { - .snapshotEnable = false, + .snapshotStrategy = SYNC_STRATEGY_NO_SNAPSHOT, + .batchSize = 10, .vgId = pVnode->config.vgId, .isStandBy = pVnode->config.standby, .syncCfg = pVnode->config.syncCfg, diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index a5768d9003..9d0e3871cc 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -278,7 +278,7 @@ typedef struct SCtgAsyncFps { typedef struct SCtgApiStat { -#ifdef WINDOWS +#if defined(WINDOWS) || defined(_TD_DARWIN_64) size_t avoidCompilationErrors; #endif diff --git a/source/libs/catalog/test/CMakeLists.txt b/source/libs/catalog/test/CMakeLists.txt index d74eef7855..b2e1c3b4ca 100644 --- a/source/libs/catalog/test/CMakeLists.txt +++ b/source/libs/catalog/test/CMakeLists.txt @@ -1,23 +1,25 @@ MESSAGE(STATUS "build catalog unit test") -# GoogleTest requires at least C++11 -SET(CMAKE_CXX_STANDARD 11) -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) +IF(NOT TD_DARWIN) + # GoogleTest requires at least C++11 + SET(CMAKE_CXX_STANDARD 11) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -ADD_EXECUTABLE(catalogTest ${SOURCE_LIST}) -TARGET_LINK_LIBRARIES( - catalogTest - PUBLIC os util common catalog transport gtest qcom taos_static -) + ADD_EXECUTABLE(catalogTest ${SOURCE_LIST}) + TARGET_LINK_LIBRARIES( + catalogTest + PUBLIC os util common catalog transport gtest qcom taos_static + ) -TARGET_INCLUDE_DIRECTORIES( - catalogTest - PUBLIC "${TD_SOURCE_DIR}/include/libs/catalog/" - PRIVATE "${TD_SOURCE_DIR}/source/libs/catalog/inc" -) + TARGET_INCLUDE_DIRECTORIES( + catalogTest + PUBLIC "${TD_SOURCE_DIR}/include/libs/catalog/" + PRIVATE "${TD_SOURCE_DIR}/source/libs/catalog/inc" + ) -# add_test( -# NAME catalogTest -# COMMAND catalogTest -# ) + # add_test( + # NAME catalogTest + # COMMAND catalogTest + # ) +ENDIF() diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 06e9af7569..6aca581f45 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -29,13 +29,17 @@ extern "C" { #define EXPLAIN_TAG_SCAN_FORMAT "Tag Scan on %s" #define EXPLAIN_TBL_SCAN_FORMAT "Table Scan on %s" #define EXPLAIN_SYSTBL_SCAN_FORMAT "System Table Scan on %s" +#define EXPLAIN_DISTBLK_SCAN_FORMAT "Block Dist Scan on %s" +#define EXPLAIN_LASTROW_SCAN_FORMAT "Last Row Scan on %s" #define EXPLAIN_PROJECTION_FORMAT "Projection" #define EXPLAIN_JOIN_FORMAT "%s" #define EXPLAIN_AGG_FORMAT "Aggragate" #define EXPLAIN_INDEF_ROWS_FORMAT "Indefinite Rows Function" #define EXPLAIN_EXCHANGE_FORMAT "Data Exchange %d:1" #define EXPLAIN_SORT_FORMAT "Sort" +#define EXPLAIN_GROUP_SORT_FORMAT "Group Sort" 
#define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s" +#define EXPLAIN_MERGE_INTERVAL_FORMAT "Merge Interval on Column %s" #define EXPLAIN_FILL_FORMAT "Fill" #define EXPLAIN_SESSION_FORMAT "Session" #define EXPLAIN_STATE_WINDOW_FORMAT "StateWindow on Column %s" @@ -62,10 +66,12 @@ extern "C" { #define EXPLAIN_COST_FORMAT "cost=%.2f..%.2f" #define EXPLAIN_ROWS_FORMAT "rows=%" PRIu64 #define EXPLAIN_COLUMNS_FORMAT "columns=%d" +#define EXPLAIN_PSEUDO_COLUMNS_FORMAT "pseudo_columns=%d" #define EXPLAIN_WIDTH_FORMAT "width=%d" #define EXPLAIN_TABLE_SCAN_FORMAT "order=[asc|%d desc|%d]" #define EXPLAIN_GROUPS_FORMAT "groups=%d" #define EXPLAIN_WIDTH_FORMAT "width=%d" +#define EXPLAIN_INTERVAL_VALUE_FORMAT "interval=%" PRId64 "%c" #define EXPLAIN_FUNCTIONS_FORMAT "functions=%d" #define EXPLAIN_EXECINFO_FORMAT "cost=%.3f..%.3f rows=%" PRIu64 #define EXPLAIN_MODE_FORMAT "mode=%s" diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 7af36a0842..fde53b7064 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -199,6 +199,31 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo pPhysiChildren = mergePhysiNode->scan.node.pChildren; break; } + case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: { + SBlockDistScanPhysiNode *distPhysiNode = (SBlockDistScanPhysiNode *)pNode; + pPhysiChildren = distPhysiNode->node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: { + SLastRowScanPhysiNode *lastRowPhysiNode = (SLastRowScanPhysiNode *)pNode; + pPhysiChildren = lastRowPhysiNode->node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: { + SGroupSortPhysiNode *groupSortPhysiNode = (SGroupSortPhysiNode *)pNode; + pPhysiChildren = groupSortPhysiNode->node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: { + SMergeIntervalPhysiNode *mergeIntPhysiNode = (SMergeIntervalPhysiNode *)pNode; + pPhysiChildren = mergeIntPhysiNode->window.node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: { + SInterpFuncPhysiNode *interpPhysiNode = (SInterpFuncPhysiNode *)pNode; + pPhysiChildren = interpPhysiNode->node.pChildren; + break; + } default: qError("not supported physical node type %d", pNode->type); QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR); @@ -378,6 +403,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTagScanNode->pScanCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pTagScanNode->pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); @@ -415,6 +444,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTblScanNode->scan.pScanCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pTblScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTblScanNode->scan.pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTblScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_TABLE_SCAN_FORMAT, pTblScanNode->scanSeq[0], pTblScanNode->scanSeq[1]); @@ 
-516,6 +549,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pSTblScanNode->scan.pScanCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pSTblScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pSTblScanNode->scan.pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSTblScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); @@ -1131,6 +1168,258 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } + case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: { + SBlockDistScanPhysiNode *pDistScanNode = (SBlockDistScanPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_DISTBLK_SCAN_FORMAT, pDistScanNode->tableName.tname); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pDistScanNode->pScanCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pDistScanNode->pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pDistScanNode->pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDistScanNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pDistScanNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDistScanNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pDistScanNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pDistScanNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: { + SLastRowScanPhysiNode *pLastRowNode = (SLastRowScanPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_LASTROW_SCAN_FORMAT, pLastRowNode->tableName.tname); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pLastRowNode->pScanCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pLastRowNode->pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pLastRowNode->pScanPseudoCols->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pLastRowNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, 
EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pLastRowNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pLastRowNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pLastRowNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pLastRowNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: { + SGroupSortPhysiNode *pSortNode = (SGroupSortPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_GROUP_SORT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + + SDataBlockDescNode *pDescNode = pSortNode->node.pOutputDataBlockDesc; + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (EXPLAIN_MODE_ANALYZE == ctx->mode) { + // sort key + EXPLAIN_ROW_NEW(level + 1, "Sort Key: "); + if (pResNode->pExecInfo) { + for (int32_t i = 0; i < LIST_LENGTH(pSortNode->pSortKeys); ++i) { + SOrderByExprNode *ptn = (SOrderByExprNode *)nodesListGetNode(pSortNode->pSortKeys, i); + EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr)); + } + } + + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + // sort method + EXPLAIN_ROW_NEW(level + 1, "Sort Method: "); + + int32_t nodeNum = taosArrayGetSize(pResNode->pExecInfo); + SExplainExecInfo *execInfo = taosArrayGet(pResNode->pExecInfo, 0); + SSortExecInfo *pExecInfo = (SSortExecInfo *)execInfo->verboseInfo; + EXPLAIN_ROW_APPEND("%s", pExecInfo->sortMethod == SORT_QSORT_T ? 
"quicksort" : "merge sort"); + if (pExecInfo->sortBuffer > 1024 * 1024) { + EXPLAIN_ROW_APPEND(" Buffers:%.2f Mb", pExecInfo->sortBuffer / (1024 * 1024.0)); + } else if (pExecInfo->sortBuffer > 1024) { + EXPLAIN_ROW_APPEND(" Buffers:%.2f Kb", pExecInfo->sortBuffer / (1024.0)); + } else { + EXPLAIN_ROW_APPEND(" Buffers:%d b", pExecInfo->sortBuffer); + } + + EXPLAIN_ROW_APPEND(" loops:%d", pExecInfo->loops); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + } + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pSortNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pSortNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pSortNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: { + SMergeIntervalPhysiNode *pIntNode = (SMergeIntervalPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_MERGE_INTERVAL_FORMAT, nodesGetNameFromColumnNode(pIntNode->window.pTspk)); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + uint8_t precision = getIntervalPrecision(pIntNode); + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIME_WINDOWS_FORMAT, + INVERAL_TIME_FROM_PRECISION_TO_UNIT(pIntNode->interval, pIntNode->intervalUnit, precision), + pIntNode->intervalUnit, pIntNode->offset, getPrecisionUnit(precision), + INVERAL_TIME_FROM_PRECISION_TO_UNIT(pIntNode->sliding, pIntNode->slidingUnit, precision), + pIntNode->slidingUnit); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pIntNode->window.node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pIntNode->window.node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: { + SInterpFuncPhysiNode *pInterpNode = (SInterpFuncPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + if 
(pInterpNode->pFuncs) { + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pInterpNode->pFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + + EXPLAIN_ROW_APPEND(EXPLAIN_MODE_FORMAT, nodesGetFillModeString(pInterpNode->fillMode)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pInterpNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pInterpNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + if (pInterpNode->pFillValues) { + SNodeListNode *pValues = (SNodeListNode *)pInterpNode->pFillValues; + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILL_VALUE_FORMAT); + SNode *tNode = NULL; + int32_t i = 0; + FOREACH(tNode, pValues->pNodeList) { + if (i) { + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + SValueNode *tValue = (SValueNode *)tNode; + char *value = nodesGetStrValueFromNode(tValue); + EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, value); + taosMemoryFree(value); + ++i; + } + + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_INTERVAL_VALUE_FORMAT, pInterpNode->interval, pInterpNode->intervalUnit); + EXPLAIN_ROW_END(); + + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIMERANGE_FORMAT, pInterpNode->timeRange.skey, pInterpNode->timeRange.ekey); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pInterpNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pInterpNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } default: qError("not supported physical node type %d", pNode->type); return TSDB_CODE_QRY_APP_ERROR; diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index d1d2c55acf..f0f0361031 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -356,6 +356,7 @@ typedef struct SStreamBlockScanInfo { SUpdateInfo* pUpdateInfo; EStreamScanMode scanMode; + SOperatorInfo* pStreamScanOp; SOperatorInfo* pSnapshotReadOp; SArray* childIds; SessionWindowSupporter sessionSup; @@ -427,7 +428,7 @@ typedef struct SIntervalAggOperatorInfo { STimeWindowAggSupp twAggSup; bool invertible; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. 
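Note: the INTERP_FUNC explain case above renders its fill-value list as a single space-separated row, appending a blank before every value except the first. A standalone sketch of that join pattern is below; the real code walks an SNodeList with FOREACH and frees each converted value string, while this sketch uses a plain string array and simplified buffer handling.

```c
#include <stdio.h>
#include <string.h>

/* Append values to `row`, separated by single blanks, stopping if the
 * buffer fills up. Mirrors the "i ? blank : nothing" pattern in the diff. */
static void appendFillValues(char *row, size_t cap, const char **vals, int n) {
  size_t len = strlen(row);
  for (int i = 0; i < n; ++i) {
    if (len >= cap) break;
    len += (size_t)snprintf(row + len, cap - len, "%s%s", i ? " " : "", vals[i]);
  }
}

int main(void) {
  char row[128] = "Fill Values: ";
  const char *vals[] = {"0", "NULL", "1.5"};
  appendFillValues(row, sizeof(row), vals, 3);
  printf("%s\n", row);  /* Fill Values: 0 NULL 1.5 */
  return 0;
}
```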
- bool ignoreCloseWindow; + bool ignoreExpiredData; } SIntervalAggOperatorInfo; typedef struct SStreamFinalIntervalOperatorInfo { @@ -449,7 +450,7 @@ typedef struct SStreamFinalIntervalOperatorInfo { SArray* pPullWins; // SPullWindowInfo int32_t pullIndex; SSDataBlock* pPullDataRes; - bool ignoreCloseWindow; + bool ignoreExpiredData; } SStreamFinalIntervalOperatorInfo; typedef struct SAggOperatorInfo { @@ -587,7 +588,7 @@ typedef struct SStreamSessionAggOperatorInfo { SArray* pChildren; // cache for children's result; final stream operator SPhysiNode* pPhyNode; // create new child bool isFinal; - bool ignoreCloseWindow; + bool ignoreExpiredData; } SStreamSessionAggOperatorInfo; typedef struct STimeSliceOperatorInfo { @@ -631,7 +632,7 @@ typedef struct SStreamStateAggOperatorInfo { void* pDelIterator; SArray* pScanWindow; SArray* pChildren; // cache for children's result; - bool ignoreCloseWindow; + bool ignoreExpiredData; } SStreamStateAggOperatorInfo; typedef struct SSortedMergeOperatorInfo { @@ -679,7 +680,7 @@ typedef struct SJoinOperatorInfo { SSDataBlock *pRight; int32_t rightPos; SColumnInfo rightCol; - SNode *pOnCondition; + SNode *pCondAfterMerge; } SJoinOperatorInfo; #define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED) diff --git a/source/libs/executor/src/dataSinkMgt.c b/source/libs/executor/src/dataSinkMgt.c index ffa9822e92..498171e88c 100644 --- a/source/libs/executor/src/dataSinkMgt.c +++ b/source/libs/executor/src/dataSinkMgt.c @@ -35,7 +35,7 @@ int32_t dsDataSinkGetCacheSize(SDataSinkStat *pStat) { int32_t dsCreateDataSinker(const SDataSinkNode *pDataSink, DataSinkHandle* pHandle, void* pParam) { - switch (nodeType(pDataSink)) { + switch ((int)nodeType(pDataSink)) { case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: return createDataDispatcher(&gDataSinkManager, pDataSink, pHandle); case QUERY_NODE_PHYSICAL_PLAN_DELETE: diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 31edc46b4d..0e76607c8f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -145,10 +145,12 @@ static SArray* filterQualifiedChildTables(const SStreamBlockScanInfo* pScanInfo, continue; } + // TODO handle ntb case if (mr.me.type != TSDB_CHILD_TABLE || mr.me.ctbEntry.suid != pScanInfo->tableUid) { continue; } - // TODO handle ntb case + /*pScanInfo->pStreamScanOp->pTaskInfo->tableqinfoList.*/ + // handle multiple partition taosArrayPush(qa, id); } diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 6ca4327c96..25b61e15c3 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1422,7 +1422,7 @@ void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t return; } #ifdef BUF_PAGE_DEBUG - qDebug("page_setbuf, groupId:%"PRIu64, groupId); + qDebug("page_setbuf, groupId:%" PRIu64, groupId); #endif doSetTableGroupOutputBuf(pOperator, pAggInfo, numOfOutput, groupId); @@ -1570,9 +1570,9 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI releaseBufPage(pBuf, page); pBlock->info.rows += pRow->numOfRows; -// if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full -// break; -// } + // if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full + // break; + // } } qDebug("%s result generated, rows:%d, groupId:%" PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows, @@ -2027,8 +2027,9 @@ static int32_t doSendFetchDataRequest(SExchangeInfo* 
pExchangeInfo, SExecTaskInf ASSERT(pDataInfo->status == EX_SOURCE_DATA_NOT_READY); - qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", execId:%d, %d/%" PRIzu, GET_TASKID(pTaskInfo), - pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId, pSource->execId, sourceIndex, totalSources); + qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", execId:%d, %d/%" PRIzu, + GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId, pSource->execId, + sourceIndex, totalSources); pMsg->header.vgId = htonl(pSource->addr.nodeId); pMsg->sId = htobe64(pSource->schedId); @@ -2163,8 +2164,8 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx SSDataBlock* pRes = pExchangeInfo->pResult; SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo; if (pRsp->numOfRows == 0) { - qDebug("%s vgId:%d, taskId:0x%" PRIx64 " execId:%d index:%d completed, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 - ", completed:%d try next %d/%" PRIzu, + qDebug("%s vgId:%d, taskId:0x%" PRIx64 " execId:%d index:%d completed, rowsOfSource:%" PRIu64 + ", totalRows:%" PRIu64 ", completed:%d try next %d/%" PRIzu, GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pDataInfo->totalRows, pExchangeInfo->loadInfo.totalRows, completed + 1, i + 1, totalSources); pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; @@ -2183,18 +2184,19 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx } if (pRsp->completed == 1) { - qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d" + qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 + " execId:%d" " index:%d completed, numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 ", completed:%d try next %d/%" PRIzu, - GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRes->info.rows, pDataInfo->totalRows, - pLoadInfo->totalRows, pLoadInfo->totalSize, completed + 1, i + 1, totalSources); + GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, i, pRes->info.rows, + pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, completed + 1, i + 1, totalSources); completed += 1; pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; } else { qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64 ", totalBytes:%" PRIu64, - GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows, pLoadInfo->totalRows, - pLoadInfo->totalSize); + GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows, + pLoadInfo->totalRows, pLoadInfo->totalSize); } taosMemoryFreeClear(pDataInfo->pRsp); @@ -2267,8 +2269,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { SDownstreamSourceNode* pSource = taosArrayGet(pExchangeInfo->pSources, pExchangeInfo->current); if (pDataInfo->code != TSDB_CODE_SUCCESS) { - qError("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d error happens, code:%s", GET_TASKID(pTaskInfo), pSource->addr.nodeId, - pSource->taskId, pSource->execId, tstrerror(pDataInfo->code)); + qError("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d error happens, code:%s", GET_TASKID(pTaskInfo), + pSource->addr.nodeId, pSource->taskId, pSource->execId, tstrerror(pDataInfo->code)); pOperator->pTaskInfo->code = pDataInfo->code; return NULL; } @@ -2276,8 +2278,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* 
pOperator) { SRetrieveTableRsp* pRsp = pDataInfo->pRsp; SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo; if (pRsp->numOfRows == 0) { - qDebug("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d %d of total completed, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 - " try next", + qDebug("%s vgId:%d, taskID:0x%" PRIx64 " execId:%d %d of total completed, rowsOfSource:%" PRIu64 + ", totalRows:%" PRIu64 " try next", GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pExchangeInfo->current + 1, pDataInfo->totalRows, pLoadInfo->totalRows); @@ -2296,16 +2298,17 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { if (pRsp->completed == 1) { qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, rowsOfSource:%" PRIu64 ", totalRows:%" PRIu64 ", totalBytes:%" PRIu64 " try next %d/%" PRIzu, - GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows, pDataInfo->totalRows, - pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1, totalSources); + GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows, + pDataInfo->totalRows, pLoadInfo->totalRows, pLoadInfo->totalSize, pExchangeInfo->current + 1, + totalSources); pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; pExchangeInfo->current += 1; } else { qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, totalRows:%" PRIu64 ", totalBytes:%" PRIu64, - GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows, pLoadInfo->totalRows, - pLoadInfo->totalSize); + GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->taskId, pSource->execId, pRes->info.rows, + pLoadInfo->totalRows, pLoadInfo->totalSize); } pOperator->resultInfo.totalRows += pRes->info.rows; @@ -2840,11 +2843,18 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { int32_t type = pOperator->operatorType; + + pOperator->status = OP_OPENED; + if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { SStreamBlockScanInfo* pScanInfo = pOperator->info; pScanInfo->blockType = STREAM_INPUT__DATA_SCAN; + pScanInfo->pSnapshotReadOp->status = OP_OPENED; + STableScanInfo* pInfo = pScanInfo->pSnapshotReadOp->info; + ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER); + if (uid == 0) { pInfo->noTable = 1; return TSDB_CODE_SUCCESS; @@ -2858,6 +2868,20 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { pInfo->noTable = 0; if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); + bool found = false; + for (int32_t i = 0; i < tableSz; i++) { + STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i); + if (pTableInfo->uid == uid) { + found = true; + pInfo->currentTable = i; + } + } + // TODO after processing drop, found can be false + ASSERT(found); + tsdbSetTableId(pInfo->dataReader, uid); int64_t oldSkey = pInfo->cond.twindows[0].skey; pInfo->cond.twindows[0].skey = ts + 1; @@ -2865,7 +2889,11 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { pInfo->cond.twindows[0].skey = oldSkey; pInfo->scanTimes = 0; pInfo->curTWinIdx = 0; + + qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts, + pInfo->currentTable, tableSz); } + 
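Note: the doPrepareScan change above maps the requested table uid back to its index in the task's table list with a linear scan, and the TODO records that the lookup can legitimately fail once dropped tables are handled. A simplified sketch of that lookup, assuming a plain array instead of the SArray of STableKeyInfo used in the real code:

```c
#include <stdint.h>

typedef struct { uint64_t uid; } TableKey;  /* stand-in for STableKeyInfo */

/* Return the index of `uid` in `tables`, or -1 if it is absent
 * (for example because the table was dropped in the meantime). */
static int32_t findTableIndex(const TableKey *tables, int32_t n, uint64_t uid) {
  for (int32_t i = 0; i < n; ++i) {
    if (tables[i].uid == uid) {
      return i;
    }
  }
  return -1;
}
```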
return TSDB_CODE_SUCCESS; } else { @@ -3250,6 +3278,10 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { doSetOperatorCompleted(pOperator); break; } + if (pBlock->info.type == STREAM_RETRIEVE) { + // for stream interval + return pBlock; + } // the pDataBlock are always the same one, no need to call this again int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag); @@ -4094,14 +4126,14 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, ASSERT(nodeType(pNew) == QUERY_NODE_VALUE); SValueNode* pValue = (SValueNode*)pNew; - if (pValue->node.resType.type == TSDB_DATA_TYPE_NULL) { + if (pValue->node.resType.type == TSDB_DATA_TYPE_NULL || pValue->isNull) { isNull[index++] = 1; continue; } else { isNull[index++] = 0; char* data = nodesGetValueFromNode(pValue); - if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON){ - if(tTagIsJson(data)){ + if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON) { + if (tTagIsJson(data)) { terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR; taosMemoryFree(keyBuf); nodesClearList(groupNew); @@ -4166,7 +4198,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) { STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode; int32_t code = createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId); - if(code){ + if (code) { pTaskInfo->code = code; return NULL; } @@ -4195,7 +4227,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo }; if (pHandle) { int32_t code = createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId); - if(code){ + if (code) { pTaskInfo->code = code; return NULL; } diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 6fbda77808..e9995ed77a 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -53,13 +53,28 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t pOperator->info = pInfo; pOperator->pTaskInfo = pTaskInfo; - SNode* pOnCondition = pJoinNode->pOnConditions; - if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) { - SOperatorNode* pNode = (SOperatorNode*)pOnCondition; + SNode* pMergeCondition = pJoinNode->pMergeCondition; + if (nodeType(pMergeCondition) == QUERY_NODE_OPERATOR) { + SOperatorNode* pNode = (SOperatorNode*)pMergeCondition; setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft); setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight); - } else if (nodeType(pOnCondition) == QUERY_NODE_LOGIC_CONDITION) { - extractTimeCondition(pInfo, (SLogicConditionNode*)pOnCondition); + } else { + ASSERT(false); + } + + if (pJoinNode->pOnConditions != NULL && pJoinNode->node.pConditions != NULL) { + pInfo->pCondAfterMerge = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION); + SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pInfo->pCondAfterMerge); + pLogicCond->pParameterList = nodesMakeList(); + nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->pOnConditions)); + nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->node.pConditions)); + pLogicCond->condType = LOGIC_COND_TYPE_AND; + } else if (pJoinNode->pOnConditions != NULL) { + pInfo->pCondAfterMerge = nodesCloneNode(pJoinNode->pOnConditions); + } else if (pJoinNode->node.pConditions != NULL) { + pInfo->pCondAfterMerge = 
nodesCloneNode(pJoinNode->node.pConditions); + } else { + pInfo->pCondAfterMerge = NULL; } pOperator->fpSet = @@ -88,15 +103,12 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) { void destroyMergeJoinOperator(void* param, int32_t numOfOutput) { SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param; + nodesDestroyNode(pJoinOperator->pCondAfterMerge); } -SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { +static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes) { SJoinOperatorInfo* pJoinInfo = pOperator->info; - SSDataBlock* pRes = pJoinInfo->pRes; - blockDataCleanup(pRes); - blockDataEnsureCapacity(pRes, 4096); - int32_t nrows = 0; while (1) { @@ -181,7 +193,28 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { break; } } +} +SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { + SJoinOperatorInfo* pJoinInfo = pOperator->info; + + SSDataBlock* pRes = pJoinInfo->pRes; + blockDataCleanup(pRes); + blockDataEnsureCapacity(pRes, 4096); + while (true) { + int32_t numOfRowsBefore = pRes->info.rows; + doMergeJoinImpl(pOperator, pRes); + int32_t numOfNewRows = pRes->info.rows - numOfRowsBefore; + if (numOfNewRows == 0) { + break; + } + if (pJoinInfo->pCondAfterMerge != NULL) { + doFilter(pJoinInfo->pCondAfterMerge, pRes); + } + if (pRes->info.rows >= pOperator->resultInfo.threshold) { + break; + } + } return (pRes->info.rows > 0) ? pRes : NULL; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index f814e74269..061b4ab3c5 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -807,6 +807,23 @@ static bool isStateWindow(SStreamBlockScanInfo* pInfo) { return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE; } +static void setGroupId(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) { + ASSERT(rowIndex < pBlock->info.rows); + switch (pBlock->info.type) + { + case STREAM_RETRIEVE: { + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex); + uint64_t* groupCol = (uint64_t*)pColInfo->pData; + pInfo->groupId = groupCol[rowIndex]; + } + break; + case STREAM_DELETE_DATA: + break; + default: + break; + } +} + static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) { STimeWindow win = { .skey = INT64_MIN, @@ -829,6 +846,7 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int3 } else { win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[(*pRowIndex)], &pInfo->interval, pInfo->interval.precision, NULL); + setGroupId(pInfo, pSDB, 2, *pRowIndex); (*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, (*pRowIndex), win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); } @@ -1031,10 +1049,12 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; } else { - if (isStateWindow(pInfo) && taosArrayGetSize(pInfo->sessionSup.pStreamAggSup->pScanWindow) > 0) { + if (isStateWindow(pInfo)) { pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; pInfo->updateResIndex = pInfo->pUpdateRes->info.rows; - prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); + if (!prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex)) { + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + } } if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) { SSDataBlock* pSDB = 
doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); @@ -1274,6 +1294,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; pInfo->groupId = 0; pInfo->pPullDataRes = createPullDataBlock(); + pInfo->pStreamScanOp = pOperator; pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; @@ -1308,6 +1329,13 @@ static void destroySysScanOperator(void* param, int32_t numOfOutput) { taosArrayDestroy(pInfo->scanCols); } +static int32_t getSysTableDbNameColId(const char* pTable) { + // if (0 == strcmp(TSDB_INS_TABLE_USER_INDEXES, pTable)) { + // return 1; + // } + return TSDB_INS_USER_STABLES_DBNAME_COLID; +} + EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { int32_t code = TSDB_CODE_SUCCESS; ENodeType nType = nodeType(pNode); @@ -1329,7 +1357,7 @@ EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { } SColumnNode* node = (SColumnNode*)pNode; - if (TSDB_INS_USER_STABLES_DBNAME_COLID == node->colId) { + if (getSysTableDbNameColId(node->tableName) == node->colId) { *(int32_t*)pContext = 2; return DEAL_RES_CONTINUE; } diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 66900fb7aa..03c939cc95 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -838,7 +838,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->interval.precision, &pInfo->win); int32_t ret = TSDB_CODE_SUCCESS; - if (!pInfo->ignoreCloseWindow || !isCloseWindow(&win, &pInfo->twAggSup)) { + if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) { ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -871,7 +871,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul doWindowBorderInterpolation(pInfo, pBlock, pResult, &win, startPos, forwardRows, pSup); } - if (!pInfo->ignoreCloseWindow || !isCloseWindow(&win, &pInfo->twAggSup)) { + if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) { updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); doApplyFunctions(pTaskInfo, pSup->pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, pBlock->info.rows, numOfOutput, pInfo->order); @@ -886,7 +886,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul if (startPos < 0) { break; } - if (pInfo->ignoreCloseWindow && isCloseWindow(&nextWin, &pInfo->twAggSup)) { + if (pInfo->ignoreExpiredData && isCloseWindow(&nextWin, &pInfo->twAggSup)) { ekey = ascScan ? 
nextWin.ekey : nextWin.skey; forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order); @@ -1535,7 +1535,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pInfo->interval = *pInterval; pInfo->execModel = pTaskInfo->execModel; pInfo->twAggSup = *pTwAggSupp; - pInfo->ignoreCloseWindow = false; + pInfo->ignoreExpiredData = pPhyNode->window.igExpired; if (pPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; @@ -2292,7 +2292,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc pInfo->interval.precision, NULL); while (1) { bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup); - if (pInfo->ignoreCloseWindow && isClosed) { + if (pInfo->ignoreExpiredData && isClosed) { startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin); if (startPos < 0) { break; @@ -2710,7 +2710,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK); pInfo->pPullDataRes = createPullDataBlock(); - pInfo->ignoreCloseWindow = false; + pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired; pOperator->operatorType = pPhyNode->type; pOperator->blocking = true; @@ -2852,12 +2852,12 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh pInfo->pStDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK); pInfo->pDelIterator = NULL; pInfo->pDelRes = createOneDataBlock(pResBlock, false); - pInfo->pDelRes->info.type = STREAM_DELETE; + pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; blockDataEnsureCapacity(pInfo->pDelRes, 64); pInfo->pChildren = NULL; pInfo->isFinal = false; pInfo->pPhyNode = pPhyNode; - pInfo->ignoreCloseWindow = false; + pInfo->ignoreExpiredData = pSessionNode->window.igExpired; pOperator->name = "StreamSessionWindowAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION; @@ -3133,7 +3133,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; for (int32_t i = 0; i < pSDataBlock->info.rows;) { - if (pInfo->ignoreCloseWindow && isOverdue(endTsCols[i], &pInfo->twAggSup)) { + if (pInfo->ignoreExpiredData && isOverdue(endTsCols[i], &pInfo->twAggSup)) { i++; continue; } @@ -3413,8 +3413,8 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { pOperator->status = OP_RES_TO_RETURN; closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, - getResWinForSession, pInfo->ignoreCloseWindow); - closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreCloseWindow); + getResWinForSession, pInfo->ignoreExpiredData); + closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData); copyUpdateResult(pStUpdated, pUpdated); taosHashCleanup(pStUpdated); @@ -3822,7 +3822,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; SColumnInfoData* pKeyColInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->stateCol.slotId); for (int32_t i = 0; i < pSDataBlock->info.rows; i += winRows) { - if (pInfo->ignoreCloseWindow && isOverdue(tsCols[i], &pInfo->twAggSup)) { + if (pInfo->ignoreExpiredData && isOverdue(tsCols[i], &pInfo->twAggSup)) { i++; continue; } @@ -3866,12 +3866,14 @@ static 
SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { if (pOperator->status == OP_RES_TO_RETURN) { doBuildDeleteDataBlock(pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0) { + printDataBlock(pInfo->pDelRes, "single state"); return pInfo->pDelRes; } doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } + printDataBlock(pBInfo->pRes, "single state"); return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes; } @@ -3884,6 +3886,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { if (pBlock == NULL) { break; } + printDataBlock(pBlock, "single state recv"); if (pBlock->info.type == STREAM_CLEAR) { doClearStateWindows(&pInfo->streamAggSup, pBlock, pInfo->primaryTsIndex, &pInfo->stateCol, pInfo->stateCol.slotId, @@ -3903,8 +3906,8 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { pOperator->status = OP_RES_TO_RETURN; closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, - getResWinForState, pInfo->ignoreCloseWindow); - closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreCloseWindow); + getResWinForState, pInfo->ignoreExpiredData); + closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData); copyUpdateResult(pSeUpdated, pUpdated); taosHashCleanup(pSeUpdated); @@ -3914,9 +3917,11 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildDeleteDataBlock(pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0) { + printDataBlock(pInfo->pDelRes, "single state"); return pInfo->pDelRes; } doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); + printDataBlock(pBInfo->pRes, "single state"); return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes; } @@ -3975,10 +3980,10 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys pInfo->pSeDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK); pInfo->pDelIterator = NULL; pInfo->pDelRes = createOneDataBlock(pResBlock, false); - pInfo->pDelRes->info.type = STREAM_DELETE; + pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; blockDataEnsureCapacity(pInfo->pDelRes, 64); pInfo->pChildren = NULL; - pInfo->ignoreCloseWindow = false; + pInfo->ignoreExpiredData = pStateNode->window.igExpired; pOperator->name = "StreamStateAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE; diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index e01750a3ca..0edefdd05b 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -18,6 +18,7 @@ #include "querynodes.h" #include "scalar.h" #include "taoserror.h" +#include "cJSON.h" static int32_t buildFuncErrMsg(char* pErrBuf, int32_t len, int32_t errCode, const char* pFormat, ...) 
{ va_list vArgList; @@ -39,6 +40,174 @@ static int32_t invaildFuncParaValueErrMsg(char* pErrBuf, int32_t len, const char return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_PARA_VALUE, "Invalid parameter value : %s", pFuncName); } +#define TIME_UNIT_INVALID 1 +#define TIME_UNIT_TOO_SMALL 2 + +static int32_t validateTimeUnitParam(uint8_t dbPrec, const SValueNode* pVal) { + if (!pVal->isDuration) { + return TIME_UNIT_INVALID; + } + + if (TSDB_TIME_PRECISION_MILLI == dbPrec && 0 == strcasecmp(pVal->literal, "1u")) { + return TIME_UNIT_TOO_SMALL; + } + + if (pVal->literal[0] != '1' || (pVal->literal[1] != 'u' && pVal->literal[1] != 'a' && + pVal->literal[1] != 's' && pVal->literal[1] != 'm' && + pVal->literal[1] != 'h' && pVal->literal[1] != 'd' && + pVal->literal[1] != 'w')) { + return TIME_UNIT_INVALID; + } + + return TSDB_CODE_SUCCESS; +} + +/* Following are valid ISO-8601 timezone format: + * 1 z/Z + * 2 ±hh:mm + * 3 ±hhmm + * 4 ±hh + * + */ + +static bool validateHourRange(int8_t hour) { + if (hour < 0 || hour > 12) { + return false; + } + + return true; +} + +static bool validateMinuteRange(int8_t hour, int8_t minute, char sign) { + if (minute == 0 || (minute == 30 && (hour == 3 || hour == 5) && sign == '+')) { + return true; + } + + return false; +} + +static bool validateTimestampDigits(const SValueNode* pVal) { + if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { + return false; + } + + int64_t tsVal = pVal->datum.i; + char fraction[20] = {0}; + NUM_TO_STRING(pVal->node.resType.type, &tsVal, sizeof(fraction), fraction); + int32_t tsDigits = (int32_t)strlen(fraction); + + if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS) { + if (tsDigits == TSDB_TIME_PRECISION_MILLI_DIGITS || tsDigits == TSDB_TIME_PRECISION_MICRO_DIGITS || + tsDigits == TSDB_TIME_PRECISION_NANO_DIGITS) { + return true; + } else { + return false; + } + } + + return true; +} + +static bool validateTimezoneFormat(const SValueNode* pVal) { + if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { + return false; + } + + char* tz = varDataVal(pVal->datum.p); + int32_t len = varDataLen(pVal->datum.p); + + char buf[3] = {0}; + int8_t hour = -1, minute = -1; + if (len == 0) { + return false; + } else if (len == 1 && (tz[0] == 'z' || tz[0] == 'Z')) { + return true; + } else if ((tz[0] == '+' || tz[0] == '-')) { + switch (len) { + case 3: + case 5: { + for (int32_t i = 1; i < len; ++i) { + if (!isdigit(tz[i])) { + return false; + } + + if (i == 2) { + memcpy(buf, &tz[i - 1], 2); + hour = taosStr2Int8(buf, NULL, 10); + if (!validateHourRange(hour)) { + return false; + } + } else if (i == 4) { + memcpy(buf, &tz[i - 1], 2); + minute = taosStr2Int8(buf, NULL, 10); + if (!validateMinuteRange(hour, minute, tz[0])) { + return false; + } + } + } + break; + } + case 6: { + for (int32_t i = 1; i < len; ++i) { + if (i == 3) { + if (tz[i] != ':') { + return false; + } + continue; + } + + if (!isdigit(tz[i])) { + return false; + } + + if (i == 2) { + memcpy(buf, &tz[i - 1], 2); + hour = taosStr2Int8(buf, NULL, 10); + if (!validateHourRange(hour)) { + return false; + } + } else if (i == 5) { + memcpy(buf, &tz[i - 1], 2); + minute = taosStr2Int8(buf, NULL, 10); + if (!validateMinuteRange(hour, minute, tz[0])) { + return false; + } + } + } + break; + } + default: { + return false; + } + } + } else { + return false; + } + + return true; +} + +void static addTimezoneParam(SNodeList* pList) { + char buf[6] = {0}; + time_t t = taosTime(NULL); + struct tm* tmInfo = taosLocalTime(&t, NULL); + strftime(buf, sizeof(buf), "%z", tmInfo); + int32_t len 
= (int32_t)strlen(buf); + + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + pVal->literal = strndup(buf, len); + pVal->isDuration = false; + pVal->translate = true; + pVal->node.resType.type = TSDB_DATA_TYPE_BINARY; + pVal->node.resType.bytes = len + VARSTR_HEADER_SIZE; + pVal->node.resType.precision = TSDB_TIME_PRECISION_MILLI; + pVal->datum.p = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE + 1); + varDataSetLen(pVal->datum.p, len); + strncpy(varDataVal(pVal->datum.p), pVal->literal, len); + + nodesListAppend(pList, (SNode*)pVal); +} + void static addDbPrecisonParam(SNodeList** pList, uint8_t precision) { SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); pVal->literal = NULL; @@ -525,9 +694,15 @@ static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - if (pValue->datum.i == 0) { + uint8_t dbPrec = pFunc->node.resType.precision; + + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 1)); + if (ret == TIME_UNIT_TOO_SMALL) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "ELAPSED function time unit parameter should be greater than db precision"); + } else if (ret == TIME_UNIT_INVALID) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "ELAPSED function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); } } @@ -622,6 +797,165 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le return TSDB_CODE_SUCCESS; } +typedef enum { UNKNOWN_BIN = 0, USER_INPUT_BIN, LINEAR_BIN, LOG_BIN } EHistoBinType; + +static int8_t validateHistogramBinType(char* binTypeStr) { + int8_t binType; + if (strcasecmp(binTypeStr, "user_input") == 0) { + binType = USER_INPUT_BIN; + } else if (strcasecmp(binTypeStr, "linear_bin") == 0) { + binType = LINEAR_BIN; + } else if (strcasecmp(binTypeStr, "log_bin") == 0) { + binType = LOG_BIN; + } else { + binType = UNKNOWN_BIN; + } + + return binType; +} + +static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* errMsg, int32_t msgLen) { + const char *msg1 = "HISTOGRAM function requires four parameters"; + const char *msg3 = "HISTOGRAM function invalid format for binDesc parameter"; + const char *msg4 = "HISTOGRAM function binDesc parameter \"count\" should be in range [1, 1000]"; + const char *msg5 = "HISTOGRAM function bin/parameter should be in range [-DBL_MAX, DBL_MAX]"; + const char *msg6 = "HISTOGRAM function binDesc parameter \"width\" cannot be 0"; + const char *msg7 = "HISTOGRAM function binDesc parameter \"start\" cannot be 0 with \"log_bin\" type"; + const char *msg8 = "HISTOGRAM function binDesc parameter \"factor\" cannot be negative or equal to 0/1"; + + cJSON* binDesc = cJSON_Parse(binDescStr); + int32_t numOfBins; + double* intervals; + if (cJSON_IsObject(binDesc)) { /* linaer/log bins */ + int32_t numOfParams = cJSON_GetArraySize(binDesc); + int32_t startIndex; + if (numOfParams != 4) { + snprintf(errMsg, msgLen, "%s", msg1); + return false; + } + + cJSON* start = cJSON_GetObjectItem(binDesc, "start"); + cJSON* factor = cJSON_GetObjectItem(binDesc, "factor"); + cJSON* width = cJSON_GetObjectItem(binDesc, "width"); + cJSON* count = cJSON_GetObjectItem(binDesc, "count"); + cJSON* infinity = cJSON_GetObjectItem(binDesc, "infinity"); + + if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) { + snprintf(errMsg, msgLen, "%s", msg3); + return 
false; + } + + if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000 + snprintf(errMsg, msgLen, "%s", msg4); + return false; + } + + if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) || + (factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) { + snprintf(errMsg, msgLen, "%s", msg5); + return false; + } + + int32_t counter = (int32_t)count->valueint; + if (infinity->valueint == false) { + startIndex = 0; + numOfBins = counter + 1; + } else { + startIndex = 1; + numOfBins = counter + 3; + } + + intervals = taosMemoryCalloc(numOfBins, sizeof(double)); + if (cJSON_IsNumber(width) && factor == NULL && binType == LINEAR_BIN) { + // linear bin process + if (width->valuedouble == 0) { + snprintf(errMsg, msgLen, "%s", msg6); + taosMemoryFree(intervals); + return false; + } + for (int i = 0; i < counter + 1; ++i) { + intervals[startIndex] = start->valuedouble + i * width->valuedouble; + if (isinf(intervals[startIndex])) { + snprintf(errMsg, msgLen, "%s", msg5); + taosMemoryFree(intervals); + return false; + } + startIndex++; + } + } else if (cJSON_IsNumber(factor) && width == NULL && binType == LOG_BIN) { + // log bin process + if (start->valuedouble == 0) { + snprintf(errMsg, msgLen, "%s", msg7); + taosMemoryFree(intervals); + return false; + } + if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) { + snprintf(errMsg, msgLen, "%s", msg8); + taosMemoryFree(intervals); + return false; + } + for (int i = 0; i < counter + 1; ++i) { + intervals[startIndex] = start->valuedouble * pow(factor->valuedouble, i * 1.0); + if (isinf(intervals[startIndex])) { + snprintf(errMsg, msgLen, "%s", msg5); + taosMemoryFree(intervals); + return false; + } + startIndex++; + } + } else { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + + if (infinity->valueint == true) { + intervals[0] = -INFINITY; + intervals[numOfBins - 1] = INFINITY; + // in case of desc bin orders, -inf/inf should be swapped + ASSERT(numOfBins >= 4); + if (intervals[1] > intervals[numOfBins - 2]) { + TSWAP(intervals[0], intervals[numOfBins - 1]); + } + } + } else if (cJSON_IsArray(binDesc)) { /* user input bins */ + if (binType != USER_INPUT_BIN) { + snprintf(errMsg, msgLen, "%s", msg3); + return false; + } + numOfBins = cJSON_GetArraySize(binDesc); + intervals = taosMemoryCalloc(numOfBins, sizeof(double)); + cJSON* bin = binDesc->child; + if (bin == NULL) { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + int i = 0; + while (bin) { + intervals[i] = bin->valuedouble; + if (!cJSON_IsNumber(bin)) { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + if (i != 0 && intervals[i] <= intervals[i - 1]) { + snprintf(errMsg, msgLen, "%s", msg3); + taosMemoryFree(intervals); + return false; + } + bin = bin->next; + i++; + } + } else { + snprintf(errMsg, msgLen, "%s", msg3); + return false; + } + + taosMemoryFree(intervals); + return true; +} + static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); if (4 != numOfParams) { @@ -640,6 +974,8 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + int8_t binType; + char* binDesc; for (int32_t i = 1; i < numOfParams; ++i) { SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); 
if (QUERY_NODE_VALUE != nodeType(pParamNode)) { @@ -650,6 +986,23 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l pValue->notReserved = true; + if (i == 1) { + binType = validateHistogramBinType(varDataVal(pValue->datum.p)); + if (binType == UNKNOWN_BIN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "HISTOGRAM function binType parameter should be " + "\"user_input\", \"log_bin\" or \"linear_bin\""); + } + } + + if (i == 2) { + char errMsg[128] = {0}; + binDesc = varDataVal(pValue->datum.p); + if (!validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg); + } + } + if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "HISTOGRAM function normalized parameter should be 0/1"); @@ -679,6 +1032,8 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + int8_t binType; + char* binDesc; for (int32_t i = 1; i < numOfParams; ++i) { SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); if (QUERY_NODE_VALUE != nodeType(pParamNode)) { @@ -689,6 +1044,23 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32 pValue->notReserved = true; + if (i == 1) { + binType = validateHistogramBinType(varDataVal(pValue->datum.p)); + if (binType == UNKNOWN_BIN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "HISTOGRAM function binType parameter should be " + "\"user_input\", \"log_bin\" or \"linear_bin\""); + } + } + + if (i == 2) { + char errMsg[128] = {0}; + binDesc = varDataVal(pValue->datum.p); + if (!validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg); + } + } + if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "HISTOGRAM function normalized parameter should be 0/1"); @@ -843,6 +1215,19 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32 return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + if (numOfParams == 4) { + uint8_t dbPrec = pFunc->node.resType.precision; + + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 3)); + if (ret == TIME_UNIT_TOO_SMALL) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "STATEDURATION function time unit parameter should be greater than db precision"); + } else if (ret == TIME_UNIT_INVALID) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "STATEDURATION function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); + } + } + // set result type pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; return TSDB_CODE_SUCCESS; @@ -1259,19 +1644,9 @@ static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { // The number of parameters has been limited by the syntax definition - // uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; // The function return type has been set during syntax parsing uint8_t para2Type = 
pFunc->node.resType.type; - // if (para2Type != TSDB_DATA_TYPE_BIGINT && para2Type != TSDB_DATA_TYPE_UBIGINT && - // para2Type != TSDB_DATA_TYPE_VARCHAR && para2Type != TSDB_DATA_TYPE_NCHAR && - // para2Type != TSDB_DATA_TYPE_TIMESTAMP) { - // return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - // } - // if ((para2Type == TSDB_DATA_TYPE_TIMESTAMP && IS_VAR_DATA_TYPE(para1Type)) || - // (para2Type == TSDB_DATA_TYPE_BINARY && para1Type == TSDB_DATA_TYPE_NCHAR)) { - // return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - // } int32_t para2Bytes = pFunc->node.resType.bytes; if (IS_VAR_DATA_TYPE(para2Type)) { @@ -1281,155 +1656,14 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "CAST function converted length should be in range [0, 1000]"); } + + // add database precision as param + uint8_t dbPrec = pFunc->node.resType.precision; + addDbPrecisonParam(&pFunc->pParameterList, dbPrec); + return TSDB_CODE_SUCCESS; } -/* Following are valid ISO-8601 timezone format: - * 1 z/Z - * 2 ±hh:mm - * 3 ±hhmm - * 4 ±hh - * - */ - -static bool validateHourRange(int8_t hour) { - if (hour < 0 || hour > 12) { - return false; - } - - return true; -} - -static bool validateMinuteRange(int8_t hour, int8_t minute, char sign) { - if (minute == 0 || (minute == 30 && (hour == 3 || hour == 5) && sign == '+')) { - return true; - } - - return false; -} - -static bool validateTimestampDigits(const SValueNode* pVal) { - if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { - return false; - } - - int64_t tsVal = pVal->datum.i; - char fraction[20] = {0}; - NUM_TO_STRING(pVal->node.resType.type, &tsVal, sizeof(fraction), fraction); - int32_t tsDigits = (int32_t)strlen(fraction); - - if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS) { - if (tsDigits == TSDB_TIME_PRECISION_MILLI_DIGITS || tsDigits == TSDB_TIME_PRECISION_MICRO_DIGITS || - tsDigits == TSDB_TIME_PRECISION_NANO_DIGITS) { - return true; - } else { - return false; - } - } - - return true; -} - -static bool validateTimezoneFormat(const SValueNode* pVal) { - if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { - return false; - } - - char* tz = varDataVal(pVal->datum.p); - int32_t len = varDataLen(pVal->datum.p); - - char buf[3] = {0}; - int8_t hour = -1, minute = -1; - if (len == 0) { - return false; - } else if (len == 1 && (tz[0] == 'z' || tz[0] == 'Z')) { - return true; - } else if ((tz[0] == '+' || tz[0] == '-')) { - switch (len) { - case 3: - case 5: { - for (int32_t i = 1; i < len; ++i) { - if (!isdigit(tz[i])) { - return false; - } - - if (i == 2) { - memcpy(buf, &tz[i - 1], 2); - hour = taosStr2Int8(buf, NULL, 10); - if (!validateHourRange(hour)) { - return false; - } - } else if (i == 4) { - memcpy(buf, &tz[i - 1], 2); - minute = taosStr2Int8(buf, NULL, 10); - if (!validateMinuteRange(hour, minute, tz[0])) { - return false; - } - } - } - break; - } - case 6: { - for (int32_t i = 1; i < len; ++i) { - if (i == 3) { - if (tz[i] != ':') { - return false; - } - continue; - } - - if (!isdigit(tz[i])) { - return false; - } - - if (i == 2) { - memcpy(buf, &tz[i - 1], 2); - hour = taosStr2Int8(buf, NULL, 10); - if (!validateHourRange(hour)) { - return false; - } - } else if (i == 5) { - memcpy(buf, &tz[i - 1], 2); - minute = taosStr2Int8(buf, NULL, 10); - if (!validateMinuteRange(hour, minute, tz[0])) { - return false; - } - } - } - break; - } - default: { - return false; - } - } - } else { - return false; - } - - return true; -} - 
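Note: the timezone and timestamp validators removed below are the same functions added earlier in builtins.c; they are only moved above their new callers, so the accepted ISO-8601 timezone forms (z/Z, ±hh, ±hhmm, ±hh:mm) are unchanged. A simplified standalone sketch of those accepted shapes follows; it checks the syntax only and omits the hour/minute range checks that validateHourRange/validateMinuteRange perform.

```c
#include <ctype.h>
#include <stdbool.h>
#include <string.h>

/* Accepts "z"/"Z", ±hh, ±hhmm and ±hh:mm by shape only. */
static bool looksLikeIso8601Tz(const char *tz) {
  size_t len = strlen(tz);
  if (len == 1) return tz[0] == 'z' || tz[0] == 'Z';
  if (tz[0] != '+' && tz[0] != '-') return false;
  if (len == 3 || len == 5) {                       /* ±hh or ±hhmm */
    for (size_t i = 1; i < len; ++i) {
      if (!isdigit((unsigned char)tz[i])) return false;
    }
    return true;
  }
  if (len == 6) {                                   /* ±hh:mm */
    return isdigit((unsigned char)tz[1]) && isdigit((unsigned char)tz[2]) &&
           tz[3] == ':' &&
           isdigit((unsigned char)tz[4]) && isdigit((unsigned char)tz[5]);
  }
  return false;
}
```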
-void static addTimezoneParam(SNodeList* pList) { - char buf[6] = {0}; - time_t t = taosTime(NULL); - struct tm* tmInfo = taosLocalTime(&t, NULL); - strftime(buf, sizeof(buf), "%z", tmInfo); - int32_t len = (int32_t)strlen(buf); - - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); - pVal->literal = strndup(buf, len); - pVal->isDuration = false; - pVal->translate = true; - pVal->node.resType.type = TSDB_DATA_TYPE_BINARY; - pVal->node.resType.bytes = len + VARSTR_HEADER_SIZE; - pVal->node.resType.precision = TSDB_TIME_PRECISION_MILLI; - pVal->datum.p = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE + 1); - varDataSetLen(pVal->datum.p, len); - strncpy(varDataVal(pVal->datum.p), pVal->literal, len); - - nodesListAppend(pList, (SNode*)pVal); -} - static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); if (1 != numOfParams && 2 != numOfParams) { @@ -1498,6 +1732,16 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_ // add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; + + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 1)); + if (ret == TIME_UNIT_TOO_SMALL) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "TIMETRUNCATE function time unit parameter should be greater than db precision"); + } else if (ret == TIME_UNIT_INVALID) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "TIMETRUNCATE function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); + } + addDbPrecisonParam(&pFunc->pParameterList, dbPrec); pFunc->node.resType = @@ -1526,6 +1770,18 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le // add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; + + if (3 == numOfParams) { + int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode *)nodesListGetNode(pFunc->pParameterList, 2)); + if (ret == TIME_UNIT_TOO_SMALL) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "TIMEDIFF function time unit parameter should be greater than db precision"); + } else if (ret == TIME_UNIT_INVALID) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "TIMEDIFF function time unit parameter should be one of the following: [1u, 1a, 1s, 1m, 1h, 1d, 1w]"); + } + } + addDbPrecisonParam(&pFunc->pParameterList, dbPrec); pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; @@ -2054,7 +2310,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "histogram", .type = FUNCTION_TYPE_HISTOGRAM, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC, .translateFunc = translateHistogram, .getEnvFunc = getHistogramFuncEnv, .initFunc = histogramFunctionSetup, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 3f6afaf5fd..cf4a763423 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -719,8 +719,10 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) { ASSERT(numOfElem >= 0); pAvgRes->count += numOfElem; - if (IS_INTEGER_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { pAvgRes->sum.isum += pAgg->sum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + pAvgRes->sum.usum += 
pAgg->sum; } else if (IS_FLOAT_TYPE(type)) { pAvgRes->sum.dsum += GET_DOUBLE_VAL((const char*)&(pAgg->sum)); } @@ -784,6 +786,64 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) { break; } + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t* plist = (uint8_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + + break; + } + + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t* plist = (uint16_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + break; + } + + case TSDB_DATA_TYPE_UINT: { + uint32_t* plist = (uint32_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + + break; + } + + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t* plist = (uint64_t*)pCol->pData; + for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { + if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + + numOfElem += 1; + pAvgRes->count += 1; + pAvgRes->sum.usum += plist[i]; + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { float* plist = (float*)pCol->pData; for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { @@ -825,8 +885,10 @@ _avg_over: static void avgTransferInfo(SAvgRes* pInput, SAvgRes* pOutput) { pOutput->type = pInput->type; - if (IS_INTEGER_TYPE(pOutput->type)) { + if (IS_SIGNED_NUMERIC_TYPE(pOutput->type)) { pOutput->sum.isum += pInput->sum.isum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(pOutput->type)) { + pOutput->sum.usum += pInput->sum.usum; } else { pOutput->sum.dsum += pInput->sum.dsum; } @@ -900,6 +962,22 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) { LIST_AVG_N(pAvgRes->sum.isum, int64_t); break; } + case TSDB_DATA_TYPE_UTINYINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint8_t); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint16_t); + break; + } + case TSDB_DATA_TYPE_UINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint32_t); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + LIST_AVG_N(pAvgRes->sum.usum, uint64_t); + break; + } case TSDB_DATA_TYPE_FLOAT: { LIST_AVG_N(pAvgRes->sum.dsum, float); break; @@ -925,8 +1003,10 @@ int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); SAvgRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); - if (IS_INTEGER_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { pDBuf->sum.isum += pSBuf->sum.isum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + pDBuf->sum.usum += pSBuf->sum.usum; } else { pDBuf->sum.dsum += pSBuf->sum.dsum; } @@ -941,8 +1021,10 @@ int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); int32_t type = pAvgRes->type; - if (IS_INTEGER_TYPE(type)) { + if (IS_SIGNED_NUMERIC_TYPE(type)) { pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count); + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + pAvgRes->result = pAvgRes->sum.usum / ((double)pAvgRes->count); } else { pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count); } diff --git 
a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 7ad7612e7e..262adc5d6f 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -260,7 +260,7 @@ bool fmIsSameInOutType(int32_t funcId) { } static int32_t getFuncInfo(SFunctionNode* pFunc) { - char msg[64] = {0}; + char msg[128] = {0}; return fmGetFuncInfo(pFunc, msg, sizeof(msg)); } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index da9474ede0..1bc759e833 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -1565,6 +1565,10 @@ void constructUdfService(void *argsThread) { //TODO return value of uv_run uv_run(&udfc->uvLoop, UV_RUN_DEFAULT); uv_loop_close(&udfc->uvLoop); + + uv_walk(&udfc->uvLoop, udfUdfdCloseWalkCb, NULL); + uv_run(&udfc->uvLoop, UV_RUN_DEFAULT); + uv_loop_close(&udfc->uvLoop); } int32_t udfcOpen() { diff --git a/source/libs/index/CMakeLists.txt b/source/libs/index/CMakeLists.txt index 75eac2430f..33133d21ae 100644 --- a/source/libs/index/CMakeLists.txt +++ b/source/libs/index/CMakeLists.txt @@ -35,7 +35,7 @@ if (${BUILD_WITH_INVERTEDINDEX}) endif(${BUILD_WITH_INVERTEDINDEX}) -if (${BUILD_TEST}) - add_subdirectory(test) -endif(${BUILD_TEST}) +# if (${BUILD_TEST}) +# add_subdirectory(test) +# endif(${BUILD_TEST}) diff --git a/source/libs/index/inc/indexFst.h b/source/libs/index/inc/indexFst.h index 6fd12c110c..c600ca4780 100644 --- a/source/libs/index/inc/indexFst.h +++ b/source/libs/index/inc/indexFst.h @@ -21,7 +21,7 @@ extern "C" { #endif #include "indexFstAutomation.h" -#include "indexFstCountingWriter.h" +#include "indexFstFile.h" #include "indexFstNode.h" #include "indexFstRegistry.h" #include "indexFstUtil.h" @@ -90,8 +90,8 @@ FstBuilderNode* fstUnFinishedNodesPopEmpty(FstUnFinishedNodes* nodes); uint64_t fstUnFinishedNodesFindCommPrefixAndSetOutput(FstUnFinishedNodes* node, FstSlice bs, Output in, Output* out); typedef struct FstBuilder { - FstCountingWriter* wrt; // The FST raw data is written directly to `wtr`. - FstUnFinishedNodes* unfinished; // The stack of unfinished nodes + IdxFstFile* wrt; // The FST raw data is written directly to `wtr`. + FstUnFinishedNodes* unfinished; // The stack of unfinished nodes FstRegistry* registry; // A map of finished nodes. 
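  /* NOTE: IdxFstFile is the renamed FstCountingWriter (declared in indexFstFile.h further
   * below): it wraps an IFileCtx, forwards every write to it, and keeps a running byte
   * count plus a checksum that fstBuilderInsertInner() later appends to the FST footer. */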
FstSlice last; // The last word added CompiledAddr lastAddr; // The address of the last compiled node @@ -125,9 +125,9 @@ FstState fstStateCreateFrom(FstSlice* data, CompiledAddr addr); FstState fstStateCreate(State state); // compile -void fstStateCompileForOneTransNext(FstCountingWriter* w, CompiledAddr addr, uint8_t inp); -void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTransition* trn); -void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuilderNode* node); +void fstStateCompileForOneTransNext(IdxFstFile* w, CompiledAddr addr, uint8_t inp); +void fstStateCompileForOneTrans(IdxFstFile* w, CompiledAddr addr, FstTransition* trn); +void fstStateCompileForAnyTrans(IdxFstFile* w, CompiledAddr addr, FstBuilderNode* node); // set_comm_input void fstStateSetCommInput(FstState* state, uint8_t inp); @@ -282,7 +282,7 @@ FStmSt* stmBuilderIntoStm(FStmBuilder* sb); bool fstVerify(Fst* fst); // refactor this function -bool fstBuilderNodeCompileTo(FstBuilderNode* b, FstCountingWriter* wrt, CompiledAddr lastAddr, CompiledAddr startAddr); +bool fstBuilderNodeCompileTo(FstBuilderNode* b, IdxFstFile* wrt, CompiledAddr lastAddr, CompiledAddr startAddr); typedef struct StreamState { FstNode* node; diff --git a/source/libs/index/inc/indexFstCountingWriter.h b/source/libs/index/inc/indexFstCountingWriter.h deleted file mode 100644 index f8a6246723..0000000000 --- a/source/libs/index/inc/indexFstCountingWriter.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#ifndef __INDEX_FST_COUNTING_WRITER_H__ -#define __INDEX_FST_COUNTING_WRITER_H__ - -#include "indexInt.h" - -#ifdef __cplusplus -extern "C" { -#endif - -//#define USE_MMAP 1 - -#define DefaultMem 1024 * 1024 - -static char tmpFile[] = "./index"; -typedef enum WriterType { TMemory, TFile } WriterType; - -typedef struct WriterCtx { - int (*write)(struct WriterCtx* ctx, uint8_t* buf, int len); - int (*read)(struct WriterCtx* ctx, uint8_t* buf, int len); - int (*flush)(struct WriterCtx* ctx); - int (*readFrom)(struct WriterCtx* ctx, uint8_t* buf, int len, int32_t offset); - int (*size)(struct WriterCtx* ctx); - WriterType type; - union { - struct { - TdFilePtr pFile; - bool readOnly; - char buf[256]; - int size; -#ifdef USE_MMAP - char* ptr; -#endif - } file; - struct { - int32_t capa; - char* buf; - } mem; - }; - int32_t offset; - int32_t limit; -} WriterCtx; - -static int writeCtxDoWrite(WriterCtx* ctx, uint8_t* buf, int len); -static int writeCtxDoRead(WriterCtx* ctx, uint8_t* buf, int len); -static int writeCtxDoReadFrom(WriterCtx* ctx, uint8_t* buf, int len, int32_t offset); -static int writeCtxDoFlush(WriterCtx* ctx); - -WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity); -void writerCtxDestroy(WriterCtx* w, bool remove); - -typedef uint32_t CheckSummer; - -typedef struct FstCountingWriter { - void* wrt; // wrap any writer that counts and checksum bytes written - uint64_t count; - CheckSummer summer; -} FstCountingWriter; - -int fstCountingWriterWrite(FstCountingWriter* write, uint8_t* buf, uint32_t len); - -int fstCountingWriterRead(FstCountingWriter* write, uint8_t* buf, uint32_t len); - -int fstCountingWriterFlush(FstCountingWriter* write); - -uint32_t fstCountingWriterMaskedCheckSum(FstCountingWriter* write); - -FstCountingWriter* fstCountingWriterCreate(void* wtr); -void fstCountingWriterDestroy(FstCountingWriter* w); - -void fstCountingWriterPackUintIn(FstCountingWriter* writer, uint64_t n, uint8_t nBytes); -uint8_t fstCountingWriterPackUint(FstCountingWriter* writer, uint64_t n); - -#define FST_WRITER_COUNT(writer) (writer->count) -#define FST_WRITER_INTER_WRITER(writer) (writer->wtr) -#define FST_WRITE_CHECK_SUMMER(writer) (writer->summer) - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/source/libs/index/inc/indexFstFile.h b/source/libs/index/inc/indexFstFile.h new file mode 100644 index 0000000000..a161c4aee1 --- /dev/null +++ b/source/libs/index/inc/indexFstFile.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __INDEX_FST_FILE_H__ +#define __INDEX_FST_FILE_H__ + +#include "indexInt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//#define USE_MMAP 1 + +#define DefaultMem 1024 * 1024 + +static char tmpFile[] = "./index"; +typedef enum WriterType { TMemory, TFile } WriterType; + +typedef struct IFileCtx { + int (*write)(struct IFileCtx* ctx, uint8_t* buf, int len); + int (*read)(struct IFileCtx* ctx, uint8_t* buf, int len); + int (*flush)(struct IFileCtx* ctx); + int (*readFrom)(struct IFileCtx* ctx, uint8_t* buf, int len, int32_t offset); + int (*size)(struct IFileCtx* ctx); + WriterType type; + union { + struct { + TdFilePtr pFile; + bool readOnly; + char buf[256]; + int64_t size; +#ifdef USE_MMAP + char* ptr; +#endif + } file; + struct { + int32_t cap; + char* buf; + } mem; + }; + int32_t offset; + int32_t limit; +} IFileCtx; + +static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len); +static int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len); +static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t offset); +static int idxFileCtxDoFlush(IFileCtx* ctx); + +IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity); +void idxFileCtxDestroy(IFileCtx* w, bool remove); + +typedef uint32_t CheckSummer; + +typedef struct IdxFstFile { + void* wrt; // wrap any writer that counts and checksum bytes written + uint64_t count; + CheckSummer summer; +} IdxFstFile; + +int idxFileWrite(IdxFstFile* write, uint8_t* buf, uint32_t len); + +int idxFileRead(IdxFstFile* write, uint8_t* buf, uint32_t len); + +int idxFileFlush(IdxFstFile* write); + +uint32_t idxFileMaskedCheckSum(IdxFstFile* write); + +IdxFstFile* idxFileCreate(void* wtr); +void idxFileDestroy(IdxFstFile* w); + +void idxFilePackUintIn(IdxFstFile* writer, uint64_t n, uint8_t nBytes); +uint8_t idxFilePackUint(IdxFstFile* writer, uint64_t n); + +#define FST_WRITER_COUNT(writer) (writer->count) +#define FST_WRITER_INTER_WRITER(writer) (writer->wtr) +#define FST_WRITE_CHECK_SUMMER(writer) (writer->summer) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/source/libs/index/inc/indexFstNode.h b/source/libs/index/inc/indexFstNode.h index 4bbc739c73..5bdb2acb32 100644 --- a/source/libs/index/inc/indexFstNode.h +++ b/source/libs/index/inc/indexFstNode.h @@ -20,12 +20,12 @@ extern "C" { #endif -#include "indexFstCountingWriter.h" +#include "indexFstFile.h" #include "indexFstUtil.h" #include "indexInt.h" -#define FST_BUILDER_NODE_IS_FINAL(bn) (bn->isFinal) -#define FST_BUILDER_NODE_TRANS_ISEMPTY(bn) (taosArrayGetSize(bn->trans) == 0) +#define FST_BUILDER_NODE_IS_FINAL(bn) (bn->isFinal) +#define FST_BUILDER_NODE_TRANS_ISEMPTY(bn) (taosArrayGetSize(bn->trans) == 0) #define FST_BUILDER_NODE_FINALOUTPUT_ISZERO(bn) (bn->finalOutput == 0) typedef struct FstTransition { @@ -46,7 +46,7 @@ FstBuilderNode* fstBuilderNodeClone(FstBuilderNode* src); void fstBuilderNodeCloneFrom(FstBuilderNode* dst, FstBuilderNode* src); -// bool fstBuilderNodeCompileTo(FstBuilderNode *b, FstCountingWriter *wrt, +// bool fstBuilderNodeCompileTo(FstBuilderNode *b, IdxFile' *wrt, // CompiledAddr lastAddr, CompiledAddr startAddr); bool fstBuilderNodeEqual(FstBuilderNode* n1, FstBuilderNode* n2); diff --git a/source/libs/index/inc/indexTfile.h b/source/libs/index/inc/indexTfile.h index 6cfea5bc0b..ca5c688162 100644 --- a/source/libs/index/inc/indexTfile.h +++ b/source/libs/index/inc/indexTfile.h @@ -16,7 +16,7 @@ #define __INDEX_TFILE_H__ #include "indexFst.h" -#include "indexFstCountingWriter.h" 
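/*
 * Usage sketch for the renamed writer API, grounded in the signatures declared in
 * indexFstFile.h above; the function name, file path and buffer contents are made up
 * for illustration and assume the header is on the include path.
 */
#include "indexFstFile.h"

static void idxFstFileUsageSketch(void) {
  IFileCtx*   ctx = idxFileCtxCreate(TFile, "/tmp/example.tindex", false, 64 * 1024 * 1024);
  IdxFstFile* w   = idxFileCreate(ctx); /* wraps ctx; counts bytes and keeps a checksum */
  uint8_t     buf[4] = {0x01, 0x02, 0x03, 0x04};
  idxFileWrite(w, buf, sizeof(buf));    /* forwarded to ctx->write(); count and summer updated */
  idxFileFlush(w);                      /* forwarded to ctx->flush() */
  idxFileDestroy(w);                    /* flushes and frees the wrapper, not the ctx */
  idxFileCtxDestroy(ctx, false);        /* releases the ctx; false keeps the file on disk */
}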
+#include "indexFstFile.h" #include "indexInt.h" #include "indexTfile.h" #include "indexUtil.h" @@ -59,7 +59,7 @@ typedef struct TFileCache { typedef struct TFileWriter { FstBuilder* fb; - WriterCtx* ctx; + IFileCtx* ctx; TFileHeader header; uint32_t offset; } TFileWriter; @@ -68,7 +68,7 @@ typedef struct TFileWriter { typedef struct TFileReader { T_REF_DECLARE() Fst* fst; - WriterCtx* ctx; + IFileCtx* ctx; TFileHeader header; bool remove; } TFileReader; @@ -103,7 +103,7 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* read TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName); TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName); -TFileReader* tfileReaderCreate(WriterCtx* ctx); +TFileReader* tfileReaderCreate(IFileCtx* ctx); void tfileReaderDestroy(TFileReader* reader); int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr); void tfileReaderRef(TFileReader* reader); @@ -111,7 +111,7 @@ void tfileReaderUnRef(TFileReader* reader); TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t type); void tfileWriterClose(TFileWriter* tw); -TFileWriter* tfileWriterCreate(WriterCtx* ctx, TFileHeader* header); +TFileWriter* tfileWriterCreate(IFileCtx* ctx, TFileHeader* header); void tfileWriterDestroy(TFileWriter* tw); int tfileWriterPut(TFileWriter* tw, void* data, bool order); int tfileWriterFinish(TFileWriter* tw); diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c index d6d55c6be0..f6424ee8a5 100644 --- a/source/libs/index/src/index.c +++ b/source/libs/index/src/index.c @@ -39,7 +39,7 @@ #define INDEX_DATA_BIGINT_NULL 0x8000000000000000LL #define INDEX_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL -#define INDEX_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN +#define INDEX_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN #define INDEX_DATA_DOUBLE_NULL 0x7FFFFF0000000000LL // an NAN #define INDEX_DATA_NCHAR_NULL 0xFFFFFFFF #define INDEX_DATA_BINARY_NULL 0xFF @@ -614,7 +614,7 @@ static int idxGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) { return ret; END: if (tw != NULL) { - writerCtxDestroy(tw->ctx, true); + idxFileCtxDestroy(tw->ctx, true); taosMemoryFree(tw); } return -1; diff --git a/source/libs/index/src/indexFst.c b/source/libs/index/src/indexFst.c index b368c6faf3..40de167a03 100644 --- a/source/libs/index/src/indexFst.c +++ b/source/libs/index/src/indexFst.c @@ -19,11 +19,11 @@ #include "tchecksum.h" #include "tcoding.h" -static void fstPackDeltaIn(FstCountingWriter* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr, uint8_t nBytes) { +static void fstPackDeltaIn(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr, uint8_t nBytes) { CompiledAddr deltaAddr = (transAddr == EMPTY_ADDRESS) ? 
EMPTY_ADDRESS : nodeAddr - transAddr; - fstCountingWriterPackUintIn(wrt, deltaAddr, nBytes); + idxFilePackUintIn(wrt, deltaAddr, nBytes); } -static uint8_t fstPackDetla(FstCountingWriter* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) { +static uint8_t fstPackDetla(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) { uint8_t nBytes = packDeltaSize(nodeAddr, transAddr); fstPackDeltaIn(wrt, nodeAddr, transAddr, nBytes); return nBytes; @@ -208,7 +208,7 @@ FstState fstStateCreate(State state) { return fstStateDict[idx]; } // compile -void fstStateCompileForOneTransNext(FstCountingWriter* w, CompiledAddr addr, uint8_t inp) { +void fstStateCompileForOneTransNext(IdxFstFile* w, CompiledAddr addr, uint8_t inp) { FstState s = fstStateCreate(OneTransNext); fstStateSetCommInput(&s, inp); @@ -216,21 +216,21 @@ void fstStateCompileForOneTransNext(FstCountingWriter* w, CompiledAddr addr, uin uint8_t v = fstStateCommInput(&s, &null); if (null) { // w->write_all(&[inp]) - fstCountingWriterWrite(w, &inp, 1); + idxFileWrite(w, &inp, 1); } - fstCountingWriterWrite(w, &(s.val), 1); + idxFileWrite(w, &(s.val), 1); // w->write_all(&[s.val]) return; } -void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTransition* trn) { +void fstStateCompileForOneTrans(IdxFstFile* w, CompiledAddr addr, FstTransition* trn) { Output out = trn->out; - uint8_t outPackSize = (out == 0 ? 0 : fstCountingWriterPackUint(w, out)); + uint8_t outPackSize = (out == 0 ? 0 : idxFilePackUint(w, out)); uint8_t transPackSize = fstPackDetla(w, addr, trn->addr); PackSizes packSizes = 0; FST_SET_OUTPUT_PACK_SIZE(packSizes, outPackSize); FST_SET_TRANSITION_PACK_SIZE(packSizes, transPackSize); - fstCountingWriterWrite(w, (char*)&packSizes, sizeof(packSizes)); + idxFileWrite(w, (char*)&packSizes, sizeof(packSizes)); FstState st = fstStateCreate(OneTrans); @@ -239,12 +239,12 @@ void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTran bool null = false; uint8_t inp = fstStateCommInput(&st, &null); if (null == true) { - fstCountingWriterWrite(w, (char*)&trn->inp, sizeof(trn->inp)); + idxFileWrite(w, (char*)&trn->inp, sizeof(trn->inp)); } - fstCountingWriterWrite(w, (char*)(&(st.val)), sizeof(st.val)); + idxFileWrite(w, (char*)(&(st.val)), sizeof(st.val)); return; } -void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuilderNode* node) { +void fstStateCompileForAnyTrans(IdxFstFile* w, CompiledAddr addr, FstBuilderNode* node) { int32_t sz = taosArrayGetSize(node->trans); assert(sz <= 256); @@ -275,11 +275,11 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil if (anyOuts) { if (FST_BUILDER_NODE_IS_FINAL(node)) { - fstCountingWriterPackUintIn(w, node->finalOutput, oSize); + idxFilePackUintIn(w, node->finalOutput, oSize); } for (int32_t i = sz - 1; i >= 0; i--) { FstTransition* t = taosArrayGet(node->trans, i); - fstCountingWriterPackUintIn(w, t->out, oSize); + idxFilePackUintIn(w, t->out, oSize); } } for (int32_t i = sz - 1; i >= 0; i--) { @@ -288,7 +288,7 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil } for (int32_t i = sz - 1; i >= 0; i--) { FstTransition* t = taosArrayGet(node->trans, i); - fstCountingWriterWrite(w, (char*)&t->inp, 1); + idxFileWrite(w, (char*)&t->inp, 1); // fstPackDeltaIn(w, addr, t->addr, tSize); } if (sz > TRANS_INDEX_THRESHOLD) { @@ -306,10 +306,10 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil index[t->inp] = i; // fstPackDeltaIn(w, 
addr, t->addr, tSize); } - fstCountingWriterWrite(w, (char*)index, 256); + idxFileWrite(w, (char*)index, 256); taosMemoryFree(index); } - fstCountingWriterWrite(w, (char*)&packSizes, 1); + idxFileWrite(w, (char*)&packSizes, 1); bool null = false; fstStateStateNtrans(&st, &null); if (null == true) { @@ -318,12 +318,12 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil // encoded in the state byte. uint8_t v = 1; if (sz == 256) { - fstCountingWriterWrite(w, (char*)&v, 1); + idxFileWrite(w, (char*)&v, 1); } else { - fstCountingWriterWrite(w, (char*)&sz, 1); + idxFileWrite(w, (char*)&sz, 1); } } - fstCountingWriterWrite(w, (char*)(&(st.val)), 1); + idxFileWrite(w, (char*)(&(st.val)), 1); return; } @@ -753,7 +753,7 @@ bool fstNodeCompile(FstNode* node, void* w, CompiledAddr lastAddr, CompiledAddr return true; } -bool fstBuilderNodeCompileTo(FstBuilderNode* b, FstCountingWriter* wrt, CompiledAddr lastAddr, CompiledAddr startAddr) { +bool fstBuilderNodeCompileTo(FstBuilderNode* b, IdxFstFile* wrt, CompiledAddr lastAddr, CompiledAddr startAddr) { return fstNodeCompile(NULL, wrt, lastAddr, startAddr, b); } @@ -763,7 +763,7 @@ FstBuilder* fstBuilderCreate(void* w, FstType ty) { return b; } - b->wrt = fstCountingWriterCreate(w); + b->wrt = idxFileCreate(w); b->unfinished = fstUnFinishedNodesCreate(); b->registry = fstRegistryCreate(10000, 2); b->last = fstSliceCreate(NULL, 0); @@ -773,12 +773,12 @@ FstBuilder* fstBuilderCreate(void* w, FstType ty) { char buf64[8] = {0}; void* pBuf64 = buf64; taosEncodeFixedU64(&pBuf64, VERSION); - fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64)); + idxFileWrite(b->wrt, buf64, sizeof(buf64)); pBuf64 = buf64; memset(buf64, 0, sizeof(buf64)); taosEncodeFixedU64(&pBuf64, ty); - fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64)); + idxFileWrite(b->wrt, buf64, sizeof(buf64)); return b; } @@ -787,7 +787,7 @@ void fstBuilderDestroy(FstBuilder* b) { return; } - fstCountingWriterDestroy(b->wrt); + idxFileDestroy(b->wrt); fstUnFinishedNodesDestroy(b->unfinished); fstRegistryDestroy(b->registry); fstSliceDestroy(&b->last); @@ -905,21 +905,19 @@ void* fstBuilderInsertInner(FstBuilder* b) { void* pBuf64 = buf64; taosEncodeFixedU64(&pBuf64, b->len); - fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64)); + idxFileWrite(b->wrt, buf64, sizeof(buf64)); pBuf64 = buf64; taosEncodeFixedU64(&pBuf64, rootAddr); - fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64)); + idxFileWrite(b->wrt, buf64, sizeof(buf64)); char buf32[4] = {0}; void* pBuf32 = buf32; - uint32_t sum = fstCountingWriterMaskedCheckSum(b->wrt); + uint32_t sum = idxFileMaskedCheckSum(b->wrt); taosEncodeFixedU32(&pBuf32, sum); - fstCountingWriterWrite(b->wrt, buf32, sizeof(buf32)); + idxFileWrite(b->wrt, buf32, sizeof(buf32)); - fstCountingWriterFlush(b->wrt); - // fstCountingWriterDestroy(b->wrt); - // b->wrt = NULL; + idxFileFlush(b->wrt); return b->wrt; } void fstBuilderFinish(FstBuilder* b) { fstBuilderInsertInner(b); } diff --git a/source/libs/index/src/indexFstDfa.c b/source/libs/index/src/indexFstDfa.c index ff6b154c54..3011f124c9 100644 --- a/source/libs/index/src/indexFstDfa.c +++ b/source/libs/index/src/indexFstDfa.c @@ -61,9 +61,10 @@ void dfaBuilderDestroy(FstDfaBuilder *builder) { pIter = taosHashIterate(builder->cache, pIter); } taosHashCleanup(builder->cache); + taosMemoryFree(builder); } -FstDfa *dfaBuilderBuild(FstDfaBuilder *builder) { +FstDfa *dfaBuilder(FstDfaBuilder *builder) { uint32_t sz = taosArrayGetSize(builder->dfa->insts); FstSparseSet *cur = 
sparSetCreate(sz); FstSparseSet *nxt = sparSetCreate(sz); diff --git a/source/libs/index/src/indexFstCountingWriter.c b/source/libs/index/src/indexFstFile.c similarity index 64% rename from source/libs/index/src/indexFstCountingWriter.c rename to source/libs/index/src/indexFstFile.c index 8ba5173602..77dce21150 100644 --- a/source/libs/index/src/indexFstCountingWriter.c +++ b/source/libs/index/src/indexFstFile.c @@ -13,13 +13,13 @@ * along with this program. If not, see . */ -#include "indexFstCountingWriter.h" +#include "indexFstFile.h" #include "indexFstUtil.h" #include "indexInt.h" #include "os.h" #include "tutil.h" -static int writeCtxDoWrite(WriterCtx* ctx, uint8_t* buf, int len) { +static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) { if (ctx->type == TFile) { assert(len == taosWriteFile(ctx->file.pFile, buf, len)); } else { @@ -28,7 +28,7 @@ static int writeCtxDoWrite(WriterCtx* ctx, uint8_t* buf, int len) { ctx->offset += len; return len; } -static int writeCtxDoRead(WriterCtx* ctx, uint8_t* buf, int len) { +static int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len) { int nRead = 0; if (ctx->type == TFile) { #ifdef USE_MMAP @@ -44,7 +44,7 @@ static int writeCtxDoRead(WriterCtx* ctx, uint8_t* buf, int len) { return nRead; } -static int writeCtxDoReadFrom(WriterCtx* ctx, uint8_t* buf, int len, int32_t offset) { +static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t offset) { int nRead = 0; if (ctx->type == TFile) { // tfLseek(ctx->file.pFile, offset, 0); @@ -61,7 +61,7 @@ static int writeCtxDoReadFrom(WriterCtx* ctx, uint8_t* buf, int len, int32_t off } return nRead; } -static int writeCtxGetSize(WriterCtx* ctx) { +static int idxFileCtxGetSize(IFileCtx* ctx) { if (ctx->type == TFile) { int64_t file_size = 0; taosStatFile(ctx->file.buf, &file_size, NULL); @@ -69,7 +69,7 @@ static int writeCtxGetSize(WriterCtx* ctx) { } return 0; } -static int writeCtxDoFlush(WriterCtx* ctx) { +static int idxFileCtxDoFlush(IFileCtx* ctx) { if (ctx->type == TFile) { // taosFsyncFile(ctx->file.pFile); taosFsyncFile(ctx->file.pFile); @@ -80,8 +80,8 @@ static int writeCtxDoFlush(WriterCtx* ctx) { return 1; } -WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity) { - WriterCtx* ctx = taosMemoryCalloc(1, sizeof(WriterCtx)); +IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity) { + IFileCtx* ctx = taosMemoryCalloc(1, sizeof(IFileCtx)); if (ctx == NULL) { return NULL; } @@ -90,39 +90,36 @@ WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int if (ctx->type == TFile) { // ugly code, refactor later ctx->file.readOnly = readOnly; + memcpy(ctx->file.buf, path, strlen(path)); if (readOnly == false) { - // ctx->file.pFile = open(path, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO); ctx->file.pFile = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); taosFtruncateFile(ctx->file.pFile, 0); - int64_t file_size; - taosStatFile(path, &file_size, NULL); - ctx->file.size = (int)file_size; + taosStatFile(path, &ctx->file.size, NULL); + // ctx->file.size = (int)size; } else { - // ctx->file.pFile = open(path, O_RDONLY, S_IRWXU | S_IRWXG | S_IRWXO); ctx->file.pFile = taosOpenFile(path, TD_FILE_READ); - int64_t file_size = 0; - taosFStatFile(ctx->file.pFile, &file_size, NULL); - ctx->file.size = (int)file_size; + int64_t size = 0; + taosFStatFile(ctx->file.pFile, &ctx->file.size, NULL); + ctx->file.size = (int)size; #ifdef USE_MMAP 
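      /* Under USE_MMAP the read-only file is additionally mapped into memory, so the
       * read/readFrom callbacks can serve data from ctx->file.ptr rather than through
       * file I/O. */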
ctx->file.ptr = (char*)tfMmapReadOnly(ctx->file.pFile, ctx->file.size); #endif } - memcpy(ctx->file.buf, path, strlen(path)); if (ctx->file.pFile == NULL) { indexError("failed to open file, error %d", errno); goto END; } } else if (ctx->type == TMemory) { ctx->mem.buf = taosMemoryCalloc(1, sizeof(char) * capacity); - ctx->mem.capa = capacity; + ctx->mem.cap = capacity; } - ctx->write = writeCtxDoWrite; - ctx->read = writeCtxDoRead; - ctx->flush = writeCtxDoFlush; - ctx->readFrom = writeCtxDoReadFrom; - ctx->size = writeCtxGetSize; + ctx->write = idxFileCtxDoWrite; + ctx->read = idxFileCtxDoRead; + ctx->flush = idxFileCtxDoFlush; + ctx->readFrom = idxFileCtxDoReadFrom; + ctx->size = idxFileCtxGetSize; ctx->offset = 0; ctx->limit = capacity; @@ -135,7 +132,7 @@ END: taosMemoryFree(ctx); return NULL; } -void writerCtxDestroy(WriterCtx* ctx, bool remove) { +void idxFileCtxDestroy(IFileCtx* ctx, bool remove) { if (ctx->type == TMemory) { taosMemoryFree(ctx->mem.buf); } else { @@ -149,9 +146,6 @@ void writerCtxDestroy(WriterCtx* ctx, bool remove) { if (ctx->file.readOnly == false) { int64_t file_size = 0; taosStatFile(ctx->file.buf, &file_size, NULL); - // struct stat fstat; - // stat(ctx->file.buf, &fstat); - // indexError("write file size: %d", (int)(fstat.st_size)); } if (remove) { unlink(ctx->file.buf); @@ -160,30 +154,29 @@ void writerCtxDestroy(WriterCtx* ctx, bool remove) { taosMemoryFree(ctx); } -FstCountingWriter* fstCountingWriterCreate(void* wrt) { - FstCountingWriter* cw = taosMemoryCalloc(1, sizeof(FstCountingWriter)); +IdxFstFile* idxFileCreate(void* wrt) { + IdxFstFile* cw = taosMemoryCalloc(1, sizeof(IdxFstFile)); if (cw == NULL) { return NULL; } cw->wrt = wrt; - //(void *)(writerCtxCreate(TFile, readOnly)); return cw; } -void fstCountingWriterDestroy(FstCountingWriter* cw) { +void idxFileDestroy(IdxFstFile* cw) { // free wrt object: close fd or free mem - fstCountingWriterFlush(cw); - // writerCtxDestroy((WriterCtx *)(cw->wrt)); + idxFileFlush(cw); + // idxFileCtxDestroy((IFileCtx *)(cw->wrt)); taosMemoryFree(cw); } -int fstCountingWriterWrite(FstCountingWriter* write, uint8_t* buf, uint32_t len) { +int idxFileWrite(IdxFstFile* write, uint8_t* buf, uint32_t len) { if (write == NULL) { return 0; } // update checksum // write data to file/socket or mem - WriterCtx* ctx = write->wrt; + IFileCtx* ctx = write->wrt; int nWrite = ctx->write(ctx, buf, len); assert(nWrite == len); @@ -192,42 +185,41 @@ int fstCountingWriterWrite(FstCountingWriter* write, uint8_t* buf, uint32_t len) write->summer = taosCalcChecksum(write->summer, buf, len); return len; } -int fstCountingWriterRead(FstCountingWriter* write, uint8_t* buf, uint32_t len) { +int idxFileRead(IdxFstFile* write, uint8_t* buf, uint32_t len) { if (write == NULL) { return 0; } - WriterCtx* ctx = write->wrt; - int nRead = ctx->read(ctx, buf, len); + IFileCtx* ctx = write->wrt; + int nRead = ctx->read(ctx, buf, len); // assert(nRead == len); return nRead; } -uint32_t fstCountingWriterMaskedCheckSum(FstCountingWriter* write) { +uint32_t idxFileMaskedCheckSum(IdxFstFile* write) { // opt return write->summer; } -int fstCountingWriterFlush(FstCountingWriter* write) { - WriterCtx* ctx = write->wrt; +int idxFileFlush(IdxFstFile* write) { + IFileCtx* ctx = write->wrt; ctx->flush(ctx); - // write->wtr->flush return 1; } -void fstCountingWriterPackUintIn(FstCountingWriter* writer, uint64_t n, uint8_t nBytes) { +void idxFilePackUintIn(IdxFstFile* writer, uint64_t n, uint8_t nBytes) { assert(1 <= nBytes && nBytes <= 8); uint8_t* buf = 
taosMemoryCalloc(8, sizeof(uint8_t)); for (uint8_t i = 0; i < nBytes; i++) { buf[i] = (uint8_t)n; n = n >> 8; } - fstCountingWriterWrite(writer, buf, nBytes); + idxFileWrite(writer, buf, nBytes); taosMemoryFree(buf); return; } -uint8_t fstCountingWriterPackUint(FstCountingWriter* writer, uint64_t n) { +uint8_t idxFilePackUint(IdxFstFile* writer, uint64_t n) { uint8_t nBytes = packSize(n); - fstCountingWriterPackUintIn(writer, n, nBytes); + idxFilePackUintIn(writer, n, nBytes); return nBytes; } diff --git a/source/libs/index/src/indexFstNode.c b/source/libs/index/src/indexFstNode.c index e11f9dd37d..7185e44f46 100644 --- a/source/libs/index/src/indexFstNode.c +++ b/source/libs/index/src/indexFstNode.c @@ -95,7 +95,7 @@ void fstBuilderNodeCloneFrom(FstBuilderNode* dst, FstBuilderNode* src) { } } -// bool fstBuilderNodeCompileTo(FstBuilderNode *b, FstCountingWriter *wrt, CompiledAddr lastAddr, CompiledAddr +// bool fstBuilderNodeCompileTo(FstBuilderNode *b, IdxFile *wrt, CompiledAddr lastAddr, CompiledAddr // startAddr) { // size_t sz = taosArrayGetSize(b->trans); diff --git a/source/libs/index/src/indexFstSparse.c b/source/libs/index/src/indexFstSparse.c index e8ab3be2fe..71d8854dcc 100644 --- a/source/libs/index/src/indexFstSparse.c +++ b/source/libs/index/src/indexFstSparse.c @@ -17,7 +17,7 @@ FstSparseSet *sparSetCreate(int32_t sz) { FstSparseSet *ss = taosMemoryCalloc(1, sizeof(FstSparseSet)); - if (ss = NULL) { + if (ss == NULL) { return NULL; } diff --git a/source/libs/index/src/indexFstUtil.c b/source/libs/index/src/indexFstUtil.c index 5760b24900..5bda703b1f 100644 --- a/source/libs/index/src/indexFstUtil.c +++ b/source/libs/index/src/indexFstUtil.c @@ -75,7 +75,6 @@ CompiledAddr unpackDelta(char* data, uint64_t len, uint64_t nodeAddr) { } // fst slice func -// FstSlice fstSliceCreate(uint8_t* data, uint64_t len) { FstString* str = (FstString*)taosMemoryMalloc(sizeof(FstString)); @@ -164,16 +163,3 @@ int fstSliceCompare(FstSlice* a, FstSlice* b) { return 0; } } - -// FstStack* fstStackCreate(size_t elemSize, StackFreeElem freeFn) { -// FstStack *s = taosMemoryCalloc(1, sizeof(FstStack)); -// if (s == NULL) { return NULL; } -// s-> -// s->freeFn -// -//} -// void *fstStackPush(FstStack *s, void *elem); -// void *fstStackTop(FstStack *s); -// size_t fstStackLen(FstStack *s); -// void *fstStackGetAt(FstStack *s, size_t i); -// void fstStackDestory(FstStack *); diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c index d632540ee1..e9abd3e577 100644 --- a/source/libs/index/src/indexTfile.c +++ b/source/libs/index/src/indexTfile.c @@ -16,7 +16,7 @@ #include "index.h" #include "indexComm.h" #include "indexFst.h" -#include "indexFstCountingWriter.h" +#include "indexFstFile.h" #include "indexUtil.h" #include "taosdef.h" #include "taoserror.h" @@ -103,7 +103,7 @@ TFileCache* tfileCacheCreate(const char* path) { for (size_t i = 0; i < taosArrayGetSize(files); i++) { char* file = taosArrayGetP(files, i); - WriterCtx* wc = writerCtxCreate(TFile, file, true, 1024 * 1024 * 64); + IFileCtx* wc = idxFileCtxCreate(TFile, file, true, 1024 * 1024 * 64); if (wc == NULL) { indexError("failed to open index:%s", file); goto End; @@ -175,7 +175,7 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* reader) { tfileReaderRef(reader); return; } -TFileReader* tfileReaderCreate(WriterCtx* ctx) { +TFileReader* tfileReaderCreate(IFileCtx* ctx) { TFileReader* reader = taosMemoryCalloc(1, sizeof(TFileReader)); if (reader == NULL) { return NULL; @@ -216,7 +216,7 @@ 
void tfileReaderDestroy(TFileReader* reader) { } else { indexInfo("%s is not removed", reader->ctx->file.buf); } - writerCtxDestroy(reader->ctx, reader->remove); + idxFileCtxDestroy(reader->ctx, reader->remove); taosMemoryFree(reader); } @@ -490,7 +490,7 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const c char fullname[256] = {0}; tfileGenFileFullName(fullname, path, suid, colName, version); // indexInfo("open write file name %s", fullname); - WriterCtx* wcx = writerCtxCreate(TFile, fullname, false, 1024 * 1024 * 64); + IFileCtx* wcx = idxFileCtxCreate(TFile, fullname, false, 1024 * 1024 * 64); if (wcx == NULL) { return NULL; } @@ -507,18 +507,18 @@ TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const c char fullname[256] = {0}; tfileGenFileFullName(fullname, path, suid, colName, version); - WriterCtx* wc = writerCtxCreate(TFile, fullname, true, 1024 * 1024 * 1024); + IFileCtx* wc = idxFileCtxCreate(TFile, fullname, true, 1024 * 1024 * 1024); if (wc == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); indexError("failed to open readonly file: %s, reason: %s", fullname, terrstr()); return NULL; } - indexTrace("open read file name:%s, file size: %d", wc->file.buf, wc->file.size); + indexTrace("open read file name:%s, file size: %" PRId64 "", wc->file.buf, wc->file.size); TFileReader* reader = tfileReaderCreate(wc); return reader; } -TFileWriter* tfileWriterCreate(WriterCtx* ctx, TFileHeader* header) { +TFileWriter* tfileWriterCreate(IFileCtx* ctx, TFileHeader* header) { TFileWriter* tw = taosMemoryCalloc(1, sizeof(TFileWriter)); if (tw == NULL) { indexError("index: %" PRIu64 " failed to alloc TFilerWriter", header->suid); @@ -609,14 +609,14 @@ void tfileWriterClose(TFileWriter* tw) { if (tw == NULL) { return; } - writerCtxDestroy(tw->ctx, false); + idxFileCtxDestroy(tw->ctx, false); taosMemoryFree(tw); } void tfileWriterDestroy(TFileWriter* tw) { if (tw == NULL) { return; } - writerCtxDestroy(tw->ctx, false); + idxFileCtxDestroy(tw->ctx, false); taosMemoryFree(tw); } @@ -892,8 +892,8 @@ static int tfileReaderLoadHeader(TFileReader* reader) { return 0; } static int tfileReaderLoadFst(TFileReader* reader) { - WriterCtx* ctx = reader->ctx; - int size = ctx->size(ctx); + IFileCtx* ctx = reader->ctx; + int size = ctx->size(ctx); // current load fst into memory, refactor it later int fstSize = size - reader->header.fstOffset - sizeof(tfileMagicNumber); @@ -905,8 +905,9 @@ static int tfileReaderLoadFst(TFileReader* reader) { int64_t ts = taosGetTimestampUs(); int32_t nread = ctx->readFrom(ctx, buf, fstSize, reader->header.fstOffset); int64_t cost = taosGetTimestampUs() - ts; - indexInfo("nread = %d, and fst offset=%d, fst size: %d, filename: %s, file size: %d, time cost: %" PRId64 "us", nread, - reader->header.fstOffset, fstSize, ctx->file.buf, ctx->file.size, cost); + indexInfo("nread = %d, and fst offset=%d, fst size: %d, filename: %s, file size: %" PRId64 ", time cost: %" PRId64 + "us", + nread, reader->header.fstOffset, fstSize, ctx->file.buf, ctx->file.size, cost); // we assuse fst size less than FST_MAX_SIZE assert(nread > 0 && nread <= fstSize); @@ -919,7 +920,7 @@ static int tfileReaderLoadFst(TFileReader* reader) { } static int tfileReaderLoadTableIds(TFileReader* reader, int32_t offset, SArray* result) { // TODO(yihao): opt later - WriterCtx* ctx = reader->ctx; + IFileCtx* ctx = reader->ctx; // add block cache char block[4096] = {0}; int32_t nread = ctx->readFrom(ctx, block, sizeof(block), offset); @@ -952,7 +953,7 @@ static int 
tfileReaderLoadTableIds(TFileReader* reader, int32_t offset, SArray* } static int tfileReaderVerify(TFileReader* reader) { // just validate header and Footer, file corrupted also shuild be verified later - WriterCtx* ctx = reader->ctx; + IFileCtx* ctx = reader->ctx; uint64_t tMagicNumber = 0; diff --git a/source/libs/index/test/fstTest.cc b/source/libs/index/test/fstTest.cc index 332b7370df..7109c65e85 100644 --- a/source/libs/index/test/fstTest.cc +++ b/source/libs/index/test/fstTest.cc @@ -7,7 +7,6 @@ #include "index.h" #include "indexCache.h" #include "indexFst.h" -#include "indexFstCountingWriter.h" #include "indexFstUtil.h" #include "indexInt.h" #include "indexTfile.h" @@ -20,7 +19,7 @@ class FstWriter { public: FstWriter() { taosRemoveFile(fileName.c_str()); - _wc = writerCtxCreate(TFile, fileName.c_str(), false, 64 * 1024 * 1024); + _wc = idxFileCtxCreate(TFile, fileName.c_str(), false, 64 * 1024 * 1024); _b = fstBuilderCreate(_wc, 0); } bool Put(const std::string& key, uint64_t val) { @@ -38,25 +37,25 @@ class FstWriter { fstBuilderFinish(_b); fstBuilderDestroy(_b); - writerCtxDestroy(_wc, false); + idxFileCtxDestroy(_wc, false); } private: FstBuilder* _b; - WriterCtx* _wc; + IFileCtx* _wc; }; class FstReadMemory { public: FstReadMemory(int32_t size, const std::string& fileName = TD_TMP_DIR_PATH "tindex.tindex") { - _wc = writerCtxCreate(TFile, fileName.c_str(), true, 64 * 1024); - _w = fstCountingWriterCreate(_wc); + _wc = idxFileCtxCreate(TFile, fileName.c_str(), true, 64 * 1024); + _w = idxFileCreate(_wc); _size = size; memset((void*)&_s, 0, sizeof(_s)); } bool init() { char* buf = (char*)taosMemoryCalloc(1, sizeof(char) * _size); - int nRead = fstCountingWriterRead(_w, (uint8_t*)buf, _size); + int nRead = idxFileRead(_w, (uint8_t*)buf, _size); if (nRead <= 0) { return false; } @@ -141,18 +140,18 @@ class FstReadMemory { } ~FstReadMemory() { - fstCountingWriterDestroy(_w); + idxFileDestroy(_w); fstDestroy(_fst); fstSliceDestroy(&_s); - writerCtxDestroy(_wc, false); + idxFileCtxDestroy(_wc, false); } private: - FstCountingWriter* _w; - Fst* _fst; - FstSlice _s; - WriterCtx* _wc; - int32_t _size; + IdxFstFile* _w; + Fst* _fst; + FstSlice _s; + IFileCtx* _wc; + int32_t _size; }; #define L 100 diff --git a/source/libs/index/test/fstUT.cc b/source/libs/index/test/fstUT.cc index b9388e62f7..b8663dd9f2 100644 --- a/source/libs/index/test/fstUT.cc +++ b/source/libs/index/test/fstUT.cc @@ -8,7 +8,6 @@ #include "index.h" #include "indexCache.h" #include "indexFst.h" -#include "indexFstCountingWriter.h" #include "indexFstUtil.h" #include "indexInt.h" #include "indexTfile.h" @@ -40,7 +39,7 @@ static void EnvCleanup() {} class FstWriter { public: FstWriter() { - _wc = writerCtxCreate(TFile, tindex, false, 64 * 1024 * 1024); + _wc = idxFileCtxCreate(TFile, tindex, false, 64 * 1024 * 1024); _b = fstBuilderCreate(_wc, 0); } bool Put(const std::string& key, uint64_t val) { @@ -58,25 +57,25 @@ class FstWriter { fstBuilderFinish(_b); fstBuilderDestroy(_b); - writerCtxDestroy(_wc, false); + idxFileCtxDestroy(_wc, false); } private: FstBuilder* _b; - WriterCtx* _wc; + IFileCtx* _wc; }; class FstReadMemory { public: FstReadMemory(size_t size) { - _wc = writerCtxCreate(TFile, tindex, true, 64 * 1024); - _w = fstCountingWriterCreate(_wc); + _wc = idxFileCtxCreate(TFile, tindex, true, 64 * 1024); + _w = idxFileCreate(_wc); _size = size; memset((void*)&_s, 0, sizeof(_s)); } bool init() { char* buf = (char*)taosMemoryCalloc(1, sizeof(char) * _size); - int nRead = fstCountingWriterRead(_w, (uint8_t*)buf, 
_size); + int nRead = idxFileRead(_w, (uint8_t*)buf, _size); if (nRead <= 0) { return false; } @@ -130,18 +129,18 @@ class FstReadMemory { } ~FstReadMemory() { - fstCountingWriterDestroy(_w); + idxFileDestroy(_w); fstDestroy(_fst); fstSliceDestroy(&_s); - writerCtxDestroy(_wc, false); + idxFileCtxDestroy(_wc, false); } private: - FstCountingWriter* _w; - Fst* _fst; - FstSlice _s; - WriterCtx* _wc; - size_t _size; + IdxFstFile* _w; + Fst* _fst; + FstSlice _s; + IFileCtx* _wc; + size_t _size; }; class FstWriterEnv : public ::testing::Test { diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc index e18297cd25..6b20205014 100644 --- a/source/libs/index/test/indexTests.cc +++ b/source/libs/index/test/indexTests.cc @@ -20,7 +20,6 @@ #include "index.h" #include "indexCache.h" #include "indexFst.h" -#include "indexFstCountingWriter.h" #include "indexFstUtil.h" #include "indexInt.h" #include "indexTfile.h" @@ -51,7 +50,7 @@ class DebugInfo { class FstWriter { public: FstWriter() { - _wc = writerCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", false, 64 * 1024 * 1024); + _wc = idxFileCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", false, 64 * 1024 * 1024); _b = fstBuilderCreate(NULL, 0); } bool Put(const std::string& key, uint64_t val) { @@ -64,25 +63,25 @@ class FstWriter { fstBuilderFinish(_b); fstBuilderDestroy(_b); - writerCtxDestroy(_wc, false); + idxFileCtxDestroy(_wc, false); } private: FstBuilder* _b; - WriterCtx* _wc; + IFileCtx* _wc; }; class FstReadMemory { public: FstReadMemory(size_t size) { - _wc = writerCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", true, 64 * 1024); - _w = fstCountingWriterCreate(_wc); + _wc = idxFileCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", true, 64 * 1024); + _w = idxFileCreate(_wc); _size = size; memset((void*)&_s, 0, sizeof(_s)); } bool init() { char* buf = (char*)taosMemoryCalloc(1, sizeof(char) * _size); - int nRead = fstCountingWriterRead(_w, (uint8_t*)buf, _size); + int nRead = idxFileRead(_w, (uint8_t*)buf, _size); if (nRead <= 0) { return false; } @@ -124,18 +123,18 @@ class FstReadMemory { } ~FstReadMemory() { - fstCountingWriterDestroy(_w); + idxFileDestroy(_w); fstDestroy(_fst); fstSliceDestroy(&_s); - writerCtxDestroy(_wc, true); + idxFileCtxDestroy(_wc, true); } private: - FstCountingWriter* _w; - Fst* _fst; - FstSlice _s; - WriterCtx* _wc; - size_t _size; + IdxFstFile* _w; + Fst* _fst; + FstSlice _s; + IFileCtx* _wc; + size_t _size; }; #define L 100 @@ -392,13 +391,13 @@ class TFileObj { fileName_ = path; - WriterCtx* ctx = writerCtxCreate(TFile, path.c_str(), false, 64 * 1024 * 1024); + IFileCtx* ctx = idxFileCtxCreate(TFile, path.c_str(), false, 64 * 1024 * 1024); writer_ = tfileWriterCreate(ctx, &header); return writer_ != NULL ? true : false; } bool InitReader() { - WriterCtx* ctx = writerCtxCreate(TFile, fileName_.c_str(), true, 64 * 1024 * 1024); + IFileCtx* ctx = idxFileCtxCreate(TFile, fileName_.c_str(), true, 64 * 1024 * 1024); reader_ = tfileReaderCreate(ctx); return reader_ != NULL ? 
true : false; } diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc index 134fb53d2b..c65949277e 100644 --- a/source/libs/index/test/jsonUT.cc +++ b/source/libs/index/test/jsonUT.cc @@ -7,7 +7,6 @@ #include "index.h" #include "indexCache.h" #include "indexFst.h" -#include "indexFstCountingWriter.h" #include "indexFstUtil.h" #include "indexInt.h" #include "indexTfile.h" diff --git a/source/libs/index/test/utilUT.cc b/source/libs/index/test/utilUT.cc index 6858d31325..ab5128cd3e 100644 --- a/source/libs/index/test/utilUT.cc +++ b/source/libs/index/test/utilUT.cc @@ -8,7 +8,6 @@ #include "indexCache.h" #include "indexComm.h" #include "indexFst.h" -#include "indexFstCountingWriter.h" #include "indexFstUtil.h" #include "indexInt.h" #include "indexTfile.h" diff --git a/source/libs/monitor/test/monTest.cpp b/source/libs/monitor/test/monTest.cpp index 726b2aafe4..3f7b1b51da 100644 --- a/source/libs/monitor/test/monTest.cpp +++ b/source/libs/monitor/test/monTest.cpp @@ -75,18 +75,18 @@ void MonitorTest::GetSysInfo(SMonSysInfo *pInfo) { pInfo->cpu_engine = 2.1; pInfo->cpu_system = 2.1; pInfo->cpu_cores = 2; - pInfo->mem_engine = 3.1; - pInfo->mem_system = 3.2; - pInfo->mem_total = 3.3; - pInfo->disk_engine = 4.1; - pInfo->disk_used = 4.2; - pInfo->disk_total = 4.3; - pInfo->net_in = 5.1; - pInfo->net_out = 5.2; - pInfo->io_read = 6.1; - pInfo->io_write = 6.2; - pInfo->io_read_disk = 7.1; - pInfo->io_write_disk = 7.2; + pInfo->mem_engine = 3; + pInfo->mem_system = 3; + pInfo->mem_total = 3; + pInfo->disk_engine = 4; + pInfo->disk_used = 4; + pInfo->disk_total = 4; + pInfo->net_in = 5; + pInfo->net_out = 5; + pInfo->io_read = 6; + pInfo->io_write = 6; + pInfo->io_read_disk = 7; + pInfo->io_write_disk = 7; } void MonitorTest::GetClusterInfo(SMonClusterInfo *pInfo) { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 9c5cec07df..25a41fb15c 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -368,6 +368,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) { static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); COPY_SCALAR_FIELD(joinType); + CLONE_NODE_FIELD(pMergeCondition); CLONE_NODE_FIELD(pOnConditions); COPY_SCALAR_FIELD(isSingleTableJoin); return TSDB_CODE_SUCCESS; diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 4179930033..34f92dac0b 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1254,6 +1254,7 @@ static int32_t jsonToLogicPlan(const SJson* pJson, void* pObj) { static const char* jkJoinLogicPlanJoinType = "JoinType"; static const char* jkJoinLogicPlanOnConditions = "OnConditions"; +static const char* jkJoinLogicPlanMergeCondition = "MergeConditions"; static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) { const SJoinLogicNode* pNode = (const SJoinLogicNode*)pObj; @@ -1262,6 +1263,9 @@ static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkJoinLogicPlanJoinType, pNode->joinType); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkJoinLogicPlanMergeCondition, nodeToJson, pNode->pMergeCondition); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkJoinLogicPlanOnConditions, nodeToJson, pNode->pOnConditions); } @@ -1617,6 +1621,7 @@ static 
int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) { } static const char* jkJoinPhysiPlanJoinType = "JoinType"; +static const char* jkJoinPhysiPlanMergeCondition = "MergeCondition"; static const char* jkJoinPhysiPlanOnConditions = "OnConditions"; static const char* jkJoinPhysiPlanTargets = "Targets"; @@ -1627,6 +1632,9 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkJoinPhysiPlanMergeCondition, nodeToJson, pNode->pMergeCondition); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkJoinPhysiPlanOnConditions, nodeToJson, pNode->pOnConditions); } @@ -1648,6 +1656,9 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkJoinPhysiPlanMergeCondition, &pNode->pMergeCondition); + } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkJoinPhysiPlanTargets, &pNode->pTargets); } diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c index 3747dde9ed..b12e3b14c7 100644 --- a/source/libs/nodes/src/nodesTraverseFuncs.c +++ b/source/libs/nodes/src/nodesTraverseFuncs.c @@ -470,6 +470,9 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode; res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext); + if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { + res = walkPhysiPlan(pJoin->pMergeCondition, order, walker, pContext); + } if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { res = walkPhysiPlan(pJoin->pOnConditions, order, walker, pContext); } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index eb59bd7f6a..118cd80807 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -19,8 +19,8 @@ #include "querynodes.h" #include "taos.h" #include "taoserror.h" -#include "thash.h" #include "tdatablock.h" +#include "thash.h" static SNode* makeNode(ENodeType type, size_t size) { SNode* p = taosMemoryCalloc(1, size); @@ -718,6 +718,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_LOGIC_PLAN_JOIN: { SJoinLogicNode* pLogicNode = (SJoinLogicNode*)pNode; destroyLogicNode((SLogicNode*)pLogicNode); + nodesDestroyNode(pLogicNode->pMergeCondition); nodesDestroyNode(pLogicNode->pOnConditions); break; } @@ -828,6 +829,7 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode; destroyPhysiNode((SPhysiNode*)pPhyNode); + nodesDestroyNode(pPhyNode->pMergeCondition); nodesDestroyNode(pPhyNode->pOnConditions); nodesDestroyList(pPhyNode->pTargets); break; @@ -1493,17 +1495,54 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* return TSDB_CODE_SUCCESS; } +int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols) { + if (NULL == pCols) { + return TSDB_CODE_FAILED; + } + SCollectColumnsCxt cxt = { + .errCode = TSDB_CODE_SUCCESS, + .pTableAlias = pTableAlias, + .collectType = type, + .pCols = (NULL == *pCols ? 
nodesMakeList() : *pCols), + .pColHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK)}; + if (NULL == cxt.pCols || NULL == cxt.pColHash) { + return TSDB_CODE_OUT_OF_MEMORY; + } + *pCols = NULL; + + nodesWalkExpr(node, collectColumns, &cxt); + + taosHashCleanup(cxt.pColHash); + if (TSDB_CODE_SUCCESS != cxt.errCode) { + nodesDestroyList(cxt.pCols); + return cxt.errCode; + } + if (LIST_LENGTH(cxt.pCols) > 0) { + *pCols = cxt.pCols; + } else { + nodesDestroyList(cxt.pCols); + } + + return TSDB_CODE_SUCCESS; + +} + typedef struct SCollectFuncsCxt { int32_t errCode; FFuncClassifier classifier; SNodeList* pFuncs; + SHashObj* pAliasName; } SCollectFuncsCxt; static EDealRes collectFuncs(SNode* pNode, void* pContext) { SCollectFuncsCxt* pCxt = (SCollectFuncsCxt*)pContext; if (QUERY_NODE_FUNCTION == nodeType(pNode) && pCxt->classifier(((SFunctionNode*)pNode)->funcId) && !(((SExprNode*)pNode)->orderAlias)) { - pCxt->errCode = nodesListStrictAppend(pCxt->pFuncs, nodesCloneNode(pNode)); + SExprNode* pExpr = (SExprNode*)pNode; + if (NULL == taosHashGet(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName))) { + pCxt->errCode = nodesListStrictAppend(pCxt->pFuncs, nodesCloneNode(pNode)); + taosHashPut(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName), &pExpr, POINTER_BYTES); + } return (TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); } return DEAL_RES_CONTINUE; @@ -1515,23 +1554,27 @@ int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifi } SCollectFuncsCxt cxt = { - .errCode = TSDB_CODE_SUCCESS, .classifier = classifier, .pFuncs = (NULL == *pFuncs ? nodesMakeList() : *pFuncs)}; + .errCode = TSDB_CODE_SUCCESS, + .classifier = classifier, + .pFuncs = (NULL == *pFuncs ? 
nodesMakeList() : *pFuncs), + .pAliasName = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, false)}; if (NULL == cxt.pFuncs) { return TSDB_CODE_OUT_OF_MEMORY; } *pFuncs = NULL; nodesWalkSelectStmt(pSelect, clause, collectFuncs, &cxt); - if (TSDB_CODE_SUCCESS != cxt.errCode) { - nodesDestroyList(cxt.pFuncs); - return cxt.errCode; - } - if (LIST_LENGTH(cxt.pFuncs) > 0) { - *pFuncs = cxt.pFuncs; + if (TSDB_CODE_SUCCESS == cxt.errCode) { + if (LIST_LENGTH(cxt.pFuncs) > 0) { + *pFuncs = cxt.pFuncs; + } else { + nodesDestroyList(cxt.pFuncs); + } } else { nodesDestroyList(cxt.pFuncs); } + taosHashCleanup(cxt.pAliasName); - return TSDB_CODE_SUCCESS; + return cxt.errCode; } typedef struct SCollectSpecialNodesCxt { diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 4585a59612..0c9156fc4c 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -154,6 +154,7 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_ SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName, SToken* pNewColName); SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal); +SNode* setAlterSuperTableType(SNode* pStmt); SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName); SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type); SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbName, diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h index e829c9266f..896e2bc239 100644 --- a/source/libs/parser/inc/parUtil.h +++ b/source/libs/parser/inc/parUtil.h @@ -53,6 +53,7 @@ typedef struct SParseMetaCache { } SParseMetaCache; int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...); +int32_t generateSyntaxErrMsgExt(SMsgBuf* pBuf, int32_t errCode, const char* pFormat, ...); int32_t buildInvalidOperationMsg(SMsgBuf* pMsgBuf, const char* msg); int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 2e58cc56b8..19b327f8c6 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -232,7 +232,7 @@ cmd ::= DROP TABLE multi_drop_clause(A). cmd ::= DROP STABLE exists_opt(A) full_table_name(B). { pCxt->pRootNode = createDropSuperTableStmt(pCxt, A, B); } cmd ::= ALTER TABLE alter_table_clause(A). { pCxt->pRootNode = A; } -cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode = A; } +cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode = setAlterSuperTableType(A); } alter_table_clause(A) ::= full_table_name(B) alter_table_options(C). { A = createAlterTableModifyOptions(pCxt, B, C); } alter_table_clause(A) ::= @@ -259,7 +259,7 @@ multi_create_clause(A) ::= multi_create_clause(B) create_subtable_clause(C). create_subtable_clause(A) ::= not_exists_opt(B) full_table_name(C) USING full_table_name(D) - specific_tags_opt(E) TAGS NK_LP literal_list(F) NK_RP table_options(G). { A = createCreateSubTableClause(pCxt, B, C, D, E, F, G); } + specific_tags_opt(E) TAGS NK_LP expression_list(F) NK_RP table_options(G). 
{ A = createCreateSubTableClause(pCxt, B, C, D, E, F, G); } %type multi_drop_clause { SNodeList* } %destructor multi_drop_clause { nodesDestroyList($$); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 59fbeded60..c85c44f09b 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1127,6 +1127,11 @@ SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken return createAlterTableStmtFinalize(pRealTable, pStmt); } +SNode* setAlterSuperTableType(SNode* pStmt) { + setNodeType(pStmt, QUERY_NODE_ALTER_SUPER_TABLE_STMT); + return pStmt; +} + SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) { CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, false)) { diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 80ca411e0e..9cc822ee38 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -247,6 +247,10 @@ static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTabl return code; } +static int32_t collectMetaKeyFromAlterStable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); +} + static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) { return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); } @@ -483,6 +487,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromDropTable(pCxt, (SDropTableStmt*)pStmt); case QUERY_NODE_ALTER_TABLE_STMT: return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt); + case QUERY_NODE_ALTER_SUPER_TABLE_STMT: + return collectMetaKeyFromAlterStable(pCxt, (SAlterTableStmt*)pStmt); case QUERY_NODE_USE_DATABASE_STMT: return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt); case QUERY_NODE_CREATE_INDEX_STMT: diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index a76640d0b5..a286531588 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -48,6 +48,12 @@ pSql += sToken.n; \ } while (TK_NK_SPACE == sToken.type) +typedef struct SInsertParseBaseContext { + SParseContext* pComCxt; + char* pSql; + SMsgBuf msg; +} SInsertParseBaseContext; + typedef struct SInsertParseContext { SParseContext* pComCxt; // input char* pSql; // input @@ -1105,6 +1111,32 @@ static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* return taosHashPut(pHash, pName, len, &pBackup, POINTER_BYTES); } +static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) { + SToken sToken; + int32_t expectRightParenthesis = 1; + while (1) { + NEXT_TOKEN(pCxt->pSql, sToken); + if (TK_NK_LP == sToken.type) { + ++expectRightParenthesis; + } else if (TK_NK_RP == sToken.type && 0 == --expectRightParenthesis) { + break; + } + if (0 == sToken.n) { + return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); } + +static int32_t ignoreBoundColumns(SInsertParseContext* pCxt) { + SInsertParseSyntaxCxt cxt = {.pComCxt = pCxt->pComCxt, .pSql = pCxt->pSql, .msg = pCxt->msg, .pMetaCache = NULL}; + int32_t code = skipBoundColumns(&cxt); + pCxt->pSql = cxt.pSql; + return code; +} + static int32_t 
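Note: setAlterSuperTableType above simply retags the statement node produced by the shared alter_table_clause grammar, so that ALTER STABLE flows through QUERY_NODE_ALTER_SUPER_TABLE_STMT and later passes (collectMetaKeyFromAlterStable, translateQuery) can branch on it. A toy sketch of that retag-and-dispatch idea, with a local enum/struct standing in for the real SNode/ENodeType machinery:

#include <stdio.h>

typedef enum { ALTER_TABLE_STMT, ALTER_SUPER_TABLE_STMT } StmtType;
typedef struct { StmtType type; const char *tb; } Stmt;

/* The grammar action retags ALTER STABLE so later passes can branch on it. */
static Stmt *setAlterSuperTableType(Stmt *s) { s->type = ALTER_SUPER_TABLE_STMT; return s; }

static const char *route(const Stmt *s) {
  switch (s->type) {
    case ALTER_SUPER_TABLE_STMT: return "collectMetaKeyFromAlterStable";
    default:                     return "collectMetaKeyFromAlterTable";
  }
}

int main(void) {
  Stmt s = {ALTER_TABLE_STMT, "st"};
  printf("%s\n", route(setAlterSuperTableType(&s)));  /* collectMetaKeyFromAlterStable */
  return 0;
}
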
skipUsingClause(SInsertParseSyntaxCxt* pCxt); // pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...) @@ -1453,12 +1485,29 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { tNameGetFullDbName(&name, dbFName); CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName))); + bool existedUsing = false; // USING clause + if (TK_USING == sToken.type) { + existedUsing = true; + CHECK_CODE(parseUsingClause(pCxt, &name, tbFName)); + NEXT_TOKEN(pCxt->pSql, sToken); + autoCreateTbl = true; + } + + char* pBoundColsStart = NULL; + if (TK_NK_LP == sToken.type) { + // pSql -> field1_name, ...) + pBoundColsStart = pCxt->pSql; + CHECK_CODE(ignoreBoundColumns(pCxt)); + // CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta))); + NEXT_TOKEN(pCxt->pSql, sToken); + } + if (TK_USING == sToken.type) { CHECK_CODE(parseUsingClause(pCxt, &name, tbFName)); NEXT_TOKEN(pCxt->pSql, sToken); autoCreateTbl = true; - } else { + } else if (!existedUsing) { CHECK_CODE(getTableMeta(pCxt, &name, dbFName)); } @@ -1467,10 +1516,11 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, &dataBuf, NULL, &pCxt->createTblReq)); - if (TK_NK_LP == sToken.type) { - // pSql -> field1_name, ...) + if (NULL != pBoundColsStart) { + char* pCurrPos = pCxt->pSql; + pCxt->pSql = pBoundColsStart; CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta))); - NEXT_TOKEN(pCxt->pSql, sToken); + pCxt->pSql = pCurrPos; } if (TK_VALUES == sToken.type) { @@ -1610,25 +1660,6 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache return code; } -static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) { - SToken sToken; - int32_t expectRightParenthesis = 1; - while (1) { - NEXT_TOKEN(pCxt->pSql, sToken); - if (TK_NK_LP == sToken.type) { - ++expectRightParenthesis; - } else if (TK_NK_RP == sToken.type && 0 == --expectRightParenthesis) { - break; - } - if (0 == sToken.n) { - return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL); - } - } - return TSDB_CODE_SUCCESS; -} - -static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); } - // pSql -> (field1_value, ...) [(field1_value2, ...) ...] static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) { int32_t numOfRows = 0; @@ -1717,8 +1748,25 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) { SToken tbnameToken = sToken; NEXT_TOKEN(pCxt->pSql, sToken); + bool existedUsing = false; // USING clause if (TK_USING == sToken.type) { + existedUsing = true; + CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken)); + NEXT_TOKEN(pCxt->pSql, sToken); + CHECK_CODE(collectTableMetaKey(pCxt, &sToken)); + CHECK_CODE(skipUsingClause(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } + + if (TK_NK_LP == sToken.type) { + // pSql -> field1_name, ...) + CHECK_CODE(skipBoundColumns(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } + + if (TK_USING == sToken.type && !existedUsing) { + existedUsing = true; CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken)); NEXT_TOKEN(pCxt->pSql, sToken); CHECK_CODE(collectTableMetaKey(pCxt, &sToken)); @@ -1728,12 +1776,6 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) { CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken)); } - if (TK_NK_LP == sToken.type) { - // pSql -> field1_name, ...) 
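Note: the reshuffled parseInsertBody / parseInsertBodySyntax logic above lets the bound-column list appear on either side of the USING clause; on the first pass the list is skipped with the relocated skipParentheses/ignoreBoundColumns helpers and re-parsed once the table meta is available. A minimal standalone sketch of the balanced-parenthesis skip, written against a plain C string rather than the real SInsertParseSyntaxCxt/NEXT_TOKEN machinery (the real helper starts after the opening '(' has already been consumed):

#include <stdio.h>

/* Advance *p past one parenthesized group, tracking nesting depth.
 * Returns 0 on success, -1 if the closing ')' is missing. */
static int skipParenGroup(const char **p) {
  int depth = 0;
  for (const char *s = *p; *s != '\0'; ++s) {
    if (*s == '(') {
      ++depth;
    } else if (*s == ')') {
      if (--depth == 0) { *p = s + 1; return 0; }
    }
  }
  return -1;  /* ") expected" in the real parser */
}

int main(void) {
  const char *sql = "(ts, c1, c2) VALUES (now, 1, 2)";
  const char *p = sql;
  if (0 == skipParenGroup(&p)) printf("rest:%s\n", p);  /* rest: VALUES (now, 1, 2) */
  return 0;
}
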
- CHECK_CODE(skipBoundColumns(pCxt)); - NEXT_TOKEN(pCxt->pSql, sToken); - } - if (TK_VALUES == sToken.type) { // pSql -> (field1_value, ...) [(field1_value2, ...) ...] CHECK_CODE(skipValuesClause(pCxt)); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 2b5993b800..f6b8e194cb 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -121,8 +121,8 @@ static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STa } } if (TSDB_CODE_SUCCESS != code) { - parserError("catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, - pName->tname); + parserError("0x%" PRIx64 " catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId, + tstrerror(code), pName->dbname, pName->tname); } return code; } @@ -150,8 +150,8 @@ static int32_t getTableCfg(STranslateContext* pCxt, const SName* pName, STableCf } } if (TSDB_CODE_SUCCESS != code) { - parserError("catalogRefreshGetTableCfg error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, - pName->tname); + parserError("0x%" PRIx64 " catalogRefreshGetTableCfg error, code:%s, dbName:%s, tbName:%s", + pCxt->pParseCxt->requestId, tstrerror(code), pName->dbname, pName->tname); } return code; } @@ -173,8 +173,8 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName, code = catalogRefreshGetTableMeta(pParCxt->pCatalog, &conn, &name, pMeta, false); } if (TSDB_CODE_SUCCESS != code) { - parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName, - pTableName); + parserError("0x%" PRIx64 " catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", + pCxt->pParseCxt->requestId, tstrerror(code), pDbName, pTableName); } return code; } @@ -196,7 +196,8 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr } } if (TSDB_CODE_SUCCESS != code) { - parserError("catalogGetDBVgInfo error, code:%s, dbFName:%s", tstrerror(code), fullDbName); + parserError("0x%" PRIx64 " catalogGetDBVgInfo error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId, + tstrerror(code), fullDbName); } return code; } @@ -227,8 +228,8 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam } } if (TSDB_CODE_SUCCESS != code) { - parserError("catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, - pName->tname); + parserError("0x%" PRIx64 " catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", + pCxt->pParseCxt->requestId, tstrerror(code), pName->dbname, pName->tname); } return code; } @@ -251,7 +252,8 @@ static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int } } if (TSDB_CODE_SUCCESS != code) { - parserError("catalogGetDBVgVersion error, code:%s, dbFName:%s", tstrerror(code), pDbFName); + parserError("0x%" PRIx64 " catalogGetDBVgVersion error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId, + tstrerror(code), pDbFName); } return code; } @@ -276,7 +278,8 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo } } if (TSDB_CODE_SUCCESS != code) { - parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname); + parserError("0x%" PRIx64 " catalogGetDBCfg error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId, tstrerror(code), + dbFname); } return code; } @@ -303,6 +306,10 @@ static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) { 
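Note: the parserError call sites above now prefix every message with the request id, so a failing statement can be correlated across log lines. A small self-contained illustration of the formatting idiom (PRIx64 for the hexadecimal id); parserError itself and requestId are TDengine internals and are only mimicked here:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for parserError(): tag each line with the request id in hex. */
static void logParserError(uint64_t requestId, const char *what, const char *db, const char *tb) {
  fprintf(stderr, "0x%" PRIx64 " %s error, dbName:%s, tbName:%s\n", requestId, what, db, tb);
}

int main(void) {
  logParserError(0x1a2b3c4d5e6fULL, "catalogGetTableMeta", "test", "st1");
  return 0;
}
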
pFunc->udfBufSize = funcInfo.bufSize; tFreeSFuncInfo(&funcInfo); } + if (TSDB_CODE_SUCCESS != code) { + parserError("0x%" PRIx64 " catalogGetUdfInfo error, code:%s, funcName:%s", pCxt->pParseCxt->requestId, + tstrerror(code), pFunc->functionName); + } return code; } @@ -323,7 +330,8 @@ static int32_t getTableIndex(STranslateContext* pCxt, const SName* pName, SArray code = catalogGetTableIndex(pParCxt->pCatalog, &conn, pName, pIndexes); } if (TSDB_CODE_SUCCESS != code) { - parserError("getTableIndex error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, pName->tname); + parserError("0x%" PRIx64 " getTableIndex error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId, + tstrerror(code), pName->dbname, pName->tname); } return code; } @@ -341,7 +349,7 @@ static int32_t getDnodeList(STranslateContext* pCxt, SArray** pDnodes) { code = catalogGetDnodeList(pParCxt->pCatalog, &conn, pDnodes); } if (TSDB_CODE_SUCCESS != code) { - parserError("getDnodeList error, code:%s", tstrerror(code)); + parserError("0x%" PRIx64 " getDnodeList error, code:%s", pCxt->pParseCxt->requestId, tstrerror(code)); } return code; } @@ -707,7 +715,7 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { - if (isSelectStmt(pCxt->pCurrStmt) && NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable) { + if (NULL == pCxt->pCurrStmt || isSelectStmt(pCxt->pCurrStmt) && NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName); } @@ -776,12 +784,144 @@ static int32_t parseBoolFromValueNode(STranslateContext* pCxt, SValueNode* pVal) } } -static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt) { - uint8_t precision = getPrecisionFromCurrStmt(pCxt->pCurrStmt, targetDt.precision); - pVal->node.resType.precision = precision; +static EDealRes translateDurationValue(STranslateContext* pCxt, SValueNode* pVal) { + if (parseNatualDuration(pVal->literal, strlen(pVal->literal), &pVal->datum.i, &pVal->unit, + pVal->node.resType.precision) != TSDB_CODE_SUCCESS) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int64_t*)&pVal->typeData = pVal->datum.i; + return DEAL_RES_CONTINUE; +} + +static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt, bool strict) { + int32_t code = TSDB_CODE_SUCCESS; + switch (targetDt.type) { + case TSDB_DATA_TYPE_BOOL: + if (TSDB_CODE_SUCCESS != parseBoolFromValueNode(pCxt, pVal)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(bool*)&pVal->typeData = pVal->datum.b; + break; + case TSDB_DATA_TYPE_TINYINT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_TINYINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int8_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_SMALLINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int16_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_INT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 
10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_INT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int32_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_BIGINT: { + code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_BIGINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int64_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UTINYINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint8_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_USMALLINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint16_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_UINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint32_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u); + if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UBIGINT(pVal->datum.i))) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(uint64_t*)&pVal->typeData = pVal->datum.u; + break; + } + case TSDB_DATA_TYPE_FLOAT: { + pVal->datum.d = taosStr2Double(pVal->literal, NULL); + *(float*)&pVal->typeData = pVal->datum.d; + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + pVal->datum.d = taosStr2Double(pVal->literal, NULL); + *(double*)&pVal->typeData = pVal->datum.d; + break; + } + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_VARBINARY: { + pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); + if (NULL == pVal->datum.p) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); + } + int32_t len = TMIN(targetDt.bytes - VARSTR_HEADER_SIZE, pVal->node.resType.bytes); + varDataSetLen(pVal->datum.p, len); + strncpy(varDataVal(pVal->datum.p), pVal->literal, len); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: { + if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pCxt, pVal)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + *(int64_t*)&pVal->typeData = pVal->datum.i; + break; + } + case TSDB_DATA_TYPE_NCHAR: { + pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); + if (NULL == pVal->datum.p) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); + } + + int32_t len = 0; + if (!taosMbsToUcs4(pVal->literal, strlen(pVal->literal), (TdUcs4*)varDataVal(pVal->datum.p), + targetDt.bytes - VARSTR_HEADER_SIZE, &len)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); + } + varDataSetLen(pVal->datum.p, len); + break; + } + case TSDB_DATA_TYPE_DECIMAL: + case TSDB_DATA_TYPE_BLOB: + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, 
pVal->literal); + default: + break; + } + return DEAL_RES_CONTINUE; +} + +static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt, bool strict) { if (pVal->placeholderNo > 0 || pVal->isNull) { return DEAL_RES_CONTINUE; } + if (TSDB_DATA_TYPE_NULL == pVal->node.resType.type) { // TODO // pVal->node.resType = targetDt; @@ -789,114 +929,18 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD pVal->isNull = true; return DEAL_RES_CONTINUE; } - if (pVal->isDuration) { - if (parseNatualDuration(pVal->literal, strlen(pVal->literal), &pVal->datum.i, &pVal->unit, precision) != - TSDB_CODE_SUCCESS) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - *(int64_t*)&pVal->typeData = pVal->datum.i; - } else { - switch (targetDt.type) { - case TSDB_DATA_TYPE_NULL: - break; - case TSDB_DATA_TYPE_BOOL: - if (TSDB_CODE_SUCCESS != parseBoolFromValueNode(pCxt, pVal)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - *(bool*)&pVal->typeData = pVal->datum.b; - break; - case TSDB_DATA_TYPE_TINYINT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int8_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int16_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_INT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int32_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_BIGINT: { - pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); - *(int64_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint8_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint16_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_UINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint32_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); - *(uint64_t*)&pVal->typeData = pVal->datum.u; - break; - } - case TSDB_DATA_TYPE_FLOAT: { - pVal->datum.d = taosStr2Double(pVal->literal, NULL); - *(float*)&pVal->typeData = pVal->datum.d; - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - pVal->datum.d = taosStr2Double(pVal->literal, NULL); - *(double*)&pVal->typeData = pVal->datum.d; - break; - } - case TSDB_DATA_TYPE_VARCHAR: - case TSDB_DATA_TYPE_VARBINARY: { - pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); - if (NULL == pVal->datum.p) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); - } - int32_t len = TMIN(targetDt.bytes - VARSTR_HEADER_SIZE, pVal->node.resType.bytes); - varDataSetLen(pVal->datum.p, len); - strncpy(varDataVal(pVal->datum.p), pVal->literal, len); - break; - } - case TSDB_DATA_TYPE_TIMESTAMP: { - if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pCxt, pVal)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - *(int64_t*)&pVal->typeData = pVal->datum.i; - break; - } - case TSDB_DATA_TYPE_NCHAR: { - pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); - if (NULL == pVal->datum.p) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); - } - int32_t len = 0; - if (!taosMbsToUcs4(pVal->literal, strlen(pVal->literal), 
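Note: with the new strict flag, translateNormalValue above rejects literals that either fail to parse or fall outside the target type's range (the toInteger return code plus the IS_VALID_* macros), instead of silently truncating as the old taosStr2Int64 path did. A rough standalone equivalent for the signed tinyint case, using strtoll in place of toInteger:

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a literal into int8_t; in "strict" mode reject parse errors and out-of-range values. */
static int parseTinyintStrict(const char *literal, int strict, int8_t *out) {
  errno = 0;
  char *end = NULL;
  long long v = strtoll(literal, &end, 10);
  int bad = (errno != 0) || (end == literal) || (*end != '\0') || v < INT8_MIN || v > INT8_MAX;
  if (strict && bad) return -1;   /* maps to TSDB_CODE_PAR_WRONG_VALUE_TYPE in the parser */
  *out = (int8_t)v;
  return 0;
}

int main(void) {
  int8_t v = 0;
  printf("%d\n", parseTinyintStrict("120", 1, &v));   /* 0, v == 120 */
  printf("%d\n", parseTinyintStrict("300", 1, &v));   /* -1: out of range for TINYINT */
  return 0;
}
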
(TdUcs4*)varDataVal(pVal->datum.p), - targetDt.bytes - VARSTR_HEADER_SIZE, &len)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } - varDataSetLen(pVal->datum.p, len); - break; - } - case TSDB_DATA_TYPE_DECIMAL: - case TSDB_DATA_TYPE_BLOB: - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - default: - break; - } + pVal->node.resType.precision = getPrecisionFromCurrStmt(pCxt->pCurrStmt, targetDt.precision); + + EDealRes res = DEAL_RES_CONTINUE; + if (pVal->isDuration) { + res = translateDurationValue(pCxt, pVal); + } else { + res = translateNormalValue(pCxt, pVal, targetDt, strict); } pVal->node.resType = targetDt; pVal->translate = true; - return DEAL_RES_CONTINUE; + return res; } static int32_t calcTypeBytes(SDataType dt) { @@ -912,7 +956,7 @@ static int32_t calcTypeBytes(SDataType dt) { static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) { SDataType dt = pVal->node.resType; dt.bytes = calcTypeBytes(dt); - return translateValueImpl(pCxt, pVal, dt); + return translateValueImpl(pCxt, pVal, dt, false); } static bool isMultiResFunc(SNode* pNode) { @@ -1248,6 +1292,22 @@ static int32_t translateForbidGroupByFunc(STranslateContext* pCxt, SFunctionNode return TSDB_CODE_SUCCESS; } +static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { + if (!fmIsRepeatScanFunc(pFunc->funcId)) { + return TSDB_CODE_SUCCESS; + } + if (isSelectStmt(pCxt->pCurrStmt) && NULL != ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable) { + SNode* pTable = ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable; + if (QUERY_NODE_REAL_TABLE == nodeType(pTable) && + (TSDB_CHILD_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType || + TSDB_NORMAL_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType)) { + return TSDB_CODE_SUCCESS; + } + } + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, + "%s is only supported in single table query", pFunc->functionName); +} + static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) { if (NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == nodeType(pCurrStmt)) { SSelectStmt* pSelect = (SSelectStmt*)pCurrStmt; @@ -1370,6 +1430,9 @@ static int32_t translateNoramlFunction(STranslateContext* pCxt, SFunctionNode* p if (TSDB_CODE_SUCCESS == code) { code = translateForbidGroupByFunc(pCxt, pFunc); } + if (TSDB_CODE_SUCCESS == code) { + code = translateRepeatScanFunc(pCxt, pFunc); + } if (TSDB_CODE_SUCCESS == code) { setFuncClassification(pCxt->pCurrStmt, pFunc); } @@ -2760,6 +2823,7 @@ static int32_t translateDeleteWhere(STranslateContext* pCxt, SDeleteStmt* pDelet } static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) { + pCxt->pCurrStmt = (SNode*)pDelete; int32_t code = translateFrom(pCxt, pDelete->pFromTable); if (TSDB_CODE_SUCCESS == code) { code = translateDeleteWhere(pCxt, pDelete); @@ -3110,6 +3174,7 @@ static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt, pReq->buffer = pStmt->pOptions->buffer; pReq->pageSize = -1; pReq->pages = pStmt->pOptions->pages; + pReq->lastRowMem = -1; pReq->daysPerFile = -1; pReq->daysToKeep0 = pStmt->pOptions->keep[0]; pReq->daysToKeep1 = pStmt->pOptions->keep[1]; @@ -3187,13 +3252,30 @@ static bool validRollupFunc(const char* pFunc) { return false; } -static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs) { +static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs, bool createStable, + SDbCfgInfo* 
pDbCfg) { if (NULL == pFuncs) { + if (NULL != pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "To create a super table in a database with the retensions parameter configured, " + "the 'ROLLUP' option must be present"); + } return TSDB_CODE_SUCCESS; } - if (1 != LIST_LENGTH(pFuncs) || !validRollupFunc(((SFunctionNode*)nodesListGetNode(pFuncs, 0))->functionName)) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION); + if (!createStable || NULL == pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "Invalid option rollup: Only supported for create super table in databases " + "configured with the 'RETENTIONS' option"); + } + if (1 != LIST_LENGTH(pFuncs)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION, + "Invalid option rollup: only one function is allowed"); + } + const char* pFunc = ((SFunctionNode*)nodesListGetNode(pFuncs, 0))->functionName; + if (!validRollupFunc(pFunc)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION, + "Invalid option rollup: %s function is not supported", pFunc); } return TSDB_CODE_SUCCESS; } @@ -3219,8 +3301,8 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); } if (TSDB_CODE_SUCCESS == code) { - if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_BINARY_LEN) || - (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_NCHAR_LEN)) { + if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_BINARY_LEN) || + (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_NCHAR_LEN)) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } } @@ -3241,11 +3323,11 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN return code; } -static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, SNodeList* pCols) { +static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, int32_t ntags, SNodeList* pCols) { int32_t ncols = LIST_LENGTH(pCols); if (ncols < TSDB_MIN_COLUMNS) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); - } else if (ncols > TSDB_MAX_COLUMNS) { + } else if (ncols + ntags > TSDB_MAX_COLUMNS) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS); } @@ -3301,7 +3383,7 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt int32_t code = checkTableTagsSchema(pCxt, pHash, pStmt->pTags); if (TSDB_CODE_SUCCESS == code) { - code = checkTableColsSchema(pCxt, pHash, pStmt->pCols); + code = checkTableColsSchema(pCxt, pHash, LIST_LENGTH(pStmt->pTags), pStmt->pCols); } taosHashCleanup(pHash); @@ -3330,11 +3412,18 @@ static int32_t getTableMaxDelayOption(STranslateContext* pCxt, SValueNode* pVal, pMaxDelay); } -static int32_t checkTableMaxDelayOption(STranslateContext* pCxt, STableOptions* pOptions) { +static int32_t checkTableMaxDelayOption(STranslateContext* pCxt, STableOptions* pOptions, bool createStable, + SDbCfgInfo* pDbCfg) { if (NULL == pOptions->pMaxDelay) { return TSDB_CODE_SUCCESS; } + if (!createStable || NULL == pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "Invalid option 
maxdelay: Only supported for create super table in databases " + "configured with the 'RETENTIONS' option"); + } + if (LIST_LENGTH(pOptions->pMaxDelay) > 2) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "maxdelay"); } @@ -3353,11 +3442,18 @@ static int32_t getTableWatermarkOption(STranslateContext* pCxt, SValueNode* pVal pMaxDelay); } -static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* pOptions) { +static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* pOptions, bool createStable, + SDbCfgInfo* pDbCfg) { if (NULL == pOptions->pWatermark) { return TSDB_CODE_SUCCESS; } + if (!createStable || NULL == pDbCfg->pRetensions) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "Invalid option watermark: Only supported for create super table in databases " + "configured with the 'RETENTIONS' option"); + } + if (LIST_LENGTH(pOptions->pWatermark) > 2) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "watermark"); } @@ -3371,13 +3467,20 @@ static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* return code; } -static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) { - int32_t code = checkTableMaxDelayOption(pCxt, pStmt->pOptions); - if (TSDB_CODE_SUCCESS == code) { - code = checkTableWatermarkOption(pCxt, pStmt->pOptions); +static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt, bool createStable) { + int32_t code = TSDB_CODE_SUCCESS; + SDbCfgInfo dbCfg = {0}; + if (createStable) { + code = getDBCfg(pCxt, pStmt->dbName, &dbCfg); } if (TSDB_CODE_SUCCESS == code) { - code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs); + code = checkTableMaxDelayOption(pCxt, pStmt->pOptions, createStable, &dbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = checkTableWatermarkOption(pCxt, pStmt->pOptions, createStable, &dbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs, createStable, &dbCfg); } if (TSDB_CODE_SUCCESS == code) { code = checkTableSmaOption(pCxt, pStmt); @@ -3656,7 +3759,7 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm static int32_t translateCreateSuperTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) { SMCreateStbReq createReq = {0}; - int32_t code = checkCreateTable(pCxt, pStmt); + int32_t code = checkCreateTable(pCxt, pStmt, true); if (TSDB_CODE_SUCCESS == code) { code = buildCreateStbReq(pCxt, pStmt, &createReq); } @@ -3743,12 +3846,24 @@ static int32_t buildAlterSuperTableReq(STranslateContext* pCxt, SAlterTableStmt* return TSDB_CODE_SUCCESS; } -static SSchema* getColSchema(STableMeta* pTableMeta, const char* pTagName) { +static SSchema* getColSchema(STableMeta* pTableMeta, const char* pColName) { int32_t numOfFields = getNumOfTags(pTableMeta) + getNumOfColumns(pTableMeta); for (int32_t i = 0; i < numOfFields; ++i) { - SSchema* pTagSchema = pTableMeta->schema + i; - if (0 == strcmp(pTagName, pTagSchema->name)) { - return pTagSchema; + SSchema* pSchema = pTableMeta->schema + i; + if (0 == strcmp(pColName, pSchema->name)) { + return pSchema; + } + } + return NULL; +} + +static SSchema* getTagSchema(STableMeta* pTableMeta, const char* pTagName) { + int32_t numOfTags = getNumOfTags(pTableMeta); + SSchema* pTagsSchema = getTableTagSchema(pTableMeta); + for (int32_t i = 0; i < numOfTags; ++i) { + SSchema* pSchema = pTagsSchema + i; + if (0 == 
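Note: checkTableRollupOption, checkTableMaxDelayOption and checkTableWatermarkOption above now receive the target database's config and only accept ROLLUP/MAX_DELAY/WATERMARK when a super table is being created in a database that has RETENTIONS configured, and conversely require ROLLUP when it is. A condensed sketch of that decision table for the ROLLUP case, with plain booleans standing in for STableOptions/SDbCfgInfo (the function-count and function-name checks are omitted):

#include <stdio.h>

/* Returns NULL when the combination is allowed, otherwise a short reason. */
static const char *checkRollupOption(int hasRollup, int createStable, int dbHasRetentions) {
  if (!hasRollup) {
    return dbHasRetentions ? "ROLLUP required when the database has RETENTIONS" : NULL;
  }
  if (!createStable || !dbHasRetentions) {
    return "ROLLUP only valid for super tables in a RETENTIONS database";
  }
  return NULL;
}

int main(void) {
  printf("%s\n", checkRollupOption(1, 1, 1) ? "rejected" : "ok");  /* ok */
  printf("%s\n", checkRollupOption(1, 0, 1) ? "rejected" : "ok");  /* rejected */
  printf("%s\n", checkRollupOption(0, 1, 1) ? "rejected" : "ok");  /* rejected: ROLLUP required */
  return 0;
}
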
strcmp(pTagName, pSchema->name)) { + return pSchema; } } return NULL; @@ -3756,22 +3871,48 @@ static SSchema* getColSchema(STableMeta* pTableMeta, const char* pTagName) { static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) { if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, + "Set tag value only available for child table"); + } + + if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE); } + + if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); + } + + if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON); + } + + STableMeta* pTableMeta = NULL; + int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + + SSchema* pTagsSchema = getTableTagSchema(pTableMeta); + if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON && + (pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG || + pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_TAG_BYTES)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); + } + if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) { - STableMeta* pTableMeta = NULL; - int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta); - if (TSDB_CODE_SUCCESS == code) { - SSchema* pSchema = getColSchema(pTableMeta, pStmt->colName); - if (NULL == pSchema) { - code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pStmt->colName); - } else if (!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type || - pSchema->bytes >= calcTypeBytes(pStmt->dataType)) { - code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL); - } + if (TSDB_SUPER_TABLE != pTableMeta->tableType) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Table is not super table"); + } + + SSchema* pSchema = getColSchema(pTableMeta, pStmt->colName); + if (NULL == pSchema) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pStmt->colName); + } else if (!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type || + pSchema->bytes >= calcTypeBytes(pStmt->dataType)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL); } - return code; } return TSDB_CODE_SUCCESS; } @@ -4198,6 +4339,39 @@ static void getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) { tNameGetFullDbName(&name, pDbFName); } +static int32_t addWstartTsToCreateStreamQuery(SNode* pStmt) { + SSelectStmt* pSelect = (SSelectStmt*)pStmt; + SNode* pProj = nodesListGetNode(pSelect->pProjectionList, 0); + if (NULL == pSelect->pWindow || + (QUERY_NODE_FUNCTION == nodeType(pProj) && 0 == strcmp("_wstartts", ((SFunctionNode*)pProj)->functionName))) { + return TSDB_CODE_SUCCESS; + } + SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION); + if (NULL == pFunc) { + return TSDB_CODE_OUT_OF_MEMORY; + } + 
strcpy(pFunc->functionName, "_wstartts"); + strcpy(pFunc->node.aliasName, pFunc->functionName); + int32_t code = nodesListPushFront(pSelect->pProjectionList, (SNode*)pFunc); + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode((SNode*)pFunc); + } + return code; +} + +static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SCMCreateStreamReq* pReq) { + pCxt->createStream = true; + int32_t code = addWstartTsToCreateStreamQuery(pStmt); + if (TSDB_CODE_SUCCESS == code) { + code = translateQuery(pCxt, pStmt); + } + if (TSDB_CODE_SUCCESS == code) { + getSourceDatabase(pStmt, pCxt->pParseCxt->acctId, pReq->sourceDB); + code = nodesNodeToString(pStmt, false, &pReq->ast, NULL); + } + return code; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -4211,13 +4385,7 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* tNameExtractFullName(&name, pReq->targetStbFullName); } - pCxt->createStream = true; - int32_t code = translateQuery(pCxt, pStmt->pQuery); - if (TSDB_CODE_SUCCESS == code) { - getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB); - code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); - } - + int32_t code = buildCreateStreamQuery(pCxt, pStmt->pQuery, pReq); if (TSDB_CODE_SUCCESS == code) { pReq->sql = strdup(pCxt->pParseCxt->pSql); if (NULL == pReq->sql) { @@ -4455,6 +4623,7 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) { code = translateDropSuperTable(pCxt, (SDropSuperTableStmt*)pNode); break; case QUERY_NODE_ALTER_TABLE_STMT: + case QUERY_NODE_ALTER_SUPER_TABLE_STMT: code = translateAlterSuperTable(pCxt, (SAlterTableStmt*)pNode); break; case QUERY_NODE_CREATE_USER_STMT: @@ -5165,7 +5334,7 @@ static int32_t buildCreateTableDataBlock(int32_t acctId, const SCreateTableStmt* static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) { SCreateTableStmt* pStmt = (SCreateTableStmt*)pQuery->pRoot; - int32_t code = checkCreateTable(pCxt, pStmt); + int32_t code = checkCreateTable(pCxt, pStmt, false); SVgroupInfo info = {0}; if (TSDB_CODE_SUCCESS == code) { code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info); @@ -5222,30 +5391,62 @@ static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, S } } -static int32_t createValueFromFunction(STranslateContext* pCxt, SFunctionNode* pFunc, SValueNode** pVal) { - int32_t code = getFuncInfo(pCxt, pFunc); - if (TSDB_CODE_SUCCESS == code) { - code = scalarCalculateConstants((SNode*)pFunc, (SNode**)pVal); - } - return code; -} - static SDataType schemaToDataType(uint8_t precision, SSchema* pSchema) { SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes, .precision = precision, .scale = 0}; return dt; } -static int32_t translateTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode, - SValueNode** pVal) { - if (QUERY_NODE_FUNCTION == nodeType(pNode)) { - return createValueFromFunction(pCxt, (SFunctionNode*)pNode, pVal); - } else if (QUERY_NODE_VALUE == nodeType(pNode)) { - return (DEAL_RES_ERROR == translateValueImpl(pCxt, (SValueNode*)pNode, schemaToDataType(precision, pSchema)) - ? 
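Note: addWstartTsToCreateStreamQuery above pushes a synthetic _wstartts function to the front of the projection list when a stream query has a window but does not already select the window start as its first projection. A stripped-down sketch of that "prepend if missing" step over an array of projection names; the real code works on SNodeList/SFunctionNode via nodesListPushFront:

#include <stdio.h>
#include <string.h>

#define MAX_PROJ 16

/* Prepend "_wstartts" unless there is no window or it is already the first projection. */
static int addWstartTs(int hasWindow, const char *proj[], int *n) {
  if (!hasWindow || (*n > 0 && 0 == strcmp(proj[0], "_wstartts"))) return 0;
  if (*n >= MAX_PROJ) return -1;
  for (int i = *n; i > 0; --i) proj[i] = proj[i - 1];  /* shift right: nodesListPushFront */
  proj[0] = "_wstartts";
  ++*n;
  return 0;
}

int main(void) {
  const char *proj[MAX_PROJ] = {"avg(c1)"};
  int n = 1;
  addWstartTs(1, proj, &n);
  for (int i = 0; i < n; ++i) printf("%s\n", proj[i]);  /* _wstartts, avg(c1) */
  return 0;
}
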
pCxt->errCode - : TSDB_CODE_SUCCESS); +static int32_t createCastFuncForTag(STranslateContext* pCxt, SNode* pNode, SDataType dt, SNode** pCast) { + SNode* pExpr = nodesCloneNode(pNode); + if (NULL == pExpr) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t code = translateExpr(pCxt, &pExpr); + if (TSDB_CODE_SUCCESS == code) { + code = createCastFunc(pCxt, pExpr, dt, pCast); + } + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode(pExpr); + } + return code; +} + +static int32_t createTagValFromExpr(STranslateContext* pCxt, SDataType targetDt, SNode* pNode, SValueNode** pVal) { + SNode* pCast = NULL; + int32_t code = createCastFuncForTag(pCxt, pNode, targetDt, &pCast); + SNode* pNew = NULL; + if (TSDB_CODE_SUCCESS == code) { + code = scalarCalculateConstants(pCast, &pNew); + } + if (TSDB_CODE_SUCCESS == code) { + pCast = pNew; + if (QUERY_NODE_VALUE != nodeType(pCast)) { + code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)pNode)->aliasName); + } + } + + if (TSDB_CODE_SUCCESS == code) { + *pVal = (SValueNode*)pCast; } else { - // return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)pNode)->aliasName); - return TSDB_CODE_FAILED; + nodesDestroyNode(pCast); + } + return code; +} + +static int32_t createTagValFromVal(STranslateContext* pCxt, SDataType targetDt, SNode* pNode, SValueNode** pVal) { + *pVal = (SValueNode*)nodesCloneNode(pNode); + if (NULL == *pVal) { + return TSDB_CODE_OUT_OF_MEMORY; + } + return DEAL_RES_ERROR == translateValueImpl(pCxt, *pVal, targetDt, true) ? pCxt->errCode : TSDB_CODE_SUCCESS; +} + +static int32_t createTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode, + SValueNode** pVal) { + if (QUERY_NODE_VALUE == nodeType(pNode)) { + return createTagValFromVal(pCxt, schemaToDataType(precision, pSchema), pNode, pVal); + } else { + return createTagValFromExpr(pCxt, schemaToDataType(precision, pSchema), pNode, pVal); } } @@ -5285,56 +5486,45 @@ static int32_t buildKVRowForBindTags(STranslateContext* pCxt, SCreateSubTableCla if (NULL == pTagArray) { return TSDB_CODE_OUT_OF_MEMORY; } - int32_t code = TSDB_CODE_SUCCESS; - SSchema* pTagSchema = getTableTagSchema(pSuperTableMeta); - SNode * pTag = NULL, *pNode = NULL; - bool isJson = false; + + int32_t code = TSDB_CODE_SUCCESS; + + bool isJson = false; + SNodeList* pVals = NULL; + SNode * pTag = NULL, *pNode = NULL; FORBOTH(pTag, pStmt->pSpecificTags, pNode, pStmt->pValsOfTags) { SColumnNode* pCol = (SColumnNode*)pTag; - SSchema* pSchema = NULL; - for (int32_t i = 0; i < numOfTags; ++i) { - if (0 == strcmp(pCol->colName, pTagSchema[i].name)) { - pSchema = pTagSchema + i; - break; - } - } + SSchema* pSchema = getTagSchema(pSuperTableMeta, pCol->colName); if (NULL == pSchema) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, pCol->colName); - goto end; } SValueNode* pVal = NULL; - code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal); - if (TSDB_CODE_SUCCESS != code) { - goto end; + if (TSDB_CODE_SUCCESS == code) { + code = createTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal); } - - if (NULL == pVal) { - pVal = (SValueNode*)pNode; - } else { - REPLACE_LIST2_NODE(pVal); - } - - if (pSchema->type == TSDB_DATA_TYPE_JSON) { - isJson = true; - code = buildJsonTagVal(pCxt, pSchema, pVal, pTagArray, ppTag); - } else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) { - code = buildNormalTagVal(pCxt, pSchema, pVal, pTagArray); - } - } - - if 
(!isJson) code = tTagNew(pTagArray, 1, false, ppTag); - -end: - if (isJson) { - for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) { - STagVal* p = (STagVal*)taosArrayGet(pTagArray, i); - if (IS_VAR_DATA_TYPE(p->type)) { - taosMemoryFree(p->pData); + if (TSDB_CODE_SUCCESS == code) { + if (pSchema->type == TSDB_DATA_TYPE_JSON) { + isJson = true; + code = buildJsonTagVal(pCxt, pSchema, pVal, pTagArray, ppTag); + } else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) { + code = buildNormalTagVal(pCxt, pSchema, pVal, pTagArray); } } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeAppend(&pVals, (SNode*)pVal); + } + if (TSDB_CODE_SUCCESS != code) { + break; + } } + + if (TSDB_CODE_SUCCESS == code && !isJson) { + code = tTagNew(pTagArray, 1, false, ppTag); + } + + nodesDestroyList(pVals); taosArrayDestroy(pTagArray); - return TSDB_CODE_SUCCESS; + return code; } static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClause* pStmt, STableMeta* pSuperTableMeta, @@ -5343,64 +5533,52 @@ static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClau return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TAGS_NOT_MATCHED); } - SSchema* pTagSchemas = getTableTagSchema(pSuperTableMeta); - SNode* pNode; - int32_t code = TSDB_CODE_SUCCESS; - int32_t index = 0; - SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal)); - if (!pTagArray) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_TSC_OUT_OF_MEMORY); + SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal)); + if (NULL == pTagArray) { + return TSDB_CODE_OUT_OF_MEMORY; } - bool isJson = false; + int32_t code = TSDB_CODE_SUCCESS; + + bool isJson = false; + int32_t index = 0; + SSchema* pTagSchemas = getTableTagSchema(pSuperTableMeta); + SNodeList* pVals = NULL; + SNode* pNode; FOREACH(pNode, pStmt->pValsOfTags) { SValueNode* pVal = NULL; SSchema* pTagSchema = pTagSchemas + index; - code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema, pNode, &pVal); + code = createTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema, pNode, &pVal); + if (TSDB_CODE_SUCCESS == code) { + if (pTagSchema->type == TSDB_DATA_TYPE_JSON) { + isJson = true; + code = buildJsonTagVal(pCxt, pTagSchema, pVal, pTagArray, ppTag); + } else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL && !pVal->isNull) { + char* tmpVal = nodesGetValueFromNode(pVal); + STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type}; + if (IS_VAR_DATA_TYPE(pTagSchema->type)) { + val.pData = varDataVal(tmpVal); + val.nData = varDataLen(tmpVal); + } else { + memcpy(&val.i64, tmpVal, pTagSchema->bytes); + } + taosArrayPush(pTagArray, &val); + } + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeAppend(&pVals, (SNode*)pVal); + } if (TSDB_CODE_SUCCESS != code) { - goto end; - } - if (NULL == pVal) { - pVal = (SValueNode*)pNode; - } else { - REPLACE_NODE(pVal); - } - if (pTagSchema->type == TSDB_DATA_TYPE_JSON) { - if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { - code = buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal); - goto end; - } - - isJson = true; - code = parseJsontoTagData(pVal->literal, pTagArray, ppTag, &pCxt->msgBuf); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - } else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL && !pVal->isNull) { - char* tmpVal = nodesGetValueFromNode(pVal); - STagVal val = {.cid = pTagSchema->colId, 
.type = pTagSchema->type}; - if (IS_VAR_DATA_TYPE(pTagSchema->type)) { - val.pData = varDataVal(tmpVal); - val.nData = varDataLen(tmpVal); - } else { - memcpy(&val.i64, tmpVal, pTagSchema->bytes); - } - taosArrayPush(pTagArray, &val); + break; } ++index; } - if (!isJson) code = tTagNew(pTagArray, 1, false, ppTag); -end: - if (isJson) { - for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) { - STagVal* p = (STagVal*)taosArrayGet(pTagArray, i); - if (IS_VAR_DATA_TYPE(p->type)) { - taosMemoryFree(p->pData); - } - } + if (TSDB_CODE_SUCCESS == code && !isJson) { + code = tTagNew(pTagArray, 1, false, ppTag); } + nodesDestroyList(pVals); taosArrayDestroy(pTagArray); return code; } @@ -5644,7 +5822,7 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS } SDataType targetDt = schemaToDataType(pTableMeta->tableInfo.precision, pSchema); - if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt)) { + if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt, true)) { return pCxt->errCode; } @@ -5898,15 +6076,6 @@ static int32_t rewriteAlterTableImpl(STranslateContext* pCxt, SAlterTableStmt* p } if (TSDB_SUPER_TABLE == pTableMeta->tableType) { - SSchema* pTagsSchema = getTableTagSchema(pTableMeta); - if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON && - (pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG || - pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_TAG_BYTES)) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); - } - if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE); - } return TSDB_CODE_SUCCESS; } else if (TSDB_CHILD_TABLE != pTableMeta->tableType && TSDB_NORMAL_TABLE != pTableMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE); @@ -5929,10 +6098,6 @@ static int32_t rewriteAlterTableImpl(STranslateContext* pCxt, SAlterTableStmt* p static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) { SAlterTableStmt* pStmt = (SAlterTableStmt*)pQuery->pRoot; - if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); - } - if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON); } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 168e51a2bb..69917ad7f9 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -215,13 +215,21 @@ int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...) { return errCode; } +int32_t generateSyntaxErrMsgExt(SMsgBuf* pBuf, int32_t errCode, const char* pFormat, ...) 
{ + va_list vArgList; + va_start(vArgList, pFormat); + vsnprintf(pBuf->buf, pBuf->len, pFormat, vArgList); + va_end(vArgList); + return errCode; +} + int32_t buildInvalidOperationMsg(SMsgBuf* pBuf, const char* msg) { strncpy(pBuf->buf, msg, pBuf->len); return TSDB_CODE_TSC_INVALID_OPERATION; } int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr) { - if(pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; + if (pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; const char* msgFormat1 = "syntax error near \'%s\'"; const char* msgFormat2 = "syntax error near \'%s\' (%s)"; const char* msgFormat3 = "%s"; diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index e4317d97b0..fe6e36bf5b 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -216,609 +216,635 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2471) +#define YY_ACTTAB_COUNT (2609) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 422, 1670, 423, 1475, 294, 1744, 1438, 430, 317, 423, - /* 10 */ 1475, 1563, 39, 37, 1505, 338, 1741, 69, 1619, 1621, - /* 20 */ 326, 1441, 1238, 1744, 1900, 1565, 334, 525, 373, 379, - /* 30 */ 115, 1674, 1775, 1313, 1741, 1236, 1741, 1899, 1552, 1569, - /* 40 */ 518, 1897, 101, 1737, 1743, 100, 99, 98, 97, 96, - /* 50 */ 95, 94, 93, 92, 570, 119, 1308, 987, 1757, 14, - /* 60 */ 61, 1737, 1743, 1737, 1743, 1244, 297, 343, 1900, 39, - /* 70 */ 37, 1376, 570, 439, 570, 474, 473, 326, 517, 1238, - /* 80 */ 472, 157, 1, 116, 469, 1897, 1775, 468, 467, 466, - /* 90 */ 1313, 1004, 1236, 1003, 528, 117, 439, 991, 992, 1727, - /* 100 */ 479, 548, 1380, 310, 649, 566, 458, 1900, 1262, 527, - /* 110 */ 153, 1842, 1843, 1308, 1847, 489, 14, 552, 1315, 1316, - /* 120 */ 157, 1005, 1244, 1550, 1897, 329, 412, 1788, 1671, 202, - /* 130 */ 88, 1758, 551, 1760, 1761, 547, 60, 570, 60, 2, - /* 140 */ 1834, 525, 210, 482, 319, 1830, 152, 476, 30, 240, - /* 150 */ 652, 311, 201, 309, 308, 140, 462, 1452, 156, 421, - /* 160 */ 464, 649, 425, 1239, 262, 1237, 1860, 43, 60, 119, - /* 170 */ 73, 330, 171, 170, 566, 1315, 1316, 55, 149, 138, - /* 180 */ 54, 604, 463, 642, 638, 634, 630, 260, 1576, 1242, - /* 190 */ 1243, 519, 1291, 1292, 1294, 1295, 1296, 1297, 1298, 544, - /* 200 */ 568, 1306, 1307, 1309, 1310, 1311, 1312, 1314, 1317, 117, - /* 210 */ 1263, 1669, 85, 1323, 294, 254, 372, 167, 371, 1262, - /* 220 */ 1239, 160, 1237, 427, 154, 1842, 1843, 1900, 1847, 1260, - /* 230 */ 1745, 33, 32, 504, 1263, 40, 38, 36, 35, 34, - /* 240 */ 158, 1741, 67, 525, 1897, 66, 1242, 1243, 563, 1291, - /* 250 */ 1292, 1294, 1295, 1296, 1297, 1298, 544, 568, 1306, 1307, - /* 260 */ 1309, 1310, 1311, 1312, 1314, 1317, 39, 37, 1737, 1743, - /* 270 */ 488, 119, 490, 1900, 326, 160, 1238, 160, 69, 570, - /* 280 */ 212, 1293, 300, 486, 60, 484, 157, 1313, 101, 1236, - /* 290 */ 1897, 100, 99, 98, 97, 96, 95, 94, 93, 92, - /* 300 */ 1570, 1211, 151, 205, 1900, 552, 1276, 160, 1136, 1137, - /* 310 */ 1308, 117, 1900, 14, 1335, 1613, 1672, 1898, 1262, 1244, - /* 320 */ 602, 1897, 566, 39, 37, 157, 155, 1842, 1843, 1897, - /* 330 */ 1847, 326, 604, 1238, 1502, 1004, 2, 1003, 1261, 129, - /* 340 */ 128, 599, 598, 597, 1313, 78, 1236, 1094, 593, 592, + /* 0 */ 422, 1563, 423, 1475, 1900, 430, 1438, 423, 1475, 69, + /* 10 */ 69, 379, 40, 38, 1744, 141, 1775, 1899, 541, 1531, + /* 20 */ 333, 1897, 1238, 115, 534, 1741, 41, 39, 37, 36, + /* 30 
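Note: generateSyntaxErrMsgExt above is a thin varargs wrapper that formats an arbitrary message into the parser's message buffer and passes the error code through; most of the new, more specific diagnostics in parTranslater.c go through it. The remainder of this sql.c hunk appears to be the Lemon-generated parser tables rebuilt after the sql.y changes (YY_ACTTAB_COUNT grows from 2471 to 2609) and is not hand-edited. A self-contained version of the same formatting pattern follows; the MsgBuf type here is a local stand-in, not the TDengine SMsgBuf definition:

#include <stdarg.h>
#include <stdio.h>

typedef struct { char *buf; int len; } MsgBuf;  /* stand-in for SMsgBuf */

static int formatErrMsg(MsgBuf *pBuf, int errCode, const char *fmt, ...) {
  va_list args;
  va_start(args, fmt);
  vsnprintf(pBuf->buf, (size_t)pBuf->len, fmt, args);
  va_end(args);
  return errCode;  /* the code is returned unchanged, only the text is filled in */
}

int main(void) {
  char msg[128];
  MsgBuf buf = {msg, (int)sizeof(msg)};
  int code = formatErrMsg(&buf, -1 /* arbitrary demo code */,
                          "Invalid option rollup: %s function is not supported", "count");
  printf("code=%d msg=%s\n", code, msg);
  return 0;
}
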
*/ 35, 1570, 1569, 1313, 101, 1236, 1565, 100, 99, 98, + /* 40 */ 97, 96, 95, 94, 93, 92, 120, 1741, 297, 544, + /* 50 */ 1745, 1737, 1743, 322, 338, 1757, 1308, 1619, 1621, 14, + /* 60 */ 544, 1741, 533, 562, 439, 1244, 343, 546, 1263, 40, + /* 70 */ 38, 1376, 541, 1737, 1743, 1552, 1900, 333, 140, 1238, + /* 80 */ 1452, 167, 1, 1775, 544, 562, 118, 1737, 1743, 158, + /* 90 */ 1313, 569, 1236, 1897, 373, 464, 1727, 1900, 568, 562, + /* 100 */ 120, 245, 1842, 540, 649, 539, 67, 1900, 1900, 66, + /* 110 */ 1898, 44, 546, 1308, 1897, 1056, 14, 463, 1315, 1316, + /* 120 */ 157, 159, 1244, 1788, 1897, 1897, 1263, 87, 1758, 571, + /* 130 */ 1760, 1761, 567, 439, 562, 1900, 60, 1834, 1380, 2, + /* 140 */ 118, 300, 1830, 541, 1262, 1058, 43, 1262, 157, 151, + /* 150 */ 652, 210, 1897, 1900, 543, 153, 1842, 1843, 1004, 1847, + /* 160 */ 1003, 649, 1613, 1239, 262, 1237, 159, 31, 255, 987, + /* 170 */ 1897, 120, 535, 458, 320, 1315, 1316, 60, 149, 73, + /* 180 */ 604, 203, 138, 642, 638, 634, 630, 260, 1005, 1242, + /* 190 */ 1243, 1576, 1291, 1292, 1294, 1295, 1296, 1297, 1298, 564, + /* 200 */ 560, 1306, 1307, 1309, 1310, 1311, 1312, 1314, 1317, 991, + /* 210 */ 992, 118, 85, 1323, 421, 225, 372, 425, 371, 1262, + /* 220 */ 1239, 160, 1237, 412, 1136, 1137, 154, 1842, 1843, 1261, + /* 230 */ 1847, 34, 33, 310, 505, 41, 39, 37, 36, 35, + /* 240 */ 71, 299, 319, 541, 507, 1671, 1242, 1243, 514, 1291, + /* 250 */ 1292, 1294, 1295, 1296, 1297, 1298, 564, 560, 1306, 1307, + /* 260 */ 1309, 1310, 1311, 1312, 1314, 1317, 40, 38, 1440, 171, + /* 270 */ 170, 120, 1626, 1674, 333, 160, 1238, 216, 217, 321, + /* 280 */ 212, 311, 301, 309, 308, 160, 462, 1313, 1624, 1236, + /* 290 */ 464, 530, 110, 109, 108, 107, 106, 105, 104, 103, + /* 300 */ 102, 1211, 1463, 205, 517, 429, 1276, 517, 425, 517, + /* 310 */ 1308, 118, 463, 14, 1335, 111, 160, 1293, 111, 1244, + /* 320 */ 162, 61, 460, 40, 38, 465, 155, 1842, 1843, 60, + /* 330 */ 1847, 333, 1574, 1238, 1502, 1574, 2, 1574, 299, 427, + /* 340 */ 1462, 507, 1244, 1727, 1313, 1260, 1236, 1094, 593, 592, /* 350 */ 591, 1098, 590, 1100, 1101, 589, 1103, 586, 649, 1109, - /* 360 */ 583, 1111, 1112, 580, 577, 1005, 1567, 1308, 1336, 36, - /* 370 */ 35, 34, 1315, 1316, 1664, 1463, 1244, 40, 38, 36, - /* 380 */ 35, 34, 33, 32, 42, 169, 40, 38, 36, 35, - /* 390 */ 34, 1341, 1293, 8, 541, 626, 625, 624, 341, 596, + /* 360 */ 583, 1111, 1112, 580, 577, 1550, 517, 1308, 1336, 536, + /* 370 */ 531, 600, 1315, 1316, 1617, 1461, 1244, 377, 37, 36, + /* 380 */ 35, 1727, 34, 33, 1620, 1621, 41, 39, 37, 36, + /* 390 */ 35, 1341, 1293, 8, 1574, 626, 625, 624, 341, 60, /* 400 */ 623, 622, 621, 121, 616, 615, 614, 613, 612, 611, - /* 410 */ 610, 609, 131, 605, 141, 649, 1727, 1239, 1531, 1237, - /* 420 */ 33, 32, 1397, 160, 40, 38, 36, 35, 34, 1315, - /* 430 */ 1316, 1551, 29, 324, 1330, 1331, 1332, 1333, 1334, 1338, - /* 440 */ 1339, 1340, 600, 1242, 1243, 1617, 1291, 1292, 1294, 1295, - /* 450 */ 1296, 1297, 1298, 544, 568, 1306, 1307, 1309, 1310, 1311, - /* 460 */ 1312, 1314, 1317, 511, 1395, 1396, 1398, 1399, 1056, 567, - /* 470 */ 474, 473, 138, 1337, 1239, 472, 1237, 1264, 116, 469, - /* 480 */ 111, 1577, 468, 467, 466, 33, 32, 460, 1244, 40, - /* 490 */ 38, 36, 35, 34, 607, 1407, 1342, 1574, 1058, 1462, - /* 500 */ 1242, 1243, 203, 1291, 1292, 1294, 1295, 1296, 1297, 1298, - /* 510 */ 544, 568, 1306, 1307, 1309, 1310, 1311, 1312, 1314, 1317, - /* 520 */ 39, 37, 1318, 160, 1327, 602, 1620, 1621, 326, 1433, - /* 530 */ 1238, 567, 160, 471, 470, 189, 300, 27, 245, 246, - /* 
540 */ 1727, 1313, 162, 1236, 129, 128, 599, 598, 597, 144, - /* 550 */ 1559, 525, 567, 1626, 456, 452, 448, 444, 188, 1574, - /* 560 */ 331, 71, 305, 111, 1308, 556, 1265, 336, 1335, 1624, - /* 570 */ 465, 11, 10, 1244, 339, 138, 497, 39, 37, 119, - /* 580 */ 1574, 1561, 138, 70, 1576, 326, 186, 1238, 33, 32, - /* 590 */ 9, 1576, 40, 38, 36, 35, 34, 84, 1313, 305, - /* 600 */ 1236, 529, 556, 1757, 567, 567, 464, 232, 1626, 1461, - /* 610 */ 120, 1460, 649, 1626, 567, 377, 378, 1432, 1566, 117, - /* 620 */ 337, 1308, 1336, 22, 1625, 382, 1315, 1316, 463, 1624, - /* 630 */ 1244, 1775, 1574, 1574, 230, 1842, 524, 1262, 523, 549, - /* 640 */ 1459, 1900, 1574, 364, 1727, 1341, 548, 9, 185, 178, - /* 650 */ 1727, 183, 1727, 1349, 159, 435, 33, 32, 1897, 514, - /* 660 */ 40, 38, 36, 35, 34, 366, 362, 429, 58, 649, - /* 670 */ 425, 1239, 1788, 1237, 176, 142, 1758, 551, 1760, 1761, - /* 680 */ 547, 1727, 570, 1315, 1316, 1549, 29, 324, 1330, 1331, - /* 690 */ 1332, 1333, 1334, 1338, 1339, 1340, 213, 1242, 1243, 1458, - /* 700 */ 1291, 1292, 1294, 1295, 1296, 1297, 1298, 544, 568, 1306, - /* 710 */ 1307, 1309, 1310, 1311, 1312, 1314, 1317, 1440, 267, 530, - /* 720 */ 1914, 1604, 567, 567, 567, 620, 618, 1557, 1239, 601, - /* 730 */ 1237, 1387, 1617, 397, 398, 438, 206, 520, 515, 1715, - /* 740 */ 1727, 110, 109, 108, 107, 106, 105, 104, 103, 102, - /* 750 */ 1574, 1574, 1574, 525, 1242, 1243, 543, 1291, 1292, 1294, - /* 760 */ 1295, 1296, 1297, 1298, 544, 568, 1306, 1307, 1309, 1310, - /* 770 */ 1311, 1312, 1314, 1317, 39, 37, 296, 1238, 1260, 602, - /* 780 */ 567, 119, 326, 567, 1238, 405, 352, 1457, 417, 1849, - /* 790 */ 1236, 1571, 1849, 595, 1703, 1313, 1373, 1236, 129, 128, - /* 800 */ 599, 598, 597, 529, 1456, 390, 567, 418, 1574, 392, - /* 810 */ 1293, 1574, 244, 1846, 991, 992, 1845, 498, 1308, 1453, - /* 820 */ 1244, 117, 567, 7, 1455, 1849, 1532, 1244, 1727, 1854, - /* 830 */ 1369, 1454, 194, 502, 1574, 192, 230, 1842, 524, 383, - /* 840 */ 523, 26, 235, 1900, 2, 1727, 533, 33, 32, 1844, - /* 850 */ 1574, 40, 38, 36, 35, 34, 157, 28, 512, 649, - /* 860 */ 1897, 44, 4, 33, 32, 1727, 649, 40, 38, 36, - /* 870 */ 35, 34, 1727, 457, 1219, 1220, 1451, 52, 501, 416, + /* 410 */ 610, 609, 131, 605, 596, 649, 1727, 1239, 1849, 1237, + /* 420 */ 34, 33, 1397, 604, 41, 39, 37, 36, 35, 1315, + /* 430 */ 1316, 1551, 30, 331, 1330, 1331, 1332, 1333, 1334, 1338, + /* 440 */ 1339, 1340, 1846, 1242, 1243, 607, 1291, 1292, 1294, 1295, + /* 450 */ 1296, 1297, 1298, 564, 560, 1306, 1307, 1309, 1310, 1311, + /* 460 */ 1312, 1314, 1317, 527, 1395, 1396, 1398, 1399, 160, 517, + /* 470 */ 474, 473, 471, 470, 1239, 472, 1237, 1559, 116, 469, + /* 480 */ 378, 84, 468, 467, 466, 34, 33, 11, 10, 41, + /* 490 */ 39, 37, 36, 35, 117, 1407, 1004, 1574, 1003, 209, + /* 500 */ 1242, 1243, 1566, 1291, 1292, 1294, 1295, 1296, 1297, 1298, + /* 510 */ 564, 560, 1306, 1307, 1309, 1310, 1311, 1312, 1314, 1317, + /* 520 */ 40, 38, 1318, 336, 479, 602, 1005, 72, 333, 1433, + /* 530 */ 1238, 138, 160, 1505, 504, 189, 301, 1561, 160, 489, + /* 540 */ 1576, 1313, 339, 1236, 129, 128, 599, 598, 597, 144, + /* 550 */ 138, 517, 497, 202, 456, 452, 448, 444, 188, 1576, + /* 560 */ 77, 517, 382, 517, 1308, 620, 618, 482, 1335, 1349, + /* 570 */ 490, 476, 397, 1244, 398, 1900, 201, 40, 38, 1574, + /* 580 */ 1626, 1567, 517, 70, 58, 333, 186, 1238, 157, 1574, + /* 590 */ 9, 1574, 1897, 438, 474, 473, 1625, 364, 1313, 472, + /* 600 */ 1236, 55, 116, 469, 54, 517, 468, 467, 466, 7, + /* 610 */ 1574, 1900, 649, 1670, 517, 294, 1571, 1432, 
517, 366, + /* 620 */ 362, 1308, 1336, 1373, 157, 1703, 1315, 1316, 1897, 498, + /* 630 */ 1244, 34, 33, 1574, 1460, 41, 39, 37, 36, 35, + /* 640 */ 991, 992, 1574, 1626, 1459, 1341, 1574, 9, 185, 178, + /* 650 */ 337, 183, 1849, 1264, 517, 435, 34, 33, 1458, 1624, + /* 660 */ 41, 39, 37, 36, 35, 502, 23, 1664, 517, 649, + /* 670 */ 1669, 1239, 294, 1237, 176, 1727, 1845, 215, 169, 515, + /* 680 */ 1457, 550, 1574, 1315, 1316, 1727, 30, 331, 1330, 1331, + /* 690 */ 1332, 1333, 1334, 1338, 1339, 1340, 1574, 1242, 1243, 1727, + /* 700 */ 1291, 1292, 1294, 1295, 1296, 1297, 1298, 564, 560, 1306, + /* 710 */ 1307, 1309, 1310, 1311, 1312, 1314, 1317, 1441, 34, 33, + /* 720 */ 488, 1727, 41, 39, 37, 36, 35, 1849, 1239, 601, + /* 730 */ 1237, 1387, 1617, 486, 267, 484, 1456, 1604, 101, 1219, + /* 740 */ 1220, 100, 99, 98, 97, 96, 95, 94, 93, 92, + /* 750 */ 608, 1844, 1546, 541, 1242, 1243, 1715, 1291, 1292, 1294, + /* 760 */ 1295, 1296, 1297, 1298, 564, 560, 1306, 1307, 1309, 1310, + /* 770 */ 1311, 1312, 1314, 1317, 40, 38, 296, 1727, 1260, 553, + /* 780 */ 517, 120, 333, 247, 1238, 405, 1453, 1455, 417, 517, + /* 790 */ 1369, 516, 602, 1276, 1454, 1313, 1265, 1236, 1557, 505, + /* 800 */ 256, 1337, 546, 352, 1451, 390, 1262, 418, 1574, 392, + /* 810 */ 1672, 129, 128, 599, 598, 597, 1247, 1574, 1308, 548, + /* 820 */ 517, 118, 1854, 1369, 1342, 45, 4, 1244, 1727, 52, + /* 830 */ 501, 340, 1450, 138, 1449, 1727, 245, 1842, 540, 383, + /* 840 */ 539, 1372, 1577, 1900, 2, 1727, 34, 33, 1574, 528, + /* 850 */ 41, 39, 37, 36, 35, 194, 157, 206, 192, 1448, + /* 860 */ 1897, 1238, 1447, 34, 33, 28, 649, 41, 39, 37, + /* 870 */ 36, 35, 1446, 1727, 1236, 1727, 563, 1246, 551, 416, /* 880 */ 1315, 1316, 411, 410, 409, 408, 407, 404, 403, 402, /* 890 */ 401, 400, 396, 395, 394, 393, 387, 386, 385, 384, - /* 900 */ 1492, 381, 380, 531, 196, 567, 1450, 195, 567, 139, - /* 910 */ 536, 608, 1449, 1546, 273, 491, 564, 1727, 1239, 565, - /* 920 */ 1237, 198, 475, 200, 197, 1239, 199, 1237, 271, 57, - /* 930 */ 11, 10, 56, 1574, 33, 32, 1574, 1448, 40, 38, - /* 940 */ 36, 35, 34, 1757, 1242, 1243, 1447, 1727, 172, 1446, - /* 950 */ 1445, 1242, 1243, 1727, 1291, 1292, 1294, 1295, 1296, 1297, - /* 960 */ 1298, 544, 568, 1306, 1307, 1309, 1310, 1311, 1312, 1314, - /* 970 */ 1317, 1775, 567, 60, 567, 1435, 1436, 209, 1727, 549, - /* 980 */ 1444, 1487, 224, 256, 1727, 340, 548, 1727, 33, 32, - /* 990 */ 1727, 1727, 40, 38, 36, 35, 34, 1443, 1776, 619, - /* 1000 */ 1574, 529, 1574, 477, 1369, 72, 1747, 1247, 1246, 1276, - /* 1010 */ 50, 86, 1788, 217, 1372, 87, 1758, 551, 1760, 1761, - /* 1020 */ 547, 1727, 570, 1485, 1757, 1834, 1476, 33, 32, 299, - /* 1030 */ 1830, 40, 38, 36, 35, 34, 1481, 342, 1727, 367, - /* 1040 */ 1614, 1900, 1749, 534, 1757, 480, 64, 63, 376, 41, - /* 1050 */ 1394, 166, 1775, 219, 159, 41, 83, 370, 1897, 1864, - /* 1060 */ 549, 1028, 229, 526, 41, 1727, 80, 548, 242, 234, - /* 1070 */ 295, 237, 1775, 360, 123, 358, 354, 350, 163, 345, - /* 1080 */ 528, 239, 529, 3, 644, 1727, 5, 548, 126, 1343, - /* 1090 */ 344, 1029, 1260, 1788, 127, 1299, 87, 1758, 551, 1760, - /* 1100 */ 1761, 547, 50, 570, 1187, 347, 1834, 351, 247, 537, - /* 1110 */ 299, 1830, 160, 1788, 559, 306, 88, 1758, 551, 1760, - /* 1120 */ 1761, 547, 1900, 570, 1056, 307, 1834, 1203, 253, 263, - /* 1130 */ 319, 1830, 152, 575, 1087, 157, 1757, 1250, 1249, 1897, - /* 1140 */ 126, 127, 266, 112, 126, 399, 1666, 168, 406, 414, - /* 1150 */ 413, 415, 1861, 419, 1266, 420, 428, 1269, 432, 431, - /* 1160 */ 175, 177, 1268, 433, 1775, 
1270, 434, 1267, 180, 137, - /* 1170 */ 436, 182, 549, 1115, 437, 184, 68, 1727, 440, 548, - /* 1180 */ 1119, 1126, 459, 1124, 130, 187, 461, 91, 1564, 191, - /* 1190 */ 1560, 298, 1708, 193, 132, 133, 1562, 1558, 264, 1757, - /* 1200 */ 134, 204, 135, 492, 493, 1788, 207, 316, 88, 1758, - /* 1210 */ 551, 1760, 1761, 547, 496, 570, 499, 503, 1834, 1265, - /* 1220 */ 211, 513, 319, 1830, 1913, 1875, 1757, 1775, 1865, 508, - /* 1230 */ 555, 510, 1874, 1868, 215, 549, 333, 332, 218, 318, - /* 1240 */ 1727, 516, 548, 6, 509, 522, 1252, 1856, 223, 507, - /* 1250 */ 228, 146, 506, 1369, 1775, 225, 118, 1313, 1264, 1245, - /* 1260 */ 320, 538, 549, 1850, 18, 124, 535, 1727, 1788, 548, - /* 1270 */ 125, 88, 1758, 551, 1760, 1761, 547, 226, 570, 227, - /* 1280 */ 1308, 1834, 553, 554, 1707, 319, 1830, 1913, 1815, 1244, - /* 1290 */ 1676, 1896, 557, 328, 561, 1788, 1891, 1757, 88, 1758, - /* 1300 */ 551, 1760, 1761, 547, 1916, 570, 532, 233, 1834, 539, - /* 1310 */ 236, 560, 319, 1830, 1913, 238, 265, 251, 562, 77, - /* 1320 */ 249, 1575, 79, 1853, 268, 1775, 573, 259, 571, 1547, - /* 1330 */ 1618, 645, 646, 549, 648, 145, 270, 272, 1727, 1721, - /* 1340 */ 548, 289, 291, 290, 1720, 62, 1719, 346, 1716, 348, - /* 1350 */ 349, 51, 1231, 1232, 164, 529, 353, 1714, 355, 356, - /* 1360 */ 357, 1713, 1757, 359, 1712, 361, 1788, 1711, 1710, 280, - /* 1370 */ 1758, 551, 1760, 1761, 547, 363, 570, 365, 1693, 165, - /* 1380 */ 368, 369, 1206, 1205, 1687, 1686, 374, 1253, 375, 1248, - /* 1390 */ 1775, 1685, 1684, 1175, 1659, 1900, 1658, 1657, 549, 65, - /* 1400 */ 1656, 1655, 1654, 1727, 1653, 548, 1652, 388, 159, 389, - /* 1410 */ 1651, 1650, 1897, 1256, 391, 1649, 1648, 1647, 1646, 1645, - /* 1420 */ 529, 1644, 1643, 1642, 568, 1306, 1307, 1309, 1310, 1311, - /* 1430 */ 1312, 1788, 1641, 1757, 280, 1758, 551, 1760, 1761, 547, - /* 1440 */ 1640, 570, 1639, 1638, 1637, 122, 1636, 1635, 1634, 1633, - /* 1450 */ 1632, 1631, 1177, 1630, 1629, 1757, 173, 49, 424, 113, - /* 1460 */ 1900, 1775, 1628, 1627, 1504, 1472, 994, 150, 1471, 549, - /* 1470 */ 114, 993, 426, 157, 1727, 174, 548, 1897, 1701, 1695, - /* 1480 */ 1683, 181, 1682, 1775, 179, 1668, 1553, 1022, 1503, 1501, - /* 1490 */ 441, 549, 1499, 1497, 445, 1495, 1727, 1484, 548, 443, - /* 1500 */ 447, 1483, 1788, 1468, 449, 89, 1758, 551, 1760, 1761, - /* 1510 */ 547, 451, 570, 442, 446, 1834, 453, 450, 1757, 1833, - /* 1520 */ 1830, 454, 455, 1555, 1788, 190, 1130, 89, 1758, 551, - /* 1530 */ 1760, 1761, 547, 1129, 570, 1554, 1757, 1834, 1493, 1055, - /* 1540 */ 1054, 540, 1830, 1053, 1052, 617, 1775, 1049, 1048, 619, - /* 1550 */ 1047, 312, 1488, 313, 546, 1486, 478, 481, 314, 1727, - /* 1560 */ 1467, 548, 483, 1466, 1775, 485, 1465, 487, 90, 1700, - /* 1570 */ 1694, 494, 549, 1681, 1213, 53, 136, 1727, 495, 548, - /* 1580 */ 208, 1679, 1680, 1678, 315, 1677, 41, 1788, 15, 1757, - /* 1590 */ 287, 1758, 551, 1760, 1761, 547, 545, 570, 542, 1806, - /* 1600 */ 23, 47, 221, 1409, 216, 1788, 214, 143, 89, 1758, - /* 1610 */ 551, 1760, 1761, 547, 220, 570, 1393, 1775, 1834, 24, - /* 1620 */ 222, 1747, 74, 1831, 1386, 549, 231, 500, 45, 25, - /* 1630 */ 1727, 46, 548, 16, 1366, 147, 1365, 1426, 17, 1415, - /* 1640 */ 1421, 1420, 321, 1757, 505, 10, 1425, 1424, 322, 148, - /* 1650 */ 1301, 19, 1284, 31, 1757, 1300, 1675, 1667, 1788, 12, - /* 1660 */ 1328, 288, 1758, 551, 1760, 1761, 547, 161, 570, 20, - /* 1670 */ 250, 1775, 21, 1746, 241, 550, 1391, 1223, 243, 549, - /* 1680 */ 248, 255, 1775, 13, 1727, 1303, 548, 1254, 574, 80, - /* 1690 */ 549, 75, 558, 252, 
572, 1727, 76, 548, 1791, 569, - /* 1700 */ 48, 1093, 1116, 335, 576, 578, 1113, 579, 581, 1110, - /* 1710 */ 582, 1757, 1788, 584, 585, 283, 1758, 551, 1760, 1761, - /* 1720 */ 547, 1104, 570, 1788, 1102, 587, 142, 1758, 551, 1760, - /* 1730 */ 1761, 547, 1757, 570, 588, 1108, 594, 81, 1125, 1775, - /* 1740 */ 1107, 82, 1106, 1105, 59, 257, 1121, 549, 1020, 1044, - /* 1750 */ 1062, 603, 1727, 521, 548, 606, 258, 1042, 1041, 1037, - /* 1760 */ 1775, 1040, 1039, 1038, 1036, 1035, 323, 1057, 546, 1059, - /* 1770 */ 1032, 1915, 1500, 1727, 1757, 548, 1031, 1030, 1027, 1026, - /* 1780 */ 1788, 1025, 627, 288, 1758, 551, 1760, 1761, 547, 629, - /* 1790 */ 570, 1498, 628, 631, 633, 632, 1496, 1757, 635, 636, - /* 1800 */ 637, 1788, 1775, 1494, 287, 1758, 551, 1760, 1761, 547, - /* 1810 */ 549, 570, 639, 1807, 640, 1727, 1482, 548, 641, 643, - /* 1820 */ 984, 1464, 261, 647, 1439, 1775, 1240, 269, 650, 325, - /* 1830 */ 651, 1439, 1439, 549, 1439, 1439, 1439, 1439, 1727, 1439, - /* 1840 */ 548, 1439, 1439, 1788, 1439, 1439, 288, 1758, 551, 1760, - /* 1850 */ 1761, 547, 327, 570, 1757, 1439, 1439, 1439, 1439, 1439, - /* 1860 */ 1439, 1439, 1439, 1439, 1439, 1757, 1788, 1439, 1439, 288, - /* 1870 */ 1758, 551, 1760, 1761, 547, 1439, 570, 1439, 1439, 1757, - /* 1880 */ 1439, 1439, 1775, 1439, 1439, 1439, 1439, 1439, 1439, 1439, - /* 1890 */ 549, 1439, 1439, 1775, 1439, 1727, 1439, 548, 1439, 1439, - /* 1900 */ 1439, 549, 1439, 1439, 1439, 1439, 1727, 1775, 548, 1439, - /* 1910 */ 1439, 1439, 1439, 1439, 1439, 549, 1439, 1439, 1439, 1439, - /* 1920 */ 1727, 1757, 548, 1788, 1439, 1439, 274, 1758, 551, 1760, - /* 1930 */ 1761, 547, 1439, 570, 1788, 1757, 1439, 275, 1758, 551, - /* 1940 */ 1760, 1761, 547, 1439, 570, 1439, 1757, 1439, 1788, 1775, - /* 1950 */ 1439, 276, 1758, 551, 1760, 1761, 547, 549, 570, 1439, - /* 1960 */ 1439, 1439, 1727, 1775, 548, 1439, 1439, 1439, 1439, 1439, - /* 1970 */ 1439, 549, 1439, 1439, 1775, 1439, 1727, 1439, 548, 1439, - /* 1980 */ 1439, 1439, 549, 1439, 1439, 1439, 1439, 1727, 1757, 548, - /* 1990 */ 1788, 1439, 1439, 282, 1758, 551, 1760, 1761, 547, 1439, - /* 2000 */ 570, 1439, 1439, 1439, 1788, 1439, 1439, 284, 1758, 551, - /* 2010 */ 1760, 1761, 547, 1439, 570, 1788, 1775, 1439, 277, 1758, - /* 2020 */ 551, 1760, 1761, 547, 549, 570, 1439, 1439, 1439, 1727, - /* 2030 */ 1757, 548, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2040 */ 1439, 1757, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2050 */ 1439, 1439, 1439, 1757, 1439, 1439, 1439, 1788, 1775, 1439, - /* 2060 */ 285, 1758, 551, 1760, 1761, 547, 549, 570, 1439, 1775, - /* 2070 */ 1439, 1727, 1439, 548, 1439, 1439, 1439, 549, 1439, 1439, - /* 2080 */ 1439, 1775, 1727, 1439, 548, 1439, 1439, 1439, 1439, 549, - /* 2090 */ 1439, 1439, 1439, 1439, 1727, 1439, 548, 1439, 1439, 1788, - /* 2100 */ 1439, 1439, 278, 1758, 551, 1760, 1761, 547, 1757, 570, - /* 2110 */ 1788, 1439, 1439, 286, 1758, 551, 1760, 1761, 547, 1439, - /* 2120 */ 570, 1439, 1788, 1439, 1439, 279, 1758, 551, 1760, 1761, - /* 2130 */ 547, 1439, 570, 1439, 1439, 1757, 1775, 1439, 1439, 1439, - /* 2140 */ 1439, 1439, 1439, 1439, 549, 1439, 1439, 1439, 1439, 1727, - /* 2150 */ 1439, 548, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2160 */ 1439, 1439, 1757, 1775, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2170 */ 1439, 549, 1439, 1439, 1439, 1439, 1727, 1788, 548, 1439, - /* 2180 */ 292, 1758, 551, 1760, 1761, 547, 1439, 570, 1439, 1757, - /* 2190 */ 1775, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 549, 1439, - /* 2200 */ 1439, 
1439, 1439, 1727, 1788, 548, 1439, 293, 1758, 551, - /* 2210 */ 1760, 1761, 547, 1439, 570, 1439, 1439, 1775, 1439, 1439, - /* 2220 */ 1439, 1439, 1439, 1439, 1439, 549, 1439, 1439, 1439, 1439, - /* 2230 */ 1727, 1788, 548, 1439, 1769, 1758, 551, 1760, 1761, 547, - /* 2240 */ 1439, 570, 1439, 1757, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2250 */ 1439, 1439, 1757, 1439, 1439, 1439, 1439, 1439, 1788, 1439, - /* 2260 */ 1439, 1768, 1758, 551, 1760, 1761, 547, 1439, 570, 1439, - /* 2270 */ 1757, 1775, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 549, - /* 2280 */ 1775, 1439, 1439, 1439, 1727, 1439, 548, 1439, 549, 1439, - /* 2290 */ 1439, 1439, 1439, 1727, 1439, 548, 1439, 1439, 1775, 1439, - /* 2300 */ 1439, 1439, 1439, 1439, 1439, 1439, 549, 1439, 1439, 1439, - /* 2310 */ 1439, 1727, 1788, 548, 1439, 1767, 1758, 551, 1760, 1761, - /* 2320 */ 547, 1788, 570, 1757, 303, 1758, 551, 1760, 1761, 547, - /* 2330 */ 1439, 570, 1439, 1439, 1439, 1439, 1439, 1757, 1439, 1788, - /* 2340 */ 1439, 1439, 302, 1758, 551, 1760, 1761, 547, 1439, 570, - /* 2350 */ 1439, 1775, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 549, - /* 2360 */ 1439, 1439, 1439, 1439, 1727, 1775, 548, 1439, 1439, 1439, - /* 2370 */ 1439, 1439, 1439, 549, 1439, 1439, 1439, 1439, 1727, 1439, - /* 2380 */ 548, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2390 */ 1439, 1757, 1788, 1439, 1439, 304, 1758, 551, 1760, 1761, - /* 2400 */ 547, 1439, 570, 1439, 1439, 1439, 1788, 1439, 1439, 301, - /* 2410 */ 1758, 551, 1760, 1761, 547, 1439, 570, 1439, 1439, 1775, - /* 2420 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 549, 1439, 1439, - /* 2430 */ 1439, 1439, 1727, 1439, 548, 1439, 1439, 1439, 1439, 1439, - /* 2440 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2450 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, - /* 2460 */ 1788, 1439, 1439, 281, 1758, 551, 1760, 1761, 547, 1439, - /* 2470 */ 570, + /* 900 */ 1727, 381, 380, 1727, 1244, 27, 1445, 11, 10, 139, + /* 910 */ 619, 34, 33, 1727, 273, 41, 39, 37, 36, 35, + /* 920 */ 1549, 196, 1435, 1436, 195, 1239, 227, 1237, 271, 57, + /* 930 */ 1444, 1492, 56, 1443, 198, 200, 42, 197, 199, 558, + /* 940 */ 1532, 1747, 214, 649, 1757, 595, 1250, 1727, 172, 250, + /* 950 */ 367, 1242, 1243, 475, 1291, 1292, 1294, 1295, 1296, 1297, + /* 960 */ 1298, 564, 560, 1306, 1307, 1309, 1310, 1311, 1312, 1314, + /* 970 */ 1317, 1727, 1775, 60, 1727, 123, 1187, 1749, 554, 1293, + /* 980 */ 569, 491, 218, 29, 1487, 1727, 1485, 568, 137, 34, + /* 990 */ 33, 457, 126, 41, 39, 37, 36, 35, 127, 50, + /* 1000 */ 231, 546, 1239, 42, 1237, 42, 477, 1249, 480, 42, + /* 1010 */ 239, 86, 1788, 575, 602, 510, 87, 1758, 571, 1760, + /* 1020 */ 1761, 567, 1757, 562, 1481, 1776, 1834, 342, 1242, 1243, + /* 1030 */ 300, 1830, 224, 129, 128, 599, 598, 597, 1087, 1394, + /* 1040 */ 234, 126, 1900, 1343, 127, 1299, 64, 63, 376, 266, + /* 1050 */ 1775, 166, 112, 1115, 83, 157, 1028, 370, 545, 1897, + /* 1060 */ 1476, 1614, 126, 1727, 80, 568, 1864, 542, 244, 1327, + /* 1070 */ 295, 249, 644, 360, 252, 358, 354, 350, 163, 345, + /* 1080 */ 254, 1119, 3, 1757, 1126, 5, 1029, 1260, 347, 344, + /* 1090 */ 1788, 351, 1124, 306, 88, 1758, 571, 1760, 1761, 567, + /* 1100 */ 1757, 562, 130, 1056, 1834, 307, 1203, 263, 326, 1830, + /* 1110 */ 152, 1775, 160, 399, 1666, 168, 406, 413, 414, 545, + /* 1120 */ 415, 419, 156, 1266, 1727, 420, 568, 428, 1775, 1269, + /* 1130 */ 1860, 175, 431, 177, 1268, 432, 569, 433, 1270, 180, + /* 1140 */ 434, 1727, 436, 568, 182, 1267, 184, 437, 68, 440, + /* 
1150 */ 187, 1788, 461, 1757, 459, 88, 1758, 571, 1760, 1761, + /* 1160 */ 567, 1564, 562, 492, 264, 1834, 124, 191, 1788, 326, + /* 1170 */ 1830, 152, 88, 1758, 571, 1760, 1761, 567, 298, 562, + /* 1180 */ 91, 1775, 1834, 1560, 193, 132, 326, 1830, 1913, 569, + /* 1190 */ 133, 1861, 1562, 1558, 1727, 134, 568, 1868, 135, 324, + /* 1200 */ 323, 1708, 204, 207, 493, 499, 496, 503, 1757, 1252, + /* 1210 */ 525, 211, 506, 511, 1707, 316, 512, 220, 1676, 508, + /* 1220 */ 1313, 1788, 1245, 318, 222, 88, 1758, 571, 1760, 1761, + /* 1230 */ 567, 125, 562, 513, 76, 1834, 1775, 265, 1575, 326, + /* 1240 */ 1830, 1913, 1265, 1308, 569, 1865, 521, 529, 1875, 1727, + /* 1250 */ 1891, 568, 1244, 1874, 1856, 229, 538, 523, 233, 524, + /* 1260 */ 325, 532, 6, 522, 520, 519, 1369, 119, 1264, 552, + /* 1270 */ 555, 19, 1757, 146, 238, 243, 1788, 1850, 327, 78, + /* 1280 */ 88, 1758, 571, 1760, 1761, 567, 1757, 562, 240, 242, + /* 1290 */ 1834, 526, 241, 248, 326, 1830, 1913, 1896, 1757, 573, + /* 1300 */ 1775, 1618, 268, 549, 645, 1853, 1547, 251, 569, 556, + /* 1310 */ 1916, 253, 259, 1727, 1775, 568, 646, 1815, 648, 51, + /* 1320 */ 145, 270, 569, 272, 1721, 281, 1775, 1727, 291, 568, + /* 1330 */ 290, 1720, 62, 1719, 569, 346, 1716, 348, 349, 1727, + /* 1340 */ 1788, 568, 1231, 546, 284, 1758, 571, 1760, 1761, 567, + /* 1350 */ 1253, 562, 1248, 1232, 1788, 546, 164, 353, 280, 1758, + /* 1360 */ 571, 1760, 1761, 567, 1714, 562, 1788, 1757, 357, 355, + /* 1370 */ 280, 1758, 571, 1760, 1761, 567, 1256, 562, 356, 1713, + /* 1380 */ 1712, 359, 537, 361, 1900, 1711, 1710, 560, 1306, 1307, + /* 1390 */ 1309, 1310, 1311, 1312, 363, 1775, 1900, 159, 365, 1693, + /* 1400 */ 368, 1897, 165, 569, 369, 1206, 1205, 1687, 1727, 157, + /* 1410 */ 568, 1686, 374, 1897, 375, 1685, 1684, 1175, 1659, 1658, + /* 1420 */ 1657, 65, 1656, 1655, 1757, 1654, 1653, 1652, 388, 389, + /* 1430 */ 1651, 391, 1650, 1649, 1648, 1788, 1647, 1646, 1645, 89, + /* 1440 */ 1758, 571, 1760, 1761, 567, 1757, 562, 1644, 1643, 1834, + /* 1450 */ 1642, 1641, 1775, 1833, 1830, 1640, 1639, 1638, 1637, 122, + /* 1460 */ 569, 1636, 1635, 1634, 1633, 1727, 1632, 568, 1631, 1630, + /* 1470 */ 1629, 1177, 1628, 1775, 150, 424, 1471, 173, 113, 994, + /* 1480 */ 174, 566, 1627, 1504, 1472, 993, 1727, 426, 568, 1701, + /* 1490 */ 1695, 114, 1788, 179, 181, 1682, 89, 1758, 571, 1760, + /* 1500 */ 1761, 567, 1683, 562, 1668, 1757, 1834, 1553, 1503, 1501, + /* 1510 */ 557, 1830, 441, 1788, 443, 1499, 1497, 288, 1758, 571, + /* 1520 */ 1760, 1761, 567, 565, 562, 559, 1806, 1022, 442, 445, + /* 1530 */ 447, 449, 446, 1775, 450, 1495, 451, 455, 453, 454, + /* 1540 */ 1484, 569, 1483, 1468, 1555, 1130, 1727, 1129, 568, 190, + /* 1550 */ 49, 1554, 1055, 1054, 1053, 1052, 617, 619, 1049, 1048, + /* 1560 */ 1493, 1757, 1047, 312, 1488, 1486, 481, 313, 314, 1467, + /* 1570 */ 478, 483, 1466, 1788, 485, 1465, 487, 142, 1758, 571, + /* 1580 */ 1760, 1761, 567, 1757, 562, 1700, 90, 1213, 1694, 1775, + /* 1590 */ 136, 1681, 53, 494, 495, 1679, 315, 569, 1680, 1678, + /* 1600 */ 208, 1677, 1727, 1675, 568, 213, 1223, 1667, 15, 221, + /* 1610 */ 219, 1775, 74, 226, 75, 16, 317, 42, 1254, 569, + /* 1620 */ 1409, 547, 1914, 48, 1727, 500, 568, 509, 17, 1788, + /* 1630 */ 80, 24, 223, 89, 1758, 571, 1760, 1761, 567, 228, + /* 1640 */ 562, 230, 1391, 1834, 1757, 232, 237, 236, 1831, 13, + /* 1650 */ 143, 1788, 1747, 26, 246, 289, 1758, 571, 1760, 1761, + /* 1660 */ 567, 235, 562, 25, 1757, 1393, 1386, 1366, 79, 47, + /* 1670 */ 1365, 1746, 1775, 147, 18, 1426, 1415, 518, 
1421, 10, + /* 1680 */ 569, 1420, 328, 1425, 1424, 1727, 329, 568, 1328, 1284, + /* 1690 */ 572, 1791, 1775, 20, 1303, 561, 32, 46, 1301, 1300, + /* 1700 */ 569, 148, 161, 12, 21, 1727, 22, 568, 574, 335, + /* 1710 */ 578, 1116, 1788, 1113, 576, 579, 289, 1758, 571, 1760, + /* 1720 */ 1761, 567, 1110, 562, 570, 581, 584, 582, 587, 1757, + /* 1730 */ 1093, 594, 1788, 1125, 1121, 1104, 142, 1758, 571, 1760, + /* 1740 */ 1761, 567, 585, 562, 1102, 588, 1108, 1107, 1757, 1020, + /* 1750 */ 81, 82, 59, 257, 603, 1044, 606, 1775, 258, 1106, + /* 1760 */ 1105, 1062, 330, 1042, 1041, 569, 1040, 1039, 1038, 1037, + /* 1770 */ 1727, 1036, 568, 1035, 1059, 1057, 1775, 1032, 1031, 1030, + /* 1780 */ 1027, 1915, 1500, 1026, 566, 1025, 627, 1498, 628, 1727, + /* 1790 */ 631, 568, 629, 632, 1496, 633, 635, 1788, 637, 636, + /* 1800 */ 1494, 289, 1758, 571, 1760, 1761, 567, 639, 562, 640, + /* 1810 */ 641, 1482, 643, 984, 1464, 261, 1788, 1757, 647, 650, + /* 1820 */ 288, 1758, 571, 1760, 1761, 567, 1240, 562, 269, 1807, + /* 1830 */ 651, 1439, 1439, 1757, 1439, 1439, 1439, 1439, 1439, 1439, + /* 1840 */ 1439, 1439, 1439, 1439, 1439, 1775, 1439, 1439, 1439, 1439, + /* 1850 */ 332, 1439, 1439, 569, 1439, 1439, 1439, 1439, 1727, 1439, + /* 1860 */ 568, 1775, 1439, 1439, 1439, 1439, 334, 1439, 1439, 569, + /* 1870 */ 1439, 1439, 1439, 1439, 1727, 1757, 568, 1439, 1439, 1439, + /* 1880 */ 1439, 1439, 1439, 1439, 1439, 1788, 1439, 1439, 1439, 289, + /* 1890 */ 1758, 571, 1760, 1761, 567, 1439, 562, 1439, 1439, 1757, + /* 1900 */ 1439, 1788, 1439, 1775, 1439, 289, 1758, 571, 1760, 1761, + /* 1910 */ 567, 569, 562, 1439, 1439, 1439, 1727, 1439, 568, 1439, + /* 1920 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1775, 1439, 1439, + /* 1930 */ 1439, 1439, 1439, 1439, 1439, 569, 1439, 1439, 1439, 1439, + /* 1940 */ 1727, 1757, 568, 1788, 1439, 1439, 1439, 274, 1758, 571, + /* 1950 */ 1760, 1761, 567, 1439, 562, 1439, 1439, 1439, 1439, 1439, + /* 1960 */ 1439, 1757, 1439, 1439, 1439, 1439, 1439, 1788, 1439, 1775, + /* 1970 */ 1439, 275, 1758, 571, 1760, 1761, 567, 569, 562, 1439, + /* 1980 */ 1439, 1439, 1727, 1439, 568, 1439, 1439, 1439, 1439, 1775, + /* 1990 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 569, 1439, 1439, + /* 2000 */ 1439, 1439, 1727, 1439, 568, 1439, 1439, 1439, 1439, 1788, + /* 2010 */ 1439, 1439, 1439, 276, 1758, 571, 1760, 1761, 567, 1439, + /* 2020 */ 562, 1757, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1788, + /* 2030 */ 1439, 1439, 1439, 283, 1758, 571, 1760, 1761, 567, 1439, + /* 2040 */ 562, 1439, 1439, 1439, 1439, 1757, 1439, 1439, 1439, 1775, + /* 2050 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 569, 1439, 1439, + /* 2060 */ 1439, 1439, 1727, 1439, 568, 1439, 1439, 1439, 1439, 1439, + /* 2070 */ 1439, 1439, 1439, 1775, 1439, 1439, 1439, 1439, 1439, 1439, + /* 2080 */ 1439, 569, 1439, 1439, 1439, 1439, 1727, 1439, 568, 1788, + /* 2090 */ 1439, 1439, 1439, 285, 1758, 571, 1760, 1761, 567, 1439, + /* 2100 */ 562, 1757, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, + /* 2110 */ 1439, 1439, 1439, 1788, 1439, 1439, 1439, 277, 1758, 571, + /* 2120 */ 1760, 1761, 567, 1439, 562, 1757, 1439, 1439, 1439, 1775, + /* 2130 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 569, 1439, 1439, + /* 2140 */ 1439, 1439, 1727, 1439, 568, 1439, 1439, 1439, 1439, 1439, + /* 2150 */ 1439, 1439, 1439, 1775, 1439, 1439, 1439, 1439, 1439, 1439, + /* 2160 */ 1439, 569, 1439, 1439, 1439, 1439, 1727, 1757, 568, 1788, + /* 2170 */ 1439, 1439, 1439, 286, 1758, 571, 1760, 1761, 567, 1439, + /* 2180 */ 562, 1439, 1439, 1757, 
1439, 1439, 1439, 1439, 1439, 1439, + /* 2190 */ 1439, 1439, 1439, 1788, 1439, 1775, 1439, 278, 1758, 571, + /* 2200 */ 1760, 1761, 567, 569, 562, 1439, 1439, 1439, 1727, 1439, + /* 2210 */ 568, 1775, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 569, + /* 2220 */ 1439, 1439, 1439, 1439, 1727, 1757, 568, 1439, 1439, 1439, + /* 2230 */ 1439, 1439, 1439, 1439, 1439, 1788, 1439, 1439, 1439, 287, + /* 2240 */ 1758, 571, 1760, 1761, 567, 1757, 562, 1439, 1439, 1439, + /* 2250 */ 1439, 1788, 1439, 1775, 1439, 279, 1758, 571, 1760, 1761, + /* 2260 */ 567, 569, 562, 1439, 1439, 1439, 1727, 1439, 568, 1439, + /* 2270 */ 1439, 1439, 1439, 1775, 1439, 1439, 1439, 1439, 1439, 1439, + /* 2280 */ 1439, 569, 1439, 1439, 1439, 1439, 1727, 1439, 568, 1439, + /* 2290 */ 1439, 1439, 1439, 1788, 1439, 1439, 1439, 292, 1758, 571, + /* 2300 */ 1760, 1761, 567, 1439, 562, 1757, 1439, 1439, 1439, 1439, + /* 2310 */ 1439, 1439, 1439, 1788, 1439, 1439, 1439, 293, 1758, 571, + /* 2320 */ 1760, 1761, 567, 1439, 562, 1439, 1439, 1439, 1439, 1757, + /* 2330 */ 1439, 1439, 1439, 1775, 1439, 1439, 1439, 1439, 1439, 1439, + /* 2340 */ 1439, 569, 1439, 1439, 1439, 1439, 1727, 1439, 568, 1439, + /* 2350 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1775, 1439, 1439, + /* 2360 */ 1439, 1439, 1439, 1439, 1439, 569, 1439, 1439, 1439, 1439, + /* 2370 */ 1727, 1439, 568, 1788, 1439, 1439, 1439, 1769, 1758, 571, + /* 2380 */ 1760, 1761, 567, 1439, 562, 1757, 1439, 1439, 1439, 1439, + /* 2390 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1788, 1439, 1439, + /* 2400 */ 1439, 1768, 1758, 571, 1760, 1761, 567, 1439, 562, 1757, + /* 2410 */ 1439, 1439, 1439, 1775, 1439, 1439, 1439, 1439, 1439, 1439, + /* 2420 */ 1439, 569, 1439, 1439, 1439, 1439, 1727, 1439, 568, 1439, + /* 2430 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1775, 1439, 1439, + /* 2440 */ 1439, 1439, 1439, 1439, 1439, 569, 1439, 1439, 1439, 1439, + /* 2450 */ 1727, 1757, 568, 1788, 1439, 1439, 1439, 1767, 1758, 571, + /* 2460 */ 1760, 1761, 567, 1439, 562, 1439, 1439, 1757, 1439, 1439, + /* 2470 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1788, 1439, 1775, + /* 2480 */ 1439, 304, 1758, 571, 1760, 1761, 567, 569, 562, 1439, + /* 2490 */ 1439, 1439, 1727, 1439, 568, 1775, 1439, 1439, 1439, 1439, + /* 2500 */ 1439, 1439, 1439, 569, 1439, 1439, 1439, 1439, 1727, 1757, + /* 2510 */ 568, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1788, + /* 2520 */ 1439, 1439, 1439, 303, 1758, 571, 1760, 1761, 567, 1757, + /* 2530 */ 562, 1439, 1439, 1439, 1439, 1788, 1439, 1775, 1439, 305, + /* 2540 */ 1758, 571, 1760, 1761, 567, 569, 562, 1439, 1439, 1439, + /* 2550 */ 1727, 1439, 568, 1439, 1439, 1439, 1439, 1775, 1439, 1439, + /* 2560 */ 1439, 1439, 1439, 1439, 1439, 569, 1439, 1439, 1439, 1439, + /* 2570 */ 1727, 1439, 568, 1439, 1439, 1439, 1439, 1788, 1439, 1439, + /* 2580 */ 1439, 302, 1758, 571, 1760, 1761, 567, 1439, 562, 1439, + /* 2590 */ 1439, 1439, 1439, 1439, 1439, 1439, 1439, 1788, 1439, 1439, + /* 2600 */ 1439, 282, 1758, 571, 1760, 1761, 567, 1439, 562, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 259, 308, 261, 262, 311, 285, 252, 259, 288, 261, - /* 10 */ 262, 284, 12, 13, 0, 294, 296, 267, 297, 298, - /* 20 */ 20, 0, 22, 285, 353, 285, 288, 263, 313, 263, - /* 30 */ 280, 0, 283, 33, 296, 35, 296, 366, 0, 289, - /* 40 */ 291, 370, 21, 323, 324, 24, 25, 26, 27, 28, - /* 50 */ 29, 30, 31, 32, 334, 291, 56, 4, 255, 59, - /* 60 */ 4, 323, 324, 323, 324, 65, 300, 313, 353, 12, - /* 70 */ 13, 14, 334, 58, 334, 61, 62, 20, 329, 22, - /* 80 */ 66, 366, 82, 69, 70, 370, 
283, 73, 74, 75, - /* 90 */ 33, 20, 35, 22, 291, 331, 58, 44, 45, 296, - /* 100 */ 4, 298, 14, 37, 104, 20, 35, 353, 20, 345, - /* 110 */ 346, 347, 348, 56, 350, 19, 59, 298, 118, 119, - /* 120 */ 366, 50, 65, 0, 370, 306, 76, 324, 309, 33, - /* 130 */ 327, 328, 329, 330, 331, 332, 82, 334, 82, 82, - /* 140 */ 337, 263, 56, 47, 341, 342, 343, 51, 338, 339, - /* 150 */ 19, 85, 56, 87, 88, 254, 90, 256, 355, 260, - /* 160 */ 94, 104, 263, 163, 33, 165, 363, 82, 82, 291, - /* 170 */ 84, 275, 122, 123, 20, 118, 119, 81, 47, 283, - /* 180 */ 84, 58, 116, 52, 53, 54, 55, 56, 292, 189, - /* 190 */ 190, 20, 192, 193, 194, 195, 196, 197, 198, 199, - /* 200 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 331, - /* 210 */ 20, 308, 81, 14, 311, 84, 162, 56, 164, 20, - /* 220 */ 163, 221, 165, 14, 346, 347, 348, 353, 350, 20, - /* 230 */ 285, 8, 9, 313, 20, 12, 13, 14, 15, 16, - /* 240 */ 366, 296, 81, 263, 370, 84, 189, 190, 117, 192, + /* 0 */ 259, 284, 261, 262, 353, 259, 252, 261, 262, 267, + /* 10 */ 267, 263, 12, 13, 285, 268, 283, 366, 263, 272, + /* 20 */ 20, 370, 22, 280, 291, 296, 12, 13, 14, 15, + /* 30 */ 16, 289, 289, 33, 21, 35, 285, 24, 25, 26, + /* 40 */ 27, 28, 29, 30, 31, 32, 291, 296, 300, 20, + /* 50 */ 285, 322, 323, 324, 294, 255, 56, 297, 298, 59, + /* 60 */ 20, 296, 329, 334, 58, 65, 312, 312, 20, 12, + /* 70 */ 13, 14, 263, 322, 323, 0, 353, 20, 254, 22, + /* 80 */ 256, 56, 82, 283, 20, 334, 331, 322, 323, 366, + /* 90 */ 33, 291, 35, 370, 312, 94, 296, 353, 298, 334, + /* 100 */ 291, 346, 347, 348, 104, 350, 81, 353, 353, 84, + /* 110 */ 366, 82, 312, 56, 370, 35, 59, 116, 118, 119, + /* 120 */ 366, 366, 65, 323, 370, 370, 20, 327, 328, 329, + /* 130 */ 330, 331, 332, 58, 334, 353, 82, 337, 14, 82, + /* 140 */ 331, 341, 342, 263, 20, 65, 82, 20, 366, 282, + /* 150 */ 19, 56, 370, 353, 345, 346, 347, 348, 20, 350, + /* 160 */ 22, 104, 295, 163, 33, 165, 366, 338, 339, 4, + /* 170 */ 370, 291, 20, 35, 275, 118, 119, 82, 47, 84, + /* 180 */ 58, 114, 283, 52, 53, 54, 55, 56, 50, 189, + /* 190 */ 190, 292, 192, 193, 194, 195, 196, 197, 198, 199, + /* 200 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 44, + /* 210 */ 45, 331, 81, 14, 260, 84, 162, 263, 164, 20, + /* 220 */ 163, 221, 165, 76, 118, 119, 346, 347, 348, 20, + /* 230 */ 350, 8, 9, 37, 298, 12, 13, 14, 15, 16, + /* 240 */ 173, 174, 306, 263, 177, 309, 189, 190, 117, 192, /* 250 */ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, - /* 260 */ 203, 204, 205, 206, 207, 208, 12, 13, 323, 324, - /* 270 */ 21, 291, 313, 353, 20, 221, 22, 221, 267, 334, - /* 280 */ 149, 193, 59, 34, 82, 36, 366, 33, 21, 35, - /* 290 */ 370, 24, 25, 26, 27, 28, 29, 30, 31, 32, - /* 300 */ 289, 170, 282, 172, 353, 298, 83, 221, 118, 119, - /* 310 */ 56, 331, 353, 59, 91, 295, 309, 366, 20, 65, - /* 320 */ 94, 370, 20, 12, 13, 366, 346, 347, 348, 370, - /* 330 */ 350, 20, 58, 22, 0, 20, 82, 22, 20, 113, - /* 340 */ 114, 115, 116, 117, 33, 265, 35, 95, 96, 97, + /* 260 */ 203, 204, 205, 206, 207, 208, 12, 13, 0, 122, + /* 270 */ 123, 291, 283, 0, 20, 221, 22, 113, 114, 290, + /* 280 */ 149, 85, 59, 87, 88, 221, 90, 33, 299, 35, + /* 290 */ 94, 148, 24, 25, 26, 27, 28, 29, 30, 31, + /* 300 */ 32, 170, 255, 172, 263, 260, 83, 263, 263, 263, + /* 310 */ 56, 331, 116, 59, 91, 274, 221, 193, 274, 65, + /* 320 */ 274, 4, 281, 12, 13, 281, 346, 347, 348, 82, + /* 330 */ 350, 20, 291, 22, 0, 291, 82, 291, 174, 14, + /* 340 */ 255, 177, 65, 296, 33, 20, 35, 95, 96, 97, /* 350 */ 98, 99, 100, 101, 102, 103, 104, 105, 104, 107, - /* 360 */ 108, 109, 110, 111, 
112, 50, 286, 56, 145, 14, - /* 370 */ 15, 16, 118, 119, 291, 255, 65, 12, 13, 14, - /* 380 */ 15, 16, 8, 9, 82, 302, 12, 13, 14, 15, - /* 390 */ 16, 168, 193, 82, 59, 61, 62, 63, 64, 93, + /* 360 */ 108, 109, 110, 111, 112, 0, 263, 56, 145, 226, + /* 370 */ 227, 293, 118, 119, 296, 255, 65, 274, 14, 15, + /* 380 */ 16, 296, 8, 9, 297, 298, 12, 13, 14, 15, + /* 390 */ 16, 168, 193, 82, 291, 61, 62, 63, 64, 82, /* 400 */ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, - /* 410 */ 76, 77, 78, 79, 268, 104, 296, 163, 272, 165, - /* 420 */ 8, 9, 189, 221, 12, 13, 14, 15, 16, 118, + /* 410 */ 76, 77, 78, 79, 93, 104, 296, 163, 325, 165, + /* 420 */ 8, 9, 189, 58, 12, 13, 14, 15, 16, 118, /* 430 */ 119, 0, 209, 210, 211, 212, 213, 214, 215, 216, - /* 440 */ 217, 218, 293, 189, 190, 296, 192, 193, 194, 195, + /* 440 */ 217, 218, 349, 189, 190, 65, 192, 193, 194, 195, /* 450 */ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - /* 460 */ 206, 207, 208, 230, 231, 232, 233, 234, 35, 263, - /* 470 */ 61, 62, 283, 145, 163, 66, 165, 20, 69, 70, - /* 480 */ 274, 292, 73, 74, 75, 8, 9, 281, 65, 12, - /* 490 */ 13, 14, 15, 16, 65, 83, 168, 291, 65, 255, - /* 500 */ 189, 190, 114, 192, 193, 194, 195, 196, 197, 198, + /* 460 */ 206, 207, 208, 230, 231, 232, 233, 234, 221, 263, + /* 470 */ 61, 62, 269, 270, 163, 66, 165, 284, 69, 70, + /* 480 */ 274, 265, 73, 74, 75, 8, 9, 1, 2, 12, + /* 490 */ 13, 14, 15, 16, 278, 83, 20, 291, 22, 56, + /* 500 */ 189, 190, 286, 192, 193, 194, 195, 196, 197, 198, /* 510 */ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, - /* 520 */ 12, 13, 14, 221, 189, 94, 297, 298, 20, 155, - /* 530 */ 22, 263, 221, 269, 270, 33, 59, 209, 113, 114, - /* 540 */ 296, 33, 274, 35, 113, 114, 115, 116, 117, 47, - /* 550 */ 284, 263, 263, 283, 52, 53, 54, 55, 56, 291, - /* 560 */ 290, 173, 174, 274, 56, 177, 20, 275, 91, 299, - /* 570 */ 281, 1, 2, 65, 275, 283, 317, 12, 13, 291, - /* 580 */ 291, 284, 283, 81, 292, 20, 84, 22, 8, 9, - /* 590 */ 82, 292, 12, 13, 14, 15, 16, 265, 33, 174, - /* 600 */ 35, 313, 177, 255, 263, 263, 94, 150, 283, 255, - /* 610 */ 278, 255, 104, 283, 263, 274, 274, 243, 286, 331, - /* 620 */ 290, 56, 145, 43, 299, 274, 118, 119, 116, 299, - /* 630 */ 65, 283, 291, 291, 346, 347, 348, 20, 350, 291, - /* 640 */ 255, 353, 291, 158, 296, 168, 298, 82, 146, 147, - /* 650 */ 296, 149, 296, 83, 366, 153, 8, 9, 370, 148, - /* 660 */ 12, 13, 14, 15, 16, 180, 181, 260, 3, 104, - /* 670 */ 263, 163, 324, 165, 172, 327, 328, 329, 330, 331, - /* 680 */ 332, 296, 334, 118, 119, 0, 209, 210, 211, 212, - /* 690 */ 213, 214, 215, 216, 217, 218, 150, 189, 190, 255, + /* 520 */ 12, 13, 14, 275, 4, 94, 50, 84, 20, 155, + /* 530 */ 22, 283, 221, 0, 312, 33, 59, 284, 221, 19, + /* 540 */ 292, 33, 275, 35, 113, 114, 115, 116, 117, 47, + /* 550 */ 283, 263, 316, 33, 52, 53, 54, 55, 56, 292, + /* 560 */ 265, 263, 274, 263, 56, 269, 270, 47, 91, 83, + /* 570 */ 312, 51, 274, 65, 274, 353, 56, 12, 13, 291, + /* 580 */ 283, 286, 263, 81, 3, 20, 84, 22, 366, 291, + /* 590 */ 82, 291, 370, 274, 61, 62, 299, 158, 33, 66, + /* 600 */ 35, 81, 69, 70, 84, 263, 73, 74, 75, 39, + /* 610 */ 291, 353, 104, 308, 263, 310, 274, 243, 263, 180, + /* 620 */ 181, 56, 145, 4, 366, 274, 118, 119, 370, 274, + /* 630 */ 65, 8, 9, 291, 255, 12, 13, 14, 15, 16, + /* 640 */ 44, 45, 291, 283, 255, 168, 291, 82, 146, 147, + /* 650 */ 290, 149, 325, 20, 263, 153, 8, 9, 255, 299, + /* 660 */ 12, 13, 14, 15, 16, 274, 43, 291, 263, 104, + /* 670 */ 308, 163, 310, 165, 172, 296, 349, 113, 302, 274, + /* 680 */ 255, 43, 291, 118, 
119, 296, 209, 210, 211, 212, + /* 690 */ 213, 214, 215, 216, 217, 218, 291, 189, 190, 296, /* 700 */ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, - /* 710 */ 202, 203, 204, 205, 206, 207, 208, 0, 276, 371, - /* 720 */ 372, 279, 263, 263, 263, 269, 270, 284, 163, 293, - /* 730 */ 165, 83, 296, 274, 274, 274, 284, 226, 227, 0, - /* 740 */ 296, 24, 25, 26, 27, 28, 29, 30, 31, 32, - /* 750 */ 291, 291, 291, 263, 189, 190, 284, 192, 193, 194, + /* 710 */ 202, 203, 204, 205, 206, 207, 208, 0, 8, 9, + /* 720 */ 21, 296, 12, 13, 14, 15, 16, 325, 163, 293, + /* 730 */ 165, 83, 296, 34, 276, 36, 255, 279, 21, 175, + /* 740 */ 176, 24, 25, 26, 27, 28, 29, 30, 31, 32, + /* 750 */ 271, 349, 273, 263, 189, 190, 0, 192, 193, 194, /* 760 */ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, - /* 770 */ 205, 206, 207, 208, 12, 13, 18, 22, 20, 94, - /* 780 */ 263, 291, 20, 263, 22, 27, 47, 255, 30, 325, - /* 790 */ 35, 274, 325, 284, 274, 33, 4, 35, 113, 114, - /* 800 */ 115, 116, 117, 313, 255, 47, 263, 49, 291, 51, - /* 810 */ 193, 291, 113, 349, 44, 45, 349, 274, 56, 256, - /* 820 */ 65, 331, 263, 39, 255, 325, 272, 65, 296, 219, - /* 830 */ 220, 255, 86, 274, 291, 89, 346, 347, 348, 81, - /* 840 */ 350, 2, 373, 353, 82, 296, 43, 8, 9, 349, - /* 850 */ 291, 12, 13, 14, 15, 16, 366, 2, 364, 104, - /* 860 */ 370, 42, 43, 8, 9, 296, 104, 12, 13, 14, - /* 870 */ 15, 16, 296, 264, 175, 176, 255, 150, 151, 121, + /* 770 */ 205, 206, 207, 208, 12, 13, 18, 296, 20, 43, + /* 780 */ 263, 291, 20, 150, 22, 27, 256, 255, 30, 263, + /* 790 */ 220, 274, 94, 83, 255, 33, 20, 35, 284, 298, + /* 800 */ 274, 145, 312, 47, 255, 47, 20, 49, 291, 51, + /* 810 */ 309, 113, 114, 115, 116, 117, 35, 291, 56, 238, + /* 820 */ 263, 331, 219, 220, 168, 42, 43, 65, 296, 150, + /* 830 */ 151, 274, 255, 283, 255, 296, 346, 347, 348, 81, + /* 840 */ 350, 222, 292, 353, 82, 296, 8, 9, 291, 364, + /* 850 */ 12, 13, 14, 15, 16, 86, 366, 284, 89, 255, + /* 860 */ 370, 22, 255, 8, 9, 209, 104, 12, 13, 14, + /* 870 */ 15, 16, 255, 296, 35, 296, 284, 35, 240, 121, /* 880 */ 118, 119, 124, 125, 126, 127, 128, 129, 130, 131, /* 890 */ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - /* 900 */ 0, 143, 144, 238, 86, 263, 255, 89, 263, 18, - /* 910 */ 43, 271, 255, 273, 23, 320, 274, 296, 163, 274, - /* 920 */ 165, 86, 22, 86, 89, 163, 89, 165, 37, 38, - /* 930 */ 1, 2, 41, 291, 8, 9, 291, 255, 12, 13, - /* 940 */ 14, 15, 16, 255, 189, 190, 255, 296, 57, 255, - /* 950 */ 255, 189, 190, 296, 192, 193, 194, 195, 196, 197, + /* 900 */ 296, 143, 144, 296, 65, 2, 255, 1, 2, 18, + /* 910 */ 43, 8, 9, 296, 23, 12, 13, 14, 15, 16, + /* 920 */ 0, 86, 118, 119, 89, 163, 150, 165, 37, 38, + /* 930 */ 255, 0, 41, 255, 86, 86, 43, 89, 89, 59, + /* 940 */ 272, 46, 43, 104, 255, 284, 165, 296, 57, 373, + /* 950 */ 83, 189, 190, 22, 192, 193, 194, 195, 196, 197, /* 960 */ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, - /* 970 */ 208, 283, 263, 82, 263, 118, 119, 56, 296, 291, - /* 980 */ 255, 0, 360, 274, 296, 274, 298, 296, 8, 9, - /* 990 */ 296, 296, 12, 13, 14, 15, 16, 255, 283, 43, - /* 1000 */ 291, 313, 291, 22, 220, 84, 46, 35, 35, 83, - /* 1010 */ 43, 120, 324, 43, 222, 327, 328, 329, 330, 331, - /* 1020 */ 332, 296, 334, 0, 255, 337, 262, 8, 9, 341, - /* 1030 */ 342, 12, 13, 14, 15, 16, 0, 264, 296, 83, - /* 1040 */ 295, 353, 82, 240, 255, 22, 155, 156, 157, 43, - /* 1050 */ 83, 160, 283, 83, 366, 43, 82, 166, 370, 326, - /* 1060 */ 291, 35, 344, 351, 43, 296, 92, 298, 43, 367, - /* 1070 */ 179, 367, 283, 182, 43, 184, 185, 186, 187, 188, - /* 
1080 */ 291, 367, 313, 354, 48, 296, 223, 298, 43, 83, - /* 1090 */ 322, 65, 20, 324, 43, 83, 327, 328, 329, 330, - /* 1100 */ 331, 332, 43, 334, 83, 263, 337, 47, 83, 242, - /* 1110 */ 341, 342, 221, 324, 83, 321, 327, 328, 329, 330, - /* 1120 */ 331, 332, 353, 334, 35, 269, 337, 161, 83, 315, - /* 1130 */ 341, 342, 343, 43, 83, 366, 255, 165, 165, 370, - /* 1140 */ 43, 43, 83, 43, 43, 263, 263, 42, 303, 145, - /* 1150 */ 301, 301, 363, 263, 20, 257, 257, 20, 298, 319, - /* 1160 */ 267, 267, 20, 312, 283, 20, 314, 20, 267, 150, - /* 1170 */ 312, 267, 291, 83, 304, 267, 267, 296, 263, 298, - /* 1180 */ 83, 83, 257, 83, 83, 267, 283, 263, 283, 283, - /* 1190 */ 283, 257, 296, 283, 283, 283, 283, 283, 319, 255, - /* 1200 */ 283, 265, 283, 171, 318, 324, 265, 312, 327, 328, - /* 1210 */ 329, 330, 331, 332, 298, 334, 263, 263, 337, 20, - /* 1220 */ 265, 229, 341, 342, 343, 359, 255, 283, 326, 296, - /* 1230 */ 228, 296, 359, 352, 307, 291, 12, 13, 307, 296, - /* 1240 */ 296, 296, 298, 235, 237, 154, 22, 362, 361, 236, - /* 1250 */ 322, 359, 224, 220, 283, 358, 291, 33, 20, 35, - /* 1260 */ 244, 241, 291, 325, 82, 307, 239, 296, 324, 298, - /* 1270 */ 307, 327, 328, 329, 330, 331, 332, 357, 334, 356, - /* 1280 */ 56, 337, 296, 296, 296, 341, 342, 343, 340, 65, - /* 1290 */ 296, 369, 296, 296, 305, 324, 352, 255, 327, 328, - /* 1300 */ 329, 330, 331, 332, 374, 334, 369, 368, 337, 369, - /* 1310 */ 368, 147, 341, 342, 343, 368, 279, 265, 304, 265, - /* 1320 */ 291, 291, 82, 352, 263, 283, 287, 265, 104, 273, - /* 1330 */ 296, 36, 258, 291, 257, 311, 266, 253, 296, 0, - /* 1340 */ 298, 277, 277, 277, 0, 42, 0, 73, 0, 35, - /* 1350 */ 183, 316, 35, 35, 35, 313, 183, 0, 35, 35, - /* 1360 */ 183, 0, 255, 183, 0, 35, 324, 0, 0, 327, - /* 1370 */ 328, 329, 330, 331, 332, 22, 334, 35, 0, 82, - /* 1380 */ 168, 167, 165, 163, 0, 0, 159, 163, 158, 165, - /* 1390 */ 283, 0, 0, 46, 0, 353, 0, 0, 291, 142, - /* 1400 */ 0, 0, 0, 296, 0, 298, 0, 137, 366, 35, - /* 1410 */ 0, 0, 370, 189, 137, 0, 0, 0, 0, 0, - /* 1420 */ 313, 0, 0, 0, 200, 201, 202, 203, 204, 205, - /* 1430 */ 206, 324, 0, 255, 327, 328, 329, 330, 331, 332, - /* 1440 */ 0, 334, 0, 0, 0, 42, 0, 0, 0, 0, - /* 1450 */ 0, 0, 22, 0, 0, 255, 42, 91, 46, 39, - /* 1460 */ 353, 283, 0, 0, 0, 0, 14, 43, 0, 291, - /* 1470 */ 39, 14, 46, 366, 296, 40, 298, 370, 0, 0, - /* 1480 */ 0, 154, 0, 283, 39, 0, 0, 60, 0, 0, - /* 1490 */ 35, 291, 0, 0, 35, 0, 296, 0, 298, 39, - /* 1500 */ 39, 0, 324, 0, 35, 327, 328, 329, 330, 331, - /* 1510 */ 332, 39, 334, 47, 47, 337, 35, 47, 255, 341, - /* 1520 */ 342, 47, 39, 0, 324, 89, 35, 327, 328, 329, - /* 1530 */ 330, 331, 332, 22, 334, 0, 255, 337, 0, 35, - /* 1540 */ 35, 341, 342, 35, 35, 43, 283, 35, 35, 43, - /* 1550 */ 35, 22, 0, 22, 291, 0, 49, 35, 22, 296, - /* 1560 */ 0, 298, 35, 0, 283, 35, 0, 22, 20, 0, - /* 1570 */ 0, 22, 291, 0, 35, 150, 169, 296, 150, 298, - /* 1580 */ 147, 0, 0, 0, 150, 0, 43, 324, 225, 255, - /* 1590 */ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, - /* 1600 */ 82, 43, 43, 83, 83, 324, 82, 82, 327, 328, - /* 1610 */ 329, 330, 331, 332, 82, 334, 83, 283, 337, 82, - /* 1620 */ 46, 46, 82, 342, 83, 291, 46, 152, 219, 43, - /* 1630 */ 296, 43, 298, 225, 83, 46, 83, 83, 43, 83, - /* 1640 */ 35, 35, 35, 255, 310, 2, 35, 35, 35, 46, - /* 1650 */ 83, 43, 22, 82, 255, 83, 0, 0, 324, 82, - /* 1660 */ 189, 327, 328, 329, 330, 331, 332, 46, 334, 82, - /* 1670 */ 39, 283, 82, 46, 83, 191, 83, 178, 82, 291, - /* 1680 */ 82, 46, 283, 225, 296, 83, 298, 22, 35, 92, - /* 1690 */ 291, 82, 148, 146, 93, 
296, 82, 298, 82, 82, - /* 1700 */ 82, 22, 83, 35, 82, 35, 83, 82, 35, 83, - /* 1710 */ 82, 255, 324, 35, 82, 327, 328, 329, 330, 331, - /* 1720 */ 332, 83, 334, 324, 83, 35, 327, 328, 329, 330, - /* 1730 */ 331, 332, 255, 334, 82, 106, 94, 82, 35, 283, - /* 1740 */ 106, 82, 106, 106, 82, 43, 22, 291, 60, 35, - /* 1750 */ 65, 59, 296, 365, 298, 80, 43, 35, 35, 22, - /* 1760 */ 283, 35, 35, 35, 35, 35, 310, 35, 291, 65, - /* 1770 */ 35, 372, 0, 296, 255, 298, 35, 35, 35, 35, - /* 1780 */ 324, 35, 35, 327, 328, 329, 330, 331, 332, 39, - /* 1790 */ 334, 0, 47, 35, 39, 47, 0, 255, 35, 47, - /* 1800 */ 39, 324, 283, 0, 327, 328, 329, 330, 331, 332, - /* 1810 */ 291, 334, 35, 336, 47, 296, 0, 298, 39, 35, - /* 1820 */ 35, 0, 22, 21, 375, 283, 22, 22, 21, 310, - /* 1830 */ 20, 375, 375, 291, 375, 375, 375, 375, 296, 375, - /* 1840 */ 298, 375, 375, 324, 375, 375, 327, 328, 329, 330, - /* 1850 */ 331, 332, 310, 334, 255, 375, 375, 375, 375, 375, - /* 1860 */ 375, 375, 375, 375, 375, 255, 324, 375, 375, 327, - /* 1870 */ 328, 329, 330, 331, 332, 375, 334, 375, 375, 255, - /* 1880 */ 375, 375, 283, 375, 375, 375, 375, 375, 375, 375, - /* 1890 */ 291, 375, 375, 283, 375, 296, 375, 298, 375, 375, - /* 1900 */ 375, 291, 375, 375, 375, 375, 296, 283, 298, 375, - /* 1910 */ 375, 375, 375, 375, 375, 291, 375, 375, 375, 375, - /* 1920 */ 296, 255, 298, 324, 375, 375, 327, 328, 329, 330, - /* 1930 */ 331, 332, 375, 334, 324, 255, 375, 327, 328, 329, - /* 1940 */ 330, 331, 332, 375, 334, 375, 255, 375, 324, 283, - /* 1950 */ 375, 327, 328, 329, 330, 331, 332, 291, 334, 375, - /* 1960 */ 375, 375, 296, 283, 298, 375, 375, 375, 375, 375, - /* 1970 */ 375, 291, 375, 375, 283, 375, 296, 375, 298, 375, - /* 1980 */ 375, 375, 291, 375, 375, 375, 375, 296, 255, 298, - /* 1990 */ 324, 375, 375, 327, 328, 329, 330, 331, 332, 375, - /* 2000 */ 334, 375, 375, 375, 324, 375, 375, 327, 328, 329, - /* 2010 */ 330, 331, 332, 375, 334, 324, 283, 375, 327, 328, - /* 2020 */ 329, 330, 331, 332, 291, 334, 375, 375, 375, 296, - /* 2030 */ 255, 298, 375, 375, 375, 375, 375, 375, 375, 375, - /* 2040 */ 375, 255, 375, 375, 375, 375, 375, 375, 375, 375, - /* 2050 */ 375, 375, 375, 255, 375, 375, 375, 324, 283, 375, - /* 2060 */ 327, 328, 329, 330, 331, 332, 291, 334, 375, 283, - /* 2070 */ 375, 296, 375, 298, 375, 375, 375, 291, 375, 375, - /* 2080 */ 375, 283, 296, 375, 298, 375, 375, 375, 375, 291, - /* 2090 */ 375, 375, 375, 375, 296, 375, 298, 375, 375, 324, - /* 2100 */ 375, 375, 327, 328, 329, 330, 331, 332, 255, 334, - /* 2110 */ 324, 375, 375, 327, 328, 329, 330, 331, 332, 375, - /* 2120 */ 334, 375, 324, 375, 375, 327, 328, 329, 330, 331, - /* 2130 */ 332, 375, 334, 375, 375, 255, 283, 375, 375, 375, - /* 2140 */ 375, 375, 375, 375, 291, 375, 375, 375, 375, 296, - /* 2150 */ 375, 298, 375, 375, 375, 375, 375, 375, 375, 375, - /* 2160 */ 375, 375, 255, 283, 375, 375, 375, 375, 375, 375, - /* 2170 */ 375, 291, 375, 375, 375, 375, 296, 324, 298, 375, - /* 2180 */ 327, 328, 329, 330, 331, 332, 375, 334, 375, 255, - /* 2190 */ 283, 375, 375, 375, 375, 375, 375, 375, 291, 375, - /* 2200 */ 375, 375, 375, 296, 324, 298, 375, 327, 328, 329, - /* 2210 */ 330, 331, 332, 375, 334, 375, 375, 283, 375, 375, - /* 2220 */ 375, 375, 375, 375, 375, 291, 375, 375, 375, 375, - /* 2230 */ 296, 324, 298, 375, 327, 328, 329, 330, 331, 332, - /* 2240 */ 375, 334, 375, 255, 375, 375, 375, 375, 375, 375, - /* 2250 */ 375, 375, 255, 375, 375, 375, 375, 375, 324, 375, - /* 2260 */ 375, 327, 328, 329, 330, 331, 332, 375, 334, 375, - /* 2270 */ 255, 
283, 375, 375, 375, 375, 375, 375, 375, 291, - /* 2280 */ 283, 375, 375, 375, 296, 375, 298, 375, 291, 375, - /* 2290 */ 375, 375, 375, 296, 375, 298, 375, 375, 283, 375, - /* 2300 */ 375, 375, 375, 375, 375, 375, 291, 375, 375, 375, - /* 2310 */ 375, 296, 324, 298, 375, 327, 328, 329, 330, 331, - /* 2320 */ 332, 324, 334, 255, 327, 328, 329, 330, 331, 332, - /* 2330 */ 375, 334, 375, 375, 375, 375, 375, 255, 375, 324, - /* 2340 */ 375, 375, 327, 328, 329, 330, 331, 332, 375, 334, - /* 2350 */ 375, 283, 375, 375, 375, 375, 375, 375, 375, 291, - /* 2360 */ 375, 375, 375, 375, 296, 283, 298, 375, 375, 375, - /* 2370 */ 375, 375, 375, 291, 375, 375, 375, 375, 296, 375, - /* 2380 */ 298, 375, 375, 375, 375, 375, 375, 375, 375, 375, - /* 2390 */ 375, 255, 324, 375, 375, 327, 328, 329, 330, 331, - /* 2400 */ 332, 375, 334, 375, 375, 375, 324, 375, 375, 327, - /* 2410 */ 328, 329, 330, 331, 332, 375, 334, 375, 375, 283, - /* 2420 */ 375, 375, 375, 375, 375, 375, 375, 291, 375, 375, - /* 2430 */ 375, 375, 296, 375, 298, 375, 375, 375, 375, 375, - /* 2440 */ 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, - /* 2450 */ 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, - /* 2460 */ 324, 375, 375, 327, 328, 329, 330, 331, 332, 375, - /* 2470 */ 334, + /* 970 */ 208, 296, 283, 82, 296, 43, 83, 82, 242, 193, + /* 980 */ 291, 319, 83, 2, 0, 296, 0, 298, 150, 8, + /* 990 */ 9, 264, 43, 12, 13, 14, 15, 16, 43, 43, + /* 1000 */ 43, 312, 163, 43, 165, 43, 22, 165, 22, 43, + /* 1010 */ 360, 120, 323, 43, 94, 83, 327, 328, 329, 330, + /* 1020 */ 331, 332, 255, 334, 0, 283, 337, 264, 189, 190, + /* 1030 */ 341, 342, 83, 113, 114, 115, 116, 117, 83, 83, + /* 1040 */ 83, 43, 353, 83, 43, 83, 155, 156, 157, 83, + /* 1050 */ 283, 160, 43, 83, 82, 366, 35, 166, 291, 370, + /* 1060 */ 262, 295, 43, 296, 92, 298, 326, 351, 344, 189, + /* 1070 */ 179, 367, 48, 182, 367, 184, 185, 186, 187, 188, + /* 1080 */ 367, 83, 354, 255, 83, 223, 65, 20, 263, 321, + /* 1090 */ 323, 47, 83, 320, 327, 328, 329, 330, 331, 332, + /* 1100 */ 255, 334, 83, 35, 337, 269, 161, 314, 341, 342, + /* 1110 */ 343, 283, 221, 263, 263, 42, 303, 301, 145, 291, + /* 1120 */ 301, 263, 355, 20, 296, 257, 298, 257, 283, 20, + /* 1130 */ 363, 267, 318, 267, 20, 298, 291, 311, 20, 267, + /* 1140 */ 313, 296, 311, 298, 267, 20, 267, 304, 267, 263, + /* 1150 */ 267, 323, 283, 255, 257, 327, 328, 329, 330, 331, + /* 1160 */ 332, 283, 334, 171, 318, 337, 307, 283, 323, 341, + /* 1170 */ 342, 343, 327, 328, 329, 330, 331, 332, 257, 334, + /* 1180 */ 263, 283, 337, 283, 283, 283, 341, 342, 343, 291, + /* 1190 */ 283, 363, 283, 283, 296, 283, 298, 352, 283, 12, + /* 1200 */ 13, 296, 265, 265, 317, 263, 298, 263, 255, 22, + /* 1210 */ 228, 265, 296, 147, 296, 311, 305, 291, 296, 296, + /* 1220 */ 33, 323, 35, 296, 265, 327, 328, 329, 330, 331, + /* 1230 */ 332, 307, 334, 304, 265, 337, 283, 279, 291, 341, + /* 1240 */ 342, 343, 20, 56, 291, 326, 296, 229, 359, 296, + /* 1250 */ 352, 298, 65, 359, 362, 307, 154, 296, 307, 296, + /* 1260 */ 296, 296, 235, 237, 236, 224, 220, 291, 20, 239, + /* 1270 */ 241, 82, 255, 359, 361, 321, 323, 325, 244, 82, + /* 1280 */ 327, 328, 329, 330, 331, 332, 255, 334, 358, 356, + /* 1290 */ 337, 104, 357, 368, 341, 342, 343, 369, 255, 287, + /* 1300 */ 283, 296, 263, 369, 36, 352, 273, 368, 291, 369, + /* 1310 */ 374, 368, 265, 296, 283, 298, 258, 340, 257, 315, + /* 1320 */ 310, 266, 291, 253, 0, 277, 283, 296, 277, 298, + /* 1330 */ 277, 0, 42, 0, 291, 73, 0, 35, 183, 296, + /* 1340 */ 323, 298, 35, 312, 327, 328, 329, 330, 
331, 332, + /* 1350 */ 163, 334, 165, 35, 323, 312, 35, 183, 327, 328, + /* 1360 */ 329, 330, 331, 332, 0, 334, 323, 255, 183, 35, + /* 1370 */ 327, 328, 329, 330, 331, 332, 189, 334, 35, 0, + /* 1380 */ 0, 183, 365, 35, 353, 0, 0, 200, 201, 202, + /* 1390 */ 203, 204, 205, 206, 22, 283, 353, 366, 35, 0, + /* 1400 */ 168, 370, 82, 291, 167, 165, 163, 0, 296, 366, + /* 1410 */ 298, 0, 159, 370, 158, 0, 0, 46, 0, 0, + /* 1420 */ 0, 142, 0, 0, 255, 0, 0, 0, 137, 35, + /* 1430 */ 0, 137, 0, 0, 0, 323, 0, 0, 0, 327, + /* 1440 */ 328, 329, 330, 331, 332, 255, 334, 0, 0, 337, + /* 1450 */ 0, 0, 283, 341, 342, 0, 0, 0, 0, 42, + /* 1460 */ 291, 0, 0, 0, 0, 296, 0, 298, 0, 0, + /* 1470 */ 0, 22, 0, 283, 43, 46, 0, 42, 39, 14, + /* 1480 */ 40, 291, 0, 0, 0, 14, 296, 46, 298, 0, + /* 1490 */ 0, 39, 323, 39, 154, 0, 327, 328, 329, 330, + /* 1500 */ 331, 332, 0, 334, 0, 255, 337, 0, 0, 0, + /* 1510 */ 341, 342, 35, 323, 39, 0, 0, 327, 328, 329, + /* 1520 */ 330, 331, 332, 333, 334, 335, 336, 60, 47, 35, + /* 1530 */ 39, 35, 47, 283, 47, 0, 39, 39, 35, 47, + /* 1540 */ 0, 291, 0, 0, 0, 35, 296, 22, 298, 89, + /* 1550 */ 91, 0, 35, 35, 35, 35, 43, 43, 35, 35, + /* 1560 */ 0, 255, 35, 22, 0, 0, 35, 22, 22, 0, + /* 1570 */ 49, 35, 0, 323, 35, 0, 22, 327, 328, 329, + /* 1580 */ 330, 331, 332, 255, 334, 0, 20, 35, 0, 283, + /* 1590 */ 169, 0, 150, 22, 150, 0, 150, 291, 0, 0, + /* 1600 */ 147, 0, 296, 0, 298, 83, 178, 0, 82, 39, + /* 1610 */ 82, 283, 82, 46, 82, 225, 288, 43, 22, 291, + /* 1620 */ 83, 371, 372, 43, 296, 152, 298, 148, 225, 323, + /* 1630 */ 92, 82, 146, 327, 328, 329, 330, 331, 332, 82, + /* 1640 */ 334, 83, 83, 337, 255, 82, 46, 43, 342, 225, + /* 1650 */ 82, 323, 46, 43, 46, 327, 328, 329, 330, 331, + /* 1660 */ 332, 82, 334, 82, 255, 83, 83, 83, 82, 43, + /* 1670 */ 83, 46, 283, 46, 43, 83, 83, 288, 35, 2, + /* 1680 */ 291, 35, 35, 35, 35, 296, 35, 298, 189, 22, + /* 1690 */ 93, 82, 283, 43, 83, 82, 82, 219, 83, 83, + /* 1700 */ 291, 46, 46, 82, 82, 296, 82, 298, 35, 35, + /* 1710 */ 35, 83, 323, 83, 82, 82, 327, 328, 329, 330, + /* 1720 */ 331, 332, 83, 334, 191, 35, 35, 82, 35, 255, + /* 1730 */ 22, 94, 323, 35, 22, 83, 327, 328, 329, 330, + /* 1740 */ 331, 332, 82, 334, 83, 82, 106, 106, 255, 60, + /* 1750 */ 82, 82, 82, 43, 59, 35, 80, 283, 43, 106, + /* 1760 */ 106, 65, 288, 35, 35, 291, 35, 35, 35, 22, + /* 1770 */ 296, 35, 298, 35, 65, 35, 283, 35, 35, 35, + /* 1780 */ 35, 372, 0, 35, 291, 35, 35, 0, 47, 296, + /* 1790 */ 35, 298, 39, 47, 0, 39, 35, 323, 39, 47, + /* 1800 */ 0, 327, 328, 329, 330, 331, 332, 35, 334, 47, + /* 1810 */ 39, 0, 35, 35, 0, 22, 323, 255, 21, 21, + /* 1820 */ 327, 328, 329, 330, 331, 332, 22, 334, 22, 336, + /* 1830 */ 20, 375, 375, 255, 375, 375, 375, 375, 375, 375, + /* 1840 */ 375, 375, 375, 375, 375, 283, 375, 375, 375, 375, + /* 1850 */ 288, 375, 375, 291, 375, 375, 375, 375, 296, 375, + /* 1860 */ 298, 283, 375, 375, 375, 375, 288, 375, 375, 291, + /* 1870 */ 375, 375, 375, 375, 296, 255, 298, 375, 375, 375, + /* 1880 */ 375, 375, 375, 375, 375, 323, 375, 375, 375, 327, + /* 1890 */ 328, 329, 330, 331, 332, 375, 334, 375, 375, 255, + /* 1900 */ 375, 323, 375, 283, 375, 327, 328, 329, 330, 331, + /* 1910 */ 332, 291, 334, 375, 375, 375, 296, 375, 298, 375, + /* 1920 */ 375, 375, 375, 375, 375, 375, 375, 283, 375, 375, + /* 1930 */ 375, 375, 375, 375, 375, 291, 375, 375, 375, 375, + /* 1940 */ 296, 255, 298, 323, 375, 375, 375, 327, 328, 329, + /* 1950 */ 330, 331, 332, 375, 334, 375, 375, 375, 375, 375, + /* 1960 */ 375, 255, 375, 375, 375, 375, 375, 
323, 375, 283, + /* 1970 */ 375, 327, 328, 329, 330, 331, 332, 291, 334, 375, + /* 1980 */ 375, 375, 296, 375, 298, 375, 375, 375, 375, 283, + /* 1990 */ 375, 375, 375, 375, 375, 375, 375, 291, 375, 375, + /* 2000 */ 375, 375, 296, 375, 298, 375, 375, 375, 375, 323, + /* 2010 */ 375, 375, 375, 327, 328, 329, 330, 331, 332, 375, + /* 2020 */ 334, 255, 375, 375, 375, 375, 375, 375, 375, 323, + /* 2030 */ 375, 375, 375, 327, 328, 329, 330, 331, 332, 375, + /* 2040 */ 334, 375, 375, 375, 375, 255, 375, 375, 375, 283, + /* 2050 */ 375, 375, 375, 375, 375, 375, 375, 291, 375, 375, + /* 2060 */ 375, 375, 296, 375, 298, 375, 375, 375, 375, 375, + /* 2070 */ 375, 375, 375, 283, 375, 375, 375, 375, 375, 375, + /* 2080 */ 375, 291, 375, 375, 375, 375, 296, 375, 298, 323, + /* 2090 */ 375, 375, 375, 327, 328, 329, 330, 331, 332, 375, + /* 2100 */ 334, 255, 375, 375, 375, 375, 375, 375, 375, 375, + /* 2110 */ 375, 375, 375, 323, 375, 375, 375, 327, 328, 329, + /* 2120 */ 330, 331, 332, 375, 334, 255, 375, 375, 375, 283, + /* 2130 */ 375, 375, 375, 375, 375, 375, 375, 291, 375, 375, + /* 2140 */ 375, 375, 296, 375, 298, 375, 375, 375, 375, 375, + /* 2150 */ 375, 375, 375, 283, 375, 375, 375, 375, 375, 375, + /* 2160 */ 375, 291, 375, 375, 375, 375, 296, 255, 298, 323, + /* 2170 */ 375, 375, 375, 327, 328, 329, 330, 331, 332, 375, + /* 2180 */ 334, 375, 375, 255, 375, 375, 375, 375, 375, 375, + /* 2190 */ 375, 375, 375, 323, 375, 283, 375, 327, 328, 329, + /* 2200 */ 330, 331, 332, 291, 334, 375, 375, 375, 296, 375, + /* 2210 */ 298, 283, 375, 375, 375, 375, 375, 375, 375, 291, + /* 2220 */ 375, 375, 375, 375, 296, 255, 298, 375, 375, 375, + /* 2230 */ 375, 375, 375, 375, 375, 323, 375, 375, 375, 327, + /* 2240 */ 328, 329, 330, 331, 332, 255, 334, 375, 375, 375, + /* 2250 */ 375, 323, 375, 283, 375, 327, 328, 329, 330, 331, + /* 2260 */ 332, 291, 334, 375, 375, 375, 296, 375, 298, 375, + /* 2270 */ 375, 375, 375, 283, 375, 375, 375, 375, 375, 375, + /* 2280 */ 375, 291, 375, 375, 375, 375, 296, 375, 298, 375, + /* 2290 */ 375, 375, 375, 323, 375, 375, 375, 327, 328, 329, + /* 2300 */ 330, 331, 332, 375, 334, 255, 375, 375, 375, 375, + /* 2310 */ 375, 375, 375, 323, 375, 375, 375, 327, 328, 329, + /* 2320 */ 330, 331, 332, 375, 334, 375, 375, 375, 375, 255, + /* 2330 */ 375, 375, 375, 283, 375, 375, 375, 375, 375, 375, + /* 2340 */ 375, 291, 375, 375, 375, 375, 296, 375, 298, 375, + /* 2350 */ 375, 375, 375, 375, 375, 375, 375, 283, 375, 375, + /* 2360 */ 375, 375, 375, 375, 375, 291, 375, 375, 375, 375, + /* 2370 */ 296, 375, 298, 323, 375, 375, 375, 327, 328, 329, + /* 2380 */ 330, 331, 332, 375, 334, 255, 375, 375, 375, 375, + /* 2390 */ 375, 375, 375, 375, 375, 375, 375, 323, 375, 375, + /* 2400 */ 375, 327, 328, 329, 330, 331, 332, 375, 334, 255, + /* 2410 */ 375, 375, 375, 283, 375, 375, 375, 375, 375, 375, + /* 2420 */ 375, 291, 375, 375, 375, 375, 296, 375, 298, 375, + /* 2430 */ 375, 375, 375, 375, 375, 375, 375, 283, 375, 375, + /* 2440 */ 375, 375, 375, 375, 375, 291, 375, 375, 375, 375, + /* 2450 */ 296, 255, 298, 323, 375, 375, 375, 327, 328, 329, + /* 2460 */ 330, 331, 332, 375, 334, 375, 375, 255, 375, 375, + /* 2470 */ 375, 375, 375, 375, 375, 375, 375, 323, 375, 283, + /* 2480 */ 375, 327, 328, 329, 330, 331, 332, 291, 334, 375, + /* 2490 */ 375, 375, 296, 375, 298, 283, 375, 375, 375, 375, + /* 2500 */ 375, 375, 375, 291, 375, 375, 375, 375, 296, 255, + /* 2510 */ 298, 375, 375, 375, 375, 375, 375, 375, 375, 323, + /* 2520 */ 375, 375, 375, 327, 328, 329, 330, 331, 332, 255, + /* 2530 
*/ 334, 375, 375, 375, 375, 323, 375, 283, 375, 327, + /* 2540 */ 328, 329, 330, 331, 332, 291, 334, 375, 375, 375, + /* 2550 */ 296, 375, 298, 375, 375, 375, 375, 283, 375, 375, + /* 2560 */ 375, 375, 375, 375, 375, 291, 375, 375, 375, 375, + /* 2570 */ 296, 375, 298, 375, 375, 375, 375, 323, 375, 375, + /* 2580 */ 375, 327, 328, 329, 330, 331, 332, 375, 334, 375, + /* 2590 */ 375, 375, 375, 375, 375, 375, 375, 323, 375, 375, + /* 2600 */ 375, 327, 328, 329, 330, 331, 332, 375, 334, }; #define YY_SHIFT_COUNT (652) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1821) +#define YY_SHIFT_MAX (1814) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 891, 0, 0, 57, 57, 254, 254, 254, 311, 311, /* 10 */ 254, 254, 508, 565, 762, 565, 565, 565, 565, 565, /* 20 */ 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, /* 30 */ 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, - /* 40 */ 565, 565, 302, 302, 85, 85, 85, 1224, 1224, 1224, - /* 50 */ 1224, 54, 86, 202, 154, 154, 53, 53, 56, 190, - /* 60 */ 202, 202, 154, 154, 154, 154, 154, 154, 154, 154, - /* 70 */ 15, 154, 154, 154, 171, 214, 298, 154, 154, 298, - /* 80 */ 154, 298, 298, 298, 154, 274, 758, 223, 477, 477, - /* 90 */ 267, 409, 755, 755, 755, 755, 755, 755, 755, 755, - /* 100 */ 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, - /* 110 */ 755, 66, 190, 209, 209, 38, 433, 457, 457, 457, - /* 120 */ 123, 433, 318, 214, 31, 31, 298, 298, 423, 423, - /* 130 */ 306, 429, 252, 252, 252, 252, 252, 252, 252, 131, - /* 140 */ 21, 14, 374, 233, 71, 388, 511, 88, 199, 315, - /* 150 */ 770, 512, 546, 610, 784, 610, 819, 665, 665, 665, - /* 160 */ 792, 617, 863, 1072, 1060, 1089, 966, 1072, 1072, 1105, - /* 170 */ 1004, 1004, 1072, 1134, 1134, 1137, 15, 214, 15, 1142, - /* 180 */ 1145, 15, 1142, 15, 1147, 15, 15, 1072, 15, 1134, - /* 190 */ 298, 298, 298, 298, 298, 298, 298, 298, 298, 298, - /* 200 */ 298, 1072, 1134, 423, 1137, 274, 1032, 214, 274, 1072, - /* 210 */ 1072, 1142, 274, 1199, 423, 992, 1002, 423, 992, 1002, - /* 220 */ 423, 423, 298, 1008, 1091, 992, 1007, 1013, 1028, 863, - /* 230 */ 1033, 318, 1238, 1020, 1027, 1016, 1020, 1027, 1020, 1027, - /* 240 */ 1182, 1002, 423, 423, 423, 423, 423, 1002, 423, 1164, - /* 250 */ 318, 1147, 274, 306, 274, 318, 1240, 423, 429, 1072, - /* 260 */ 274, 1295, 1134, 2471, 2471, 2471, 2471, 2471, 2471, 2471, - /* 270 */ 334, 502, 717, 96, 412, 580, 648, 839, 855, 1019, - /* 280 */ 926, 980, 980, 980, 980, 980, 980, 980, 980, 431, - /* 290 */ 685, 226, 365, 365, 425, 485, 161, 50, 249, 570, - /* 300 */ 328, 355, 355, 355, 355, 699, 739, 956, 746, 818, - /* 310 */ 835, 837, 900, 981, 1023, 921, 727, 967, 970, 929, - /* 320 */ 857, 803, 867, 1006, 335, 1012, 960, 1021, 1025, 1031, - /* 330 */ 1045, 1051, 972, 973, 1059, 1090, 1097, 1098, 1100, 1101, - /* 340 */ 974, 1026, 1036, 1339, 1344, 1303, 1346, 1274, 1348, 1314, - /* 350 */ 1167, 1317, 1318, 1319, 1173, 1357, 1323, 1324, 1177, 1361, - /* 360 */ 1180, 1364, 1330, 1367, 1353, 1368, 1342, 1378, 1297, 1212, - /* 370 */ 1214, 1217, 1220, 1384, 1385, 1227, 1230, 1391, 1392, 1347, - /* 380 */ 1394, 1396, 1397, 1257, 1400, 1401, 1402, 1404, 1406, 1270, - /* 390 */ 1374, 1410, 1277, 1411, 1415, 1416, 1417, 1418, 1419, 1421, - /* 400 */ 1422, 1423, 1432, 1440, 1442, 1443, 1444, 1403, 1446, 1447, - /* 410 */ 1448, 1449, 1450, 1451, 1430, 1453, 1454, 1462, 1463, 1464, - /* 420 */ 1465, 1414, 1420, 1424, 1452, 1412, 1457, 1426, 1468, 1435, - /* 430 */ 1431, 1478, 1479, 1480, 1445, 1327, 1482, 1485, 1486, 1427, - /* 440 */ 1488, 1489, 1455, 1466, 1460, 
1492, 1459, 1467, 1461, 1493, - /* 450 */ 1469, 1470, 1472, 1495, 1481, 1474, 1483, 1497, 1501, 1503, - /* 460 */ 1523, 1366, 1436, 1491, 1511, 1535, 1504, 1505, 1508, 1509, - /* 470 */ 1502, 1506, 1512, 1513, 1515, 1538, 1529, 1552, 1531, 1507, - /* 480 */ 1555, 1536, 1522, 1560, 1527, 1563, 1530, 1566, 1545, 1548, - /* 490 */ 1569, 1425, 1539, 1570, 1407, 1549, 1428, 1433, 1573, 1581, - /* 500 */ 1434, 1475, 1582, 1583, 1585, 1543, 1363, 1518, 1520, 1524, - /* 510 */ 1521, 1558, 1533, 1525, 1532, 1537, 1541, 1559, 1574, 1575, - /* 520 */ 1540, 1586, 1408, 1551, 1553, 1580, 1409, 1588, 1589, 1554, - /* 530 */ 1595, 1458, 1556, 1605, 1606, 1607, 1611, 1612, 1613, 1556, - /* 540 */ 1643, 1471, 1608, 1567, 1571, 1572, 1603, 1577, 1587, 1621, - /* 550 */ 1630, 1484, 1590, 1591, 1593, 1596, 1499, 1656, 1598, 1544, - /* 560 */ 1609, 1657, 1631, 1547, 1614, 1597, 1627, 1635, 1616, 1602, - /* 570 */ 1617, 1665, 1618, 1601, 1619, 1653, 1668, 1622, 1623, 1670, - /* 580 */ 1625, 1626, 1673, 1628, 1638, 1678, 1632, 1641, 1690, 1652, - /* 590 */ 1629, 1634, 1636, 1637, 1679, 1642, 1655, 1659, 1703, 1662, - /* 600 */ 1702, 1702, 1724, 1688, 1692, 1714, 1685, 1675, 1713, 1722, - /* 610 */ 1723, 1726, 1727, 1728, 1737, 1729, 1730, 1704, 1502, 1732, - /* 620 */ 1506, 1735, 1741, 1742, 1743, 1744, 1746, 1772, 1747, 1745, - /* 630 */ 1750, 1791, 1758, 1748, 1755, 1796, 1763, 1752, 1761, 1803, - /* 640 */ 1777, 1767, 1779, 1816, 1784, 1785, 1821, 1800, 1802, 1804, - /* 650 */ 1805, 1807, 1810, + /* 40 */ 565, 565, 565, 64, 64, 29, 29, 29, 1187, 1187, + /* 50 */ 1187, 54, 95, 247, 40, 40, 165, 165, 317, 106, + /* 60 */ 247, 247, 40, 40, 40, 40, 40, 40, 40, 40, + /* 70 */ 6, 40, 40, 40, 48, 127, 40, 40, 127, 152, + /* 80 */ 40, 127, 127, 127, 40, 122, 758, 223, 477, 477, + /* 90 */ 13, 409, 839, 839, 839, 839, 839, 839, 839, 839, + /* 100 */ 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, + /* 110 */ 839, 196, 106, 325, 325, 75, 80, 365, 633, 633, + /* 120 */ 633, 80, 209, 48, 273, 273, 127, 127, 277, 277, + /* 130 */ 321, 380, 252, 252, 252, 252, 252, 252, 252, 131, + /* 140 */ 717, 533, 374, 233, 138, 67, 143, 124, 199, 476, + /* 150 */ 596, 1, 776, 603, 570, 603, 783, 581, 581, 581, + /* 160 */ 619, 786, 862, 1067, 1044, 1068, 945, 1067, 1067, 1073, + /* 170 */ 973, 973, 1067, 1103, 1103, 1109, 6, 48, 6, 1114, + /* 180 */ 1118, 6, 1114, 6, 1125, 6, 6, 1067, 6, 1103, + /* 190 */ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, + /* 200 */ 127, 1067, 1103, 277, 1109, 122, 992, 48, 122, 1067, + /* 210 */ 1067, 1114, 122, 982, 277, 277, 277, 277, 982, 277, + /* 220 */ 1066, 209, 1125, 122, 321, 122, 209, 1222, 277, 1018, + /* 230 */ 982, 277, 277, 1018, 982, 277, 277, 127, 1027, 1102, + /* 240 */ 1018, 1026, 1028, 1041, 862, 1046, 209, 1248, 1029, 1030, + /* 250 */ 1034, 1029, 1030, 1029, 1030, 1189, 1197, 277, 380, 1067, + /* 260 */ 122, 1268, 1103, 2609, 2609, 2609, 2609, 2609, 2609, 2609, + /* 270 */ 334, 502, 268, 520, 412, 623, 648, 903, 981, 838, + /* 280 */ 710, 431, 855, 855, 855, 855, 855, 855, 855, 855, + /* 290 */ 920, 698, 14, 14, 164, 439, 25, 147, 699, 564, + /* 300 */ 486, 656, 364, 364, 364, 364, 756, 867, 769, 835, + /* 310 */ 848, 849, 931, 984, 986, 443, 679, 893, 899, 932, + /* 320 */ 949, 955, 956, 781, 842, 957, 906, 804, 638, 736, + /* 330 */ 960, 880, 962, 895, 966, 970, 998, 1001, 1009, 1019, + /* 340 */ 972, 1021, 1024, 1324, 1331, 1290, 1333, 1262, 1336, 1302, + /* 350 */ 1155, 1307, 1318, 1321, 1174, 1364, 1334, 1343, 1185, 1379, + /* 360 */ 1198, 1380, 1348, 1385, 1372, 1386, 
1363, 1399, 1320, 1232, + /* 370 */ 1237, 1240, 1243, 1407, 1411, 1253, 1256, 1415, 1416, 1371, + /* 380 */ 1418, 1419, 1420, 1279, 1422, 1423, 1425, 1426, 1427, 1291, + /* 390 */ 1394, 1430, 1294, 1432, 1433, 1434, 1436, 1437, 1438, 1447, + /* 400 */ 1448, 1450, 1451, 1455, 1456, 1457, 1458, 1417, 1461, 1462, + /* 410 */ 1463, 1464, 1466, 1468, 1449, 1469, 1470, 1472, 1482, 1483, + /* 420 */ 1484, 1435, 1439, 1431, 1465, 1429, 1471, 1441, 1476, 1440, + /* 430 */ 1452, 1489, 1490, 1502, 1454, 1340, 1495, 1504, 1507, 1467, + /* 440 */ 1508, 1509, 1477, 1481, 1475, 1515, 1494, 1485, 1491, 1516, + /* 450 */ 1496, 1487, 1497, 1535, 1503, 1492, 1498, 1540, 1542, 1543, + /* 460 */ 1544, 1459, 1460, 1510, 1525, 1551, 1517, 1518, 1519, 1520, + /* 470 */ 1513, 1514, 1523, 1524, 1527, 1560, 1541, 1564, 1545, 1521, + /* 480 */ 1565, 1546, 1531, 1569, 1536, 1572, 1539, 1575, 1554, 1566, + /* 490 */ 1585, 1442, 1552, 1588, 1421, 1571, 1444, 1453, 1591, 1595, + /* 500 */ 1446, 1473, 1598, 1599, 1601, 1526, 1522, 1428, 1603, 1528, + /* 510 */ 1479, 1530, 1607, 1570, 1486, 1532, 1538, 1567, 1574, 1390, + /* 520 */ 1549, 1537, 1557, 1558, 1559, 1563, 1596, 1580, 1582, 1568, + /* 530 */ 1579, 1581, 1583, 1604, 1600, 1606, 1586, 1610, 1403, 1584, + /* 540 */ 1587, 1608, 1478, 1626, 1625, 1627, 1592, 1631, 1424, 1593, + /* 550 */ 1643, 1646, 1647, 1648, 1649, 1651, 1593, 1677, 1499, 1650, + /* 560 */ 1609, 1611, 1613, 1615, 1614, 1616, 1655, 1621, 1622, 1656, + /* 570 */ 1667, 1533, 1624, 1597, 1628, 1673, 1674, 1632, 1630, 1675, + /* 580 */ 1633, 1639, 1690, 1645, 1652, 1691, 1660, 1661, 1693, 1663, + /* 590 */ 1640, 1641, 1653, 1654, 1708, 1637, 1668, 1669, 1698, 1670, + /* 600 */ 1710, 1710, 1712, 1689, 1695, 1720, 1696, 1676, 1715, 1728, + /* 610 */ 1729, 1731, 1732, 1733, 1747, 1736, 1738, 1709, 1513, 1740, + /* 620 */ 1514, 1742, 1743, 1744, 1745, 1748, 1750, 1782, 1751, 1741, + /* 630 */ 1753, 1787, 1755, 1746, 1756, 1794, 1761, 1752, 1759, 1800, + /* 640 */ 1772, 1762, 1771, 1811, 1777, 1778, 1814, 1793, 1797, 1804, + /* 650 */ 1806, 1798, 1810, }; #define YY_REDUCE_COUNT (269) -#define YY_REDUCE_MIN (-329) -#define YY_REDUCE_MAX (2136) +#define YY_REDUCE_MIN (-349) +#define YY_REDUCE_MAX (2274) static const short yy_reduce_ofst[] = { - /* 0 */ -246, 688, 769, -197, 789, 881, 944, 971, 1042, 1107, - /* 10 */ 1178, 1200, 1263, 348, 1281, 1334, 1388, 1399, 1456, 1477, - /* 20 */ 1519, 1542, 1599, 1610, 1624, 1666, 1680, 1691, 1733, 1775, - /* 30 */ 1786, 1798, 1853, 1880, 1907, 1934, 1988, 1997, 2015, 2068, - /* 40 */ 2082, 2136, 288, 490, -236, -122, -20, -280, -262, -260, - /* 50 */ -55, -285, -80, -41, 206, 289, -259, -252, -329, -279, - /* 60 */ -126, -49, 268, 341, 342, 351, 459, 460, 461, 517, - /* 70 */ -250, 520, 543, 559, -251, -181, -104, 642, 645, 270, - /* 80 */ 709, 292, 330, 299, 711, 332, -234, -190, -190, -190, - /* 90 */ -99, 146, 120, 244, 354, 356, 385, 444, 532, 549, - /* 100 */ 569, 576, 621, 651, 657, 682, 691, 694, 695, 725, - /* 110 */ 742, 20, 229, -101, 407, 11, 264, 464, 467, 500, - /* 120 */ 80, 456, 83, 7, -307, -97, 189, 325, 149, 436, - /* 130 */ 442, 640, -273, 266, 297, 443, 452, 472, 509, 259, - /* 140 */ 563, 554, 469, 494, 609, 595, 622, 715, 715, 773, - /* 150 */ 764, 745, 733, 712, 712, 712, 718, 702, 704, 714, - /* 160 */ 729, 715, 768, 842, 794, 856, 814, 882, 883, 845, - /* 170 */ 849, 850, 890, 898, 899, 840, 893, 860, 894, 851, - /* 180 */ 852, 901, 858, 904, 870, 908, 909, 915, 918, 925, - /* 190 */ 903, 905, 906, 907, 910, 911, 912, 913, 914, 917, - /* 200 */ 
919, 924, 934, 896, 879, 936, 886, 916, 941, 953, - /* 210 */ 954, 895, 955, 902, 933, 866, 927, 935, 873, 931, - /* 220 */ 943, 945, 715, 885, 887, 892, 897, 920, 923, 928, - /* 230 */ 712, 965, 938, 922, 939, 930, 937, 942, 940, 947, - /* 240 */ 948, 958, 986, 987, 988, 994, 996, 963, 997, 989, - /* 250 */ 1029, 1014, 1052, 1037, 1054, 1030, 1039, 1034, 1056, 1061, - /* 260 */ 1062, 1074, 1077, 1035, 1024, 1064, 1065, 1066, 1070, 1084, + /* 0 */ -246, -200, 689, 767, 828, 845, 898, 953, 1031, 1043, + /* 10 */ 1112, 1169, 1190, 1250, 1306, 1328, 1389, 1017, 1409, 1474, + /* 20 */ 1493, 1562, 1578, 1620, 1644, 1686, 1706, 1766, 1790, 1846, + /* 30 */ 1870, 1912, 1928, 1970, 1990, 2050, 2074, 2130, 2154, 2196, + /* 40 */ 2212, 2254, 2274, -245, 490, -191, -120, -20, -271, -249, + /* 50 */ -235, -218, 222, 258, 41, 44, -259, -254, -349, -240, + /* 60 */ -277, -256, 46, 103, 206, 288, 298, 300, 319, 342, + /* 70 */ -257, 351, 355, 391, -64, -101, 405, 517, -11, -267, + /* 80 */ 526, 248, 360, 267, 557, 216, -252, -171, -171, -171, + /* 90 */ -176, -253, 47, 85, 120, 379, 389, 403, 425, 481, + /* 100 */ 532, 539, 549, 577, 579, 604, 607, 617, 651, 675, + /* 110 */ 678, -133, 87, -46, 45, -258, 203, 295, 93, 327, + /* 120 */ 402, 296, 376, 501, 305, 362, 550, 297, 78, 436, + /* 130 */ 458, 479, -283, 193, 253, 514, 573, 592, 661, 236, + /* 140 */ 530, 668, 576, 485, 727, 662, 650, 742, 742, 763, + /* 150 */ 798, 766, 740, 716, 716, 716, 724, 704, 707, 713, + /* 160 */ 728, 742, 768, 825, 773, 836, 793, 850, 851, 813, + /* 170 */ 816, 819, 858, 868, 870, 814, 864, 837, 866, 826, + /* 180 */ 827, 872, 831, 877, 843, 879, 881, 886, 883, 897, + /* 190 */ 869, 878, 884, 900, 901, 902, 907, 909, 910, 912, + /* 200 */ 915, 917, 921, 905, 846, 937, 887, 908, 938, 942, + /* 210 */ 944, 904, 946, 859, 916, 918, 922, 923, 924, 927, + /* 220 */ 911, 926, 929, 959, 958, 969, 947, 919, 950, 889, + /* 230 */ 948, 961, 963, 894, 951, 964, 965, 742, 892, 913, + /* 240 */ 914, 930, 935, 933, 954, 716, 976, 952, 928, 925, + /* 250 */ 936, 934, 939, 940, 943, 977, 1012, 1005, 1033, 1039, + /* 260 */ 1047, 1058, 1061, 1004, 1010, 1048, 1051, 1053, 1055, 1070, }; static const YYACTIONTYPE yy_default[] = { /* 0 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, @@ -832,8 +858,8 @@ static const YYACTIONTYPE yy_default[] = { /* 80 */ 1437, 1437, 1437, 1437, 1437, 1507, 1660, 1437, 1836, 1437, /* 90 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, /* 100 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, - /* 110 */ 1437, 1437, 1437, 1437, 1437, 1509, 1437, 1848, 1848, 1848, - /* 120 */ 1507, 1437, 1437, 1437, 1704, 1704, 1437, 1437, 1437, 1437, + /* 110 */ 1437, 1437, 1437, 1437, 1437, 1509, 1437, 1507, 1848, 1848, + /* 120 */ 1848, 1437, 1437, 1437, 1704, 1704, 1437, 1437, 1437, 1437, /* 130 */ 1603, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1696, /* 140 */ 1437, 1437, 1917, 1437, 1437, 1702, 1871, 1437, 1437, 1437, /* 150 */ 1437, 1556, 1863, 1840, 1854, 1841, 1838, 1902, 1902, 1902, @@ -842,19 +868,19 @@ static const YYACTIONTYPE yy_default[] = { /* 180 */ 1437, 1509, 1437, 1509, 1437, 1509, 1509, 1437, 1509, 1437, /* 190 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, /* 200 */ 1437, 1437, 1437, 1437, 1437, 1507, 1698, 1437, 1507, 1437, - /* 210 */ 1437, 1437, 1507, 1437, 1437, 1878, 1876, 1437, 1878, 1876, - /* 220 */ 1437, 1437, 1437, 1890, 1886, 1878, 1894, 1892, 1869, 1867, - /* 230 */ 1854, 1437, 1437, 1908, 1904, 1920, 1908, 1904, 1908, 1904, - /* 240 
*/ 1437, 1876, 1437, 1437, 1437, 1437, 1437, 1876, 1437, 1437, - /* 250 */ 1437, 1437, 1507, 1437, 1507, 1437, 1572, 1437, 1437, 1437, + /* 210 */ 1437, 1437, 1507, 1876, 1437, 1437, 1437, 1437, 1876, 1437, + /* 220 */ 1437, 1437, 1437, 1507, 1437, 1507, 1437, 1437, 1437, 1878, + /* 230 */ 1876, 1437, 1437, 1878, 1876, 1437, 1437, 1437, 1890, 1886, + /* 240 */ 1878, 1894, 1892, 1869, 1867, 1854, 1437, 1437, 1908, 1904, + /* 250 */ 1920, 1908, 1904, 1908, 1904, 1437, 1572, 1437, 1437, 1437, /* 260 */ 1507, 1469, 1437, 1690, 1704, 1606, 1606, 1606, 1510, 1442, /* 270 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, - /* 280 */ 1437, 1774, 1889, 1888, 1812, 1811, 1810, 1808, 1773, 1437, + /* 280 */ 1437, 1437, 1774, 1889, 1888, 1812, 1811, 1810, 1808, 1773, /* 290 */ 1437, 1568, 1772, 1771, 1437, 1437, 1437, 1437, 1437, 1437, - /* 300 */ 1437, 1765, 1766, 1764, 1763, 1437, 1437, 1437, 1437, 1437, - /* 310 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1837, - /* 320 */ 1437, 1905, 1909, 1437, 1437, 1437, 1748, 1437, 1437, 1437, - /* 330 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, + /* 300 */ 1437, 1437, 1765, 1766, 1764, 1763, 1437, 1437, 1437, 1437, + /* 310 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, + /* 320 */ 1437, 1437, 1437, 1437, 1437, 1437, 1837, 1437, 1905, 1909, + /* 330 */ 1437, 1437, 1437, 1748, 1437, 1437, 1437, 1437, 1437, 1437, /* 340 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, /* 350 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, /* 360 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, @@ -871,14 +897,14 @@ static const YYACTIONTYPE yy_default[] = { /* 470 */ 1537, 1536, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, /* 480 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, /* 490 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, - /* 500 */ 1437, 1437, 1437, 1437, 1437, 1870, 1437, 1437, 1437, 1437, - /* 510 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1748, - /* 520 */ 1437, 1887, 1437, 1847, 1843, 1437, 1437, 1839, 1437, 1437, - /* 530 */ 1903, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, - /* 540 */ 1832, 1437, 1805, 1437, 1437, 1437, 1437, 1437, 1437, 1437, - /* 550 */ 1437, 1759, 1437, 1437, 1437, 1437, 1437, 1708, 1437, 1437, - /* 560 */ 1437, 1437, 1437, 1437, 1437, 1437, 1747, 1437, 1790, 1437, - /* 570 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1600, 1437, 1437, + /* 500 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1708, 1437, + /* 510 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1870, 1437, + /* 520 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, + /* 530 */ 1437, 1437, 1437, 1437, 1437, 1748, 1437, 1887, 1437, 1847, + /* 540 */ 1843, 1437, 1437, 1839, 1747, 1437, 1437, 1903, 1437, 1437, + /* 550 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1832, 1437, 1805, + /* 560 */ 1790, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, + /* 570 */ 1437, 1759, 1437, 1437, 1437, 1437, 1437, 1600, 1437, 1437, /* 580 */ 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, 1437, /* 590 */ 1585, 1583, 1582, 1581, 1437, 1578, 1437, 1437, 1437, 1437, /* 600 */ 1609, 1608, 1437, 1437, 1437, 1437, 1437, 1437, 1529, 1437, @@ -1533,7 +1559,7 @@ static const char *const yyTokenName[] = { /* 285 */ "signed_literal", /* 286 */ "create_subtable_clause", /* 287 */ "specific_tags_opt", - /* 288 */ "literal_list", + /* 288 */ "expression_list", /* 289 */ "drop_table_clause", /* 290 */ "col_name_list", /* 
291 */ "table_name", @@ -1555,21 +1581,21 @@ static const char *const yyTokenName[] = { /* 307 */ "sliding_opt", /* 308 */ "sma_stream_opt", /* 309 */ "func", - /* 310 */ "expression_list", - /* 311 */ "stream_options", - /* 312 */ "topic_name", - /* 313 */ "query_expression", - /* 314 */ "cgroup_name", - /* 315 */ "analyze_opt", - /* 316 */ "explain_options", - /* 317 */ "agg_func_opt", - /* 318 */ "bufsize_opt", - /* 319 */ "stream_name", - /* 320 */ "into_opt", - /* 321 */ "dnode_list", - /* 322 */ "where_clause_opt", - /* 323 */ "signed", - /* 324 */ "literal_func", + /* 310 */ "stream_options", + /* 311 */ "topic_name", + /* 312 */ "query_expression", + /* 313 */ "cgroup_name", + /* 314 */ "analyze_opt", + /* 315 */ "explain_options", + /* 316 */ "agg_func_opt", + /* 317 */ "bufsize_opt", + /* 318 */ "stream_name", + /* 319 */ "into_opt", + /* 320 */ "dnode_list", + /* 321 */ "where_clause_opt", + /* 322 */ "signed", + /* 323 */ "literal_func", + /* 324 */ "literal_list", /* 325 */ "table_alias", /* 326 */ "column_alias", /* 327 */ "expression", @@ -1755,7 +1781,7 @@ static const char *const yyRuleName[] = { /* 125 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal", /* 126 */ "multi_create_clause ::= create_subtable_clause", /* 127 */ "multi_create_clause ::= multi_create_clause create_subtable_clause", - /* 128 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options", + /* 128 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP expression_list NK_RP table_options", /* 129 */ "multi_drop_clause ::= drop_table_clause", /* 130 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause", /* 131 */ "drop_table_clause ::= exists_opt full_table_name", @@ -2260,13 +2286,13 @@ static void yy_destructor( case 307: /* sliding_opt */ case 308: /* sma_stream_opt */ case 309: /* func */ - case 311: /* stream_options */ - case 313: /* query_expression */ - case 316: /* explain_options */ - case 320: /* into_opt */ - case 322: /* where_clause_opt */ - case 323: /* signed */ - case 324: /* literal_func */ + case 310: /* stream_options */ + case 312: /* query_expression */ + case 315: /* explain_options */ + case 319: /* into_opt */ + case 321: /* where_clause_opt */ + case 322: /* signed */ + case 323: /* literal_func */ case 327: /* expression */ case 328: /* pseudo_column */ case 329: /* column_reference */ @@ -2304,7 +2330,7 @@ static void yy_destructor( case 253: /* account_options */ case 254: /* alter_account_options */ case 256: /* alter_account_option */ - case 318: /* bufsize_opt */ + case 317: /* bufsize_opt */ { } @@ -2317,9 +2343,9 @@ static void yy_destructor( case 291: /* table_name */ case 298: /* function_name */ case 304: /* index_name */ - case 312: /* topic_name */ - case 314: /* cgroup_name */ - case 319: /* stream_name */ + case 311: /* topic_name */ + case 313: /* cgroup_name */ + case 318: /* stream_name */ case 325: /* table_alias */ case 326: /* column_alias */ case 332: /* star_func */ @@ -2343,8 +2369,8 @@ static void yy_destructor( break; case 265: /* not_exists_opt */ case 267: /* exists_opt */ - case 315: /* analyze_opt */ - case 317: /* agg_func_opt */ + case 314: /* analyze_opt */ + case 316: /* agg_func_opt */ case 354: /* set_quantifier_opt */ { @@ -2359,13 +2385,13 @@ static void yy_destructor( case 279: /* tags_def */ case 280: /* multi_drop_clause */ case 287: /* 
specific_tags_opt */ - case 288: /* literal_list */ + case 288: /* expression_list */ case 290: /* col_name_list */ case 293: /* duration_list */ case 294: /* rollup_func_list */ case 306: /* func_list */ - case 310: /* expression_list */ - case 321: /* dnode_list */ + case 320: /* dnode_list */ + case 324: /* literal_list */ case 333: /* star_func_para_list */ case 335: /* other_para_list */ case 355: /* select_list */ @@ -2837,7 +2863,7 @@ static const struct { { 281, -6 }, /* (125) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ { 278, -1 }, /* (126) multi_create_clause ::= create_subtable_clause */ { 278, -2 }, /* (127) multi_create_clause ::= multi_create_clause create_subtable_clause */ - { 286, -10 }, /* (128) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */ + { 286, -10 }, /* (128) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP expression_list NK_RP table_options */ { 280, -1 }, /* (129) multi_drop_clause ::= drop_table_clause */ { 280, -2 }, /* (130) multi_drop_clause ::= multi_drop_clause drop_table_clause */ { 289, -2 }, /* (131) drop_table_clause ::= exists_opt full_table_name */ @@ -2957,28 +2983,28 @@ static const struct { { 252, -2 }, /* (245) cmd ::= DESCRIBE full_table_name */ { 252, -3 }, /* (246) cmd ::= RESET QUERY CACHE */ { 252, -4 }, /* (247) cmd ::= EXPLAIN analyze_opt explain_options query_expression */ - { 315, 0 }, /* (248) analyze_opt ::= */ - { 315, -1 }, /* (249) analyze_opt ::= ANALYZE */ - { 316, 0 }, /* (250) explain_options ::= */ - { 316, -3 }, /* (251) explain_options ::= explain_options VERBOSE NK_BOOL */ - { 316, -3 }, /* (252) explain_options ::= explain_options RATIO NK_FLOAT */ + { 314, 0 }, /* (248) analyze_opt ::= */ + { 314, -1 }, /* (249) analyze_opt ::= ANALYZE */ + { 315, 0 }, /* (250) explain_options ::= */ + { 315, -3 }, /* (251) explain_options ::= explain_options VERBOSE NK_BOOL */ + { 315, -3 }, /* (252) explain_options ::= explain_options RATIO NK_FLOAT */ { 252, -6 }, /* (253) cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ { 252, -10 }, /* (254) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ { 252, -4 }, /* (255) cmd ::= DROP FUNCTION exists_opt function_name */ - { 317, 0 }, /* (256) agg_func_opt ::= */ - { 317, -1 }, /* (257) agg_func_opt ::= AGGREGATE */ - { 318, 0 }, /* (258) bufsize_opt ::= */ - { 318, -2 }, /* (259) bufsize_opt ::= BUFSIZE NK_INTEGER */ + { 316, 0 }, /* (256) agg_func_opt ::= */ + { 316, -1 }, /* (257) agg_func_opt ::= AGGREGATE */ + { 317, 0 }, /* (258) bufsize_opt ::= */ + { 317, -2 }, /* (259) bufsize_opt ::= BUFSIZE NK_INTEGER */ { 252, -8 }, /* (260) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ { 252, -4 }, /* (261) cmd ::= DROP STREAM exists_opt stream_name */ - { 320, 0 }, /* (262) into_opt ::= */ - { 320, -2 }, /* (263) into_opt ::= INTO full_table_name */ - { 311, 0 }, /* (264) stream_options ::= */ - { 311, -3 }, /* (265) stream_options ::= stream_options TRIGGER AT_ONCE */ - { 311, -3 }, /* (266) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ - { 311, -4 }, /* (267) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ - { 311, -3 }, /* (268) stream_options ::= stream_options WATERMARK duration_literal */ - { 311, -3 }, /* (269) stream_options ::= stream_options 
IGNORE EXPIRED */ + { 319, 0 }, /* (262) into_opt ::= */ + { 319, -2 }, /* (263) into_opt ::= INTO full_table_name */ + { 310, 0 }, /* (264) stream_options ::= */ + { 310, -3 }, /* (265) stream_options ::= stream_options TRIGGER AT_ONCE */ + { 310, -3 }, /* (266) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ + { 310, -4 }, /* (267) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ + { 310, -3 }, /* (268) stream_options ::= stream_options WATERMARK duration_literal */ + { 310, -3 }, /* (269) stream_options ::= stream_options IGNORE EXPIRED */ { 252, -3 }, /* (270) cmd ::= KILL CONNECTION NK_INTEGER */ { 252, -3 }, /* (271) cmd ::= KILL QUERY NK_STRING */ { 252, -3 }, /* (272) cmd ::= KILL TRANSACTION NK_INTEGER */ @@ -2986,8 +3012,8 @@ static const struct { { 252, -4 }, /* (274) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ { 252, -4 }, /* (275) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ { 252, -3 }, /* (276) cmd ::= SPLIT VGROUP NK_INTEGER */ - { 321, -2 }, /* (277) dnode_list ::= DNODE NK_INTEGER */ - { 321, -3 }, /* (278) dnode_list ::= dnode_list DNODE NK_INTEGER */ + { 320, -2 }, /* (277) dnode_list ::= DNODE NK_INTEGER */ + { 320, -3 }, /* (278) dnode_list ::= dnode_list DNODE NK_INTEGER */ { 252, -3 }, /* (279) cmd ::= SYNCDB db_name REPLICA */ { 252, -4 }, /* (280) cmd ::= DELETE FROM full_table_name where_clause_opt */ { 252, -1 }, /* (281) cmd ::= query_expression */ @@ -3000,12 +3026,12 @@ static const struct { { 255, -1 }, /* (288) literal ::= NULL */ { 255, -1 }, /* (289) literal ::= NK_QUESTION */ { 296, -1 }, /* (290) duration_literal ::= NK_VARIABLE */ - { 323, -1 }, /* (291) signed ::= NK_INTEGER */ - { 323, -2 }, /* (292) signed ::= NK_PLUS NK_INTEGER */ - { 323, -2 }, /* (293) signed ::= NK_MINUS NK_INTEGER */ - { 323, -1 }, /* (294) signed ::= NK_FLOAT */ - { 323, -2 }, /* (295) signed ::= NK_PLUS NK_FLOAT */ - { 323, -2 }, /* (296) signed ::= NK_MINUS NK_FLOAT */ + { 322, -1 }, /* (291) signed ::= NK_INTEGER */ + { 322, -2 }, /* (292) signed ::= NK_PLUS NK_INTEGER */ + { 322, -2 }, /* (293) signed ::= NK_MINUS NK_INTEGER */ + { 322, -1 }, /* (294) signed ::= NK_FLOAT */ + { 322, -2 }, /* (295) signed ::= NK_PLUS NK_FLOAT */ + { 322, -2 }, /* (296) signed ::= NK_MINUS NK_FLOAT */ { 285, -1 }, /* (297) signed_literal ::= signed */ { 285, -1 }, /* (298) signed_literal ::= NK_STRING */ { 285, -1 }, /* (299) signed_literal ::= NK_BOOL */ @@ -3013,8 +3039,8 @@ static const struct { { 285, -1 }, /* (301) signed_literal ::= duration_literal */ { 285, -1 }, /* (302) signed_literal ::= NULL */ { 285, -1 }, /* (303) signed_literal ::= literal_func */ - { 288, -1 }, /* (304) literal_list ::= signed_literal */ - { 288, -3 }, /* (305) literal_list ::= literal_list NK_COMMA signed_literal */ + { 324, -1 }, /* (304) literal_list ::= signed_literal */ + { 324, -3 }, /* (305) literal_list ::= literal_list NK_COMMA signed_literal */ { 263, -1 }, /* (306) db_name ::= NK_ID */ { 291, -1 }, /* (307) table_name ::= NK_ID */ { 283, -1 }, /* (308) column_name ::= NK_ID */ @@ -3023,9 +3049,9 @@ static const struct { { 326, -1 }, /* (311) column_alias ::= NK_ID */ { 257, -1 }, /* (312) user_name ::= NK_ID */ { 304, -1 }, /* (313) index_name ::= NK_ID */ - { 312, -1 }, /* (314) topic_name ::= NK_ID */ - { 319, -1 }, /* (315) stream_name ::= NK_ID */ - { 314, -1 }, /* (316) cgroup_name ::= NK_ID */ + { 311, -1 }, /* (314) topic_name ::= NK_ID */ + { 318, -1 }, /* (315) stream_name ::= NK_ID */ + { 313, -1 }, /* (316) cgroup_name ::= NK_ID */ { 327, -1 }, 
/* (317) expression ::= literal */ { 327, -1 }, /* (318) expression ::= pseudo_column */ { 327, -1 }, /* (319) expression ::= column_reference */ @@ -3042,8 +3068,8 @@ static const struct { { 327, -3 }, /* (330) expression ::= column_reference NK_ARROW NK_STRING */ { 327, -3 }, /* (331) expression ::= expression NK_BITAND expression */ { 327, -3 }, /* (332) expression ::= expression NK_BITOR expression */ - { 310, -1 }, /* (333) expression_list ::= expression */ - { 310, -3 }, /* (334) expression_list ::= expression_list NK_COMMA expression */ + { 288, -1 }, /* (333) expression_list ::= expression */ + { 288, -3 }, /* (334) expression_list ::= expression_list NK_COMMA expression */ { 329, -1 }, /* (335) column_reference ::= column_name */ { 329, -3 }, /* (336) column_reference ::= table_name NK_DOT column_name */ { 328, -1 }, /* (337) pseudo_column ::= ROWTS */ @@ -3058,8 +3084,8 @@ static const struct { { 330, -4 }, /* (346) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ { 330, -6 }, /* (347) function_expression ::= CAST NK_LP expression AS type_name NK_RP */ { 330, -1 }, /* (348) function_expression ::= literal_func */ - { 324, -3 }, /* (349) literal_func ::= noarg_func NK_LP NK_RP */ - { 324, -1 }, /* (350) literal_func ::= NOW */ + { 323, -3 }, /* (349) literal_func ::= noarg_func NK_LP NK_RP */ + { 323, -1 }, /* (350) literal_func ::= NOW */ { 334, -1 }, /* (351) noarg_func ::= NOW */ { 334, -1 }, /* (352) noarg_func ::= TODAY */ { 334, -1 }, /* (353) noarg_func ::= TIMEZONE */ @@ -3136,8 +3162,8 @@ static const struct { { 363, -2 }, /* (424) select_item ::= common_expression column_alias */ { 363, -3 }, /* (425) select_item ::= common_expression AS column_alias */ { 363, -3 }, /* (426) select_item ::= table_name NK_DOT NK_STAR */ - { 322, 0 }, /* (427) where_clause_opt ::= */ - { 322, -2 }, /* (428) where_clause_opt ::= WHERE search_condition */ + { 321, 0 }, /* (427) where_clause_opt ::= */ + { 321, -2 }, /* (428) where_clause_opt ::= WHERE search_condition */ { 356, 0 }, /* (429) partition_by_clause_opt ::= */ { 356, -3 }, /* (430) partition_by_clause_opt ::= PARTITION BY expression_list */ { 360, 0 }, /* (431) twindow_clause_opt ::= */ @@ -3165,7 +3191,7 @@ static const struct { { 357, -6 }, /* (453) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */ { 358, 0 }, /* (454) every_opt ::= */ { 358, -4 }, /* (455) every_opt ::= EVERY NK_LP duration_literal NK_RP */ - { 313, -4 }, /* (456) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + { 312, -4 }, /* (456) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ { 366, -1 }, /* (457) query_expression_body ::= query_primary */ { 366, -4 }, /* (458) query_expression_body ::= query_expression_body UNION ALL query_expression_body */ { 366, -3 }, /* (459) query_expression_body ::= query_expression_body UNION query_expression_body */ @@ -3660,10 +3686,12 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy737, yymsp[0].minor.yy212); } break; case 114: /* cmd ::= ALTER TABLE alter_table_clause */ - case 115: /* cmd ::= ALTER STABLE alter_table_clause */ yytestcase(yyruleno==115); case 281: /* cmd ::= query_expression */ yytestcase(yyruleno==281); { pCxt->pRootNode = yymsp[0].minor.yy212; } break; + case 115: /* cmd ::= ALTER STABLE alter_table_clause */ +{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy212); } + break; case 116: /* 
alter_table_clause ::= full_table_name alter_table_options */ { yylhsminor.yy212 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy212, yymsp[0].minor.yy212); } yymsp[-1].minor.yy212 = yylhsminor.yy212; @@ -3709,7 +3737,7 @@ static YYACTIONTYPE yy_reduce( { yylhsminor.yy424 = addNodeToList(pCxt, yymsp[-1].minor.yy424, yymsp[0].minor.yy212); } yymsp[-1].minor.yy424 = yylhsminor.yy424; break; - case 128: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */ + case 128: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP expression_list NK_RP table_options */ { yylhsminor.yy212 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy737, yymsp[-8].minor.yy212, yymsp[-6].minor.yy212, yymsp[-5].minor.yy424, yymsp[-2].minor.yy424, yymsp[0].minor.yy212); } yymsp[-9].minor.yy212 = yylhsminor.yy212; break; diff --git a/source/libs/parser/test/CMakeLists.txt b/source/libs/parser/test/CMakeLists.txt index 0e8adb978d..45431b69b7 100644 --- a/source/libs/parser/test/CMakeLists.txt +++ b/source/libs/parser/test/CMakeLists.txt @@ -1,32 +1,34 @@ MESSAGE(STATUS "build parser unit test") -# GoogleTest requires at least C++11 -SET(CMAKE_CXX_STANDARD 11) -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) +IF(NOT TD_DARWIN) + # GoogleTest requires at least C++11 + SET(CMAKE_CXX_STANDARD 11) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -ADD_EXECUTABLE(parserTest ${SOURCE_LIST}) + ADD_EXECUTABLE(parserTest ${SOURCE_LIST}) -TARGET_INCLUDE_DIRECTORIES( - parserTest - PUBLIC "${TD_SOURCE_DIR}/include/libs/parser/" - PRIVATE "${TD_SOURCE_DIR}/source/libs/parser/inc" -) - -TARGET_LINK_LIBRARIES( - parserTest - PUBLIC os util common nodes parser catalog transport gtest function planner qcom -) - -if(${BUILD_WINGETOPT}) - target_include_directories( - parserTest - PUBLIC "${TD_SOURCE_DIR}/contrib/wingetopt/src" + TARGET_INCLUDE_DIRECTORIES( + parserTest + PUBLIC "${TD_SOURCE_DIR}/include/libs/parser/" + PRIVATE "${TD_SOURCE_DIR}/source/libs/parser/inc" ) - target_link_libraries(parserTest PUBLIC wingetopt) -endif() -add_test( - NAME parserTest - COMMAND parserTest -) + TARGET_LINK_LIBRARIES( + parserTest + PUBLIC os util common nodes parser catalog transport gtest function planner qcom + ) + + if(${BUILD_WINGETOPT}) + target_include_directories( + parserTest + PUBLIC "${TD_SOURCE_DIR}/contrib/wingetopt/src" + ) + target_link_libraries(parserTest PUBLIC wingetopt) + endif() + + add_test( + NAME parserTest + COMMAND parserTest + ) +ENDIF() \ No newline at end of file diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index 36a2eb3808..6eafa0555b 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -159,7 +159,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) { * c4 | column | DOUBLE | 8 | * c5 | column | DOUBLE | 8 | */ -void generateTestT1(MockCatalogService* mcs) { +void generateTestTables(MockCatalogService* mcs) { ITableBuilder& builder = mcs->createTableBuilder("test", "t1", TSDB_NORMAL_TABLE, 6) .setPrecision(TSDB_TIME_PRECISION_MILLI) .setVgid(1) @@ -183,23 +183,7 @@ void generateTestT1(MockCatalogService* mcs) { * tag2 | tag | VARCHAR | 20 | * tag3 | tag | TIMESTAMP | 8 | * Child Table: st1s1, st1s2 - */ -void generateTestST1(MockCatalogService* mcs) { - ITableBuilder& builder = mcs->createTableBuilder("test", "st1", 
TSDB_SUPER_TABLE, 3, 3) - .setPrecision(TSDB_TIME_PRECISION_MILLI) - .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) - .addColumn("c1", TSDB_DATA_TYPE_INT) - .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) - .addTag("tag1", TSDB_DATA_TYPE_INT) - .addTag("tag2", TSDB_DATA_TYPE_BINARY, 20) - .addTag("tag3", TSDB_DATA_TYPE_TIMESTAMP); - builder.done(); - mcs->createSubTable("test", "st1", "st1s1", 1); - mcs->createSubTable("test", "st1", "st1s2", 2); - mcs->createSubTable("test", "st1", "st1s3", 1); -} - -/* + * * Super Table: st2 * Field | Type | DataType | Bytes | * ========================================================================== @@ -209,16 +193,32 @@ void generateTestST1(MockCatalogService* mcs) { * jtag | tag | json | -- | * Child Table: st2s1, st2s2 */ -void generateTestST2(MockCatalogService* mcs) { - ITableBuilder& builder = mcs->createTableBuilder("test", "st2", TSDB_SUPER_TABLE, 3, 1) - .setPrecision(TSDB_TIME_PRECISION_MILLI) - .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) - .addColumn("c1", TSDB_DATA_TYPE_INT) - .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) - .addTag("jtag", TSDB_DATA_TYPE_JSON); - builder.done(); - mcs->createSubTable("test", "st2", "st2s1", 1); - mcs->createSubTable("test", "st2", "st2s2", 2); +void generateTestStables(MockCatalogService* mcs) { + { + ITableBuilder& builder = mcs->createTableBuilder("test", "st1", TSDB_SUPER_TABLE, 3, 3) + .setPrecision(TSDB_TIME_PRECISION_MILLI) + .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) + .addColumn("c1", TSDB_DATA_TYPE_INT) + .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) + .addTag("tag1", TSDB_DATA_TYPE_INT) + .addTag("tag2", TSDB_DATA_TYPE_BINARY, 20) + .addTag("tag3", TSDB_DATA_TYPE_TIMESTAMP); + builder.done(); + mcs->createSubTable("test", "st1", "st1s1", 1); + mcs->createSubTable("test", "st1", "st1s2", 2); + mcs->createSubTable("test", "st1", "st1s3", 1); + } + { + ITableBuilder& builder = mcs->createTableBuilder("test", "st2", TSDB_SUPER_TABLE, 3, 1) + .setPrecision(TSDB_TIME_PRECISION_MILLI) + .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP) + .addColumn("c1", TSDB_DATA_TYPE_INT) + .addColumn("c2", TSDB_DATA_TYPE_BINARY, 20) + .addTag("jtag", TSDB_DATA_TYPE_JSON); + builder.done(); + mcs->createSubTable("test", "st2", "st2s1", 1); + mcs->createSubTable("test", "st2", "st2s2", 2); + } } void generateFunctions(MockCatalogService* mcs) { @@ -233,6 +233,11 @@ void generateDnodes(MockCatalogService* mcs) { mcs->createDnode(3, "host3", 7030); } +void generateDatabases(MockCatalogService* mcs) { + mcs->createDatabase("test"); + mcs->createDatabase("rollup_db", true); +} + } // namespace int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandle) { return 0; } @@ -262,7 +267,7 @@ int32_t __catalogGetDBVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char } int32_t __catalogGetDBCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const char* dbFName, SDbCfgInfo* pDbCfg) { - return 0; + return g_mockCatalogService->catalogGetDBCfg(dbFName, pDbCfg); } int32_t __catalogChkAuth(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type, @@ -359,11 +364,11 @@ void initMetaDataEnv() { } void generateMetaData() { + generateDatabases(g_mockCatalogService.get()); generateInformationSchema(g_mockCatalogService.get()); generatePerformanceSchema(g_mockCatalogService.get()); - generateTestT1(g_mockCatalogService.get()); - generateTestST1(g_mockCatalogService.get()); - generateTestST2(g_mockCatalogService.get()); + generateTestTables(g_mockCatalogService.get()); + 
generateTestStables(g_mockCatalogService.get()); generateFunctions(g_mockCatalogService.get()); generateDnodes(g_mockCatalogService.get()); g_mockCatalogService->showTables(); diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp index 8830bc7cb3..0f759018d9 100644 --- a/source/libs/parser/test/mockCatalogService.cpp +++ b/source/libs/parser/test/mockCatalogService.cpp @@ -140,6 +140,17 @@ class MockCatalogServiceImpl { return TSDB_CODE_SUCCESS; } + int32_t catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const { + std::string dbFName(pDbFName); + DbCfgCache::const_iterator it = dbCfg_.find(dbFName.substr(std::string(pDbFName).find_last_of('.') + 1)); + if (dbCfg_.end() == it) { + return TSDB_CODE_FAILED; + } + + memcpy(pDbCfg, &(it->second), sizeof(SDbCfgInfo)); + return TSDB_CODE_SUCCESS; + } + int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const { auto it = udf_.find(funcName); if (udf_.end() == it) { @@ -323,12 +334,21 @@ class MockCatalogServiceImpl { dnode_.insert(std::make_pair(dnodeId, epSet)); } + void createDatabase(const std::string& db, bool rollup) { + SDbCfgInfo cfg = {0}; + if (rollup) { + cfg.pRetensions = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SRetention)); + } + dbCfg_.insert(std::make_pair(db, cfg)); + } + private: typedef std::map<std::string, std::shared_ptr<MockTableMeta>> TableMetaCache; typedef std::map<std::string, TableMetaCache> DbMetaCache; typedef std::map<std::string, std::shared_ptr<SFuncInfo>> UdfMetaCache; typedef std::map<std::string, std::vector<STableIndexInfo>> IndexMetaCache; typedef std::map<int32_t, SEpSet> DnodeCache; + typedef std::map<std::string, SDbCfgInfo> DbCfgCache; uint64_t getNextId() { return id_++; } @@ -486,6 +506,7 @@ class MockCatalogServiceImpl { for (int32_t i = 0; i < ndbs; ++i) { SMetaRes res = {0}; res.pRes = taosMemoryCalloc(1, sizeof(SDbCfgInfo)); + res.code = catalogGetDBCfg((const char*)taosArrayGet(pDbCfgReq, i), (SDbCfgInfo*)res.pRes); taosArrayPush(*pDbCfgData, &res); } } @@ -576,6 +597,7 @@ class MockCatalogServiceImpl { UdfMetaCache udf_; IndexMetaCache index_; DnodeCache dnode_; + DbCfgCache dbCfg_; }; MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {} @@ -605,6 +627,8 @@ void MockCatalogService::createDnode(int32_t dnodeId, const std::string& host, i impl_->createDnode(dnodeId, host, port); } +void MockCatalogService::createDatabase(const std::string& db, bool rollup) { impl_->createDatabase(db, rollup); } + int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const { return impl_->catalogGetTableMeta(pTableName, pTableMeta); } @@ -621,6 +645,10 @@ int32_t MockCatalogService::catalogGetDBVgInfo(const char* pDbFName, SArray** pV return impl_->catalogGetDBVgInfo(pDbFName, pVgList); } +int32_t MockCatalogService::catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const { + return impl_->catalogGetDBCfg(pDbFName, pDbCfg); +} + int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const { return impl_->catalogGetUdfInfo(funcName, pInfo); } diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h index 932424823c..5c8a8acad1 100644 --- a/source/libs/parser/test/mockCatalogService.h +++ b/source/libs/parser/test/mockCatalogService.h @@ -63,11 +63,13 @@ class MockCatalogService { void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize); void createSmaIndex(const SMCreateSmaReq* pReq); void createDnode(int32_t dnodeId, const std::string& host, int16_t port); + void createDatabase(const std::string& db, bool
rollup = false); int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const; int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const; int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const; int32_t catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const; + int32_t catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const; int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const; int32_t catalogGetTableIndex(const SName* pTableName, SArray** pIndexes) const; int32_t catalogGetDnodeList(SArray** pDnodes) const; diff --git a/source/libs/parser/test/parInitialATest.cpp b/source/libs/parser/test/parInitialATest.cpp index ee9dae2a01..f4d0ba1cc8 100644 --- a/source/libs/parser/test/parInitialATest.cpp +++ b/source/libs/parser/test/parInitialATest.cpp @@ -38,9 +38,9 @@ TEST_F(ParserInitialATest, alterDnode) { TEST_F(ParserInitialATest, alterDatabase) { useDb("root", "test"); - run("ALTER DATABASE wxy_db CACHELAST 1 FSYNC 200 WAL 1"); + run("ALTER DATABASE test CACHELAST 1 FSYNC 200 WAL 1"); - run("ALTER DATABASE wxy_db KEEP 2400"); + run("ALTER DATABASE test KEEP 2400"); } TEST_F(ParserInitialATest, alterLocal) { @@ -77,8 +77,6 @@ TEST_F(ParserInitialATest, alterLocal) { clearAlterLocal(); } -// todo ALTER stable - /* * ALTER TABLE [db_name.]tb_name alter_table_clause * @@ -157,7 +155,7 @@ TEST_F(ParserInitialATest, alterSTable) { }; setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) { - ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_TABLE_STMT); + ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_SUPER_TABLE_STMT); SMAlterStbReq req = {0}; ASSERT_EQ(tDeserializeSMAlterStbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS); ASSERT_EQ(std::string(req.name), std::string(expect.name)); @@ -181,44 +179,44 @@ TEST_F(ParserInitialATest, alterSTable) { }); // setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10); - // run("ALTER TABLE st1 TTL 10"); + // run("ALTER STABLE st1 TTL 10"); // clearAlterStbReq(); setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test"); - run("ALTER TABLE st1 COMMENT 'test'"); + run("ALTER STABLE st1 COMMENT 'test'"); clearAlterStbReq(); setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT); - run("ALTER TABLE st1 ADD COLUMN cc1 BIGINT"); + run("ALTER STABLE st1 ADD COLUMN cc1 BIGINT"); clearAlterStbReq(); setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1"); - run("ALTER TABLE st1 DROP COLUMN c1"); + run("ALTER STABLE st1 DROP COLUMN c1"); clearAlterStbReq(); setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c2", TSDB_DATA_TYPE_VARCHAR, 30 + VARSTR_HEADER_SIZE); - run("ALTER TABLE st1 MODIFY COLUMN c2 VARCHAR(30)"); + run("ALTER STABLE st1 MODIFY COLUMN c2 VARCHAR(30)"); clearAlterStbReq(); // setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1"); - // run("ALTER TABLE st1 RENAME COLUMN c1 cc1"); + // run("ALTER STABLE st1 RENAME COLUMN c1 cc1"); setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_TAG, 1, "tag11", TSDB_DATA_TYPE_BIGINT); - run("ALTER TABLE st1 ADD TAG tag11 BIGINT"); + run("ALTER STABLE st1 ADD TAG tag11 BIGINT"); clearAlterStbReq(); setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_TAG, 1, "tag1"); - run("ALTER TABLE st1 DROP TAG tag1"); + run("ALTER STABLE st1 DROP TAG tag1"); clearAlterStbReq(); setAlterStbReqFunc("st1", 
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, 1, "tag2", TSDB_DATA_TYPE_VARCHAR, 30 + VARSTR_HEADER_SIZE); - run("ALTER TABLE st1 MODIFY TAG tag2 VARCHAR(30)"); + run("ALTER STABLE st1 MODIFY TAG tag2 VARCHAR(30)"); clearAlterStbReq(); setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_TAG_NAME, 2, "tag1", 0, 0, "tag11"); - run("ALTER TABLE st1 RENAME TAG tag1 tag11"); + run("ALTER STABLE st1 RENAME TAG tag1 tag11"); clearAlterStbReq(); // todo @@ -228,11 +226,11 @@ TEST_F(ParserInitialATest, alterSTable) { TEST_F(ParserInitialATest, alterSTableSemanticCheck) { useDb("root", "test"); - run("ALTER TABLE st1 RENAME COLUMN c1 cc1", TSDB_CODE_PAR_INVALID_ALTER_TABLE); + run("ALTER STABLE st1 RENAME COLUMN c1 cc1", TSDB_CODE_PAR_INVALID_ALTER_TABLE); - run("ALTER TABLE st1 MODIFY COLUMN c2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL); + run("ALTER STABLE st1 MODIFY COLUMN c2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL); - run("ALTER TABLE st1 MODIFY TAG tag2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL); + run("ALTER STABLE st1 MODIFY TAG tag2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL); } TEST_F(ParserInitialATest, alterTable) { diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 0a980fa889..10921a2082 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -359,11 +359,11 @@ TEST_F(ParserInitialCTest, createStable) { memset(&expect, 0, sizeof(SMCreateStbReq)); }; - auto setCreateStbReqFunc = [&](const char* pTbname, int8_t igExists = 0, int64_t delay1 = -1, int64_t delay2 = -1, - int64_t watermark1 = TSDB_DEFAULT_ROLLUP_WATERMARK, + auto setCreateStbReqFunc = [&](const char* pDbName, const char* pTbName, int8_t igExists = 0, int64_t delay1 = -1, + int64_t delay2 = -1, int64_t watermark1 = TSDB_DEFAULT_ROLLUP_WATERMARK, int64_t watermark2 = TSDB_DEFAULT_ROLLUP_WATERMARK, int32_t ttl = TSDB_DEFAULT_TABLE_TTL, const char* pComment = nullptr) { - int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname); + int32_t len = snprintf(expect.name, sizeof(expect.name), "0.%s.%s", pDbName, pTbName); expect.name[len] = '\0'; expect.igExists = igExists; expect.delay1 = delay1; @@ -454,14 +454,14 @@ TEST_F(ParserInitialCTest, createStable) { tFreeSMCreateStbReq(&req); }); - setCreateStbReqFunc("t1"); + setCreateStbReqFunc("test", "t1"); addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP); addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT); addFieldToCreateStbReqFunc(false, "id", TSDB_DATA_TYPE_INT); run("CREATE STABLE t1(ts TIMESTAMP, c1 INT) TAGS(id INT)"); clearCreateStbReq(); - setCreateStbReqFunc("t1", 1, 100 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_MINUTE, 10, + setCreateStbReqFunc("rollup_db", "t1", 1, 100 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_MINUTE, 10, 1 * MILLISECOND_PER_MINUTE, 100, "test create table"); addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0); addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT); @@ -493,7 +493,7 @@ TEST_F(ParserInitialCTest, createStable) { addFieldToCreateStbReqFunc(false, "a13", TSDB_DATA_TYPE_BOOL); addFieldToCreateStbReqFunc(false, "a14", TSDB_DATA_TYPE_NCHAR, 30 * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); addFieldToCreateStbReqFunc(false, "a15", TSDB_DATA_TYPE_VARCHAR, 50 + VARSTR_HEADER_SIZE); - run("CREATE STABLE IF NOT EXISTS test.t1(" + run("CREATE STABLE IF NOT EXISTS rollup_db.t1(" "ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 
BINARY(20), " "c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, " "c13 NCHAR(30), c14 VARCHAR(50)) " @@ -507,12 +507,13 @@ TEST_F(ParserInitialCTest, createStable) { TEST_F(ParserInitialCTest, createStableSemanticCheck) { useDb("root", "test"); - run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(CEIL)", TSDB_CODE_PAR_INVALID_ROLLUP_OPTION); + run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(CEIL)", + TSDB_CODE_PAR_INVALID_ROLLUP_OPTION); - run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 0s WATERMARK 1m", + run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 0s WATERMARK 1m", TSDB_CODE_PAR_INVALID_RANGE_OPTION); - run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 10s WATERMARK 18m", + run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 10s WATERMARK 18m", TSDB_CODE_PAR_INVALID_RANGE_OPTION); } @@ -561,30 +562,33 @@ TEST_F(ParserInitialCTest, createStream) { tFreeSCMCreateStreamReq(&req); }); - setCreateStreamReqFunc("s1", "test", "create stream s1 as select * from t1"); - run("CREATE STREAM s1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", "create stream s1 as select count(*) from t1 interval(10s)"); + run("CREATE STREAM s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); - setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select * from t1", nullptr, 1); - run("CREATE STREAM IF NOT EXISTS s1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select count(*) from t1 interval(10s)", + nullptr, 1); + run("CREATE STREAM IF NOT EXISTS s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); - setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select * from t1", "st1"); - run("CREATE STREAM s1 INTO st1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select count(*) from t1 interval(10s)", "st1"); + run("CREATE STREAM s1 INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); - setCreateStreamReqFunc( - "s1", "test", - "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired into st1 as select * from t1", - "st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND, 1); - run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED INTO st1 AS SELECT * FROM t1"); + setCreateStreamReqFunc("s1", "test", + "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired into st1 " + "as select count(*) from t1 interval(10s)", + "st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND, + 1); + run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED INTO st1 AS SELECT COUNT(*) " + "FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); } TEST_F(ParserInitialCTest, createStreamSemanticCheck) { useDb("root", "test"); - run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC); + run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC); } TEST_F(ParserInitialCTest, createTable) { @@ -598,7 +602,7 @@ TEST_F(ParserInitialCTest, createTable) { "c13 NCHAR(30), c15 VARCHAR(50)) " "TTL 100 
COMMENT 'test create table' SMA(c1, c2, c3)"); - run("CREATE TABLE IF NOT EXISTS test.t1(" + run("CREATE TABLE IF NOT EXISTS rollup_db.t1(" "ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), " "c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, " "c13 NCHAR(30), c14 VARCHAR(50)) " @@ -617,6 +621,21 @@ TEST_F(ParserInitialCTest, createTable) { // run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy', NOW + 1S)"); } +TEST_F(ParserInitialCTest, createTableSemanticCheck) { + useDb("root", "test"); + + string sql = "CREATE TABLE st1(ts TIMESTAMP, "; + for (int32_t i = 1; i < 4096; ++i) { + if (i > 1) { + sql.append(", "); + } + sql.append("c" + to_string(i) + " INT"); + } + sql.append(") TAGS (t1 int)"); + + run(sql, TSDB_CODE_PAR_TOO_MANY_COLUMNS); +} + TEST_F(ParserInitialCTest, createTopic) { useDb("root", "test"); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index fb27602c21..a4f30228d5 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -480,12 +480,18 @@ static int32_t pushDownCondOptPushCondToProject(SOptimizeContext* pCxt, SProject return pushDownCondOptAppendCond(&pProject->node.pConditions, pCond); } +static int32_t pushDownCondOptPushCondToJoin(SOptimizeContext* pCxt, SJoinLogicNode * pJoin, SNode** pCond) { + return pushDownCondOptAppendCond(&pJoin->node.pConditions, pCond); +} + static int32_t pushDownCondOptPushCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) { switch (nodeType(pChild)) { case QUERY_NODE_LOGIC_PLAN_SCAN: return pushDownCondOptPushCondToScan(pCxt, (SScanLogicNode*)pChild, pCond); case QUERY_NODE_LOGIC_PLAN_PROJECT: return pushDownCondOptPushCondToProject(pCxt, (SProjectLogicNode*)pChild, pCond); + case QUERY_NODE_LOGIC_PLAN_JOIN: + return pushDownCondOptPushCondToJoin(pCxt, (SJoinLogicNode*)pChild, pCond); default: break; } @@ -554,13 +560,83 @@ static int32_t pushDownCondOptCheckJoinOnCond(SOptimizeContext* pCxt, SJoinLogic return TSDB_CODE_SUCCESS; } +static int32_t pushDownCondOptPartJoinOnCondLogicCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) { + SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pJoin->pOnConditions); + + int32_t code = TSDB_CODE_SUCCESS; + SNodeList* pOnConds = NULL; + SNode* pCond = NULL; + FOREACH(pCond, pLogicCond->pParameterList) { + if (pushDownCondOptIsPriKeyEqualCond(pJoin, pCond)) { + *ppMergeCond = nodesCloneNode(pCond); + } else { + code = nodesListMakeAppend(&pOnConds, nodesCloneNode(pCond)); + } + } + + SNode* pTempOnCond = NULL; + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempOnCond, &pOnConds); + } + + if (TSDB_CODE_SUCCESS == code && NULL != *ppMergeCond) { + *ppOnCond = pTempOnCond; + nodesDestroyNode(pJoin->pOnConditions); + pJoin->pOnConditions = NULL; + return TSDB_CODE_SUCCESS; + } else { + nodesDestroyList(pOnConds); + nodesDestroyNode(pTempOnCond); + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } +} + +static int32_t pushDownCondOptPartJoinOnCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pOnConditions) && + LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)(pJoin->pOnConditions))->condType) { + return pushDownCondOptPartJoinOnCondLogicCond(pJoin, ppMergeCond, ppOnCond); + } + + if (pushDownCondOptIsPriKeyEqualCond(pJoin, pJoin->pOnConditions)) { + *ppMergeCond = 
nodesCloneNode(pJoin->pOnConditions); + *ppOnCond = NULL; + nodesDestroyNode(pJoin->pOnConditions); + pJoin->pOnConditions = NULL; + return TSDB_CODE_SUCCESS; + } else { + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } +} + +static int32_t pushDownCondOptJoinExtractMergeCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { + int32_t code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin); + SNode* pJoinMergeCond = NULL; + SNode* pJoinOnCond = NULL; + if (TSDB_CODE_SUCCESS == code) { + code = pushDownCondOptPartJoinOnCond(pJoin, &pJoinMergeCond, &pJoinOnCond); + } + if (TSDB_CODE_SUCCESS == code) { + pJoin->pMergeCondition = pJoinMergeCond; + pJoin->pOnConditions = pJoinOnCond; + } else { + nodesDestroyNode(pJoinMergeCond); + nodesDestroyNode(pJoinOnCond); + } + return code; +} + static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) { return TSDB_CODE_SUCCESS; } if (NULL == pJoin->node.pConditions) { - return pushDownCondOptCheckJoinOnCond(pCxt, pJoin); + int32_t code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin); + if (TSDB_CODE_SUCCESS == code) { + OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE); + pCxt->optimized = true; + } + return code; } SNode* pOnCond = NULL; @@ -579,10 +655,13 @@ static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* p pushDownCondOptPushCondToChild(pCxt, (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1), &pRightChildCond); } + if (TSDB_CODE_SUCCESS == code) { + code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin); + } + if (TSDB_CODE_SUCCESS == code) { OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE); pCxt->optimized = true; - code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin); } else { nodesDestroyNode(pOnCond); nodesDestroyNode(pLeftChildCond); @@ -720,7 +799,8 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg // TODO: remove it after full implementation of pushing down to child if (1 != LIST_LENGTH(pAgg->node.pChildren) || QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) && - QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) { + QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) && + QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) { return TSDB_CODE_SUCCESS; } @@ -1251,7 +1331,7 @@ static SNode* partTagsCreateWrapperFunc(const char* pFuncName, SNode* pNode) { } strcpy(pFunc->functionName, pFuncName); - if (QUERY_NODE_COLUMN == nodeType(pNode)) { + if (QUERY_NODE_COLUMN == nodeType(pNode) && COLUMN_TYPE_TBNAME != ((SColumnNode*)pNode)->colType) { SColumnNode* pCol = (SColumnNode*)pNode; partTagsSetAlias(pFunc->node.aliasName, sizeof(pFunc->node.aliasName), pCol->tableAlias, pCol->colName); } else { @@ -1868,6 +1948,8 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) { pCxt->errCode = terrno; return DEAL_RES_ERROR; } + snprintf(((SExprNode*)pExpr)->aliasName, sizeof(((SExprNode*)pExpr)->aliasName), "%s", + ((SExprNode*)*pNode)->aliasName); nodesDestroyNode(*pNode); *pNode = pExpr; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 0eb05ccbe9..d10908c519 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -612,10 +612,8 @@ static int32_t 
createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren int32_t code = TSDB_CODE_SUCCESS; pJoin->joinType = pJoinLogicNode->joinType; - if (NULL != pJoinLogicNode->pOnConditions) { - code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pOnConditions, - &pJoin->pOnConditions); - } + setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pMergeCondition, + &pJoin->pMergeCondition); if (TSDB_CODE_SUCCESS == code) { code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets, &pJoin->pTargets); @@ -623,6 +621,21 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren if (TSDB_CODE_SUCCESS == code) { code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc); } + + SNodeList* condCols = nodesMakeList(); + if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) { + code = nodesCollectColumnsFromNode(pJoinLogicNode->pOnConditions, NULL, COLLECT_COL_TYPE_ALL, &condCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = addDataBlockSlots(pCxt, condCols, pJoin->node.pOutputDataBlockDesc); + nodesDestroyList(condCols); + } + + if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) { + code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pOnConditions, + &pJoin->pOnConditions); + } + if (TSDB_CODE_SUCCESS == code) { code = setConditionsSlotId(pCxt, (const SLogicNode*)pJoinLogicNode, (SPhysiNode*)pJoin); } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 60c04c2c30..edeff83d5a 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -986,6 +986,10 @@ static bool unionIsChildSubplan(SLogicNode* pLogicNode, int32_t groupId) { return ((SExchangeLogicNode*)pLogicNode)->srcGroupId == groupId; } + if (QUERY_NODE_LOGIC_PLAN_MERGE == nodeType(pLogicNode)) { + return ((SMergeLogicNode*)pLogicNode)->srcGroupId == groupId; + } + SNode* pChild; FOREACH(pChild, pLogicNode->pChildren) { bool isChild = unionIsChildSubplan((SLogicNode*)pChild, groupId); @@ -1014,14 +1018,14 @@ static int32_t unionMountSubplan(SLogicSubplan* pParent, SNodeList* pChildren) { return TSDB_CODE_SUCCESS; } -static SLogicSubplan* unionCreateSubplan(SSplitContext* pCxt, SLogicNode* pNode) { +static SLogicSubplan* unionCreateSubplan(SSplitContext* pCxt, SLogicNode* pNode, ESubplanType subplanType) { SLogicSubplan* pSubplan = (SLogicSubplan*)nodesMakeNode(QUERY_NODE_LOGIC_SUBPLAN); if (NULL == pSubplan) { return NULL; } pSubplan->id.queryId = pCxt->queryId; pSubplan->id.groupId = pCxt->groupId; - pSubplan->subplanType = SUBPLAN_TYPE_SCAN; + pSubplan->subplanType = subplanType; pSubplan->pNode = pNode; pNode->pParent = NULL; return pSubplan; @@ -1035,7 +1039,7 @@ static int32_t unionSplitSubplan(SSplitContext* pCxt, SLogicSubplan* pUnionSubpl SNode* pChild = NULL; FOREACH(pChild, pSplitNode->pChildren) { - SLogicSubplan* pNewSubplan = unionCreateSubplan(pCxt, (SLogicNode*)pChild); + SLogicSubplan* pNewSubplan = unionCreateSubplan(pCxt, (SLogicNode*)pChild, pUnionSubplan->subplanType); code = nodesListMakeStrictAppend(&pUnionSubplan->pChildren, (SNode*)pNewSubplan); if (TSDB_CODE_SUCCESS == code) { REPLACE_NODE(NULL); @@ -1216,6 +1220,7 @@ static int32_t qnodeSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)qndSplFindSplitNode, &info)) { return 
TSDB_CODE_SUCCESS; } + ((SScanLogicNode*)info.pSplitNode)->dataRequired = FUNC_DATA_REQUIRED_DATA_LOAD; int32_t code = splCreateExchangeNodeForSubplan(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType); if (TSDB_CODE_SUCCESS == code) { SLogicSubplan* pScanSubplan = splCreateScanSubplan(pCxt, info.pSplitNode, 0); diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp index 6a9a711dac..3994db0902 100644 --- a/source/libs/planner/test/planOptimizeTest.cpp +++ b/source/libs/planner/test/planOptimizeTest.cpp @@ -68,6 +68,8 @@ TEST_F(PlanOptimizeTest, PartitionTags) { run("SELECT SUM(c1), tag1 FROM st1 GROUP BY tag1"); run("SELECT SUM(c1), tag1 + 10 FROM st1 GROUP BY tag1 + 10"); + + run("SELECT SUM(c1), tbname FROM st1 GROUP BY tbname"); } TEST_F(PlanOptimizeTest, eliminateProjection) { diff --git a/source/libs/planner/test/planSetOpTest.cpp b/source/libs/planner/test/planSetOpTest.cpp index 62e017052e..de6d7466b8 100644 --- a/source/libs/planner/test/planSetOpTest.cpp +++ b/source/libs/planner/test/planSetOpTest.cpp @@ -97,7 +97,15 @@ TEST_F(PlanSetOpTest, unionSubquery) { run("SELECT * FROM (SELECT c1, c2 FROM t1 UNION SELECT c1, c2 FROM t1)"); } -TEST_F(PlanSetOpTest, bug001) { +TEST_F(PlanSetOpTest, unionWithSubquery) { + useDb("root", "test"); + + run("SELECT c1 FROM (SELECT c1 FROM st1) UNION SELECT c2 FROM (SELECT c1 AS c2 FROM st2)"); + + run("SELECT c1 FROM (SELECT c1 FROM st1 ORDER BY c2) UNION SELECT c1 FROM (SELECT c1 FROM st2)"); +} + +TEST_F(PlanSetOpTest, unionDataTypeConversion) { useDb("root", "test"); run("SELECT c2 FROM t1 WHERE c1 IS NOT NULL GROUP BY c2 " diff --git a/source/libs/qworker/test/CMakeLists.txt b/source/libs/qworker/test/CMakeLists.txt index e7cc8b7c83..780f5ae84b 100644 --- a/source/libs/qworker/test/CMakeLists.txt +++ b/source/libs/qworker/test/CMakeLists.txt @@ -1,18 +1,19 @@ MESSAGE(STATUS "build qworker unit test") +IF(NOT TD_DARWIN) + # GoogleTest requires at least C++11 + SET(CMAKE_CXX_STANDARD 11) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -# GoogleTest requires at least C++11 -SET(CMAKE_CXX_STANDARD 11) -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) + ADD_EXECUTABLE(qworkerTest ${SOURCE_LIST}) + TARGET_LINK_LIBRARIES( + qworkerTest + PUBLIC os util common transport gtest qcom nodes planner qworker executor + ) -ADD_EXECUTABLE(qworkerTest ${SOURCE_LIST}) -TARGET_LINK_LIBRARIES( - qworkerTest - PUBLIC os util common transport gtest qcom nodes planner qworker executor -) - -TARGET_INCLUDE_DIRECTORIES( - qworkerTest - PUBLIC "${TD_SOURCE_DIR}/include/libs/qworker/" - PRIVATE "${TD_SOURCE_DIR}/source/libs/qworker/inc" -) + TARGET_INCLUDE_DIRECTORIES( + qworkerTest + PUBLIC "${TD_SOURCE_DIR}/include/libs/qworker/" + PRIVATE "${TD_SOURCE_DIR}/source/libs/qworker/inc" + ) +ENDIF() diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 42be99d08b..a7f66ebb7d 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -196,7 +196,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { terrno = TSDB_CODE_QRY_JSON_IN_ERROR; return 0; default: - assert(0); + return 0; } } @@ -222,7 +222,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { terrno = TSDB_CODE_QRY_JSON_IN_ERROR; return 0; default: - assert(0); + return 0; } } diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index d093c8bdbd..cbb1089d61 100644 --- a/source/libs/scalar/src/scalar.c +++ 
b/source/libs/scalar/src/scalar.c @@ -109,9 +109,8 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { } if (IS_VAR_DATA_TYPE(type)) { - char* data = colDataGetVarData(out.columnData, 0); - len = varDataLen(data); - buf = varDataVal(data); + buf = colDataGetVarData(out.columnData, 0); + len = varDataTLen(data); } else { len = tDataTypes[type].bytes; buf = out.columnData->pData; @@ -119,8 +118,7 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { } else { buf = nodesGetValueFromNode(valueNode); if (IS_VAR_DATA_TYPE(type)) { - len = varDataLen(buf); - buf = varDataVal(buf); + len = varDataTLen(buf); } else { len = valueNode->node.resType.bytes; } @@ -194,7 +192,7 @@ int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t param->numOfRows = 1; param->columnData = sclCreateColumnInfoData(&valueNode->node.resType, 1); - if (TSDB_DATA_TYPE_NULL == valueNode->node.resType.type) { + if (TSDB_DATA_TYPE_NULL == valueNode->node.resType.type || valueNode->isNull) { colDataAppendNULL(param->columnData, 0); } else { colDataAppend(param->columnData, 0, nodesGetValueFromNode(valueNode), false); @@ -345,7 +343,7 @@ int32_t sclGetNodeType(SNode *pNode, SScalarCtx *ctx) { return -1; } - switch (nodeType(pNode)) { + switch ((int)nodeType(pNode)) { case QUERY_NODE_VALUE: { SValueNode *valueNode = (SValueNode *)pNode; return valueNode->node.resType.type; @@ -538,6 +536,14 @@ int32_t sclExecOperator(SOperatorNode *node, SScalarCtx *ctx, SScalarParam *outp int32_t rowNum = 0; int32_t code = 0; + // json not support in in operator + if(nodeType(node->pLeft) == QUERY_NODE_VALUE){ + SValueNode *valueNode = (SValueNode *)node->pLeft; + if(valueNode->node.resType.type == TSDB_DATA_TYPE_JSON && (node->opType == OP_TYPE_IN || node->opType == OP_TYPE_NOT_IN)){ + SCL_RET(TSDB_CODE_QRY_JSON_IN_ERROR); + } + } + SCL_ERR_RET(sclInitOperatorParams(¶ms, node, ctx, &rowNum)); output->columnData = sclCreateColumnInfoData(&node->node.resType, rowNum); if (output->columnData == NULL) { @@ -777,7 +783,12 @@ EDealRes sclRewriteOperator(SNode** pNode, SScalarCtx *ctx) { res->translate = true; if (colDataIsNull_s(output.columnData, 0)) { - res->node.resType.type = TSDB_DATA_TYPE_NULL; + if(node->node.resType.type != TSDB_DATA_TYPE_JSON){ + res->node.resType.type = TSDB_DATA_TYPE_NULL; + }else{ + res->node.resType = node->node.resType; + res->isNull = true; + } } else { res->node.resType = node->node.resType; int32_t type = output.columnData->info.type; @@ -1024,3 +1035,72 @@ _return: sclFreeRes(ctx.pRes); return code; } + +int32_t scalarGetOperatorResultType(SDataType left, SDataType right, EOperatorType op, SDataType* pRes) { + switch (op) { + case OP_TYPE_ADD: + if (left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_TIMESTAMP) { + qError("invalid op %d, left type:%d, right type:%d", op, left.type, right.type); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + if ((left.type == TSDB_DATA_TYPE_TIMESTAMP && (IS_INTEGER_TYPE(right.type) || right.type == TSDB_DATA_TYPE_BOOL)) || + (right.type == TSDB_DATA_TYPE_TIMESTAMP && (IS_INTEGER_TYPE(left.type) || left.type == TSDB_DATA_TYPE_BOOL))) { + pRes->type = TSDB_DATA_TYPE_TIMESTAMP; + return TSDB_CODE_SUCCESS; + } + pRes->type = TSDB_DATA_TYPE_DOUBLE; + return TSDB_CODE_SUCCESS; + case OP_TYPE_SUB: + if ((left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_BIGINT) || + (right.type == TSDB_DATA_TYPE_TIMESTAMP && left.type == TSDB_DATA_TYPE_BIGINT)) { + pRes->type = 
TSDB_DATA_TYPE_TIMESTAMP; + return TSDB_CODE_SUCCESS; + } + pRes->type = TSDB_DATA_TYPE_DOUBLE; + return TSDB_CODE_SUCCESS; + case OP_TYPE_MULTI: + if (left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_TIMESTAMP) { + qError("invalid op %d, left type:%d, right type:%d", op, left.type, right.type); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + case OP_TYPE_DIV: + if (left.type == TSDB_DATA_TYPE_TIMESTAMP && right.type == TSDB_DATA_TYPE_TIMESTAMP) { + qError("invalid op %d, left type:%d, right type:%d", op, left.type, right.type); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + case OP_TYPE_REM: + case OP_TYPE_MINUS: + pRes->type = TSDB_DATA_TYPE_DOUBLE; + return TSDB_CODE_SUCCESS; + case OP_TYPE_GREATER_THAN: + case OP_TYPE_GREATER_EQUAL: + case OP_TYPE_LOWER_THAN: + case OP_TYPE_LOWER_EQUAL: + case OP_TYPE_EQUAL: + case OP_TYPE_NOT_EQUAL: + case OP_TYPE_IN: + case OP_TYPE_NOT_IN: + case OP_TYPE_LIKE: + case OP_TYPE_NOT_LIKE: + case OP_TYPE_MATCH: + case OP_TYPE_NMATCH: + case OP_TYPE_IS_NULL: + case OP_TYPE_IS_NOT_NULL: + case OP_TYPE_IS_TRUE: + case OP_TYPE_JSON_CONTAINS: + pRes->type = TSDB_DATA_TYPE_BOOL; + return TSDB_CODE_SUCCESS; + case OP_TYPE_BIT_AND: + case OP_TYPE_BIT_OR: + pRes->type = TSDB_DATA_TYPE_BIGINT; + return TSDB_CODE_SUCCESS; + case OP_TYPE_JSON_GET_VALUE: + pRes->type = TSDB_DATA_TYPE_JSON; + return TSDB_CODE_SUCCESS; + default: + ASSERT(0); + return TSDB_CODE_APP_ERROR; + } +} + + diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 3a219b78b5..e6b7c75564 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -360,9 +360,6 @@ static void trtrim(char *input, char *output, int32_t type, int32_t charLen) { static int32_t doLengthFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput, _len_fn lenFn) { int32_t type = GET_PARAM_TYPE(pInput); - if (inputNum != 1 || !IS_VAR_DATA_TYPE(type)) { - return TSDB_CODE_FAILED; - } SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; @@ -586,9 +583,6 @@ DONE: static int32_t doCaseConvFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput, _conv_fn convFn) { int32_t type = GET_PARAM_TYPE(pInput); - if (inputNum != 1 || !IS_VAR_DATA_TYPE(type)) { - return TSDB_CODE_FAILED; - } SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; @@ -628,9 +622,6 @@ static int32_t doCaseConvFunction(SScalarParam *pInput, int32_t inputNum, SScala static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput, _trim_fn trimFn) { int32_t type = GET_PARAM_TYPE(pInput); - if (inputNum != 1 || !IS_VAR_DATA_TYPE(type)) { - return TSDB_CODE_FAILED; - } SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; @@ -664,16 +655,10 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int32_t subPos = 0; GET_TYPED_DATA(subPos, int32_t, GET_PARAM_TYPE(&pInput[1]), pInput[1].columnData->pData); - if (subPos == 0) { //subPos needs to be positive or negative values; - return TSDB_CODE_FAILED; - } int32_t subLen = INT16_MAX; if (inputNum == 3) { GET_TYPED_DATA(subLen, int32_t, GET_PARAM_TYPE(&pInput[2]), pInput[2].columnData->pData); - if (subLen < 0 || subLen > INT16_MAX) { //subLen cannot be negative - return TSDB_CODE_FAILED; - } subLen = (GET_PARAM_TYPE(pInput) == 
TSDB_DATA_TYPE_VARCHAR) ? subLen : subLen * TSDB_NCHAR_SIZE; } @@ -934,9 +919,16 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp break; } case TSDB_DATA_TYPE_TIMESTAMP: { + int64_t timeVal; if (inputType == TSDB_DATA_TYPE_BINARY || inputType == TSDB_DATA_TYPE_NCHAR) { - //convert to 0 - *(int64_t *)output = 0; + int64_t timePrec; + GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[1]), pInput[1].columnData->pData); + int32_t ret = convertStringToTimestamp(inputType, input, timePrec, &timeVal); + if (ret != TSDB_CODE_SUCCESS) { + *(int64_t *)output = 0; + } else { + *(int64_t *)output = timeVal; + } } else { GET_TYPED_DATA(*(int64_t *)output, int64_t, inputType, input); } @@ -1142,13 +1134,6 @@ int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarP int32_t toJsonFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int32_t type = GET_PARAM_TYPE(pInput); - if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR) { - return TSDB_CODE_FAILED; - } - - if (inputNum != 1) { - return TSDB_CODE_FAILED; - } char tmp[TSDB_MAX_JSON_TAG_LEN] = {0}; for (int32_t i = 0; i < pInput[0].numOfRows; ++i) { @@ -1189,6 +1174,8 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : (timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000); + timeUnit = timeUnit * 1000 / factor; + for (int32_t i = 0; i < pInput[0].numOfRows; ++i) { if (colDataIsNull_s(pInput[0].columnData, i)) { colDataAppendNULL(pOutput->columnData, i); @@ -1221,7 +1208,6 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara char buf[20] = {0}; NUM_TO_STRING(TSDB_DATA_TYPE_BIGINT, &timeVal, sizeof(buf), buf); int32_t tsDigits = (int32_t)strlen(buf); - timeUnit = timeUnit * 1000 / factor; switch (timeUnit) { case 0: { /* 1u */ @@ -1377,6 +1363,11 @@ int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p GET_TYPED_DATA(timePrec, int64_t, GET_PARAM_TYPE(&pInput[2]), pInput[2].columnData->pData); } + int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : + (timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000); + + timeUnit = timeUnit * 1000 / factor; + int32_t numOfRows = 0; for (int32_t i = 0; i < inputNum; ++i) { if (pInput[i].numOfRows > numOfRows) { @@ -1456,9 +1447,6 @@ int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p } } } else { - int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : - (timePrec == TSDB_TIME_PRECISION_MICRO ? 
1000000 : 1000000000); - timeUnit = timeUnit * 1000 / factor; switch(timeUnit) { case 0: { /* 1u */ result = result / 1000; diff --git a/source/libs/scalar/test/filter/CMakeLists.txt b/source/libs/scalar/test/filter/CMakeLists.txt index 8e3aeb6f9d..a95a1655f8 100644 --- a/source/libs/scalar/test/filter/CMakeLists.txt +++ b/source/libs/scalar/test/filter/CMakeLists.txt @@ -1,18 +1,20 @@ MESSAGE(STATUS "build filter unit test") -# GoogleTest requires at least C++11 -SET(CMAKE_CXX_STANDARD 11) -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) +IF(NOT TD_DARWIN) + # GoogleTest requires at least C++11 + SET(CMAKE_CXX_STANDARD 11) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -ADD_EXECUTABLE(filterTest ${SOURCE_LIST}) -TARGET_LINK_LIBRARIES( - filterTest - PUBLIC os util common gtest qcom function nodes scalar -) + ADD_EXECUTABLE(filterTest ${SOURCE_LIST}) + TARGET_LINK_LIBRARIES( + filterTest + PUBLIC os util common gtest qcom function nodes scalar + ) -TARGET_INCLUDE_DIRECTORIES( - filterTest - PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar/" - PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" -) + TARGET_INCLUDE_DIRECTORIES( + filterTest + PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar/" + PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" + ) +ENDIF() \ No newline at end of file diff --git a/source/libs/scalar/test/scalar/CMakeLists.txt b/source/libs/scalar/test/scalar/CMakeLists.txt index 86b936d93a..4624c1a7fd 100644 --- a/source/libs/scalar/test/scalar/CMakeLists.txt +++ b/source/libs/scalar/test/scalar/CMakeLists.txt @@ -1,23 +1,25 @@ MESSAGE(STATUS "build scalar unit test") -# GoogleTest requires at least C++11 -SET(CMAKE_CXX_STANDARD 11) -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) +IF(NOT TD_DARWIN) + # GoogleTest requires at least C++11 + SET(CMAKE_CXX_STANDARD 11) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -ADD_EXECUTABLE(scalarTest ${SOURCE_LIST}) -TARGET_LINK_LIBRARIES( - scalarTest - PUBLIC os util common gtest qcom function nodes scalar parser catalog transport -) + ADD_EXECUTABLE(scalarTest ${SOURCE_LIST}) + TARGET_LINK_LIBRARIES( + scalarTest + PUBLIC os util common gtest qcom function nodes scalar parser catalog transport + ) -TARGET_INCLUDE_DIRECTORIES( - scalarTest - PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar/" - PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc" - PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" -) -add_test( - NAME scalarTest - COMMAND scalarTest -) + TARGET_INCLUDE_DIRECTORIES( + scalarTest + PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar/" + PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc" + PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" + ) + add_test( + NAME scalarTest + COMMAND scalarTest + ) +ENDIF() diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 2ad2fc9029..819d51c4e7 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -69,7 +69,7 @@ typedef struct SSchHbTrans { typedef struct SSchApiStat { -#ifdef WINDOWS +#if defined(WINDOWS) || defined(_TD_DARWIN_64) size_t avoidCompilationErrors; #endif @@ -77,7 +77,7 @@ typedef struct SSchApiStat { typedef struct SSchRuntimeStat { -#ifdef WINDOWS +#if defined(WINDOWS) || defined(_TD_DARWIN_64) size_t avoidCompilationErrors; #endif @@ -85,7 +85,7 @@ typedef struct SSchRuntimeStat { typedef struct SSchJobStat { -#ifdef WINDOWS +#if defined(WINDOWS) || defined(_TD_DARWIN_64) size_t avoidCompilationErrors; #endif diff --git 
a/source/libs/scheduler/test/CMakeLists.txt b/source/libs/scheduler/test/CMakeLists.txt index 0b0aafebb9..ce92886221 100644 --- a/source/libs/scheduler/test/CMakeLists.txt +++ b/source/libs/scheduler/test/CMakeLists.txt @@ -1,18 +1,20 @@ MESSAGE(STATUS "build scheduler unit test") -# GoogleTest requires at least C++11 -SET(CMAKE_CXX_STANDARD 11) -AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) +IF(NOT TD_DARWIN) + # GoogleTest requires at least C++11 + SET(CMAKE_CXX_STANDARD 11) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -ADD_EXECUTABLE(schedulerTest ${SOURCE_LIST}) -TARGET_LINK_LIBRARIES( - schedulerTest - PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler -) + ADD_EXECUTABLE(schedulerTest ${SOURCE_LIST}) + TARGET_LINK_LIBRARIES( + schedulerTest + PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler + ) -TARGET_INCLUDE_DIRECTORIES( - schedulerTest - PUBLIC "${TD_SOURCE_DIR}/include/libs/scheduler/" - PRIVATE "${TD_SOURCE_DIR}/source/libs/scheduler/inc" -) + TARGET_INCLUDE_DIRECTORIES( + schedulerTest + PUBLIC "${TD_SOURCE_DIR}/include/libs/scheduler/" + PRIVATE "${TD_SOURCE_DIR}/source/libs/scheduler/inc" + ) +ENDIF() \ No newline at end of file diff --git a/source/libs/sync/inc/syncAppendEntriesReply.h b/source/libs/sync/inc/syncAppendEntriesReply.h index 70ce5a72c6..03148252fb 100644 --- a/source/libs/sync/inc/syncAppendEntriesReply.h +++ b/source/libs/sync/inc/syncAppendEntriesReply.h @@ -44,11 +44,6 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg); -typedef struct SReaderParam { - SyncIndex start; - SyncIndex end; -} SReaderParam; - #ifdef __cplusplus } #endif diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 66e5d28bdd..8936cd6ed9 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -174,8 +174,9 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak); int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize); // option -bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); -SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex); +bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode); +SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex); // ping -------------- int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg); diff --git a/source/libs/sync/inc/syncRaftEntry.h b/source/libs/sync/inc/syncRaftEntry.h index 37f18a6388..82d5c0a6ea 100644 --- a/source/libs/sync/inc/syncRaftEntry.h +++ b/source/libs/sync/inc/syncRaftEntry.h @@ -29,19 +29,20 @@ extern "C" { typedef struct SSyncRaftEntry { uint32_t bytes; - uint32_t msgType; // SyncClientRequest msgType - uint32_t originalRpcType; // user RpcMsg msgType + uint32_t msgType; // TDMT_SYNC_CLIENT_REQUEST + uint32_t originalRpcType; // origin RpcMsg msgType uint64_t seqNum; bool isWeak; SyncTerm term; SyncIndex index; - uint32_t dataLen; // user RpcMsg.contLen - char data[]; // user RpcMsg.pCont + uint32_t dataLen; // origin RpcMsg.contLen + char data[]; // origin RpcMsg.pCont } SSyncRaftEntry; SSyncRaftEntry* syncEntryBuild(uint32_t 
dataLen); SSyncRaftEntry* syncEntryBuild2(SyncClientRequest* pMsg, SyncTerm term, SyncIndex index); // step 4 SSyncRaftEntry* syncEntryBuild3(SyncClientRequest* pMsg, SyncTerm term, SyncIndex index); +SSyncRaftEntry* syncEntryBuild4(SRpcMsg* pOriginalMsg, SyncTerm term, SyncIndex index); SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId); void syncEntryDestory(SSyncRaftEntry* pEntry); char* syncEntrySerialize(const SSyncRaftEntry* pEntry, uint32_t* len); // step 5 diff --git a/source/libs/sync/inc/syncReplication.h b/source/libs/sync/inc/syncReplication.h index 07e12460da..dad7d9b5ec 100644 --- a/source/libs/sync/inc/syncReplication.h +++ b/source/libs/sync/inc/syncReplication.h @@ -54,6 +54,7 @@ extern "C" { int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode); + int32_t syncNodeReplicate(SSyncNode* pSyncNode); int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg); int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg); diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index 6801e7212a..3df9c243e7 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -37,26 +37,28 @@ extern "C" { //--------------------------------------------------- typedef struct SSyncSnapshotSender { - bool start; - int32_t seq; - int32_t ack; - void *pReader; - void *pCurrentBlock; - int32_t blockLen; - SSnapshot snapshot; - SSyncCfg lastConfig; - int64_t sendingMS; - SSyncNode *pSyncNode; - int32_t replicaIndex; - SyncTerm term; - SyncTerm privateTerm; - bool finish; + bool start; + int32_t seq; + int32_t ack; + void *pReader; + void *pCurrentBlock; + int32_t blockLen; + SSnapshotParam snapshotParam; + SSnapshot snapshot; + SSyncCfg lastConfig; + int64_t sendingMS; + SSyncNode *pSyncNode; + int32_t replicaIndex; + SyncTerm term; + SyncTerm privateTerm; + bool finish; } SSyncSnapshotSender; SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaIndex); void snapshotSenderDestroy(SSyncSnapshotSender *pSender); bool snapshotSenderIsStart(SSyncSnapshotSender *pSender); -int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader); +int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapshotParam, SSnapshot snapshot, + void *pReader); int32_t snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish); int32_t snapshotSend(SSyncSnapshotSender *pSender); int32_t snapshotReSend(SSyncSnapshotSender *pSender); @@ -67,14 +69,15 @@ char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event); //--------------------------------------------------- typedef struct SSyncSnapshotReceiver { - bool start; - int32_t ack; - void *pWriter; - SyncTerm term; - SyncTerm privateTerm; - SSnapshot snapshot; - SRaftId fromId; - SSyncNode *pSyncNode; + bool start; + int32_t ack; + void *pWriter; + SyncTerm term; + SyncTerm privateTerm; + SSnapshotParam snapshotParam; + SSnapshot snapshot; + SRaftId fromId; + SSyncNode *pSyncNode; } SSyncSnapshotReceiver; diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index be48227d40..c923ee3d1d 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -628,8 +628,6 @@ 
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { #endif -static int32_t syncNodeMakeLogSame2(SSyncNode* ths, SyncAppendEntriesBatch* pMsg) { return 0; } - static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { int32_t code; @@ -675,6 +673,51 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { return code; } +static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) { + int32_t code; + + SyncIndex delBegin = FromIndex; + SyncIndex delEnd = ths->pLogStore->syncLogLastIndex(ths->pLogStore); + + // invert roll back! + for (SyncIndex index = delEnd; index >= delBegin; --index) { + if (ths->pFsm->FpRollBackCb != NULL) { + SSyncRaftEntry* pRollBackEntry; + code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, index, &pRollBackEntry); + ASSERT(code == 0); + ASSERT(pRollBackEntry != NULL); + + if (syncUtilUserRollback(pRollBackEntry->msgType)) { + SRpcMsg rpcMsg; + syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg); + + SFsmCbMeta cbMeta = {0}; + cbMeta.index = pRollBackEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); + cbMeta.isWeak = pRollBackEntry->isWeak; + cbMeta.code = 0; + cbMeta.state = ths->state; + cbMeta.seqNum = pRollBackEntry->seqNum; + ths->pFsm->FpRollBackCb(ths->pFsm, &rpcMsg, cbMeta); + rpcFreeCont(rpcMsg.pCont); + } + + syncEntryDestory(pRollBackEntry); + } + } + + // delete confict entries + code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin); + ASSERT(code == 0); + + char eventLog[128]; + snprintf(eventLog, sizeof(eventLog), "log truncate, from %ld to %ld", delBegin, delEnd); + syncNodeEventLog(ths, eventLog); + logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore); + + return code; +} + static int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry) { SRpcMsg rpcMsg; syncEntry2OriginalRpc(pEntry, &rpcMsg); @@ -694,6 +737,31 @@ static int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry) { return 0; } +static bool syncNodeOnAppendEntriesBatchLogOK(SSyncNode* pSyncNode, SyncAppendEntriesBatch* pMsg) { + if (pMsg->prevLogIndex == SYNC_INDEX_INVALID) { + return true; + } + + SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode); + if (pMsg->prevLogIndex > myLastIndex) { + sDebug("vgId:%d sync log not ok, preindex:%ld", pSyncNode->vgId, pMsg->prevLogIndex); + return false; + } + + SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1); + if (myPreLogTerm == SYNC_TERM_INVALID) { + sDebug("vgId:%d sync log not ok2, preindex:%ld", pSyncNode->vgId, pMsg->prevLogIndex); + return false; + } + + if (pMsg->prevLogIndex <= myLastIndex && pMsg->prevLogTerm == myPreLogTerm) { + return true; + } + + sDebug("vgId:%d sync log not ok3, preindex:%ld", pSyncNode->vgId, pMsg->prevLogIndex); + return false; +} + // really pre log match // prevLogIndex == -1 static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries* pMsg) { @@ -767,7 +835,6 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc // operation: // if hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex, append entry // match my-commit-index or my-commit-index + 1 - // no operation on log do { bool condition = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) && (pMsg->prevLogIndex <= ths->commitIndex); @@ -780,14 +847,11 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncNodeEventLog(ths, logBuf); } while 
(0); - SyncIndex matchIndex = ths->commitIndex; - bool hasAppendEntries = pMsg->dataLen > 0; - if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) { - SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); - int32_t retArrSize = 0; - syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, SYNC_MAX_BATCH_SIZE, &retArrSize); + SyncIndex matchIndex = ths->commitIndex; + bool hasAppendEntries = pMsg->dataLen > 0; + SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg); + if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) { // make log same do { SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); @@ -795,15 +859,15 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc if (hasExtraEntries) { // make log same, rollback deleted entries - code = syncNodeMakeLogSame2(ths, pMsg); + code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); ASSERT(code == 0); } } while (0); // append entry batch - for (int32_t i = 0; i < retArrSize; ++i) { - SSyncRaftEntry* pAppendEntry = syncEntryBuild(1234); + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); if (code != 0) { return -1; @@ -821,7 +885,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc walFsync(pWal, true); // update match index - matchIndex = pMsg->prevLogIndex + retArrSize; + matchIndex = pMsg->prevLogIndex + pMsg->dataCount; } // prepare response msg @@ -839,13 +903,12 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg); syncAppendEntriesReplyDestroy(pReply); - return ret; + return 0; } } while (0); // calculate logOK here, before will coredump, due to fake match - // bool logOK = syncNodeOnAppendEntriesLogOK(ths, pMsg); - bool logOK = true; + bool logOK = syncNodeOnAppendEntriesBatchLogOK(ths, pMsg); // not match // @@ -866,8 +929,9 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc if (condition) { char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, not match, pre-index:%ld, pre-term:%lu, datalen:%d", - pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen); + snprintf(logBuf, sizeof(logBuf), + "recv sync-append-entries-batch, not match, pre-index:%ld, pre-term:%lu, datalen:%d", pMsg->prevLogIndex, + pMsg->prevLogTerm, pMsg->dataLen); syncNodeEventLog(ths, logBuf); // prepare response msg @@ -885,7 +949,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg); syncAppendEntriesReplyDestroy(pReply); - return ret; + return 0; } } while (0); @@ -905,28 +969,26 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc bool hasExtraEntries = myLastIndex > pMsg->prevLogIndex; // has entries in SyncAppendEntries msg - bool hasAppendEntries = pMsg->dataLen > 0; + bool hasAppendEntries = pMsg->dataLen > 0; + SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg); - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, match, pre-index:%ld, pre-term:%lu, datalen:%d", - pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen); - syncNodeEventLog(ths, logBuf); + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, match, 
pre-index:%ld, pre-term:%lu, datalen:%d", + pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen); + syncNodeEventLog(ths, logBuf); + } while (0); if (hasExtraEntries) { // make log same, rollback deleted entries - // code = syncNodeMakeLogSame(ths, pMsg); + code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); ASSERT(code == 0); } - int32_t retArrSize = 0; if (hasAppendEntries) { - SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); - syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, SYNC_MAX_BATCH_SIZE, &retArrSize); - // append entry batch - for (int32_t i = 0; i < retArrSize; ++i) { - SSyncRaftEntry* pAppendEntry = syncEntryBuild(1234); + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); if (code != 0) { return -1; @@ -951,7 +1013,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc pReply->term = ths->pRaftStore->currentTerm; pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = true; - pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + retArrSize : pMsg->prevLogIndex; + pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + pMsg->dataCount : pMsg->prevLogIndex; // send response SRpcMsg rpcMsg; @@ -991,11 +1053,11 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc ASSERT(code == 0); } } - return ret; + return 0; } } while (0); - return ret; + return 0; } int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMsg) { diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 629d83eb51..f3206e9ccc 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -118,12 +118,12 @@ static void syncNodeStartSnapshot(SSyncNode* ths, SyncIndex beginIndex, SyncInde SSnapshot snapshot = { .data = NULL, .lastApplyIndex = endIndex, .lastApplyTerm = lastApplyTerm, .lastConfigIndex = SYNC_INDEX_INVALID}; - void* pReader = NULL; - SReaderParam readerParam = {.start = beginIndex, .end = endIndex}; + void* pReader = NULL; + SSnapshotParam readerParam = {.start = beginIndex, .end = endIndex}; ths->pFsm->FpSnapshotStartRead(ths->pFsm, &readerParam, &pReader); if (!snapshotSenderIsStart(pSender) && pMsg->privateTerm < pSender->privateTerm) { ASSERT(pReader != NULL); - snapshotSenderStart(pSender, snapshot, pReader); + snapshotSenderStart(pSender, readerParam, snapshot, pReader); } else { if (pReader != NULL) { @@ -165,23 +165,22 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie if (ths->pLogStore->syncLogExist(ths->pLogStore, newNextIndex) && ths->pLogStore->syncLogExist(ths->pLogStore, newNextIndex - 1)) { - // nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1] + // update next-index, match-index syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), newNextIndex); - - // matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex] syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), newMatchIndex); // maybe commit if (ths->state == TAOS_SYNC_STATE_LEADER) { syncMaybeAdvanceCommitIndex(ths); } + } else { // start snapshot - SSnapshot snapshot; - ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot); - syncNodeStartSnapshot(ths, newMatchIndex + 1, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pMsg); + SSnapshot oldSnapshot; + 
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &oldSnapshot); + syncNodeStartSnapshot(ths, newMatchIndex + 1, oldSnapshot.lastApplyIndex, oldSnapshot.lastApplyTerm, pMsg); - syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), snapshot.lastApplyIndex + 1); + syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), oldSnapshot.lastApplyIndex + 1); syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), newMatchIndex); } @@ -301,7 +300,8 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries !snapshotSenderIsStart(pSender) && pMsg->privateTerm < pSender->privateTerm) { // has snapshot ASSERT(pReader != NULL); - snapshotSenderStart(pSender, snapshot, pReader); + SSnapshotParam readerParam = {.start = 0, .end = snapshot.lastApplyIndex}; + snapshotSenderStart(pSender, readerParam, snapshot, pReader); } else { // no snapshot diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index ddda60bc18..21d6b55cfa 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -65,6 +65,7 @@ int32_t syncInit() { syncCleanUp(); ret = -1; } else { + sDebug("sync rsetId:%" PRId64 " is open", tsNodeRefId); ret = syncEnvStart(); } } @@ -77,6 +78,7 @@ void syncCleanUp() { ASSERT(ret == 0); if (tsNodeRefId != -1) { + sDebug("sync rsetId:%" PRId64 " is closed", tsNodeRefId); taosCloseRef(tsNodeRefId); tsNodeRefId = -1; } @@ -96,6 +98,7 @@ int64_t syncOpen(const SSyncInfo* pSyncInfo) { return -1; } + sDebug("vgId:%d, rid:%" PRId64 " is added to rsetId:%" PRId64, pSyncInfo->vgId, pSyncNode->rid, tsNodeRefId); return pSyncNode->rid; } @@ -136,13 +139,14 @@ void syncStartStandBy(int64_t rid) { void syncStop(int64_t rid) { SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); - if (pSyncNode == NULL) { - return; - } + if (pSyncNode == NULL) return; + + int32_t vgId = pSyncNode->vgId; syncNodeClose(pSyncNode); taosReleaseRef(tsNodeRefId, pSyncNode->rid); taosRemoveRef(tsNodeRefId, rid); + sDebug("vgId:%d, rid:%" PRId64 " is removed from rsetId:%" PRId64, vgId, rid, tsNodeRefId); } int32_t syncSetStandby(int64_t rid) { @@ -815,7 +819,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) { // create a new raft config file SRaftCfgMeta meta; meta.isStandBy = pSyncInfo->isStandBy; - meta.snapshotEnable = pSyncInfo->snapshotEnable; + meta.snapshotEnable = pSyncInfo->snapshotStrategy; meta.lastConfigIndex = SYNC_INDEX_INVALID; ret = raftCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), meta, pSyncNode->configPath); ASSERT(ret == 0); @@ -1100,7 +1104,9 @@ void syncNodeClose(SSyncNode* pSyncNode) { } // option -bool syncNodeSnapshotEnable(SSyncNode* pSyncNode) { return pSyncNode->pRaftCfg->snapshotEnable; } +// bool syncNodeSnapshotEnable(SSyncNode* pSyncNode) { return pSyncNode->pRaftCfg->snapshotEnable; } + +ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode) { return pSyncNode->pRaftCfg->snapshotEnable; } // ping -------------- int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg) { diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index ad352df59f..e53b9d5e36 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -15,6 +15,7 @@ #include "syncMessage.h" #include "syncRaftCfg.h" +#include "syncRaftEntry.h" #include "syncUtil.h" #include "tcoding.h" @@ -996,7 +997,135 @@ SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMet return pMsg; } -void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* 
pSyncMsg, SRpcMsg* pRpcMsg) {} +void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg) { + memset(pRpcMsg, 0, sizeof(*pRpcMsg)); + pRpcMsg->msgType = pSyncMsg->msgType; + pRpcMsg->contLen = pSyncMsg->bytes; + pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen); + memcpy(pRpcMsg->pCont, pSyncMsg, pRpcMsg->contLen); +} + +void syncClientRequestBatchDestroy(SyncClientRequestBatch* pMsg) { + if (pMsg != NULL) { + taosMemoryFree(pMsg); + } +} + +void syncClientRequestBatchDestroyDeep(SyncClientRequestBatch* pMsg) { + if (pMsg != NULL) { + int32_t arrSize = pMsg->dataCount; + int32_t raftMetaArrayLen = sizeof(SRaftMeta) * arrSize; + SRpcMsg* msgArr = (SRpcMsg*)((char*)(pMsg->data) + raftMetaArrayLen); + for (int i = 0; i < arrSize; ++i) { + if (msgArr[i].pCont != NULL) { + rpcFreeCont(msgArr[i].pCont); + } + } + + taosMemoryFree(pMsg); + } +} + +SRaftMeta* syncClientRequestBatchMetaArr(const SyncClientRequestBatch* pSyncMsg) { + SRaftMeta* raftMetaArr = (SRaftMeta*)(pSyncMsg->data); + return raftMetaArr; +} + +SRpcMsg* syncClientRequestBatchRpcMsgArr(const SyncClientRequestBatch* pSyncMsg) { + int32_t arrSize = pSyncMsg->dataCount; + int32_t raftMetaArrayLen = sizeof(SRaftMeta) * arrSize; + SRpcMsg* msgArr = (SRpcMsg*)((char*)(pSyncMsg->data) + raftMetaArrayLen); + return msgArr; +} + +SyncClientRequestBatch* syncClientRequestBatchFromRpcMsg(const SRpcMsg* pRpcMsg) { + SyncClientRequestBatch* pSyncMsg = taosMemoryMalloc(pRpcMsg->contLen); + ASSERT(pSyncMsg != NULL); + memcpy(pSyncMsg, pRpcMsg->pCont, pRpcMsg->contLen); + ASSERT(pRpcMsg->contLen == pSyncMsg->bytes); + + return pSyncMsg; +} + +cJSON* syncClientRequestBatch2Json(const SyncClientRequestBatch* pMsg) { + char u64buf[128] = {0}; + cJSON* pRoot = cJSON_CreateObject(); + + if (pMsg != NULL) { + cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes); + cJSON_AddNumberToObject(pRoot, "vgId", pMsg->vgId); + cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType); + cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen); + cJSON_AddNumberToObject(pRoot, "dataCount", pMsg->dataCount); + + SRaftMeta* metaArr = syncClientRequestBatchMetaArr(pMsg); + SRpcMsg* msgArr = syncClientRequestBatchRpcMsgArr(pMsg); + + cJSON* pMetaArr = cJSON_CreateArray(); + cJSON_AddItemToObject(pRoot, "metaArr", pMetaArr); + for (int i = 0; i < pMsg->dataCount; ++i) { + cJSON* pMeta = cJSON_CreateObject(); + cJSON_AddNumberToObject(pMeta, "seqNum", metaArr[i].seqNum); + cJSON_AddNumberToObject(pMeta, "isWeak", metaArr[i].isWeak); + cJSON_AddItemToArray(pMetaArr, pMeta); + } + + cJSON* pMsgArr = cJSON_CreateArray(); + cJSON_AddItemToObject(pRoot, "msgArr", pMsgArr); + for (int i = 0; i < pMsg->dataCount; ++i) { + cJSON* pRpcMsgJson = syncRpcMsg2Json(&msgArr[i]); + cJSON_AddItemToArray(pMsgArr, pRpcMsgJson); + } + + char* s; + s = syncUtilprintBin((char*)(pMsg->data), pMsg->dataLen); + cJSON_AddStringToObject(pRoot, "data", s); + taosMemoryFree(s); + s = syncUtilprintBin2((char*)(pMsg->data), pMsg->dataLen); + cJSON_AddStringToObject(pRoot, "data2", s); + taosMemoryFree(s); + } + + cJSON* pJson = cJSON_CreateObject(); + cJSON_AddItemToObject(pJson, "SyncClientRequestBatch", pRoot); + return pJson; +} + +char* syncClientRequestBatch2Str(const SyncClientRequestBatch* pMsg) { + cJSON* pJson = syncClientRequestBatch2Json(pMsg); + char* serialized = cJSON_Print(pJson); + cJSON_Delete(pJson); + return serialized; +} + +// for debug ---------------------- +void syncClientRequestBatchPrint(const SyncClientRequestBatch* pMsg) { + char* 
serialized = syncClientRequestBatch2Str(pMsg); + printf("syncClientRequestBatchPrint | len:%lu | %s \n", strlen(serialized), serialized); + fflush(NULL); + taosMemoryFree(serialized); +} + +void syncClientRequestBatchPrint2(char* s, const SyncClientRequestBatch* pMsg) { + char* serialized = syncClientRequestBatch2Str(pMsg); + printf("syncClientRequestBatchPrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized); + fflush(NULL); + taosMemoryFree(serialized); +} + +void syncClientRequestBatchLog(const SyncClientRequestBatch* pMsg) { + char* serialized = syncClientRequestBatch2Str(pMsg); + sTrace("syncClientRequestBatchLog | len:%lu | %s", strlen(serialized), serialized); + taosMemoryFree(serialized); +} + +void syncClientRequestBatchLog2(char* s, const SyncClientRequestBatch* pMsg) { + if (gRaftDetailLog) { + char* serialized = syncClientRequestBatch2Str(pMsg); + sTraceLong("syncClientRequestBatchLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized); + taosMemoryFree(serialized); + } +} // ---- message process SyncRequestVote---- SyncRequestVote* syncRequestVoteBuild(int32_t vgId) { @@ -1472,21 +1601,20 @@ void syncAppendEntriesLog2(char* s, const SyncAppendEntries* pMsg) { // block1: SOffsetAndContLen // block2: SOffsetAndContLen Array -// block3: SRpcMsg Array -// block4: SRpcMsg pCont Array +// block3: entry Array -SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t arrSize, int32_t vgId) { - ASSERT(rpcMsgArr != NULL); +SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SSyncRaftEntry** entryPArr, int32_t arrSize, int32_t vgId) { + ASSERT(entryPArr != NULL); ASSERT(arrSize > 0); int32_t dataLen = 0; int32_t metaArrayLen = sizeof(SOffsetAndContLen) * arrSize; // - int32_t rpcArrayLen = sizeof(SRpcMsg) * arrSize; // SRpcMsg - int32_t contArrayLen = 0; + int32_t entryArrayLen = 0; for (int i = 0; i < arrSize; ++i) { // SRpcMsg pCont - contArrayLen += rpcMsgArr[i].contLen; + SSyncRaftEntry* pEntry = entryPArr[i]; + entryArrayLen += pEntry->bytes; } - dataLen += (metaArrayLen + rpcArrayLen + contArrayLen); + dataLen += (metaArrayLen + entryArrayLen); uint32_t bytes = sizeof(SyncAppendEntriesBatch) + dataLen; SyncAppendEntriesBatch* pMsg = taosMemoryMalloc(bytes); @@ -1498,30 +1626,30 @@ SyncAppendEntriesBatch* syncAppendEntriesBatchBuild(SRpcMsg* rpcMsgArr, int32_t pMsg->dataLen = dataLen; SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pMsg->data); - SRpcMsg* msgArr = (SRpcMsg*)((char*)(pMsg->data) + metaArrayLen); char* pData = pMsg->data; for (int i = 0; i < arrSize; ++i) { - // init + // init meta if (i == 0) { - metaArr[i].offset = metaArrayLen + rpcArrayLen; - metaArr[i].contLen = rpcMsgArr[i].contLen; + metaArr[i].offset = metaArrayLen; + metaArr[i].contLen = entryPArr[i]->bytes; } else { metaArr[i].offset = metaArr[i - 1].offset + metaArr[i - 1].contLen; - metaArr[i].contLen = rpcMsgArr[i].contLen; + metaArr[i].contLen = entryPArr[i]->bytes; } - // init msgArr - msgArr[i] = rpcMsgArr[i]; - - // init data - ASSERT(rpcMsgArr[i].contLen == metaArr[i].contLen); - memcpy(pData + metaArr[i].offset, rpcMsgArr[i].pCont, rpcMsgArr[i].contLen); + // init entry array + ASSERT(metaArr[i].contLen == entryPArr[i]->bytes); + memcpy(pData + metaArr[i].offset, entryPArr[i], metaArr[i].contLen); } return pMsg; } +SOffsetAndContLen* syncAppendEntriesBatchMetaTableArray(SyncAppendEntriesBatch* pMsg) { + return (SOffsetAndContLen*)(pMsg->data); +} + void syncAppendEntriesBatchDestroy(SyncAppendEntriesBatch* pMsg) { if (pMsg != NULL) { taosMemoryFree(pMsg); 
@@ -1634,16 +1762,12 @@ cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg) { cJSON_AddNumberToObject(pRoot, "dataLen", pMsg->dataLen); int32_t metaArrayLen = sizeof(SOffsetAndContLen) * pMsg->dataCount; // - int32_t rpcArrayLen = sizeof(SRpcMsg) * pMsg->dataCount; // SRpcMsg - int32_t contArrayLen = pMsg->dataLen - metaArrayLen - rpcArrayLen; + int32_t entryArrayLen = pMsg->dataLen - metaArrayLen; cJSON_AddNumberToObject(pRoot, "metaArrayLen", metaArrayLen); - cJSON_AddNumberToObject(pRoot, "rpcArrayLen", rpcArrayLen); - cJSON_AddNumberToObject(pRoot, "contArrayLen", contArrayLen); + cJSON_AddNumberToObject(pRoot, "entryArrayLen", entryArrayLen); SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pMsg->data); - SRpcMsg* msgArr = (SRpcMsg*)(pMsg->data + metaArrayLen); - void* pData = (void*)(pMsg->data + metaArrayLen + rpcArrayLen); cJSON* pMetaArr = cJSON_CreateArray(); cJSON_AddItemToObject(pRoot, "metaArr", pMetaArr); @@ -1654,14 +1778,12 @@ cJSON* syncAppendEntriesBatch2Json(const SyncAppendEntriesBatch* pMsg) { cJSON_AddItemToArray(pMetaArr, pMeta); } - cJSON* pMsgArr = cJSON_CreateArray(); - cJSON_AddItemToObject(pRoot, "msgArr", pMsgArr); + cJSON* pEntryArr = cJSON_CreateArray(); + cJSON_AddItemToObject(pRoot, "entryArr", pEntryArr); for (int i = 0; i < pMsg->dataCount; ++i) { - cJSON* pRpcMsgJson = cJSON_CreateObject(); - cJSON_AddNumberToObject(pRpcMsgJson, "code", msgArr[i].code); - cJSON_AddNumberToObject(pRpcMsgJson, "contLen", msgArr[i].contLen); - cJSON_AddNumberToObject(pRpcMsgJson, "msgType", msgArr[i].msgType); - cJSON_AddItemToArray(pMsgArr, pRpcMsgJson); + SSyncRaftEntry* pEntry = (SSyncRaftEntry*)(pMsg->data + metaArr[i].offset); + cJSON* pEntryJson = syncEntry2Json(pEntry); + cJSON_AddItemToArray(pEntryArr, pEntryJson); } char* s; @@ -1685,33 +1807,6 @@ char* syncAppendEntriesBatch2Str(const SyncAppendEntriesBatch* pMsg) { return serialized; } -void syncAppendEntriesBatch2RpcMsgArray(SyncAppendEntriesBatch* pSyncMsg, SRpcMsg* rpcMsgArr, int32_t maxArrSize, - int32_t* pRetArrSize) { - if (pRetArrSize != NULL) { - *pRetArrSize = pSyncMsg->dataCount; - } - - int32_t arrSize = pSyncMsg->dataCount; - if (arrSize > maxArrSize) { - arrSize = maxArrSize; - } - - int32_t metaArrayLen = sizeof(SOffsetAndContLen) * pSyncMsg->dataCount; // - int32_t rpcArrayLen = sizeof(SRpcMsg) * pSyncMsg->dataCount; // SRpcMsg - int32_t contArrayLen = pSyncMsg->dataLen - metaArrayLen - rpcArrayLen; - - SOffsetAndContLen* metaArr = (SOffsetAndContLen*)(pSyncMsg->data); - SRpcMsg* msgArr = (SRpcMsg*)(pSyncMsg->data + metaArrayLen); - void* pData = pSyncMsg->data + metaArrayLen + rpcArrayLen; - - for (int i = 0; i < arrSize; ++i) { - rpcMsgArr[i] = msgArr[i]; - rpcMsgArr[i].pCont = rpcMallocCont(msgArr[i].contLen); - void* pRpcCont = pSyncMsg->data + metaArr[i].offset; - memcpy(rpcMsgArr[i].pCont, pRpcCont, rpcMsgArr[i].contLen); - } -} - // for debug ---------------------- void syncAppendEntriesBatchPrint(const SyncAppendEntriesBatch* pMsg) { char* serialized = syncAppendEntriesBatch2Str(pMsg); @@ -2159,6 +2254,9 @@ cJSON* syncSnapshotSend2Json(const SyncSnapshotSend* pMsg) { snprintf(u64buf, sizeof(u64buf), "%lu", pMsg->privateTerm); cJSON_AddStringToObject(pRoot, "privateTerm", u64buf); + snprintf(u64buf, sizeof(u64buf), "%ld", pMsg->beginIndex); + cJSON_AddStringToObject(pRoot, "beginIndex", u64buf); + snprintf(u64buf, sizeof(u64buf), "%ld", pMsg->lastIndex); cJSON_AddStringToObject(pRoot, "lastIndex", u64buf); diff --git a/source/libs/sync/src/syncRaftEntry.c 
b/source/libs/sync/src/syncRaftEntry.c index 225360630c..0ee4684ea8 100644 --- a/source/libs/sync/src/syncRaftEntry.c +++ b/source/libs/sync/src/syncRaftEntry.c @@ -50,6 +50,22 @@ SSyncRaftEntry* syncEntryBuild3(SyncClientRequest* pMsg, SyncTerm term, SyncInde return pEntry; } +SSyncRaftEntry* syncEntryBuild4(SRpcMsg* pOriginalMsg, SyncTerm term, SyncIndex index) { + SSyncRaftEntry* pEntry = syncEntryBuild(pOriginalMsg->contLen); + ASSERT(pEntry != NULL); + + pEntry->msgType = TDMT_SYNC_CLIENT_REQUEST; + pEntry->originalRpcType = pOriginalMsg->msgType; + pEntry->seqNum = 0; + pEntry->isWeak = 0; + pEntry->term = term; + pEntry->index = index; + pEntry->dataLen = pOriginalMsg->contLen; + memcpy(pEntry->data, pOriginalMsg->pCont, pOriginalMsg->contLen); + + return pEntry; +} + SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId) { // init rpcMsg SMsgHead head; diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index a026892629..83495e7486 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -32,6 +32,7 @@ static SyncTerm raftLogLastTerm(struct SSyncLogStore* pLogStore); static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry); static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIndex); +static bool raftLogExist(struct SSyncLogStore* pLogStore, SyncIndex index); // private function static int32_t raftLogGetLastEntry(SSyncLogStore* pLogStore, SSyncRaftEntry** ppLastEntry); @@ -83,6 +84,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) { pLogStore->syncLogGetEntry = raftLogGetEntry; pLogStore->syncLogTruncate = raftLogTruncate; pLogStore->syncLogWriteIndex = raftLogWriteIndex; + pLogStore->syncLogExist = raftLogExist; return pLogStore; } @@ -168,6 +170,13 @@ static SyncIndex raftLogWriteIndex(struct SSyncLogStore* pLogStore) { return lastVer + 1; } +static bool raftLogExist(struct SSyncLogStore* pLogStore, SyncIndex index) { + SSyncLogStoreData* pData = pLogStore->data; + SWal* pWal = pData->pWal; + bool b = walLogExist(pWal, index); + return b; +} + // if success, return last term // if not log, return 0 // if error, return SYNC_TERM_INVALID diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 4908822a3a..bcca44130a 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -145,26 +145,34 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { return -1; } - SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); + SSyncRaftEntry* entryPArr[SYNC_MAX_BATCH_SIZE]; + memset(entryPArr, 0, sizeof(entryPArr)); - int32_t getCount = 0; + int32_t getCount = 0; + SyncIndex getEntryIndex = nextIndex; for (int32_t i = 0; i < pSyncNode->batchSize; ++i) { SSyncRaftEntry* pEntry; - int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, nextIndex, &pEntry); + int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, getEntryIndex, &pEntry); if (code == 0) { ASSERT(pEntry != NULL); - // get rpc msg [i] from entry - syncEntryDestory(pEntry); + entryPArr[i] = pEntry; getCount++; } else { break; } } - SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(rpcMsgArr, getCount, pSyncNode->vgId); + SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(entryPArr, getCount, 
pSyncNode->vgId); ASSERT(pMsg != NULL); + for (int32_t i = 0; i < pSyncNode->batchSize; ++i) { + SSyncRaftEntry* pEntry = entryPArr[i]; + if (pEntry != NULL) { + syncEntryDestory(pEntry); + entryPArr[i] = NULL; + } + } + // prepare msg pMsg->srcId = pSyncNode->myRaftId; pMsg->destId = *pDestId; diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 4466cccbbc..cefe676f90 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -80,13 +80,15 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) { bool snapshotSenderIsStart(SSyncSnapshotSender *pSender) { return pSender->start; } // begin send snapshot by snapshot, pReader -int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader) { +int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapshotParam, SSnapshot snapshot, + void *pReader) { ASSERT(!snapshotSenderIsStart(pSender)); - // init snapshot and reader + // init snapshot, parm, reader ASSERT(pSender->pReader == NULL); pSender->pReader = pReader; pSender->snapshot = snapshot; + pSender->snapshotParam = snapshotParam; // init current block if (pSender->pCurrentBlock != NULL) { @@ -162,6 +164,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, vo pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex]; pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; @@ -439,10 +442,13 @@ static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm p pReceiver->snapshot.lastApplyIndex = pBeginMsg->lastIndex; pReceiver->snapshot.lastApplyTerm = pBeginMsg->lastTerm; pReceiver->snapshot.lastConfigIndex = pBeginMsg->lastConfigIndex; + pReceiver->snapshotParam.start = pBeginMsg->beginIndex; + pReceiver->snapshotParam.end = pBeginMsg->lastIndex; // write data ASSERT(pReceiver->pWriter == NULL); - int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStartWrite(pReceiver->pSyncNode->pFsm, &(pReceiver->pWriter)); + int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStartWrite(pReceiver->pSyncNode->pFsm, + &(pReceiver->snapshotParam), &(pReceiver->pWriter)); ASSERT(ret == 0); // event log diff --git a/source/libs/sync/test/CMakeLists.txt b/source/libs/sync/test/CMakeLists.txt index c549b78399..37d9707cfd 100644 --- a/source/libs/sync/test/CMakeLists.txt +++ b/source/libs/sync/test/CMakeLists.txt @@ -24,6 +24,7 @@ add_executable(syncAppendEntriesTest "") add_executable(syncAppendEntriesBatchTest "") add_executable(syncAppendEntriesReplyTest "") add_executable(syncClientRequestTest "") +add_executable(syncClientRequestBatchTest "") add_executable(syncTimeoutTest "") add_executable(syncPingTest "") add_executable(syncPingReplyTest "") @@ -159,6 +160,10 @@ target_sources(syncClientRequestTest PRIVATE "syncClientRequestTest.cpp" ) +target_sources(syncClientRequestBatchTest + PRIVATE + "syncClientRequestBatchTest.cpp" +) target_sources(syncTimeoutTest PRIVATE "syncTimeoutTest.cpp" @@ -407,6 +412,11 @@ target_include_directories(syncClientRequestTest "${TD_SOURCE_DIR}/include/libs/sync" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) +target_include_directories(syncClientRequestBatchTest + PUBLIC + "${TD_SOURCE_DIR}/include/libs/sync" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" 
+) target_include_directories(syncTimeoutTest PUBLIC "${TD_SOURCE_DIR}/include/libs/sync" @@ -658,6 +668,10 @@ target_link_libraries(syncClientRequestTest sync gtest_main ) +target_link_libraries(syncClientRequestBatchTest + sync + gtest_main +) target_link_libraries(syncTimeoutTest sync gtest_main diff --git a/source/libs/sync/test/syncAppendEntriesBatchTest.cpp b/source/libs/sync/test/syncAppendEntriesBatchTest.cpp index 8784ddd637..515d580b35 100644 --- a/source/libs/sync/test/syncAppendEntriesBatchTest.cpp +++ b/source/libs/sync/test/syncAppendEntriesBatchTest.cpp @@ -3,6 +3,7 @@ #include "syncIO.h" #include "syncInt.h" #include "syncMessage.h" +#include "syncRaftEntry.h" #include "syncUtil.h" #include "trpc.h" @@ -15,30 +16,29 @@ void logTest() { sFatal("--- sync log test: fatal"); } -SRpcMsg *createRpcMsg(int32_t i, int32_t dataLen) { - SRpcMsg *pRpcMsg = (SRpcMsg *)taosMemoryMalloc(sizeof(SRpcMsg)); - memset(pRpcMsg, 0, sizeof(SRpcMsg)); - - pRpcMsg->msgType = TDMT_SYNC_PING; - pRpcMsg->contLen = dataLen; - pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen); - pRpcMsg->code = 10 * i; - snprintf((char *)pRpcMsg->pCont, pRpcMsg->contLen, "value_%d", i); - - return pRpcMsg; +SSyncRaftEntry *createEntry(int i) { + SSyncRaftEntry *pEntry = syncEntryBuild(20); + assert(pEntry != NULL); + pEntry->msgType = 1; + pEntry->originalRpcType = 2; + pEntry->seqNum = 3; + pEntry->isWeak = true; + pEntry->term = 100; + pEntry->index = 200; + snprintf(pEntry->data, pEntry->dataLen, "value_%d", i); + return pEntry; } SyncAppendEntriesBatch *createMsg() { - SRpcMsg rpcMsgArr[5]; - memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); + SSyncRaftEntry *entryPArr[5]; + memset(entryPArr, 0, sizeof(entryPArr)); for (int32_t i = 0; i < 5; ++i) { - SRpcMsg *pRpcMsg = createRpcMsg(i, 20); - rpcMsgArr[i] = *pRpcMsg; - taosMemoryFree(pRpcMsg); + SSyncRaftEntry *pEntry = createEntry(i); + entryPArr[i] = pEntry; } - SyncAppendEntriesBatch *pMsg = syncAppendEntriesBatchBuild(rpcMsgArr, 5, 1234); + SyncAppendEntriesBatch *pMsg = syncAppendEntriesBatchBuild(entryPArr, 5, 1234); pMsg->srcId.addr = syncUtilAddr2U64("127.0.0.1", 1234); pMsg->srcId.vgId = 100; pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678); @@ -52,17 +52,17 @@ SyncAppendEntriesBatch *createMsg() { void test1() { SyncAppendEntriesBatch *pMsg = createMsg(); - syncAppendEntriesBatchLog2((char *)"test1:", pMsg); + syncAppendEntriesBatchLog2((char *)"==test1==", pMsg); - SRpcMsg rpcMsgArr[5]; - int32_t retArrSize; - syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, 5, &retArrSize); +/* + SOffsetAndContLen *metaArr = syncAppendEntriesBatchMetaTableArray(pMsg); + int32_t retArrSize = pMsg->dataCount; for (int i = 0; i < retArrSize; ++i) { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "==test1 decode rpc msg %d: msgType:%d, code:%d, contLen:%d, pCont:%s \n", i, - rpcMsgArr[i].msgType, rpcMsgArr[i].code, rpcMsgArr[i].contLen, (char *)rpcMsgArr[i].pCont); - sTrace("%s", logBuf); + SSyncRaftEntry *pEntry = (SSyncRaftEntry*)(pMsg->data + metaArr[i].offset); + ASSERT(pEntry->bytes == metaArr[i].contLen); + syncEntryPrint(pEntry); } +*/ syncAppendEntriesBatchDestroy(pMsg); } diff --git a/source/libs/sync/test/syncClientRequestBatchTest.cpp b/source/libs/sync/test/syncClientRequestBatchTest.cpp new file mode 100644 index 0000000000..ae74baeda4 --- /dev/null +++ b/source/libs/sync/test/syncClientRequestBatchTest.cpp @@ -0,0 +1,125 @@ +#include +#include +#include "syncIO.h" +#include "syncInt.h" +#include "syncMessage.h" +#include "syncUtil.h" + +void logTest() { 
+ sTrace("--- sync log test: trace"); + sDebug("--- sync log test: debug"); + sInfo("--- sync log test: info"); + sWarn("--- sync log test: warn"); + sError("--- sync log test: error"); + sFatal("--- sync log test: fatal"); +} + +SRpcMsg *createRpcMsg(int32_t i, int32_t dataLen) { + SyncPing *pSyncMsg = syncPingBuild(20); + snprintf(pSyncMsg->data, pSyncMsg->dataLen, "value_%d", i); + + SRpcMsg *pRpcMsg = (SRpcMsg *)taosMemoryMalloc(sizeof(SRpcMsg)); + memset(pRpcMsg, 0, sizeof(SRpcMsg)); + pRpcMsg->code = 10 * i; + syncPing2RpcMsg(pSyncMsg, pRpcMsg); + + syncPingDestroy(pSyncMsg); + return pRpcMsg; +} + +SyncClientRequestBatch *createMsg() { + SRpcMsg rpcMsgArr[5]; + memset(rpcMsgArr, 0, sizeof(rpcMsgArr)); + for (int32_t i = 0; i < 5; ++i) { + SRpcMsg *pRpcMsg = createRpcMsg(i, 20); + rpcMsgArr[i] = *pRpcMsg; + taosMemoryFree(pRpcMsg); + } + + SRaftMeta raftArr[5]; + memset(raftArr, 0, sizeof(raftArr)); + for (int32_t i = 0; i < 5; ++i) { + raftArr[i].seqNum = i * 10; + raftArr[i].isWeak = i % 2; + } + + SyncClientRequestBatch *pMsg = syncClientRequestBatchBuild(rpcMsgArr, raftArr, 5, 1234); + return pMsg; +} + +void test1() { + SyncClientRequestBatch *pMsg = createMsg(); + syncClientRequestBatchLog2((char *)"==test1==", pMsg); + syncClientRequestBatchDestroyDeep(pMsg); +} + +/* +void test2() { + SyncClientRequest *pMsg = createMsg(); + uint32_t len = pMsg->bytes; + char * serialized = (char *)taosMemoryMalloc(len); + syncClientRequestSerialize(pMsg, serialized, len); + SyncClientRequest *pMsg2 = syncClientRequestBuild(pMsg->dataLen); + syncClientRequestDeserialize(serialized, len, pMsg2); + syncClientRequestLog2((char *)"test2: syncClientRequestSerialize -> syncClientRequestDeserialize ", pMsg2); + + taosMemoryFree(serialized); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} + +void test3() { + SyncClientRequest *pMsg = createMsg(); + uint32_t len; + char * serialized = syncClientRequestSerialize2(pMsg, &len); + SyncClientRequest *pMsg2 = syncClientRequestDeserialize2(serialized, len); + syncClientRequestLog2((char *)"test3: syncClientRequestSerialize3 -> syncClientRequestDeserialize2 ", pMsg2); + + taosMemoryFree(serialized); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} + +void test4() { + SyncClientRequest *pMsg = createMsg(); + SRpcMsg rpcMsg; + syncClientRequest2RpcMsg(pMsg, &rpcMsg); + SyncClientRequest *pMsg2 = (SyncClientRequest *)taosMemoryMalloc(rpcMsg.contLen); + syncClientRequestFromRpcMsg(&rpcMsg, pMsg2); + syncClientRequestLog2((char *)"test4: syncClientRequest2RpcMsg -> syncClientRequestFromRpcMsg ", pMsg2); + + rpcFreeCont(rpcMsg.pCont); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} + +void test5() { + SyncClientRequest *pMsg = createMsg(); + SRpcMsg rpcMsg; + syncClientRequest2RpcMsg(pMsg, &rpcMsg); + SyncClientRequest *pMsg2 = syncClientRequestFromRpcMsg2(&rpcMsg); + syncClientRequestLog2((char *)"test5: syncClientRequest2RpcMsg -> syncClientRequestFromRpcMsg2 ", pMsg2); + + rpcFreeCont(rpcMsg.pCont); + syncClientRequestDestroy(pMsg); + syncClientRequestDestroy(pMsg2); +} +*/ + +int main() { + gRaftDetailLog = true; + tsAsyncLog = 0; + sDebugFlag = DEBUG_DEBUG + DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE; + logTest(); + + test1(); + + /* +test2(); +test3(); +test4(); +test5(); +*/ + + return 0; +} diff --git a/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp b/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp index 2908dd5907..968baff952 100644 --- 
a/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp +++ b/source/libs/sync/test/syncConfigChangeSnapshotTest.cpp @@ -77,7 +77,7 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; } -int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) { +int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void* pParam, void** ppReader) { *ppReader = (void*)0xABCD; char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartRead== pFsm:%p, *ppReader:%p", pFsm, *ppReader); @@ -114,7 +114,7 @@ int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32 return 0; } -int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void** ppWriter) { +int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void *pParam, void** ppWriter) { *ppWriter = (void*)0xCDEF; char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartWrite== pFsm:%p, *ppWriter:%p", pFsm, *ppWriter); @@ -198,7 +198,7 @@ int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex); syncInfo.pWal = pWal; syncInfo.isStandBy = isStandBy; - syncInfo.snapshotEnable = true; + syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT; SSyncCfg* pCfg = &syncInfo.syncCfg; diff --git a/source/libs/sync/test/syncSnapshotReceiverTest.cpp b/source/libs/sync/test/syncSnapshotReceiverTest.cpp index 208a96daa4..b4bf08dd40 100644 --- a/source/libs/sync/test/syncSnapshotReceiverTest.cpp +++ b/source/libs/sync/test/syncSnapshotReceiverTest.cpp @@ -29,7 +29,7 @@ int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void** ppReader) { return 0; } int32_t SnapshotStopRead(struct SSyncFSM* pFsm, void* pReader) { return 0; } int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len) { return 0; } -int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void** ppWriter) { return 0; } +int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void *pParam, void** ppWriter) { return 0; } int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply) { return 0; } int32_t SnapshotDoWrite(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len) { return 0; } diff --git a/source/libs/sync/test/syncSnapshotSenderTest.cpp b/source/libs/sync/test/syncSnapshotSenderTest.cpp index dc38cd3df5..8d1f83b3b1 100644 --- a/source/libs/sync/test/syncSnapshotSenderTest.cpp +++ b/source/libs/sync/test/syncSnapshotSenderTest.cpp @@ -25,7 +25,7 @@ void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) int32_t GetSnapshot(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; } -int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) { return 0; } +int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void* pParam, void** ppReader) { return 0; } int32_t SnapshotStopRead(struct SSyncFSM* pFsm, void* pReader) { return 0; } int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len) { return 0; } diff --git a/source/libs/sync/test/syncTestTool.cpp b/source/libs/sync/test/syncTestTool.cpp index 4e84ff0f7e..c6d3a3e4af 100644 --- a/source/libs/sync/test/syncTestTool.cpp +++ b/source/libs/sync/test/syncTestTool.cpp @@ -74,7 +74,7 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; } -int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void *pParam, void** ppReader) { +int32_t SnapshotStartRead(struct SSyncFSM* pFsm, void* pParam, 
void** ppReader) { *ppReader = (void*)0xABCD; char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStartRead== pFsm:%p, *ppReader:%p", pFsm, *ppReader); @@ -111,7 +111,7 @@ int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32 return 0; } -int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void** ppWriter) { +int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void *pParam, void** ppWriter) { *ppWriter = (void*)0xCDEF; char logBuf[256] = {0}; @@ -203,7 +203,7 @@ SWal* createWal(char* path, int32_t vgId) { } int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path, bool isStandBy, - bool enableSnapshot) { + ESyncStrategy enableSnapshot) { SSyncInfo syncInfo; syncInfo.vgId = vgId; syncInfo.msgcb = &gSyncIO->msgcb; @@ -213,7 +213,7 @@ int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex); syncInfo.pWal = pWal; syncInfo.isStandBy = isStandBy; - syncInfo.snapshotEnable = enableSnapshot; + syncInfo.snapshotStrategy = enableSnapshot; SSyncCfg* pCfg = &syncInfo.syncCfg; @@ -316,7 +316,7 @@ int main(int argc, char** argv) { int32_t replicaNum = atoi(argv[1]); int32_t myIndex = atoi(argv[2]); - bool enableSnapshot = atoi(argv[3]); + ESyncStrategy enableSnapshot = (ESyncStrategy)atoi(argv[3]); int32_t lastApplyIndex = atoi(argv[4]); int32_t lastApplyTerm = atoi(argv[5]); int32_t writeRecordNum = atoi(argv[6]); diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 05fe62762a..5d51a031bf 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -40,8 +40,8 @@ struct SBTree { #define TDB_BTREE_PAGE_IS_ROOT(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_ROOT) #define TDB_BTREE_PAGE_IS_LEAF(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_LEAF) #define TDB_BTREE_PAGE_IS_OVFL(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_OVFL) -#define TDB_BTREE_ASSERT_FLAG(flags) \ - ASSERT(TDB_FLAG_IS(flags, TDB_BTREE_ROOT) || TDB_FLAG_IS(flags, TDB_BTREE_LEAF) || \ +#define TDB_BTREE_ASSERT_FLAG(flags) \ + ASSERT(TDB_FLAG_IS(flags, TDB_BTREE_ROOT) || TDB_FLAG_IS(flags, TDB_BTREE_LEAF) || \ TDB_FLAG_IS(flags, TDB_BTREE_ROOT | TDB_BTREE_LEAF) || TDB_FLAG_IS(flags, 0) || \ TDB_FLAG_IS(flags, TDB_BTREE_OVFL)) @@ -58,7 +58,7 @@ typedef struct { static int tdbDefaultKeyCmprFn(const void *pKey1, int keyLen1, const void *pKey2, int keyLen2); static int tdbBtreeOpenImpl(SBTree *pBt); -//static int tdbBtreeInitPage(SPage *pPage, void *arg, int init); +// static int tdbBtreeInitPage(SPage *pPage, void *arg, int init); static int tdbBtreeEncodeCell(SPage *pPage, const void *pKey, int kLen, const void *pVal, int vLen, SCell *pCell, int *szCell, TXN *pTxn, SBTree *pBt); static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pDecoder, TXN *pTxn, SBTree *pBt); @@ -321,7 +321,7 @@ static int tdbBtreeOpenImpl(SBTree *pBt) { { // 1. 
TODO: Search the main DB to check if the DB exists - ret = tdbPagerOpenDB(pBt->pPager, &pgno, true); + ret = tdbPagerOpenDB(pBt->pPager, &pgno, true, pBt); ASSERT(ret == 0); } @@ -721,7 +721,8 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx int szNewCell; SPgno pgno; pgno = TDB_PAGE_PGNO(pNews[iNew]); - tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn, pBt); + tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn, + pBt); tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0); tdbOsFree(pNewCell); } @@ -916,10 +917,10 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const int surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - sizeof(SPgno)); int nLocal = surplus <= maxLocal ? surplus : minLocal; - //int ofpCap = tdbPageCapacity(pBt->pageSize, sizeof(SIntHdr)); + // int ofpCap = tdbPageCapacity(pBt->pageSize, sizeof(SIntHdr)); // fetch a new ofp and make it dirty - SPgno pgno = 0; + SPgno pgno = 0; SPage *ofp, *nextOfp; ret = tdbFetchOvflPage(&pgno, &ofp, pTxn, pBt); @@ -942,8 +943,8 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const nLeft -= kLen; // pack partial val to local if any space left if (nLocal > kLen + 4) { - memcpy(pCell + nHeader + kLen, pVal, nLocal - kLen - sizeof(SPgno)); - nLeft -= nLocal - kLen - sizeof(SPgno); + memcpy(pCell + nHeader + kLen, pVal, nLocal - kLen - sizeof(SPgno)); + nLeft -= nLocal - kLen - sizeof(SPgno); } // pack nextPgno @@ -951,132 +952,132 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const // pack left val data to ovpages do { - lastPage = 0; - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + lastPage = 0; + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - // fetch next ofp if not last page - if (!lastPage) { - // fetch a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } - } else { - pgno = 0; - } + // fetch next ofp if not last page + if (!lastPage) { + // fetch a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } + } else { + pgno = 0; + } - memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); - memcpy(pBuf + bytes, &pgno, sizeof(pgno)); + memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); + memcpy(pBuf + bytes, &pgno, sizeof(pgno)); - ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } + ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } - ofp = nextOfp; - nLeft -= bytes; + ofp = nextOfp; + nLeft -= bytes; } while (nLeft > 0); } else { int nLeftKey = kLen; // pack partial key and nextPgno memcpy(pCell + nHeader, pKey, nLocal - 4); nLeft -= nLocal - 4; - nLeftKey -= nLocal -4; + nLeftKey -= nLocal - 4; memcpy(pCell + nHeader + nLocal - 4, &pgno, sizeof(pgno)); int lastKeyPageSpace = 0; // pack left key & val to ovpages do { - // cal key to cpy - int lastKeyPage = 0; - if (nLeftKey <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeftKey; - lastKeyPage = 1; - lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; - } else { - bytes = 
ofp->maxLocal - sizeof(SPgno); - } + // cal key to cpy + int lastKeyPage = 0; + if (nLeftKey <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeftKey; + lastKeyPage = 1; + lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - // cpy key - memcpy(pBuf, ((SCell *)pKey) + kLen - nLeftKey, bytes); + // cpy key + memcpy(pBuf, ((SCell *)pKey) + kLen - nLeftKey, bytes); - if (lastKeyPage) { - if (lastKeyPageSpace >= vLen) { - memcpy(pBuf + kLen -nLeftKey, pVal, vLen); + if (lastKeyPage) { + if (lastKeyPageSpace >= vLen) { + memcpy(pBuf + kLen - nLeftKey, pVal, vLen); - nLeft -= vLen; - pgno = 0; - } else { - memcpy(pBuf + kLen -nLeftKey, pVal, lastKeyPageSpace); - nLeft -= lastKeyPageSpace; + nLeft -= vLen; + pgno = 0; + } else { + memcpy(pBuf + kLen - nLeftKey, pVal, lastKeyPageSpace); + nLeft -= lastKeyPageSpace; - // fetch next ofp, a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - return -1; - } - } - } else { - // fetch next ofp, a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - return -1; - } - } + // fetch next ofp, a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + return -1; + } + } + } else { + // fetch next ofp, a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + return -1; + } + } - memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno)); + memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno)); - ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); - if (ret < 0) { - return -1; - } + ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); + if (ret < 0) { + return -1; + } - ofp = nextOfp; - nLeftKey -= bytes; - nLeft -= bytes; + ofp = nextOfp; + nLeftKey -= bytes; + nLeft -= bytes; } while (nLeftKey > 0); while (nLeft > 0) { - // pack left val data to ovpages - lastPage = 0; - if (nLeft <= maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = maxLocal - sizeof(SPgno); - } + // pack left val data to ovpages + lastPage = 0; + if (nLeft <= maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = maxLocal - sizeof(SPgno); + } - // fetch next ofp if not last page - if (!lastPage) { - // fetch a new ofp and make it dirty - ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } - } else { - pgno = 0; - } + // fetch next ofp if not last page + if (!lastPage) { + // fetch a new ofp and make it dirty + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } + } else { + pgno = 0; + } - memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); - memcpy(pBuf + bytes, &pgno, sizeof(pgno)); + memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes); + memcpy(pBuf + bytes, &pgno, sizeof(pgno)); - ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); - if (ret < 0) { - tdbFree(pBuf); - return -1; - } + ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); + if (ret < 0) { + tdbFree(pBuf); + return -1; + } - ofp = nextOfp; - nLeft -= bytes; + ofp = nextOfp; + nLeft -= bytes; } } @@ -1142,7 +1143,8 @@ static int tdbBtreeEncodeCell(SPage *pPage, const void *pKey, int kLen, const vo return 0; } -static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn, SBTree *pBt) { +static int tdbBtreeDecodePayload(SPage *pPage, const SCell 
*pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn, + SBTree *pBt) { int ret = 0; int nPayload; int maxLocal = pPage->maxLocal; @@ -1171,149 +1173,149 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, int surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - sizeof(SPgno)); int nLocal = surplus <= maxLocal ? surplus : minLocal; - int nLeft = nPayload; - SPgno pgno = 0; + int nLeft = nPayload; + SPgno pgno = 0; SPage *ofp; SCell *ofpCell; - int bytes; - int lastPage = 0; + int bytes; + int lastPage = 0; if (nLocal >= pDecoder->kLen + 4) { pDecoder->pKey = (SCell *)pCell + nHeader; nLeft -= kLen; if (nLocal > kLen + 4) { - // read partial val to local - pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); - if (pDecoder->pVal == NULL) { - return -1; - } - TDB_CELLDECODER_SET_FREE_VAL(pDecoder); + // read partial val to local + pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); + if (pDecoder->pVal == NULL) { + return -1; + } + TDB_CELLDECODER_SET_FREE_VAL(pDecoder); - memcpy(pDecoder->pVal, pCell + nHeader + kLen, nLocal - kLen - sizeof(SPgno)); + memcpy(pDecoder->pVal, pCell + nHeader + kLen, nLocal - kLen - sizeof(SPgno)); - nLeft -= nLocal - kLen - sizeof(SPgno); + nLeft -= nLocal - kLen - sizeof(SPgno); } memcpy(&pgno, pCell + nHeader + nPayload - nLeft, sizeof(pgno)); // unpack left val data from ovpages - while (pgno != 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + while (pgno != 0) { + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - ofpCell = tdbPageGetCell(ofp, 0); + ofpCell = tdbPageGetCell(ofp, 0); - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - memcpy(pDecoder->pVal + vLen - nLeft, ofpCell, bytes); - nLeft -= bytes; + memcpy(pDecoder->pVal + vLen - nLeft, ofpCell, bytes); + nLeft -= bytes; - memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); } } else { int nLeftKey = kLen; // load partial key and nextPgno pDecoder->pKey = tdbRealloc(pDecoder->pKey, kLen); if (pDecoder->pKey == NULL) { - return -1; + return -1; } TDB_CELLDECODER_SET_FREE_KEY(pDecoder); memcpy(pDecoder->pKey, pCell + nHeader, nLocal - 4); nLeft -= nLocal - 4; - nLeftKey -= nLocal -4; + nLeftKey -= nLocal - 4; memcpy(&pgno, pCell + nHeader + nLocal - 4, sizeof(pgno)); int lastKeyPageSpace = 0; // load left key & val to ovpages while (pgno != 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - ofpCell = tdbPageGetCell(ofp, 0); + ofpCell = tdbPageGetCell(ofp, 0); - int lastKeyPage = 0; - if (nLeftKey <= maxLocal - sizeof(SPgno)) { - bytes = nLeftKey; - lastKeyPage = 1; - lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + int lastKeyPage = 0; + if (nLeftKey <= maxLocal - sizeof(SPgno)) { + bytes = nLeftKey; + lastKeyPage = 1; + lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - // cpy key - memcpy(pDecoder->pKey + kLen - nLeftKey, ofpCell, bytes); + // cpy key + memcpy(pDecoder->pKey + kLen - nLeftKey, ofpCell, bytes); - if (lastKeyPage) { - if (lastKeyPageSpace >= vLen) { - 
pDecoder->pVal = ofpCell + kLen -nLeftKey; + if (lastKeyPage) { + if (lastKeyPageSpace >= vLen) { + pDecoder->pVal = ofpCell + kLen - nLeftKey; - nLeft -= vLen; - pgno = 0; - } else { - // read partial val to local - pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); - if (pDecoder->pVal == NULL) { - return -1; - } - TDB_CELLDECODER_SET_FREE_VAL(pDecoder); + nLeft -= vLen; + pgno = 0; + } else { + // read partial val to local + pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); + if (pDecoder->pVal == NULL) { + return -1; + } + TDB_CELLDECODER_SET_FREE_VAL(pDecoder); - memcpy(pDecoder->pVal, ofpCell + kLen -nLeftKey, lastKeyPageSpace); - nLeft -= lastKeyPageSpace; - } - } + memcpy(pDecoder->pVal, ofpCell + kLen - nLeftKey, lastKeyPageSpace); + nLeft -= lastKeyPageSpace; + } + } - memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); - nLeftKey -= bytes; - nLeft -= bytes; + nLeftKey -= bytes; + nLeft -= bytes; } while (nLeft > 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - ofpCell = tdbPageGetCell(ofp, 0); + ofpCell = tdbPageGetCell(ofp, 0); - // load left val data to ovpages - lastPage = 0; - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - lastPage = 1; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + // load left val data to ovpages + lastPage = 0; + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + lastPage = 1; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - if (lastPage) { - pgno = 0; - } + if (lastPage) { + pgno = 0; + } - if (!pDecoder->pVal) { - pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); - if (pDecoder->pVal == NULL) { - return -1; - } - TDB_CELLDECODER_SET_FREE_VAL(pDecoder); - } + if (!pDecoder->pVal) { + pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen); + if (pDecoder->pVal == NULL) { + return -1; + } + TDB_CELLDECODER_SET_FREE_VAL(pDecoder); + } - memcpy(pDecoder->pVal, ofpCell + vLen - nLeft, bytes); - nLeft -= bytes; + memcpy(pDecoder->pVal, ofpCell + vLen - nLeft, bytes); + nLeft -= bytes; - memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno)); - nLeft -= bytes; + nLeft -= bytes; } } } @@ -1404,31 +1406,31 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN * // free ofp pages' cells if (dropOfp) { - int ret = 0; - SPgno pgno = *(SPgno *) (pCell + nHeader + nLocal - sizeof(SPgno)); - int nLeft = nPayload - nLocal + sizeof(SPgno); + int ret = 0; + SPgno pgno = *(SPgno *)(pCell + nHeader + nLocal - sizeof(SPgno)); + int nLeft = nPayload - nLocal + sizeof(SPgno); SPage *ofp; - int bytes; + int bytes; while (pgno != 0) { - ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); - if (ret < 0) { - return -1; - } + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); + if (ret < 0) { + return -1; + } - SCell *ofpCell = tdbPageGetCell(ofp, 0); + SCell *ofpCell = tdbPageGetCell(ofp, 0); - if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { - bytes = nLeft; - } else { - bytes = ofp->maxLocal - sizeof(SPgno); - } + if (nLeft <= ofp->maxLocal - sizeof(SPgno)) { + bytes = nLeft; + } else { + bytes = ofp->maxLocal - sizeof(SPgno); + } - memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); - tdbPagerReturnPage(pPage->pPager, ofp, pTxn); + tdbPagerReturnPage(pPage->pPager, ofp, pTxn); - nLeft -= bytes; + nLeft -= bytes; } } @@ -1932,7 +1934,7 @@ int 
tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int // alloc space szBuf = kLen + nData + 14; - pBuf = tdbRealloc(pBtc->pBt->pBuf, pBtc->pBt->pageSize > szBuf ? szBuf : pBtc->pBt->pageSize); + pBuf = tdbRealloc(pBtc->pBt->pBuf, pBtc->pBt->pageSize > szBuf ? szBuf : pBtc->pBt->pageSize); if (pBuf == NULL) { ASSERT(0); return -1; diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index dd3f09d5d2..892a913773 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -98,7 +98,7 @@ int tdbPagerClose(SPager *pPager) { return 0; } -int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) { +int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt) { SPgno pgno; SPage *pPage; int ret; @@ -110,25 +110,41 @@ int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) { } { - // TODO: try to search the main DB to get the page number - // pgno = 0; + // TODO: try to search the main DB to get the page number + // pgno = 0; } - // if (pgno == 0 && toCreate) { - // ret = tdbPagerAllocPage(pPager, &pPage, &pgno); - // if (ret < 0) { - // return -1; - // } + if (pgno == 0 && toCreate) { + // allocate a new child page + TXN txn; + tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0); - // // TODO: Need to zero the page + pPager->inTran = 1; - // ret = tdbPagerWrite(pPager, pPage); - // if (ret < 0) { - // return -1; - // } - // } + SBtreeInitPageArg zArg; + zArg.flags = 0x1 | 0x2; // root leaf node; + zArg.pBt = pBt; + ret = tdbPagerFetchPage(pPager, &pgno, &pPage, tdbBtreeInitPage, &zArg, &txn); + if (ret < 0) { + return -1; + } - *ppgno = pgno; + // ret = tdbPagerAllocPage(pPager, &pPage, &pgno); + // if (ret < 0) { + // return -1; + //} + + // TODO: Need to zero the page + + ret = tdbPagerWrite(pPager, pPage); + if (ret < 0) { + return -1; + } + + tdbTxnClose(&txn); + } + + *ppgno = pgno; return 0; } @@ -427,9 +443,9 @@ static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage) { } int tdbPagerRestore(SPager *pPager, SBTree *pBt) { - int ret = 0; + int ret = 0; SPgno journalSize = 0; - u8 *pageBuf = NULL; + u8 *pageBuf = NULL; tdb_fd_t jfd = tdbOsOpen(pPager->jFileName, TDB_O_RDWR, 0755); if (jfd == NULL) { @@ -454,7 +470,7 @@ int tdbPagerRestore(SPager *pPager, SBTree *pBt) { for (int pgIndex = 0; pgIndex < journalSize; ++pgIndex) { // read pgno & the page from journal - SPgno pgno; + SPgno pgno; SPage *pPage; int ret = tdbOsRead(jfd, &pgno, sizeof(pgno)); diff --git a/source/libs/tdb/src/db/tdbTxn.c b/source/libs/tdb/src/db/tdbTxn.c index b06fe05acd..f173d89779 100644 --- a/source/libs/tdb/src/db/tdbTxn.c +++ b/source/libs/tdb/src/db/tdbTxn.c @@ -18,7 +18,7 @@ int tdbTxnOpen(TXN *pTxn, int64_t txnid, void *(*xMalloc)(void *, size_t), void (*xFree)(void *, void *), void *xArg, int flags) { // not support read-committed version at the moment - ASSERT(flags == 0 || flags == TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); + ASSERT(flags == 0 || flags == (TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED)); pTxn->flags = flags; pTxn->txnId = txnid; diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h index c914e098f7..2713a12af3 100644 --- a/source/libs/tdb/src/inc/tdbInt.h +++ b/source/libs/tdb/src/inc/tdbInt.h @@ -128,13 +128,13 @@ typedef struct SBtInfo { #define TDB_CELLDECODER_FREE_VAL(pCellDecoder) ((pCellDecoder)->freeKV & TDB_CELLD_F_VAL) typedef struct { - int kLen; - u8 *pKey; - int vLen; - u8 *pVal; - SPgno pgno; - u8 *pBuf; - u8 freeKV; + int kLen; + 
u8 *pKey; + int vLen; + u8 *pVal; + SPgno pgno; + u8 *pBuf; + u8 freeKV; } SCellDecoder; struct SBTC { @@ -184,7 +184,7 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager); int tdbPagerClose(SPager *pPager); -int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate); +int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt); int tdbPagerWrite(SPager *pPager, SPage *pPage); int tdbPagerBegin(SPager *pPager, TXN *pTxn); int tdbPagerCommit(SPager *pPager, TXN *pTxn); @@ -192,7 +192,7 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initP TXN *pTxn); void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn); int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno); -int tdbPagerRestore(SPager *pPager, SBTree *pBt); +int tdbPagerRestore(SPager *pPager, SBTree *pBt); // tdbPCache.c ==================================== #define TDB_PCACHE_PAGE \ @@ -314,19 +314,18 @@ static inline int tdbTryLockPage(tdb_spinlock_t *pLock) { #define TDB_TRY_LOCK_PAGE(pPage) tdbTryLockPage(&((pPage)->lock)) // APIs -#define TDB_PAGE_TOTAL_CELLS(pPage) ((pPage)->nOverflow + (pPage)->pPageMethods->getCellNum(pPage)) -#define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx) -#define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage) -#define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno) -#define TDB_BYTES_CELL_TAKEN(pPage, pCell) ((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset) -#define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset) +#define TDB_PAGE_TOTAL_CELLS(pPage) ((pPage)->nOverflow + (pPage)->pPageMethods->getCellNum(pPage)) +#define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx) +#define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage) +#define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno) +#define TDB_BYTES_CELL_TAKEN(pPage, pCell) \ + ((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset) +#define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset) int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t), void *arg); int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg); -void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, - TXN *, SBTree *pBt)); -void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, - TXN *, SBTree *pBt)); +void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)); +void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)); int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl); int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt); int tdbPageUpdateCell(SPage *pPage, int idx, SCell *pCell, int szCell, TXN *pTxn, SBTree *pBt); diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 2d3d41de64..62aec219df 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -291,7 +291,7 @@ int32_t tfsRmdir(STfs *pTfs, const char *rname) { for (int32_t id = 0; id < pTier->ndisk; id++) { STfsDisk *pDisk = pTier->disks[id]; snprintf(aname, TMPNAME_LEN, "%s%s%s", pDisk->path, TD_DIRSEP, rname); - uInfo("====> tfs remove dir : path:%s aname:%s rname:[%s]", pDisk->path, aname, 
rname); + uInfo("tfs remove dir : path:%s aname:%s rname:[%s]", pDisk->path, aname, rname); taosRemoveDir(aname); } } diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index f699df6883..5ce073081d 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -96,8 +96,8 @@ typedef void* queue[2]; #define QUEUE_DATA(e, type, field) ((type*)((void*)((char*)(e)-offsetof(type, field)))) #define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit -#define TRANS_RETRY_INTERVAL 15 // ms retry interval -#define TRANS_CONN_TIMEOUT 3 // connect timeout +#define TRANS_RETRY_INTERVAL 15 // ms retry interval +#define TRANS_CONN_TIMEOUT 3 // connect timeout typedef SRpcMsg STransMsg; typedef SRpcCtx STransCtx; @@ -180,18 +180,18 @@ typedef enum { Normal, Quit, Release, Register, Update } STransMsgType; typedef enum { ConnNormal, ConnAcquire, ConnRelease, ConnBroken, ConnInPool } ConnStatus; #define container_of(ptr, type, member) ((type*)((char*)(ptr)-offsetof(type, member))) -#define RPC_RESERVE_SIZE (sizeof(STranConnCtx)) +#define RPC_RESERVE_SIZE (sizeof(STranConnCtx)) #define rpcIsReq(type) (type & 1U) #define TRANS_RESERVE_SIZE (sizeof(STranConnCtx)) -#define TRANS_MSG_OVERHEAD (sizeof(STransMsgHead)) -#define transHeadFromCont(cont) ((STransMsgHead*)((char*)cont - sizeof(STransMsgHead))) -#define transContFromHead(msg) (msg + sizeof(STransMsgHead)) +#define TRANS_MSG_OVERHEAD (sizeof(STransMsgHead)) +#define transHeadFromCont(cont) ((STransMsgHead*)((char*)cont - sizeof(STransMsgHead))) +#define transContFromHead(msg) (msg + sizeof(STransMsgHead)) #define transMsgLenFromCont(contLen) (contLen + sizeof(STransMsgHead)) -#define transContLenFromMsg(msgLen) (msgLen - sizeof(STransMsgHead)); -#define transIsReq(type) (type & 1U) +#define transContLenFromMsg(msgLen) (msgLen - sizeof(STransMsgHead)); +#define transIsReq(type) (type & 1U) #define transLabel(trans) ((STrans*)trans)->label diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 462debb247..5fb2980ceb 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -57,7 +57,7 @@ typedef struct { void* parent; void* tcphandle; // returned handle from TCP initialization - int32_t refMgt; + int64_t refId; TdThreadMutex mutex; } SRpcInfo; diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index c970440d47..48e7d7c91d 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -76,12 +76,16 @@ void* rpcOpen(const SRpcInit* pInit) { if (pInit->user) { memcpy(pRpc->user, pInit->user, strlen(pInit->user)); } - int64_t refId = taosAddRef(transGetInstMgt(), pRpc); + + int64_t refId = transAddExHandle(transGetInstMgt(), pRpc); + transAcquireExHandle(transGetInstMgt(), refId); + pRpc->refId = refId; return (void*)refId; } void rpcClose(void* arg) { tInfo("start to close rpc"); - taosRemoveRef(transGetInstMgt(), (int64_t)arg); + transRemoveExHandle(transGetInstMgt(), (int64_t)arg); + transReleaseExHandle(transGetInstMgt(), (int64_t)arg); tInfo("finish to close rpc"); return; } diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index bda40cbc2a..8b771f6f8a 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -47,6 +47,7 @@ typedef struct SCliMsg { queue q; STransMsgType type; + int64_t refId; uint64_t st; int sent; //(0: no send, 1: alread sent) } SCliMsg; 
@@ -262,13 +263,17 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) { #define REQUEST_PERSIS_HANDLE(msg) ((msg)->info.persistHandle == 1) #define REQUEST_RELEASE_HANDLE(cmsg) ((cmsg)->type == Release) +#define EPSET_IS_VALID(epSet) ((epSet) != NULL && (epSet)->numOfEps != 0) #define EPSET_GET_SIZE(epSet) (epSet)->numOfEps #define EPSET_GET_INUSE_IP(epSet) ((epSet)->eps[(epSet)->inUse].fqdn) #define EPSET_GET_INUSE_PORT(epSet) ((epSet)->eps[(epSet)->inUse].port) -#define EPSET_FORWARD_INUSE(epSet) \ - do { \ - (epSet)->inUse = (++((epSet)->inUse)) % ((epSet)->numOfEps); \ +#define EPSET_FORWARD_INUSE(epSet) \ + do { \ + if ((epSet)->numOfEps != 0) { \ + (epSet)->inUse = (++((epSet)->inUse)) % ((epSet)->numOfEps); \ + } \ } while (0) + #define EPSET_DEBUG_STR(epSet, tbuf) \ do { \ int len = snprintf(tbuf, sizeof(tbuf), "epset:{"); \ @@ -512,7 +517,6 @@ static void allocConnRef(SCliConn* conn, bool update) { } static void addConnToPool(void* pool, SCliConn* conn) { if (conn->status == ConnInPool) { - // assert(0); return; } SCliThrd* thrd = conn->hostThrd; @@ -606,11 +610,9 @@ static void cliDestroyConn(SCliConn* conn, bool clear) { if (clear) { if (!uv_is_closing((uv_handle_t*)conn->stream)) { + uv_read_stop(conn->stream); uv_close((uv_handle_t*)conn->stream, cliDestroy); } - //} else { - // cliDestroy((uv_handle_t*)conn->stream); - //} } } static void cliDestroy(uv_handle_t* handle) { @@ -635,7 +637,6 @@ static bool cliHandleNoResp(SCliConn* conn) { SCliMsg* pMsg = transQueueGet(&conn->cliMsgs, 0); if (REQUEST_NO_RESP(&pMsg->msg)) { transQueuePop(&conn->cliMsgs); - // taosArrayRemove(msgs, 0); destroyCmsg(pMsg); res = true; } @@ -668,7 +669,6 @@ static void cliSendCb(uv_write_t* req, int status) { void cliSend(SCliConn* pConn) { CONN_HANDLE_BROKEN(pConn); - // assert(taosArrayGetSize(pConn->cliMsgs) > 0); assert(!transQueueEmpty(&pConn->cliMsgs)); SCliMsg* pCliMsg = NULL; @@ -778,7 +778,6 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) { *ignore = true; destroyCmsg(pMsg); return NULL; - // assert(0); } else { conn = exh->handle; transReleaseExHandle(transGetRefMgt(), refId); @@ -811,8 +810,12 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { STrans* pTransInst = pThrd->pTransInst; cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr); + if (!EPSET_IS_VALID(&pCtx->epSet)) { + destroyCmsg(pMsg); + tError("invalid epset"); + return; + } - // transPrintEpSet(&pCtx->epSet); bool ignore = false; SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore); if (ignore == true) { @@ -979,6 +982,7 @@ void cliSendQuit(SCliThrd* thrd) { } void cliWalkCb(uv_handle_t* handle, void* arg) { if (!uv_is_closing(handle)) { + uv_read_stop((uv_stream_t*)handle); uv_close(handle, cliDestroy); } } @@ -1079,12 +1083,14 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { } else { cliCompareAndSwap(&pCtx->retryLimit, TRANS_RETRY_COUNT_LIMIT, TRANS_RETRY_COUNT_LIMIT); if (pCtx->retryCnt < pCtx->retryLimit) { - addConnToPool(pThrd->pool, pConn); if (pResp->contLen == 0) { EPSET_FORWARD_INUSE(&pCtx->epSet); } else { - tDeserializeSEpSet(pResp->pCont, pResp->contLen, &pCtx->epSet); + if (tDeserializeSEpSet(pResp->pCont, pResp->contLen, &pCtx->epSet) < 0) { + tError("%s conn %p failed to deserialize epset", CONN_GET_INST_LABEL(pConn)); + } } + addConnToPool(pThrd->pool, pConn); transFreeMsg(pResp->pCont); cliSchedMsgToNextNode(pMsg, pThrd); return -1; @@ -1213,6 +1219,7 @@ void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STra cliMsg->msg = *pReq; cliMsg->st = 
taosGetTimestampUs(); cliMsg->type = Normal; + cliMsg->refId = (int64_t)shandle; STraceId* trace = &pReq->info.traceId; tGTrace("%s send request at thread:%08" PRId64 ", dst: %s:%d, app:%p", transLabel(pTransInst), pThrd->pid, @@ -1250,6 +1257,7 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM cliMsg->msg = *pReq; cliMsg->st = taosGetTimestampUs(); cliMsg->type = Normal; + cliMsg->refId = (int64_t)shandle; STraceId* trace = &pReq->info.traceId; tGTrace("%s send request at thread:%08" PRId64 ", dst: %s:%d, app:%p", transLabel(pTransInst), pThrd->pid, @@ -1283,6 +1291,7 @@ void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) { SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg)); cliMsg->ctx = pCtx; cliMsg->type = Update; + cliMsg->refId = (int64_t)shandle; SCliThrd* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[i]; tDebug("%s update epset at thread:%08" PRId64 "", pTransInst->label, thrd->pid); diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 676985b31c..5f6e3db615 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -19,7 +19,7 @@ static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; static int32_t refMgt; -int32_t instMgt; +static int32_t instMgt; int transAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey) { T_MD5_CTX context; @@ -490,6 +490,7 @@ static void transDestroyEnv() { transCloseRefMgt(refMgt); transCloseRefMgt(instMgt); } + void transInit() { // init env taosThreadOnce(&transModuleInit, transInitEnv); diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index da1a37917f..7323aa0882 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -1029,8 +1029,9 @@ void transUnrefSrvHandle(void* handle) { } void transReleaseSrvHandle(void* handle) { - SExHandle* exh = handle; - int64_t refId = exh->refId; + SRpcHandleInfo* info = handle; + SExHandle* exh = info->handle; + int64_t refId = info->refId; ASYNC_CHECK_HANDLE(exh, refId); diff --git a/source/libs/transport/test/transUT.cpp b/source/libs/transport/test/transUT.cpp index 86c4830284..b55f771ebd 100644 --- a/source/libs/transport/test/transUT.cpp +++ b/source/libs/transport/test/transUT.cpp @@ -175,7 +175,7 @@ static void processReleaseHandleCb(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) rpcMsg.code = 0; rpcSendResponse(&rpcMsg); - rpcReleaseHandle(pMsg->info.handle, TAOS_CONN_SERVER); + rpcReleaseHandle(&pMsg->info, TAOS_CONN_SERVER); } static void processRegisterFailure(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { { diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 9245c03826..27f12259bc 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -332,21 +332,25 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog terrno = TSDB_CODE_WAL_SIZE_LIMIT; return -1; } + taosThreadMutexLock(&pWal->mutex); if (index == pWal->vers.lastVer + 1) { if (taosArrayGetSize(pWal->fileInfoSet) == 0) { pWal->vers.firstVer = index; if (walRoll(pWal) < 0) { + taosThreadMutexUnlock(&pWal->mutex); return -1; } } else { int64_t passed = walGetSeq() - pWal->lastRollSeq; if (pWal->cfg.rollPeriod != -1 && pWal->cfg.rollPeriod != 0 && passed > pWal->cfg.rollPeriod) { if (walRoll(pWal) < 0) { + taosThreadMutexUnlock(&pWal->mutex); return -1; } } else if (pWal->cfg.segSize != -1 && pWal->cfg.segSize != 0 && 
walGetLastFileSize(pWal) > pWal->cfg.segSize) { if (walRoll(pWal) < 0) { + taosThreadMutexUnlock(&pWal->mutex); return -1; } } @@ -355,6 +359,7 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog // reject skip log or rewrite log // must truncate explicitly first terrno = TSDB_CODE_WAL_INVALID_VER; + taosThreadMutexUnlock(&pWal->mutex); return -1; } @@ -362,8 +367,6 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SSyncLog ASSERT(pWal->writeCur >= 0); - taosThreadMutexLock(&pWal->mutex); - if (pWal->pWriteIdxTFile == NULL || pWal->pWriteLogTFile == NULL) { walSetWrite(pWal); taosLSeekFile(pWal->pWriteLogTFile, 0, SEEK_END); diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index e15627fe66..f773e4ff58 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -39,12 +39,16 @@ endif() target_link_libraries( os PUBLIC pthread ) -if(NOT TD_WINDOWS) - target_link_libraries( - os PUBLIC dl m rt - ) -else() +if(TD_WINDOWS) target_link_libraries( os PUBLIC ws2_32 iconv msvcregex wcwidth winmm ) -endif(NOT TD_WINDOWS) +elseif(TD_DARWIN_64) + target_link_libraries( + os PUBLIC dl m iconv + ) +else() + target_link_libraries( + os PUBLIC dl m rt + ) +endif() diff --git a/source/os/src/osAtomic.c b/source/os/src/osAtomic.c index e4d880f40a..7a2353b234 100644 --- a/source/os/src/osAtomic.c +++ b/source/os/src/osAtomic.c @@ -518,7 +518,7 @@ int64_t atomic_add_fetch_64(int64_t volatile *ptr, int64_t val) { #endif } -void* atomic_add_fetch_ptr(void *ptr, void *val) { +void* atomic_add_fetch_ptr(void *ptr, int64_t val) { #ifdef WINDOWS return interlocked_add_fetch_ptr((void* volatile*)(ptr), (void*)(val)); #elif defined(_TD_NINGSI_60) @@ -618,11 +618,13 @@ int64_t atomic_sub_fetch_64(int64_t volatile *ptr, int64_t val) { #endif } -void* atomic_sub_fetch_ptr(void *ptr, void* val) { +void* atomic_sub_fetch_ptr(void *ptr, int64_t val) { #ifdef WINDOWS return interlocked_sub_fetch_ptr(ptr, val); #elif defined(_TD_NINGSI_60) return __sync_sub_and_fetch((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return __atomic_sub_fetch((void **)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_sub_fetch((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif @@ -673,6 +675,8 @@ void* atomic_fetch_sub_ptr(void *ptr, void* val) { return interlocked_fetch_sub_ptr(ptr, val); #elif defined(_TD_NINGSI_60) return __sync_fetch_and_sub((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return __atomic_fetch_sub((void **)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_fetch_sub((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif @@ -723,6 +727,8 @@ void* atomic_and_fetch_ptr(void *ptr, void *val) { return interlocked_and_fetch_ptr((void* volatile*)(ptr), (void*)(val)); #elif defined(_TD_NINGSI_60) return __sync_and_and_fetch((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return (void*)__atomic_and_fetch((size_t *)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_and_fetch((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif @@ -773,6 +779,8 @@ void* atomic_fetch_and_ptr(void *ptr, void *val) { return interlocked_fetch_and_ptr((void* volatile*)(ptr), (void*)(val)); #elif defined(_TD_NINGSI_60) return __sync_fetch_and_and((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return (void*)__atomic_fetch_and((size_t *)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_fetch_and((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif @@ -823,6 +831,8 @@ void* atomic_or_fetch_ptr(void *ptr, void *val) { return 
interlocked_or_fetch_ptr((void* volatile*)(ptr), (void*)(val)); #elif defined(_TD_NINGSI_60) return __sync_or_and_fetch((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return (void*)__atomic_or_fetch((size_t *)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_or_fetch((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif @@ -873,6 +883,8 @@ void* atomic_fetch_or_ptr(void *ptr, void *val) { return interlocked_fetch_or_ptr((void* volatile*)(ptr), (void*)(val)); #elif defined(_TD_NINGSI_60) return __sync_fetch_and_or((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return (void*)__atomic_fetch_or((size_t *)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_fetch_or((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif @@ -923,6 +935,8 @@ void* atomic_xor_fetch_ptr(void *ptr, void *val) { return interlocked_xor_fetch_ptr((void* volatile*)(ptr), (void*)(val)); #elif defined(_TD_NINGSI_60) return __sync_xor_and_fetch((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return (void*)__atomic_xor_fetch((size_t *)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_xor_fetch((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif @@ -973,6 +987,8 @@ void* atomic_fetch_xor_ptr(void *ptr, void *val) { return interlocked_fetch_xor_ptr((void* volatile*)(ptr), (void*)(val)); #elif defined(_TD_NINGSI_60) return __sync_fetch_and_xor((ptr), (val)); +#elif defined(_TD_DARWIN_64) + return (void*)__atomic_fetch_xor((size_t *)(ptr), (size_t)(val), __ATOMIC_SEQ_CST); #else return __atomic_fetch_xor((void **)(ptr), (val), __ATOMIC_SEQ_CST); #endif diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 65471df0a9..cb943b9d28 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -35,6 +35,8 @@ #include #define LINUX_FILE_NO_TEXT_OPTION 0 #define O_TEXT LINUX_FILE_NO_TEXT_OPTION + +#define _SEND_FILE_STEP_ 1000 #endif #if defined(WINDOWS) @@ -612,28 +614,34 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in #elif defined(_TD_DARWIN_64) - int r = 0; - if (offset) { - r = fseek(in_file, *offset, SEEK_SET); - if (r == -1) return -1; - } - off_t len = size; - while (len > 0) { - char buf[1024 * 16]; - off_t n = sizeof(buf); - if (len < n) n = len; - size_t m = fread(buf, 1, n, in_file); - if (m < n) { - int e = ferror(in_file); - if (e) return -1; + lseek(pFileIn->fd, (int32_t)(*offset), 0); + int64_t writeLen = 0; + uint8_t buffer[_SEND_FILE_STEP_] = {0}; + + for (int64_t len = 0; len < (size - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { + size_t rlen = read(pFileIn->fd, (void *)buffer, _SEND_FILE_STEP_); + if (rlen <= 0) { + return writeLen; + } else if (rlen < _SEND_FILE_STEP_) { + write(pFileOut->fd, (void *)buffer, (uint32_t)rlen); + return (int64_t)(writeLen + rlen); + } else { + write(pFileOut->fd, (void *)buffer, _SEND_FILE_STEP_); + writeLen += _SEND_FILE_STEP_; } - if (m == 0) break; - if (m != fwrite(buf, 1, m, out_file)) { - return -1; - } - len -= m; } - return size - len; + + int64_t remain = size - writeLen; + if (remain > 0) { + size_t rlen = read(pFileIn->fd, (void *)buffer, (size_t)remain); + if (rlen <= 0) { + return writeLen; + } else { + write(pFileOut->fd, (void *)buffer, (uint32_t)remain); + writeLen += remain; + } + } + return writeLen; #else diff --git a/source/os/src/osMemory.c b/source/os/src/osMemory.c index 24bc9d0b4c..aa25b85342 100644 --- a/source/os/src/osMemory.c +++ b/source/os/src/osMemory.c @@ -14,7 +14,11 @@ */ #define ALLOW_FORBID_FUNC +#ifdef _TD_DARWIN_64 +#include +#else #include +#endif #include "os.h" #if 
defined(USE_TD_MEMORY) || defined(USE_ADDR2LINE) @@ -323,6 +327,8 @@ int32_t taosMemorySize(void *ptr) { #else #ifdef WINDOWS return _msize(ptr); +#elif defined(_TD_DARWIN_64) + return malloc_size(ptr); #else return malloc_usable_size(ptr); #endif diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c index 11f62455fd..7ee73d8e2f 100644 --- a/source/os/src/osSemaphore.c +++ b/source/os/src/osSemaphore.c @@ -13,8 +13,10 @@ * along with this program. If not, see . */ +#define ALLOW_FORBID_FUNC #define _DEFAULT_SOURCE #include "os.h" +#include "pthread.h" #ifdef WINDOWS @@ -111,289 +113,501 @@ int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) { // #define SEM_USE_PTHREAD // #define SEM_USE_POSIX -#define SEM_USE_SEM +// #define SEM_USE_SEM -#ifdef SEM_USE_SEM -#include -#include -#include -#include +// #ifdef SEM_USE_SEM +// #include +// #include +// #include +// #include -static TdThread sem_thread; -static TdThreadOnce sem_once; -static task_t sem_port; -static volatile int sem_inited = 0; -static semaphore_t sem_exit; +// static TdThread sem_thread; +// static TdThreadOnce sem_once; +// static task_t sem_port; +// static volatile int sem_inited = 0; +// static semaphore_t sem_exit; -static void *sem_thread_routine(void *arg) { - (void)arg; - setThreadName("sem_thrd"); +// static void *sem_thread_routine(void *arg) { +// (void)arg; +// setThreadName("sem_thrd"); - sem_port = mach_task_self(); - kern_return_t ret = semaphore_create(sem_port, &sem_exit, SYNC_POLICY_FIFO, 0); - if (ret != KERN_SUCCESS) { - fprintf(stderr, "==%s[%d]%s()==failed to create sem_exit\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__); - sem_inited = -1; - return NULL; - } - sem_inited = 1; - semaphore_wait(sem_exit); - return NULL; -} +// sem_port = mach_task_self(); +// kern_return_t ret = semaphore_create(sem_port, &sem_exit, SYNC_POLICY_FIFO, 0); +// if (ret != KERN_SUCCESS) { +// fprintf(stderr, "==%s[%d]%s()==failed to create sem_exit\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__); +// sem_inited = -1; +// return NULL; +// } +// sem_inited = 1; +// semaphore_wait(sem_exit); +// return NULL; +// } -static void once_init(void) { - int r = 0; - r = taosThreadCreate(&sem_thread, NULL, sem_thread_routine, NULL); - if (r) { - fprintf(stderr, "==%s[%d]%s()==failed to create thread\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__); - return; - } - while (sem_inited == 0) { - ; - } -} -#endif +// static void once_init(void) { +// int r = 0; +// r = taosThreadCreate(&sem_thread, NULL, sem_thread_routine, NULL); +// if (r) { +// fprintf(stderr, "==%s[%d]%s()==failed to create thread\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__); +// return; +// } +// while (sem_inited == 0) { +// ; +// } +// } +// #endif -struct tsem_s { -#ifdef SEM_USE_PTHREAD - TdThreadMutex lock; - TdThreadCond cond; - volatile int64_t val; -#elif defined(SEM_USE_POSIX) - size_t id; - sem_t *sem; -#elif defined(SEM_USE_SEM) - semaphore_t sem; -#else // SEM_USE_PTHREAD - dispatch_semaphore_t sem; -#endif // SEM_USE_PTHREAD +// struct tsem_s { +// #ifdef SEM_USE_PTHREAD +// TdThreadMutex lock; +// TdThreadCond cond; +// volatile int64_t val; +// #elif defined(SEM_USE_POSIX) +// size_t id; +// sem_t *sem; +// #elif defined(SEM_USE_SEM) +// semaphore_t sem; +// #else // SEM_USE_PTHREAD +// dispatch_semaphore_t sem; +// #endif // SEM_USE_PTHREAD - volatile unsigned int valid : 1; -}; +// volatile unsigned int valid : 1; +// }; -int tsem_init(tsem_t *sem, int pshared, unsigned int value) { - // 
fprintf(stderr, "==%s[%d]%s():[%p]==creating\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - if (*sem) { - fprintf(stderr, "==%s[%d]%s():[%p]==already initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - struct tsem_s *p = (struct tsem_s *)taosMemoryCalloc(1, sizeof(*p)); - if (!p) { - fprintf(stderr, "==%s[%d]%s():[%p]==out of memory\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - abort(); - } +// int tsem_init(tsem_t *sem, int pshared, unsigned int value) { +// // fprintf(stderr, "==%s[%d]%s():[%p]==creating\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// if (*sem) { +// fprintf(stderr, "==%s[%d]%s():[%p]==already initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// struct tsem_s *p = (struct tsem_s *)taosMemoryCalloc(1, sizeof(*p)); +// if (!p) { +// fprintf(stderr, "==%s[%d]%s():[%p]==out of memory\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// abort(); +// } -#ifdef SEM_USE_PTHREAD - int r = taosThreadMutexInit(&p->lock, NULL); - do { - if (r) break; - r = taosThreadCondInit(&p->cond, NULL); - if (r) { - taosThreadMutexDestroy(&p->lock); - break; +// #ifdef SEM_USE_PTHREAD +// int r = taosThreadMutexInit(&p->lock, NULL); +// do { +// if (r) break; +// r = taosThreadCondInit(&p->cond, NULL); +// if (r) { +// taosThreadMutexDestroy(&p->lock); +// break; +// } +// p->val = value; +// } while (0); +// if (r) { +// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// abort(); +// } +// #elif defined(SEM_USE_POSIX) +// static size_t tick = 0; +// do { +// size_t id = atomic_add_fetch_64(&tick, 1); +// if (id == SEM_VALUE_MAX) { +// atomic_store_64(&tick, 0); +// id = 0; +// } +// char name[NAME_MAX - 4]; +// snprintf(name, sizeof(name), "/t%ld", id); +// p->sem = sem_open(name, O_CREAT | O_EXCL, pshared, value); +// p->id = id; +// if (p->sem != SEM_FAILED) break; +// int e = errno; +// if (e == EEXIST) continue; +// if (e == EINTR) continue; +// fprintf(stderr, "==%s[%d]%s():[%p]==not created[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem, +// e, strerror(e)); +// abort(); +// } while (p->sem == SEM_FAILED); +// #elif defined(SEM_USE_SEM) +// taosThreadOnce(&sem_once, once_init); +// if (sem_inited != 1) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal resource init failed\n", taosDirEntryBaseName(__FILE__), __LINE__, +// __func__, sem); +// errno = ENOMEM; +// return -1; +// } +// kern_return_t ret = semaphore_create(sem_port, &p->sem, SYNC_POLICY_FIFO, value); +// if (ret != KERN_SUCCESS) { +// fprintf(stderr, "==%s[%d]%s():[%p]==semophore_create failed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// // we fail-fast here, because we have less-doc about semaphore_create for the moment +// abort(); +// } +// #else // SEM_USE_PTHREAD +// p->sem = dispatch_semaphore_create(value); +// if (p->sem == NULL) { +// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// abort(); +// } +// #endif // SEM_USE_PTHREAD + +// p->valid = 1; + +// *sem = p; + +// return 0; +// } + +// int tsem_wait(tsem_t *sem) { +// if (!*sem) { +// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// abort(); +// } +// struct tsem_s *p = *sem; +// if (!p->valid) { +// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", 
taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// abort(); +// } +// #ifdef SEM_USE_PTHREAD +// if (taosThreadMutexLock(&p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// p->val -= 1; +// if (p->val < 0) { +// if (taosThreadCondWait(&p->cond, &p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// } +// if (taosThreadMutexUnlock(&p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// return 0; +// #elif defined(SEM_USE_POSIX) +// return sem_wait(p->sem); +// #elif defined(SEM_USE_SEM) +// return semaphore_wait(p->sem); +// #else // SEM_USE_PTHREAD +// return dispatch_semaphore_wait(p->sem, DISPATCH_TIME_FOREVER); +// #endif // SEM_USE_PTHREAD +// } + +// int tsem_post(tsem_t *sem) { +// if (!*sem) { +// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// abort(); +// } +// struct tsem_s *p = *sem; +// if (!p->valid) { +// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// abort(); +// } +// #ifdef SEM_USE_PTHREAD +// if (taosThreadMutexLock(&p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// p->val += 1; +// if (p->val <= 0) { +// if (taosThreadCondSignal(&p->cond)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// } +// if (taosThreadMutexUnlock(&p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// return 0; +// #elif defined(SEM_USE_POSIX) +// return sem_post(p->sem); +// #elif defined(SEM_USE_SEM) +// return semaphore_signal(p->sem); +// #else // SEM_USE_PTHREAD +// return dispatch_semaphore_signal(p->sem); +// #endif // SEM_USE_PTHREAD +// } + +// int tsem_destroy(tsem_t *sem) { +// // fprintf(stderr, "==%s[%d]%s():[%p]==destroying\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// if (!*sem) { +// // fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); +// // abort(); +// return 0; +// } +// struct tsem_s *p = *sem; +// if (!p->valid) { +// // fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// // sem); abort(); +// return 0; +// } +// #ifdef SEM_USE_PTHREAD +// if (taosThreadMutexLock(&p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// p->valid = 0; +// if (taosThreadCondDestroy(&p->cond)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// if (taosThreadMutexUnlock(&p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, +// sem); +// abort(); +// } +// if (taosThreadMutexDestroy(&p->lock)) { +// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), 
__LINE__, __func__, +// sem); +// abort(); +// } +// #elif defined(SEM_USE_POSIX) +// char name[NAME_MAX - 4]; +// snprintf(name, sizeof(name), "/t%ld", p->id); +// int r = sem_unlink(name); +// if (r) { +// int e = errno; +// fprintf(stderr, "==%s[%d]%s():[%p]==unlink failed[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem, +// e, strerror(e)); +// abort(); +// } +// #elif defined(SEM_USE_SEM) +// semaphore_destroy(sem_port, p->sem); +// #else // SEM_USE_PTHREAD +// #endif // SEM_USE_PTHREAD + +// p->valid = 0; +// taosMemoryFree(p); + +// *sem = NULL; +// return 0; +// } +typedef struct +{ + pthread_mutex_t count_lock; + pthread_cond_t count_bump; + unsigned int count; +}bosal_sem_t; + +int tsem_init(tsem_t *psem, int flags, unsigned int count) +{ + bosal_sem_t *pnewsem; + int result; + + pnewsem = (bosal_sem_t *)malloc(sizeof(bosal_sem_t)); + if (! pnewsem) + { + return -1; } - p->val = value; - } while (0); - if (r) { - fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - abort(); - } -#elif defined(SEM_USE_POSIX) - static size_t tick = 0; - do { - size_t id = atomic_add_fetch_64(&tick, 1); - if (id == SEM_VALUE_MAX) { - atomic_store_64(&tick, 0); - id = 0; + result = pthread_mutex_init(&pnewsem->count_lock, NULL); + if (result) + { + free(pnewsem); + return result; } - char name[NAME_MAX - 4]; - snprintf(name, sizeof(name), "/t%ld", id); - p->sem = sem_open(name, O_CREAT | O_EXCL, pshared, value); - p->id = id; - if (p->sem != SEM_FAILED) break; - int e = errno; - if (e == EEXIST) continue; - if (e == EINTR) continue; - fprintf(stderr, "==%s[%d]%s():[%p]==not created[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem, - e, strerror(e)); - abort(); - } while (p->sem == SEM_FAILED); -#elif defined(SEM_USE_SEM) - taosThreadOnce(&sem_once, once_init); - if (sem_inited != 1) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal resource init failed\n", taosDirEntryBaseName(__FILE__), __LINE__, - __func__, sem); - errno = ENOMEM; - return -1; - } - kern_return_t ret = semaphore_create(sem_port, &p->sem, SYNC_POLICY_FIFO, value); - if (ret != KERN_SUCCESS) { - fprintf(stderr, "==%s[%d]%s():[%p]==semophore_create failed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - // we fail-fast here, because we have less-doc about semaphore_create for the moment - abort(); - } -#else // SEM_USE_PTHREAD - p->sem = dispatch_semaphore_create(value); - if (p->sem == NULL) { - fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - abort(); - } -#endif // SEM_USE_PTHREAD - - p->valid = 1; - - *sem = p; - - return 0; -} - -int tsem_wait(tsem_t *sem) { - if (!*sem) { - fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - abort(); - } - struct tsem_s *p = *sem; - if (!p->valid) { - fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - abort(); - } -#ifdef SEM_USE_PTHREAD - if (taosThreadMutexLock(&p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - p->val -= 1; - if (p->val < 0) { - if (taosThreadCondWait(&p->cond, &p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); + result = pthread_cond_init(&pnewsem->count_bump, NULL); + if (result) + 
{ + pthread_mutex_destroy(&pnewsem->count_lock); + free(pnewsem); + return result; } - } - if (taosThreadMutexUnlock(&p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - return 0; -#elif defined(SEM_USE_POSIX) - return sem_wait(p->sem); -#elif defined(SEM_USE_SEM) - return semaphore_wait(p->sem); -#else // SEM_USE_PTHREAD - return dispatch_semaphore_wait(p->sem, DISPATCH_TIME_FOREVER); -#endif // SEM_USE_PTHREAD -} - -int tsem_post(tsem_t *sem) { - if (!*sem) { - fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - abort(); - } - struct tsem_s *p = *sem; - if (!p->valid) { - fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - abort(); - } -#ifdef SEM_USE_PTHREAD - if (taosThreadMutexLock(&p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - p->val += 1; - if (p->val <= 0) { - if (taosThreadCondSignal(&p->cond)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - } - if (taosThreadMutexUnlock(&p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - return 0; -#elif defined(SEM_USE_POSIX) - return sem_post(p->sem); -#elif defined(SEM_USE_SEM) - return semaphore_signal(p->sem); -#else // SEM_USE_PTHREAD - return dispatch_semaphore_signal(p->sem); -#endif // SEM_USE_PTHREAD -} - -int tsem_destroy(tsem_t *sem) { - // fprintf(stderr, "==%s[%d]%s():[%p]==destroying\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - if (!*sem) { - // fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem); - // abort(); + pnewsem->count = count; + *psem = (tsem_t)pnewsem; return 0; - } - struct tsem_s *p = *sem; - if (!p->valid) { - // fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - // sem); abort(); +} + +int tsem_destroy(tsem_t *psem) +{ + bosal_sem_t *poldsem; + + if (! 
psem) + { + return EINVAL; + } + poldsem = (bosal_sem_t *)*psem; + + pthread_mutex_destroy(&poldsem->count_lock); + pthread_cond_destroy(&poldsem->count_bump); + free(poldsem); return 0; - } -#ifdef SEM_USE_PTHREAD - if (taosThreadMutexLock(&p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - p->valid = 0; - if (taosThreadCondDestroy(&p->cond)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - if (taosThreadMutexUnlock(&p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } - if (taosThreadMutexDestroy(&p->lock)) { - fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, - sem); - abort(); - } -#elif defined(SEM_USE_POSIX) - char name[NAME_MAX - 4]; - snprintf(name, sizeof(name), "/t%ld", p->id); - int r = sem_unlink(name); - if (r) { - int e = errno; - fprintf(stderr, "==%s[%d]%s():[%p]==unlink failed[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem, - e, strerror(e)); - abort(); - } -#elif defined(SEM_USE_SEM) - semaphore_destroy(sem_port, p->sem); -#else // SEM_USE_PTHREAD -#endif // SEM_USE_PTHREAD - - p->valid = 0; - taosMemoryFree(p); - - *sem = NULL; - return 0; } -bool taosCheckPthreadValid(TdThread thread) { - uint64_t id = 0; - int r = TdThreadhreadid_np(thread, &id); - return r ? false : true; +int tsem_post(tsem_t *psem) +{ + bosal_sem_t *pxsem; + int result, xresult; + + if (! psem) + { + return EINVAL; + } + pxsem = (bosal_sem_t *)*psem; + + result = pthread_mutex_lock(&pxsem->count_lock); + if (result) + { + return result; + } + pxsem->count = pxsem->count + 1; + + xresult = pthread_cond_signal(&pxsem->count_bump); + + result = pthread_mutex_unlock(&pxsem->count_lock); + if (result) + { + return result; + } + if (xresult) + { + errno = xresult; + return -1; + } + return 0; } +int tsem_trywait(tsem_t *psem) +{ + bosal_sem_t *pxsem; + int result, xresult; + + if (! psem) + { + return EINVAL; + } + pxsem = (bosal_sem_t *)*psem; + + result = pthread_mutex_lock(&pxsem->count_lock); + if (result) + { + return result; + } + xresult = 0; + + if (pxsem->count > 0) + { + pxsem->count--; + } + else + { + xresult = EAGAIN; + } + result = pthread_mutex_unlock(&pxsem->count_lock); + if (result) + { + return result; + } + if (xresult) + { + errno = xresult; + return -1; + } + return 0; +} + +int tsem_wait(tsem_t *psem) +{ + bosal_sem_t *pxsem; + int result, xresult; + + if (! psem) + { + return EINVAL; + } + pxsem = (bosal_sem_t *)*psem; + + result = pthread_mutex_lock(&pxsem->count_lock); + if (result) + { + return result; + } + xresult = 0; + + if (pxsem->count == 0) + { + xresult = pthread_cond_wait(&pxsem->count_bump, &pxsem->count_lock); + } + if (! xresult) + { + if (pxsem->count > 0) + { + pxsem->count--; + } + } + result = pthread_mutex_unlock(&pxsem->count_lock); + if (result) + { + return result; + } + if (xresult) + { + errno = xresult; + return -1; + } + return 0; +} + +int tsem_timewait(tsem_t *psem, int64_t nanosecs) +{ + struct timespec abstim = { + .tv_sec = 0, + .tv_nsec = nanosecs, + }; + + bosal_sem_t *pxsem; + int result, xresult; + + if (! 
psem) + { + return EINVAL; + } + pxsem = (bosal_sem_t *)*psem; + + result = pthread_mutex_lock(&pxsem->count_lock); + if (result) + { + return result; + } + xresult = 0; + + if (pxsem->count == 0) + { + xresult = pthread_cond_timedwait(&pxsem->count_bump, &pxsem->count_lock, &abstim); + } + if (! xresult) + { + if (pxsem->count > 0) + { + pxsem->count--; + } + } + result = pthread_mutex_unlock(&pxsem->count_lock); + if (result) + { + return result; + } + if (xresult) + { + errno = xresult; + return -1; + } + return 0; +} + +bool taosCheckPthreadValid(TdThread thread) { + int32_t ret = taosThreadKill(thread, 0); + if (ret == ESRCH) return false; + if (ret == EINVAL) return false; + // alive + return true; + } + int64_t taosGetSelfPthreadId() { - uint64_t id; - TdThreadhreadid_np(0, &id); - return (int64_t)id; + TdThread thread = taosThreadSelf(); + return (int64_t)thread; } int64_t taosGetPthreadId(TdThread thread) { return (int64_t)thread; } diff --git a/source/os/src/osSignal.c b/source/os/src/osSignal.c index d9b225868a..327beb8999 100644 --- a/source/os/src/osSignal.c +++ b/source/os/src/osSignal.c @@ -73,6 +73,10 @@ void taosIgnSignal(int32_t signum) { signal(signum, SIG_IGN); } void taosDflSignal(int32_t signum) { signal(signum, SIG_DFL); } -void taosKillChildOnParentStopped() { prctl(PR_SET_PDEATHSIG, SIGKILL); } +void taosKillChildOnParentStopped() { +#ifndef _TD_DARWIN_64 + prctl(PR_SET_PDEATHSIG, SIGKILL); +#endif +} #endif diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index f0dd5b974d..5c94d99da0 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -49,6 +49,14 @@ #define INVALID_SOCKET -1 #endif +typedef struct TdSocket { +#if SOCKET_WITH_LOCK + TdThreadRwlock rwlock; +#endif + int refId; + SocketFd fd; +} * TdSocketPtr, TdSocket; + typedef struct TdSocketServer { #if SOCKET_WITH_LOCK TdThreadRwlock rwlock; @@ -1029,60 +1037,6 @@ int32_t taosGetSocketName(TdSocketPtr pSocket, struct sockaddr *destAddr, int *a return getsockname(pSocket->fd, destAddr, addrLen); } -TdEpollPtr taosCreateEpoll(int32_t size) { - EpollFd fd = -1; -#ifdef WINDOWS - assert(0); -#else - fd = epoll_create(size); -#endif - if (fd < 0) { - return NULL; - } - - TdEpollPtr pEpoll = (TdEpollPtr)taosMemoryMalloc(sizeof(TdEpoll)); - if (pEpoll == NULL) { - taosCloseSocketNoCheck1(fd); - return NULL; - } - pEpoll->fd = fd; - pEpoll->refId = 0; - return pEpoll; -} -int32_t taosCtlEpoll(TdEpollPtr pEpoll, int32_t epollOperate, TdSocketPtr pSocket, struct epoll_event *event) { - int32_t code = -1; - if (pEpoll == NULL || pEpoll->fd < 0) { - return -1; - } -#ifdef WINDOWS - assert(0); -#else - code = epoll_ctl(pEpoll->fd, epollOperate, pSocket->fd, event); -#endif - return code; -} -int32_t taosWaitEpoll(TdEpollPtr pEpoll, struct epoll_event *event, int32_t maxEvents, int32_t timeout) { - int32_t code = -1; - if (pEpoll == NULL || pEpoll->fd < 0) { - return -1; - } -#ifdef WINDOWS - assert(0); -#else - code = epoll_wait(pEpoll->fd, event, maxEvents, timeout); -#endif - return code; -} -int32_t taosCloseEpoll(TdEpollPtr *ppEpoll) { - int32_t code; - if (ppEpoll == NULL || *ppEpoll == NULL || (*ppEpoll)->fd < 0) { - return -1; - } - code = taosCloseSocketNoCheck1((*ppEpoll)->fd); - (*ppEpoll)->fd = -1; - taosMemoryFree(*ppEpoll); - return code; -} /* * Set TCP connection timeout per-socket level. 
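
Editor's note (illustration only, not part of the patch): the platform-specific tsem implementation commented out above is replaced by a single counting semaphore built from a pthread mutex plus condition variable (the bosal_sem_t block). Below is a minimal, self-contained sketch of that same pattern; the demo_sem_* names and the main() driver are hypothetical and do not exist in TDengine. Two deliberate differences from the hunk are called out in comments: the wait path loops on the counter (the conventional guard against spurious wakeups), and the timed wait converts a relative timeout into the absolute CLOCK_REALTIME deadline that pthread_cond_timedwait() expects.

/* Illustrative sketch only -- not TDengine code. Compile with: cc demo_sem.c -lpthread */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t  cond;
  unsigned int    count;
} demo_sem_t;

static void demo_sem_init(demo_sem_t *s, unsigned int count) {
  pthread_mutex_init(&s->lock, NULL);
  pthread_cond_init(&s->cond, NULL);
  s->count = count;
}

static void demo_sem_post(demo_sem_t *s) {
  pthread_mutex_lock(&s->lock);
  s->count++;                      /* bump the counter ...        */
  pthread_cond_signal(&s->cond);   /* ... and wake one waiter     */
  pthread_mutex_unlock(&s->lock);
}

static void demo_sem_wait(demo_sem_t *s) {
  pthread_mutex_lock(&s->lock);
  while (s->count == 0) {          /* while-loop guards against spurious wakeups */
    pthread_cond_wait(&s->cond, &s->lock);
  }
  s->count--;
  pthread_mutex_unlock(&s->lock);
}

/* pthread_cond_timedwait() takes an absolute CLOCK_REALTIME deadline, so a
 * relative nanosecond timeout is converted and normalized first. */
static int demo_sem_timedwait(demo_sem_t *s, int64_t rel_nanosecs) {
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_sec  += rel_nanosecs / 1000000000LL;
  deadline.tv_nsec += rel_nanosecs % 1000000000LL;
  if (deadline.tv_nsec >= 1000000000L) {
    deadline.tv_sec  += 1;
    deadline.tv_nsec -= 1000000000L;
  }

  int err = 0;
  pthread_mutex_lock(&s->lock);
  while (s->count == 0 && err == 0) {
    err = pthread_cond_timedwait(&s->cond, &s->lock, &deadline);
  }
  if (err == 0) s->count--;
  pthread_mutex_unlock(&s->lock);
  return err;                      /* 0 on success, ETIMEDOUT on timeout */
}

static demo_sem_t sem;

static void *worker(void *arg) {
  (void)arg;
  demo_sem_wait(&sem);             /* blocks until main() posts */
  printf("worker: woken up\n");
  return NULL;
}

int main(void) {
  pthread_t tid;
  demo_sem_init(&sem, 0);

  pthread_create(&tid, NULL, worker, NULL);
  demo_sem_post(&sem);
  pthread_join(&tid, NULL);

  /* a timed wait on an empty semaphore expires */
  if (demo_sem_timedwait(&sem, 100 * 1000 * 1000) == ETIMEDOUT) {
    printf("main: timed wait expired as expected\n");
  }

  pthread_mutex_destroy(&sem.lock);
  pthread_cond_destroy(&sem.cond);
  return 0;
}

Apart from those two points, the control flow parallels tsem_wait()/tsem_post()/tsem_timewait() in the hunk above.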
* ref [https://github.com/libuv/help/issues/54] @@ -1100,6 +1054,11 @@ int32_t taosCreateSocketWithTimeout(uint32_t timeout) { if (0 != setsockopt(fd, IPPROTO_TCP, TCP_MAXRT, (char *)&timeout, sizeof(timeout))) { return -1; } +#elif defined(_TD_DARWIN_64) + uint32_t conn_timeout_ms = timeout * 1000; + if (0 != setsockopt(fd, IPPROTO_TCP, TCP_CONNECTIONTIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) { + return -1; + } #else // Linux like systems uint32_t conn_timeout_ms = timeout * 1000; if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) { diff --git a/source/os/src/osString.c b/source/os/src/osString.c index e6904f1963..efa65fe191 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -196,7 +196,7 @@ int32_t taosUcs4len(TdUcs4 *ucs4) { } //dst buffer size should be at least 2*len + 1 -int32_t taosHexEncode(const char *src, char *dst, int32_t len) { +int32_t taosHexEncode(const unsigned char *src, char *dst, int32_t len) { if (!dst) { return -1; } diff --git a/source/os/src/osThread.c b/source/os/src/osThread.c index a2778e44c5..39b68d6b54 100644 --- a/source/os/src/osThread.c +++ b/source/os/src/osThread.c @@ -157,9 +157,9 @@ int32_t taosThreadKill(TdThread thread, int32_t sig) { return pthread_kill(thread, sig); } -int32_t taosThreadMutexConsistent(TdThreadMutex* mutex) { - return pthread_mutex_consistent(mutex); -} +// int32_t taosThreadMutexConsistent(TdThreadMutex* mutex) { +// return pthread_mutex_consistent(mutex); +// } int32_t taosThreadMutexDestroy(TdThreadMutex * mutex) { return pthread_mutex_destroy(mutex); @@ -173,9 +173,9 @@ int32_t taosThreadMutexLock(TdThreadMutex * mutex) { return pthread_mutex_lock(mutex); } -int32_t taosThreadMutexTimedLock(TdThreadMutex * mutex, const struct timespec *abstime) { - return pthread_mutex_timedlock(mutex, abstime); -} +// int32_t taosThreadMutexTimedLock(TdThreadMutex * mutex, const struct timespec *abstime) { +// return pthread_mutex_timedlock(mutex, abstime); +// } int32_t taosThreadMutexTryLock(TdThreadMutex * mutex) { return pthread_mutex_trylock(mutex); @@ -193,9 +193,9 @@ int32_t taosThreadMutexAttrGetPshared(const TdThreadMutexAttr * attr, int32_t *p return pthread_mutexattr_getpshared(attr, pshared); } -int32_t taosThreadMutexAttrGetRobust(const TdThreadMutexAttr * attr, int32_t * robust) { - return pthread_mutexattr_getrobust(attr, robust); -} +// int32_t taosThreadMutexAttrGetRobust(const TdThreadMutexAttr * attr, int32_t * robust) { +// return pthread_mutexattr_getrobust(attr, robust); +// } int32_t taosThreadMutexAttrGetType(const TdThreadMutexAttr * attr, int32_t *kind) { return pthread_mutexattr_gettype(attr, kind); @@ -209,9 +209,9 @@ int32_t taosThreadMutexAttrSetPshared(TdThreadMutexAttr * attr, int32_t pshared) return pthread_mutexattr_setpshared(attr, pshared); } -int32_t taosThreadMutexAttrSetRobust(TdThreadMutexAttr * attr, int32_t robust) { - return pthread_mutexattr_setrobust(attr, robust); -} +// int32_t taosThreadMutexAttrSetRobust(TdThreadMutexAttr * attr, int32_t robust) { +// return pthread_mutexattr_setrobust(attr, robust); +// } int32_t taosThreadMutexAttrSetType(TdThreadMutexAttr * attr, int32_t kind) { return pthread_mutexattr_settype(attr, kind); @@ -233,13 +233,13 @@ int32_t taosThreadRwlockRdlock(TdThreadRwlock * rwlock) { return pthread_rwlock_rdlock(rwlock); } -int32_t taosThreadRwlockTimedRdlock(TdThreadRwlock * rwlock, const struct timespec *abstime) { - return pthread_rwlock_timedrdlock(rwlock, abstime); -} +// 
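
Editor's note (illustration only, not part of the patch): the new _TD_DARWIN_64 branch in taosCreateSocketWithTimeout() above selects TCP_CONNECTIONTIMEOUT on Darwin where Linux-like systems use TCP_USER_TIMEOUT. The sketch below mirrors that branch in a standalone helper; the function name is hypothetical, the Windows TCP_MAXRT path is omitted, error handling is simplified, and the millisecond unit on Darwin is an assumption carried over from the hunk rather than something verified here.

/* Illustrative sketch only -- not TDengine code. POSIX platforms only. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int demo_create_socket_with_timeout(unsigned int timeout_sec) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) return -1;

  unsigned int timeout_ms = timeout_sec * 1000;  /* same conversion as the hunk above */
#if defined(__APPLE__)
  int opt = TCP_CONNECTIONTIMEOUT;  /* Darwin: connection-establishment timeout */
#else
  int opt = TCP_USER_TIMEOUT;       /* Linux: drop the connection if sent data stays unacked this long */
#endif
  if (setsockopt(fd, IPPROTO_TCP, opt, &timeout_ms, sizeof(timeout_ms)) != 0) {
    perror("setsockopt");
    close(fd);
    return -1;
  }
  return fd;
}

int main(void) {
  int fd = demo_create_socket_with_timeout(5);
  if (fd >= 0) {
    printf("socket %d created with a 5s timeout option\n", fd);
    close(fd);
  }
  return 0;
}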
int32_t taosThreadRwlockTimedRdlock(TdThreadRwlock * rwlock, const struct timespec *abstime) { +// return pthread_rwlock_timedrdlock(rwlock, abstime); +// } -int32_t taosThreadRwlockTimedWrlock(TdThreadRwlock * rwlock, const struct timespec *abstime) { - return pthread_rwlock_timedwrlock(rwlock, abstime); -} +// int32_t taosThreadRwlockTimedWrlock(TdThreadRwlock * rwlock, const struct timespec *abstime) { +// return pthread_rwlock_timedwrlock(rwlock, abstime); +// } int32_t taosThreadRwlockTryRdlock(TdThreadRwlock * rwlock) { return pthread_rwlock_tryrdlock(rwlock); @@ -303,7 +303,7 @@ int32_t taosThreadSpinDestroy(TdThreadSpinlock * lock) { int32_t taosThreadSpinInit(TdThreadSpinlock * lock, int32_t pshared) { #ifdef TD_USE_SPINLOCK_AS_MUTEX - assert(pshared == NULL); + assert(pshared == 0); return pthread_mutex_init((pthread_mutex_t*)lock, NULL); #else return pthread_spin_init((pthread_spinlock_t*)lock, pshared); diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c index f2939d1661..0975b10d55 100644 --- a/source/util/src/tcache.c +++ b/source/util/src/tcache.c @@ -417,8 +417,8 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v if (pNode == NULL) { pushfrontNodeInEntryList(pe, pNode1); - atomic_add_fetch_64(&pCacheObj->numOfElems, 1); - atomic_add_fetch_64(&pCacheObj->sizeInBytes, pNode1->size); + atomic_add_fetch_ptr(&pCacheObj->numOfElems, 1); + atomic_add_fetch_ptr(&pCacheObj->sizeInBytes, pNode1->size); uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d sizeInBytes:%" PRId64 "bytes size:%" PRId64 "bytes", pCacheObj->name, key, pNode1->data, pNode1->addedTime, pNode1->expireTime, (int32_t)pCacheObj->numOfElems, @@ -667,7 +667,7 @@ void doTraverseElems(SCacheObj *pCacheObj, bool (*fp)(void *param, SCacheNode *p pEntry->next = next; pEntry->num -= 1; - atomic_sub_fetch_64(&pCacheObj->numOfElems, 1); + atomic_sub_fetch_ptr(&pCacheObj->numOfElems, 1); pNode = next; } } diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index 11a1cc1c71..fe3065b2b7 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -56,11 +56,11 @@ int32_t setChkNotInBytes8(const void *pLeft, const void *pRight) { } int32_t compareChkInString(const void *pLeft, const void *pRight) { - return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; + return NULL != taosHashGet((SHashObj *)pRight, pLeft, varDataTLen(pLeft)) ? 1 : 0; } int32_t compareChkNotInString(const void *pLeft, const void *pRight) { - return NULL == taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; + return NULL == taosHashGet((SHashObj *)pRight, pLeft, varDataTLen(pLeft)) ? 
1 : 0; } int32_t compareInt8Val(const void *pLeft, const void *pRight) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index e867af86af..e05b634222 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -177,6 +177,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, "Dnode already exists" TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, "Dnode does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_DNODES, "Too many dnodes") TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, "Out of dnodes") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_MEM_IN_DNODE, "No enough memory in dnode") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_CFG, "Cluster cfg inconsistent") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_ID, "Cluster id not match") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG, "Invalid dnode cfg") @@ -434,6 +435,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_RECONFIG_NOT_READY, "Sync not ready for re TAOS_DEFINE_ERROR(TSDB_CODE_SYN_PROPOSE_NOT_READY, "Sync not ready for propose") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_STANDBY_NOT_READY, "Sync not ready for standby") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_BATCH_ERROR, "Sync batch error") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_TIMEOUT, "Sync timeout") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INTERNAL_ERROR, "Sync internal error") // wal @@ -591,12 +593,15 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_NO_INDEX_IN_CACHE, "No tsma index in ca TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_ENV, "Invalid rsma env") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_STAT, "Invalid rsma state") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_QTASKINFO_CREATE, "Rsma qtaskinfo creation error") +TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FILE_CORRUPTED, "Rsma file corrupted") + //tq TAOS_DEFINE_ERROR(TSDB_CODE_TQ_NO_COMMITTED_OFFSET, "No committed offset") TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding") +TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") diff --git a/source/util/src/ttimer.c b/source/util/src/ttimer.c index e06d7d8c89..7256331c85 100644 --- a/source/util/src/ttimer.c +++ b/source/util/src/ttimer.c @@ -18,6 +18,7 @@ #include "taoserror.h" #include "tlog.h" #include "tsched.h" +#include "tdef.h" #define tmrFatal(...) 
\ { \ @@ -110,7 +111,7 @@ typedef struct time_wheel_t { tmr_obj_t** slots; } time_wheel_t; -static int32_t tsMaxTmrCtrl = 512; +static int32_t tsMaxTmrCtrl = TSDB_MAX_VNODES_PER_DB + 100; static TdThreadOnce tmrModuleInit = PTHREAD_ONCE_INIT; static TdThreadMutex tmrCtrlMutex; @@ -132,7 +133,7 @@ static timer_map_t timerMap; static uintptr_t getNextTimerId() { uintptr_t id; do { - id = (uintptr_t)atomic_add_fetch_ptr((void **)&nextTimerId, (void*)1); + id = (uintptr_t)atomic_add_fetch_ptr((void **)&nextTimerId, 1); } while (id == 0); return id; } diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 6425e7acfa..94043ed01a 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -662,6 +662,12 @@ class TDCom: return res_list else: tdLog.exit(f"getOneRow out of range: row_index={location} row_count={self.query_row}") + + def killProcessor(self, processorName): + if (platform.system().lower() == 'windows'): + os.system("TASKKILL /F /IM %s.exe"%processorName) + else: + os.system('pkill %s'%processorName) def is_json(msg): diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index cb432024b7..cbcc2d86ef 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -9,8 +9,8 @@ ## ---- db ./test.sh -f tsim/db/alter_option.sim -./test.sh -f tsim/db/alter_replica_13.sim -./test.sh -f tsim/db/alter_replica_31.sim +# ./test.sh -f tsim/db/alter_replica_13.sim +# ./test.sh -f tsim/db/alter_replica_31.sim ./test.sh -f tsim/db/basic1.sim ./test.sh -f tsim/db/basic2.sim ./test.sh -f tsim/db/basic3.sim @@ -24,26 +24,26 @@ ./test.sh -f tsim/db/taosdlog.sim # ---- dnode -./test.sh -f tsim/dnode/balance_replica1.sim -./test.sh -f tsim/dnode/balance_replica3.sim -./test.sh -f tsim/dnode/balance1.sim -./test.sh -f tsim/dnode/balance2.sim -./test.sh -f tsim/dnode/balance3.sim -./test.sh -f tsim/dnode/balancex.sim +# ./test.sh -f tsim/dnode/balance_replica1.sim +# ./test.sh -f tsim/dnode/balance_replica3.sim +# ./test.sh -f tsim/dnode/balance1.sim +# ./test.sh -f tsim/dnode/balance2.sim +# ./test.sh -f tsim/dnode/balance3.sim +# ./test.sh -f tsim/dnode/balancex.sim ./test.sh -f tsim/dnode/create_dnode.sim ./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim -./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim -./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim -./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim -./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim -./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim +# ./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim +# ./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim +# ./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim +# ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim +# ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim ./test.sh -f tsim/dnode/offline_reason.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim -./test.sh -f tsim/dnode/vnode_clean.sim +# ./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim +# ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim +# ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim +# ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim +# 
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim +# ./test.sh -f tsim/dnode/vnode_clean.sim # ---- insert ./test.sh -f tsim/insert/basic0.sim @@ -71,7 +71,7 @@ ./test.sh -f tsim/qnode/basic1.sim # ---- snode -./test.sh -f tsim/snode/basic1.sim +# ./test.sh -f tsim/snode/basic1.sim # ---- bnode ./test.sh -f tsim/bnode/basic1.sim @@ -93,6 +93,7 @@ ./test.sh -f tsim/stream/basic0.sim ./test.sh -f tsim/stream/basic1.sim ./test.sh -f tsim/stream/basic2.sim +./test.sh -f tsim/stream/drop_stream.sim ./test.sh -f tsim/stream/distributeInterval0.sim # ./test.sh -f tsim/stream/distributeIntervalRetrive0.sim # ./test.sh -f tsim/stream/distributesession0.sim @@ -103,8 +104,9 @@ # ./test.sh -f tsim/stream/triggerSession0.sim ./test.sh -f tsim/stream/partitionby.sim ./test.sh -f tsim/stream/partitionby1.sim -./test.sh -f tsim/stream/schedSnode.sim +# ./test.sh -f tsim/stream/schedSnode.sim ./test.sh -f tsim/stream/windowClose.sim +./test.sh -f tsim/stream/ignoreExpiredData.sim # ---- transaction ./test.sh -f tsim/trans/lossdata1.sim @@ -159,20 +161,22 @@ #./test.sh -f tsim/mnode/basic1.sim -m # --- sma +./test.sh -f tsim/sma/drop_sma.sim ./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim +./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim # --- valgrind ./test.sh -f tsim/valgrind/checkError.sim -v # --- vnode -#./test.sh -f tsim/vnode/replica3_basic.sim -#./test.sh -f tsim/vnode/replica3_repeat.sim -./test.sh -f tsim/vnode/replica3_vgroup.sim -#./test.sh -f tsim/vnode/replica3_many.sim -#./test.sh -f tsim/vnode/replica3_import.sim -./test.sh -f tsim/vnode/stable_balance_replica1.sim -./test.sh -f tsim/vnode/stable_dnode2_stop.sim +# ./test.sh -f tsim/vnode/replica3_basic.sim +# ./test.sh -f tsim/vnode/replica3_repeat.sim +# ./test.sh -f tsim/vnode/replica3_vgroup.sim +# ./test.sh -f tsim/vnode/replica3_many.sim +# ./test.sh -f tsim/vnode/replica3_import.sim +# ./test.sh -f tsim/vnode/stable_balance_replica1.sim +# ./test.sh -f tsim/vnode/stable_dnode2_stop.sim ./test.sh -f tsim/vnode/stable_dnode2.sim ./test.sh -f tsim/vnode/stable_dnode3.sim ./test.sh -f tsim/vnode/stable_replica3_dnode6.sim diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 5edc0a4d3e..1deea26337 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -121,7 +121,7 @@ echo "firstEp ${HOSTNAME}:7100" >> $TAOS_CFG echo "secondEp ${HOSTNAME}:7200" >> $TAOS_CFG echo "fqdn ${HOSTNAME}" >> $TAOS_CFG echo "serverPort ${NODE}" >> $TAOS_CFG -echo "supportVnodes 128" >> $TAOS_CFG +echo "supportVnodes 1024" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG echo "debugFlag 0" >> $TAOS_CFG diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh index b431c0627c..d30c75022a 100755 --- a/tests/script/sh/stop_dnodes.sh +++ b/tests/script/sh/stop_dnodes.sh @@ -14,7 +14,7 @@ while [ -n "$PID" ]; do echo kill -9 $PID #pkill -9 taosd kill -9 $PID - echo "Killing processes locking on port 6030" + echo "Killing taosd processes" if [ "$OS_TYPE" != "Darwin" ]; then fuser -k -n tcp 6030 else diff --git a/tests/script/tmp/prepare.sim b/tests/script/tmp/prepare.sim index 3b43656a41..2e16ee7bda 100644 --- a/tests/script/tmp/prepare.sim +++ b/tests/script/tmp/prepare.sim @@ -7,39 +7,33 @@ system sh/deploy.sh -n dnode4 -i 4 return -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/deploy.sh -n dnode5 -i 5 
-system sh/deploy.sh -n dnode6 -i 6 -system sh/deploy.sh -n dnode7 -i 7 -system sh/deploy.sh -n dnode8 -i 8 -system sh/deploy.sh -n dnode9 -i 9 system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode5 -s start -system sh/exec.sh -n dnode6 -s start -system sh/exec.sh -n dnode7 -s start -system sh/exec.sh -n dnode8 -s start -system sh/exec.sh -n dnode9 -s start +sql connect -sleep 2000 +$num = 200 +$i = 0 +while $i < $num + $port = $i + 8000 + $i = $i + 1 + sql create dnode $hostname port $port +endw -sql create dnode $hostname port 7200 -sql create dnode $hostname port 7300 -sql create dnode $hostname port 7400 -sql create dnode $hostname port 7500 -sql create dnode $hostname port 7600 -sql create dnode $hostname port 7700 -sql create dnode $hostname port 7800 -sql create dnode $hostname port 7900 +$i = 0 +while $i < $num + $port = $i + 8000 + $i = $i + 1 + $name = dnode . $port + system sh/deploy.sh -n $name -i 1 + system sh/cfg.sh -n $name -c serverPort -v $port + system sh/exec.sh -n $name -s start +endw -sql show dnodes; -print $data00 $data01 -print $data10 $data11 -print $data20 $data21 -print $data30 $data31 -print $data40 $data41 \ No newline at end of file +return + +sql create database db vgroups 1024 buffer 3; +sql use db; +sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned); +sql create table ct1 using stb tags(1000); +sql create table ct2 using stb tags(1000) ; +sql show db.tables; +sql insert into ct1 values(now+0s, 10, 2.0, 3.0); \ No newline at end of file diff --git a/tests/script/tsim/insert/update0.sim b/tests/script/tsim/insert/update0.sim index ed74188bcb..41a389b6e8 100644 --- a/tests/script/tsim/insert/update0.sim +++ b/tests/script/tsim/insert/update0.sim @@ -8,8 +8,8 @@ print =============== create database sql create database d0 keep 365000d,365000d,365000d sql use d0 -print =============== create super table and register rsma -sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min); +print =============== create super table +sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)); sql show stables if $rows != 1 then diff --git a/tests/script/tsim/sma/drop_sma.sim b/tests/script/tsim/sma/drop_sma.sim index 17f19f5df3..78f86f6e19 100644 --- a/tests/script/tsim/sma/drop_sma.sim +++ b/tests/script/tsim/sma/drop_sma.sim @@ -50,6 +50,21 @@ sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) t print --> create sma sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m); +print --> show sma +sql show indexes from stb from d1; +if $rows != 1 then + return -1 +endi +if $data[0][0] != sma_index_name1 then + return -1 +endi +if $data[0][1] != d1 then + return -1 +endi +if $data[0][2] != stb then + return -1 +endi + print --> drop stb sql drop table stb; @@ -61,15 +76,77 @@ sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) t print --> create sma sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m); +print --> show sma +sql show indexes from stb from d1; +if $rows != 1 then + return -1 +endi +if $data[0][0] != sma_index_name1 then + return -1 +endi +if $data[0][1] != d1 then + return -1 +endi +if $data[0][2] != stb then + return -1 +endi + print --> 
drop stb sql drop table stb; +print ========== step5 +sql drop database if exists db; +sql create database db duration 300; +sql use db; +sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint, c_float float, c_double double, c_bool bool, c_binary binary(16), c_nchar nchar(32), c_ts timestamp, c_tint_un tinyint unsigned, c_sint_un smallint unsigned, c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); +sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; + +print ========== step6 repeat +sql drop database if exists db; +sql create database db duration 300; +sql use db; +sql create table stb1(ts timestamp, c_int int, c_bint bigint ) tags (t_int int); +sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; + +print ========== step7 +sql drop database if exists db; +sql create database db duration 300; +sql use db; +sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); + +sql create table ct1 using stb1 tags ( 1 ); +sql create table ct2 using stb1 tags ( 2 ); +sql create table ct3 using stb1 tags ( 3 ); +sql create table ct4 using stb1 tags ( 4 ); + +sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; +sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m; +sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m; + +sql DROP INDEX sma_index_1 ; +sql DROP INDEX sma_index_2 ; +sql DROP INDEX sma_index_3 ; + +print ========== step8 +sql drop database if exists db; +sql create database db duration 300; +sql use db; +sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); + +sql create table ct1 using stb1 tags ( 1 ); +sql create table ct2 using stb1 tags ( 2 ); +sql create table ct3 using stb1 tags ( 3 ); +sql create table ct4 using stb1 tags ( 4 ); + +sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; +sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m; +sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m; + +sql DROP INDEX sma_index_1 ; +sql DROP INDEX sma_index_2 ; +sql DROP INDEX sma_index_3 ; system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git 
a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim new file mode 100644 index 0000000000..1b54e5a47d --- /dev/null +++ b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim @@ -0,0 +1,237 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database with retentions +sql create database d0 retentions 5s:7d,5m:21d,15m:365d; +sql use d0 + +print =============== create super table and register rsma +sql create table if not exists stb (ts timestamp, c1 int, c2 float) tags (city binary(20),district binary(20)) rollup(max) max_delay 5s,5s watermark 2s,3s; + +sql show stables +if $rows != 1 then + return -1 +endi + +print =============== create child table +sql create table ct1 using stb tags("BeiJing", "ChaoYang"); + +sql show tables +if $rows != 1 then + return -1 +endi + +print =============== insert data and trigger rollup +sql insert into ct1 values(now, 10, 10.0); +sql insert into ct1 values(now+1s, 1, 1.0); +sql insert into ct1 values(now+2s, 100, 100.0); + +print =============== wait maxdelay 5+1 seconds for results +sleep 6000 + +print =============== select * from retention level 2 from memory +sql select * from ct1; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 2 file rows $rows > 2 + return -1 +endi + + +if $data01 != 100 then + if $data01 != 10 then + print retention level 2 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 1 from memory +sql select * from ct1 where ts > now-8d; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 1 file rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 1 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 0 from memory +sql select * from ct1 where ts > now-3d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 + +if $rows < 1 then + print retention level 0 file rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + +#=================================================================== + + +#==================== reboot to trigger commit data to file +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +print =============== select * from retention level 2 from file +sql select * from ct1; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 2 file rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 2 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 1 from file +sql select * from ct1 where ts > now-8d; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 1 file rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 1 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 0 from file +sql select * from ct1 where ts > now-3d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 +if $rows < 1 then + print retention level 0 file rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file result 
$data01 != 10 + return -1 +endi + +print =============== insert after rsma qtaskinfo recovery +sql insert into ct1 values(now, 50, 500.0); +sql insert into ct1 values(now+1s, 40, 40.0); + +print =============== wait maxdelay 5+1 seconds for results +sleep 6000 + +print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery +sql select * from ct1; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 2 file/mem rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 2 file/mem result $data01 != 100 or 10 + return -1 + endi +endi + +if $data02 != 500.00000 then + if $data02 != 100.00000 then + print retention level 1 file/mem result $data02 != 500.00000 or 100.00000 + return -1 + endi +endi + +print =============== select * from retention level 1 from file and memory after rsma qtaskinfo recovery +sql select * from ct1 where ts > now-8d; +print $data00 $data01 $data02 +if $rows > 2 then + print retention level 1 file/mem rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print retention level 1 file/mem result $data01 != 100 or 10 + return -1 + endi +endi + +if $data02 != 500.00000 then + if $data02 != 100.00000 then + print retention level 1 file/mem result $data02 != 500.00000 or 100.00000 + return -1 + endi +endi + + +print =============== select * from retention level 0 from file and memory after rsma qtaskinfo recovery +sql select * from ct1 where ts > now-3d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 +print $data30 $data31 $data32 +print $data40 $data41 $data42 + +if $rows < 1 then + print retention level 0 file/mem rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file/mem result $data01 != 10 + return -1 +endi + +if $data11 != 1 then + print retention level 0 file/mem result $data11 != 1 + return -1 +endi + +if $data21 != 100 then + print retention level 0 file/mem result $data21 != 100 + return -1 +endi + +if $data31 != 50 then + print retention level 0 file/mem result $data31 != 50 + return -1 +endi + +if $data32 != 500.00000 then + print retention level 0 file/mem result $data32 != 500.00000 + return -1 +endi + + +if $data41 != 40 then + print retention level 0 file/mem result $data41 != 40 + return -1 +endi + +if $data42 != 40.00000 then + print retention level 0 file/mem result $data42 != 40.00000 + return -1 +endi + + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stream/distributeInterval0.sim b/tests/script/tsim/stream/distributeInterval0.sim index a59e989d80..1c0d0a3bd7 100644 --- a/tests/script/tsim/stream/distributeInterval0.sim +++ b/tests/script/tsim/stream/distributeInterval0.sim @@ -83,9 +83,9 @@ sql insert into ts3 values(1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) $loop_count = 0 loop1: +sleep 300 sql select * from streamtST1; -sleep 300 $loop_count = $loop_count + 1 if $loop_count == 10 then return -1 diff --git a/tests/script/tsim/stream/distributeSession0.sim b/tests/script/tsim/stream/distributeSession0.sim index a165b86edd..30c3c641d4 100644 --- a/tests/script/tsim/stream/distributeSession0.sim +++ b/tests/script/tsim/stream/distributeSession0.sim @@ -10,6 +10,30 @@ sql create dnode $hostname2 port 7200 system sh/exec.sh -n dnode2 -s start +print ===== step1 +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! 
+ return -1 + endi +sql show dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 2 then + return -1 +endi +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi + +print ===== step2 + sql create database test vgroups 4; sql use test; sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); diff --git a/tests/script/tsim/stream/drop_stream.sim b/tests/script/tsim/stream/drop_stream.sim new file mode 100644 index 0000000000..bdd88bf780 --- /dev/null +++ b/tests/script/tsim/stream/drop_stream.sim @@ -0,0 +1,222 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 +system sh/cfg.sh -n dnode2 -c supportVnodes -v 4 +system sh/cfg.sh -n dnode3 -c supportVnodes -v 4 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== step2 +sql create dnode $hostname port 7200 +system sh/exec.sh -n dnode2 -s start + +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 2 then + return -1 +endi +if $data(1)[4] != ready then + goto step2 +endi +if $data(2)[4] != ready then + goto step2 +endi + +print ========== step3 +sql drop database if exists test; +sql create database if not exists test vgroups 1 precision "ms" ; +sql use test; +sql create table test.scalar_function_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 tinyint unsigned, t6 smallint unsigned, t7 int unsigned, t8 bigint unsigned, t9 float, t10 double, t11 binary(256), t12 nchar(256), t13 bool) ; +sql create table scalar_function_ct1 using scalar_function_stb tags (-38, -32456, 509722288, -1404014954778348330, 87, 8879, 3351927345, 1840080781675115605, 3.002364316200592e+38, 6.698140580387119e+37, "bktezshfyvmrmgzwrwerytfwudlblkyyxismpommiqpqsptpiucptwqutzhajxbiitqxkrpobqhgqvjlvgsudewmelpunjspurbpbbwypvgbwjfrwidrchnojtxyhrwfjwgdiabzfoujxkwcjjxjqsrnhmryjhrykldmdfiwircdfahldtrtuafzvybkihyjatiqivbtpydjtmbfddcgyzjuqidwcchtsamnwyqwvajftayyvfrmqcqygbxmxgjx", "ddlxkxhrvviwnjeqhewbercnlontwbsyevcjsocrwyupautsjkdzqbwuzsuetptgsdfyjzfkqyobkysikpaxtqqonxtocfowaehgovshwyciyzfmdmcmwaolkhdunfhwhcanetepxyppuullxnclockmadyaaufywllwburgsfxizcjgzvboydpqymlwgktslclidbcwiyyubyuvhjgwldkgxswigjkpbpslvlsbigdlmuldmtbqencbntbaohxr", False) ; +sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) ; +sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int); +sql create table scalar_ct1 using scalar_stb tags(10); +sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)); +sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb; +sql create stream 
ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1; +sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb; +sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb; +sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1; +sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb; +sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb; +sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1; +sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb; +sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb; +sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1; +sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb; +sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb; +sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1; +sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb; +sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb; +sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1; +sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb; +sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb; +sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1; +sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb; +sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb; +sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1; +sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb; +sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb; +sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1; +sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb; +sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb; +sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1; +sql create stream tb_round_stream trigger at_once into 
output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb; +sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb; +sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1; +sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb; +sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb; +sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1; +sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb; +sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb; +sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1; +sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb; +sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb; +sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1; +sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb; +sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb; +sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1; +sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb; +sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb; +sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1; +sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb; +sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb; +sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1; +sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb; +sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb; +sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1; +sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb; +sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from 
scalar_stb; +sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1; +sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb; +sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb; +sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1; +sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb; +sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb; +sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1; +sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb; +sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb; +sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1; +sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb; +sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); +sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata"); +sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null); +sql insert into scalar_tb values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); +sql insert into scalar_tb values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata"); +sql insert into scalar_tb values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null); +sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); +sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); +sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); +sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); + +print ========== step4 +sql drop database test; + + +print ========== step5 repeat +sql drop database if exists test; +sql create database if not exists test vgroups 1 precision "ms" ; +sql use test; +sql create table test.scalar_function_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 tinyint unsigned, t6 smallint unsigned, t7 int unsigned, t8 bigint unsigned, t9 float, t10 double, t11 binary(256), t12 nchar(256), t13 bool) ; +sql create table scalar_function_ct1 using scalar_function_stb tags (-38, -32456, 509722288, -1404014954778348330, 87, 8879, 3351927345, 1840080781675115605, 3.002364316200592e+38, 6.698140580387119e+37, "bktezshfyvmrmgzwrwerytfwudlblkyyxismpommiqpqsptpiucptwqutzhajxbiitqxkrpobqhgqvjlvgsudewmelpunjspurbpbbwypvgbwjfrwidrchnojtxyhrwfjwgdiabzfoujxkwcjjxjqsrnhmryjhrykldmdfiwircdfahldtrtuafzvybkihyjatiqivbtpydjtmbfddcgyzjuqidwcchtsamnwyqwvajftayyvfrmqcqygbxmxgjx", 
"ddlxkxhrvviwnjeqhewbercnlontwbsyevcjsocrwyupautsjkdzqbwuzsuetptgsdfyjzfkqyobkysikpaxtqqonxtocfowaehgovshwyciyzfmdmcmwaolkhdunfhwhcanetepxyppuullxnclockmadyaaufywllwburgsfxizcjgzvboydpqymlwgktslclidbcwiyyubyuvhjgwldkgxswigjkpbpslvlsbigdlmuldmtbqencbntbaohxr", False) ; +sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) ; +sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int); +sql create table scalar_ct1 using scalar_stb tags(10); +sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)); +sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb; +sql create stream ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1; +sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb; +sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb; +sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1; +sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb; +sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb; +sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1; +sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb; +sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb; +sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1; +sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb; +sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb; +sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1; +sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb; +sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb; +sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1; +sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb; +sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb; +sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1; +sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb; +sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 
from scalar_stb; +sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1; +sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb; +sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb; +sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1; +sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb; +sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb; +sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1; +sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb; +sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb; +sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1; +sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb; +sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb; +sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1; +sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb; +sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb; +sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1; +sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb; +sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb; +sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1; +sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb; +sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb; +sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1; +sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb; +sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb; +sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1; +sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, 
concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb; +sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb; +sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1; +sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb; +sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb; +sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1; +sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb; +sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb; +sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1; +sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb; +sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb; +sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1; +sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb; +sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb; +sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1; +sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb; +sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb; +sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1; +sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb; +sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); +sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata"); +sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null); +sql insert into scalar_tb values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); +sql insert into scalar_tb values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata"); +sql insert into scalar_tb values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null); +sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); +sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); +sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); +sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos"); + +print ========== step6 repeat +sql drop database test; + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop 
-x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stream/ignoreExpiredData.sim b/tests/script/tsim/stream/ignoreExpiredData.sim new file mode 100644 index 0000000000..c6b708d1e8 --- /dev/null +++ b/tests/script/tsim/stream/ignoreExpiredData.sim @@ -0,0 +1,161 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 + +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +sql create dnode $hostname2 port 7200 + +system sh/exec.sh -n dnode2 -s start + +print ===== step1 +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 2 then + return -1 +endi +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi + +print ===== step2 + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED into streamt1 as select _wstartts, count(*) c1, sum(a) c3 from t1 interval(10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED into streamt2 as select _wstartts, count(*) c1, sum(a) c3 from t1 session(ts,10s); +sql create stream streams3 trigger at_once IGNORE EXPIRED into streamt3 as select _wstartts, count(*) c1, sum(a) c3 from t1 state_window(a); +sql insert into t1 values(1648791213000,1,2,3,1.0); +sql insert into t1 values(1648791223001,1,2,3,1.1); +sql insert into t1 values(1648791233002,2,2,3,2.1); +sql insert into t1 values(1648791243003,2,2,3,3.1); +sql insert into t1 values(1648791200000,4,2,3,4.1); + +$loop_count = 0 +loop1: +sleep 300 +sql select * from streamt1; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print =====rows=$rows + goto loop1 +endi + +$loop_count = 0 +loop2: +sleep 300 +sql select * from streamt2; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print =====rows=$rows + goto loop2 +endi + +$loop_count = 0 +loop3: +sleep 300 +sql select * from streamt3; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 2 then + print =====rows=$rows + goto loop3 +endi + + +print =============== create database +sql create database test1 vgroups 4 +sql show databases + +print ======database=$rows + +sql use test1 + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table ts1 using st tags(1,1,1); +sql create table ts2 using st tags(2,2,2); +sql create stream stream_t1 trigger at_once IGNORE EXPIRED into streamtST1 as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ; +sql create stream stream_t2 trigger at_once IGNORE EXPIRED into streamtST2 as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st session(ts, 10s) ; +sql insert into ts1 values(1648791211000,1,2,3); +sql insert into ts1 values(1648791222001,2,2,3); +sql insert into ts2 values(1648791211000,1,2,3); +sql insert into ts2 values(1648791222001,2,2,3); + +$loop_count = 0 +loop4: +sleep 300 +sql select * from streamtST1; + +$loop_count = $loop_count + 1 
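Note on the retry blocks in ignoreExpiredData.sim: loop1 through loop3 above, and loop4/loop5 continuing below, all use the same idiom because stream results are materialized asynchronously: sleep, re-query the stream's output table, and give up only after a fixed number of attempts. The late row inserted at 1648791200000 is older than the windows already produced, and with IGNORE EXPIRED the stream drops it instead of reopening an earlier window, which is why streamt1 is expected to settle at 4 rows rather than 5. A minimal, self-contained Python sketch of the same poll-until-expected-rows pattern (the helper name and the query callable are illustrative, not part of the test framework):

    import time

    def wait_for_rows(query, sql, expect_rows, attempts=10, delay=0.3):
        # Poll `sql` until it returns `expect_rows` rows, mirroring the
        # sleep/goto retry loops used in the .sim script.
        for _ in range(attempts):
            rows = query(sql)
            if len(rows) == expect_rows:
                return rows
            time.sleep(delay)
        raise AssertionError(f"{sql!r} did not reach {expect_rows} rows after {attempts} tries")

    # Example usage with a hypothetical connection object:
    # wait_for_rows(lambda s: conn.query(s).fetch_all(), "select * from streamt1", 4)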
+if $loop_count == 10 then + return -1 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop4 +endi + +if $data02 != 1 then + print =====data02=$data02 + goto loop4 +endi + +$loop_count = 0 +loop5: +sleep 300 +sql select * from streamtST2; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop5 +endi + +if $data02 != 1 then + print =====data02=$data02 + goto loop5 +endi + +system sh/stop_dnodes.sh \ No newline at end of file diff --git a/tests/script/tsim/tmq/snapshot1.sim b/tests/script/tsim/tmq/snapshot1.sim index d534bb68da..58541b725d 100644 --- a/tests/script/tsim/tmq/snapshot1.sim +++ b/tests/script/tsim/tmq/snapshot1.sim @@ -25,7 +25,7 @@ $rowsPerCtb = 10 $tstart = 1640966400000 # 2022-01-01 00:00:00.000 #---- global parameters end ----# -$pullDelay = 5 +$pullDelay = 2 $ifcheckdata = 1 $ifmanualcommit = 1 $showMsg = 1 diff --git a/tests/script/tsim/valgrind/basic.sim b/tests/script/tsim/valgrind/basic.sim index 0f11ae0313..b449527568 100644 --- a/tests/script/tsim/valgrind/basic.sim +++ b/tests/script/tsim/valgrind/basic.sim @@ -1,8 +1,33 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode1 -s start -v sql connect -sql create database d0 vgroups 1; +print =============== step1: create drop show dnodes +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +if $rows != 1 then + return -1 +endi +goto _OVER + +print =============== step2: create alter drop show user +sql create user u1 pass 'taosdata' +sql show users +sql alter user u1 sysinfo 1 +sql alter user u1 enable 1 +sql alter user u1 pass 'taosdata' +sql alter user u2 sysinfo 0 +sql drop user u1 + +_OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/system-test/1-insert/alter_stable.py b/tests/system-test/1-insert/alter_stable.py index cd64e3ddfe..a4cec78138 100644 --- a/tests/system-test/1-insert/alter_stable.py +++ b/tests/system-test/1-insert/alter_stable.py @@ -16,135 +16,150 @@ import string from util.log import * from util.cases import * from util.sql import * - +from util.sqlset import * +from util import constant +from util.common import * class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + self.ntbname = 'ntb' + self.stbname = 'stb' + self.binary_length = 20 # the length of binary for column_dict + self.nchar_length = 20 # the length of nchar for column_dict + self.column_dict = { + 'ts' : 'timestamp', + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': f'binary({self.binary_length})', + 'col13': f'nchar({self.nchar_length})' + } + self.tag_dict = { + 'ts_tag' : 'timestamp', + 't1': 'tinyint', + 't2': 'smallint', + 't3': 'int', + 't4': 'bigint', + 't5': 'tinyint unsigned', + 't6': 'smallint unsigned', + 't7': 'int unsigned', + 't8': 'bigint unsigned', + 't9': 'float', + 't10': 'double', + 't11': 'bool', + 't12': f'binary({self.binary_length})', + 't13': f'nchar({self.nchar_length})' + } + self.tag_list = [ + f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"' + ] + self.tbnum = 1 + 
self.values_list = [ + f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"' + ] + self.column_add_dict = { + 'col_time' : 'timestamp', + 'col_tinyint' : 'tinyint', + 'col_smallint' : 'smallint', + 'col_int' : 'int', + 'col_bigint' : 'bigint', + 'col_untinyint' : 'tinyint unsigned', + 'col_smallint' : 'smallint unsigned', + 'col_int' : 'int unsigned', + 'col_bigint' : 'bigint unsigned', + 'col_bool' : 'bool', + 'col_float' : 'float', + 'col_double' : 'double', + 'col_binary' : f'binary({constant.BINARY_LENGTH_MAX})', + 'col_nchar' : f'nchar({constant.NCAHR_LENGTH_MAX})' - def get_long_name(self, length, mode="mixed"): - """ - generate long name - mode could be numbers/letters/letters_mixed/mixed - """ - if mode == "numbers": - population = string.digits - elif mode == "letters": - population = string.ascii_letters.lower() - elif mode == "letters_mixed": - population = string.ascii_letters.upper() + string.ascii_letters.lower() - else: - population = string.ascii_letters.lower() + string.digits - return "".join(random.choices(population, k=length)) - def alter_stable_column_check(self,dbname,stbname,tbname): - tdSql.execute(f'create database if not exists {dbname}') - tdSql.execute(f'use {dbname}') - tdSql.execute( - f'create stable {stbname} (ts timestamp, c1 tinyint, c2 smallint, c3 int, \ - c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 bool,c12 binary(20),c13 nchar(20)) tags(t0 int) ') - tdSql.execute(f'create table {tbname} using {stbname} tags(1)') - tdSql.execute(f'insert into {tbname} values (now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据")') - tdSql.execute(f'alter stable {stbname} add column c14 int') - tdSql.query(f'select c14 from {stbname}') - tdSql.checkRows(1) - tdSql.execute(f'alter stable {stbname} add column `c15` int') - tdSql.query(f'select c15 from {stbname}') - tdSql.checkRows(1) - tdSql.query(f'describe {stbname}') - tdSql.checkRows(17) - tdSql.execute(f'alter stable {stbname} drop column c14') - tdSql.query(f'describe {stbname}') - tdSql.checkRows(16) - tdSql.execute(f'alter stable {stbname} drop column `c15`') - tdSql.query(f'describe {stbname}') - tdSql.checkRows(15) - tdSql.execute(f'alter stable {stbname} modify column c12 binary(30)') - tdSql.query(f'describe {stbname}') - tdSql.checkData(12,2,30) - tdSql.execute(f'alter stable {stbname} modify column `c12` binary(35)') - tdSql.query(f'describe {stbname}') - tdSql.checkData(12,2,35) - tdSql.error(f'alter stable {stbname} modify column `c12` binary(34)') - tdSql.execute(f'alter stable {stbname} modify column c13 nchar(30)') - tdSql.query(f'describe {stbname}') - tdSql.checkData(13,2,30) - tdSql.error(f'alter stable {stbname} modify column c13 nchar(29)') - tdSql.error(f'alter stable {stbname} rename column c1 c21') - tdSql.error(f'alter stable {stbname} modify column c1 int') - tdSql.error(f'alter stable {stbname} modify column c4 int') - tdSql.error(f'alter stable {stbname} modify column c8 int') - tdSql.error(f'alter stable {stbname} modify column c1 unsigned int') - tdSql.error(f'alter stable {stbname} modify column c9 double') - tdSql.error(f'alter stable {stbname} modify column c10 float') - tdSql.error(f'alter stable {stbname} modify column c11 int') - tdSql.error(f'alter stable {stbname} drop tag t0') - tdSql.execute(f'drop database {dbname}') - - def alter_stable_tag_check(self,dbname,stbname,tbname): - tdSql.execute(f'create database if not exists {dbname}') - tdSql.execute(f'use {dbname}') - tdSql.execute( - f'create stable {stbname} (ts 
timestamp, c1 int) tags(ts_tag timestamp, t1 tinyint, t2 smallint, t3 int, \ - t4 bigint, t5 tinyint unsigned, t6 smallint unsigned, t7 int unsigned, t8 bigint unsigned, t9 float, t10 double, t11 bool,t12 binary(20),t13 nchar(20)) ') - tdSql.execute(f'create table {tbname} using {stbname} tags(now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据")') - tdSql.execute(f'insert into {tbname} values(now,1)') - - tdSql.execute(f'alter stable {stbname} add tag t14 int') - tdSql.query(f'select t14 from {stbname}') - tdSql.checkRows(1) - tdSql.execute(f'alter stable {stbname} add tag `t15` int') - tdSql.query(f'select t14 from {stbname}') - tdSql.checkRows(1) - tdSql.query(f'describe {stbname}') - tdSql.checkRows(18) - tdSql.execute(f'alter stable {stbname} drop tag t14') - tdSql.query(f'describe {stbname}') - tdSql.checkRows(17) - tdSql.execute(f'alter stable {stbname} drop tag `t15`') - tdSql.query(f'describe {stbname}') - tdSql.checkRows(16) - tdSql.execute(f'alter stable {stbname} modify tag t12 binary(30)') - tdSql.query(f'describe {stbname}') - tdSql.checkData(14,2,30) - tdSql.execute(f'alter stable {stbname} modify tag `t12` binary(35)') - tdSql.query(f'describe {stbname}') - tdSql.checkData(14,2,35) - tdSql.error(f'alter stable {stbname} modify tag `t12` binary(34)') - tdSql.execute(f'alter stable {stbname} modify tag t13 nchar(30)') - tdSql.query(f'describe {stbname}') - tdSql.checkData(15,2,30) - tdSql.error(f'alter stable {stbname} modify tag t13 nchar(29)') - tdSql.execute(f'alter table {stbname} rename tag t1 t21') - tdSql.query(f'describe {stbname}') - tdSql.checkData(3,0,'t21') - tdSql.execute(f'alter table {stbname} rename tag `t21` t1') - tdSql.query(f'describe {stbname}') - tdSql.checkData(3,0,'t1') - - for i in ['bigint','unsigned int','float','double','binary(10)','nchar(10)']: - for j in [1,2,3]: - tdSql.error(f'alter stable {stbname} modify tag t{j} {i}') - for i in ['int','unsigned int','float','binary(10)','nchar(10)']: - tdSql.error(f'alter stable {stbname} modify tag t8 {i}') - tdSql.error(f'alter stable {stbname} modify tag t4 int') - tdSql.error(f'alter stable {stbname} drop column t0') - #!bug TD-16410 - # tdSql.error(f'alter stable {tbname} set tag t1=100 ') - # tdSql.execute(f'create table ntb (ts timestamp,c0 int)') - tdSql.error(f'alter stable ntb add column c2 ') - tdSql.execute(f'drop database {dbname}') + } + def alter_stable_check(self): + tdSql.prepare() + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) + tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) + for i in self.values_list: + tdSql.execute(f'insert into {self.ntbname} values({i})') + for i in range(self.tbnum): + tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})') + for j in self.values_list: + tdSql.execute(f'insert into {self.stbname}_{i} values({j})') + for key,values in self.column_add_dict.items(): + tdSql.execute(f'alter stable {self.stbname} add column {key} {values}') + tdSql.query(f'describe {self.stbname}') + tdSql.checkRows(len(self.column_dict)+len(self.tag_dict)+1) + for i in range(self.tbnum): + tdSql.query(f'describe {self.stbname}_{i}') + tdSql.checkRows(len(self.column_dict)+len(self.tag_dict)+1) + tdSql.query(f'select {key} from {self.stbname}_{i}') + tdSql.checkRows(len(self.values_list)) + for i in range(self.tbnum): + tdSql.error(f'alter stable {self.stbname}_{i} add column {key} {values}') + tdSql.error(f'alter stable {self.stbname}_{i} drop column {key}') + #! 
bug TD-16921 + #tdSql.error(f'alter stable {self.ntbname} add column {key} {values}') + #tdSql.error(f'alter stable {self.ntbname} drop column {key}') + tdSql.execute(f'alter stable {self.stbname} drop column {key}') + tdSql.query(f'describe {self.stbname}') + tdSql.checkRows(len(self.column_dict)+len(self.tag_dict)) + for i in range(self.tbnum): + tdSql.query(f'describe {self.stbname}_{i}') + tdSql.checkRows(len(self.column_dict)+len(self.tag_dict)) + tdSql.error(f'select {key} from {self.stbname} ') + for key,values in self.column_dict.items(): + if 'binary' in values.lower(): + v = f'binary({self.binary_length+1})' + v_error = f'binary({self.binary_length-1})' + tdSql.error(f'alter stable {self.stbname} modify column {key} {v_error}') + tdSql.execute(f'alter stable {self.stbname} modify column {key} {v}') + tdSql.query(f'describe {self.stbname}') + result = tdCom.getOneRow(1,'VARCHAR') + tdSql.checkEqual(result[0][2],self.binary_length+1) + for i in range(self.tbnum): + tdSql.query(f'describe {self.stbname}_{i}') + result = tdCom.getOneRow(1,'VARCHAR') + tdSql.checkEqual(result[0][2],self.binary_length+1) + tdSql.error(f'alter stable {self.stbname}_{i} modify column {key} {v}') + #! bug TD-16921 + # tdSql.error(f'alter stable {self.ntbname} modify column {key} {v}') + elif 'nchar' in values.lower(): + v = f'nchar({self.binary_length+1})' + v_error = f'nchar({self.binary_length-1})' + tdSql.error(f'alter stable {self.stbname} modify column {key} {v_error}') + tdSql.execute(f'alter stable {self.stbname} modify column {key} {v}') + tdSql.query(f'describe {self.stbname}') + result = tdCom.getOneRow(1,'NCHAR') + tdSql.checkEqual(result[0][2],self.binary_length+1) + for i in range(self.tbnum): + tdSql.query(f'describe {self.stbname}_{i}') + result = tdCom.getOneRow(1,'NCHAR') + tdSql.checkEqual(result[0][2],self.binary_length+1) + tdSql.error(f'alter stable {self.stbname}_{i} modify column {key} {v}') + #! 
bug TD-16921 + #tdSql.error(f'alter stable {self.ntbname} modify column {key} {v}') + else: + for v in self.column_dict.values(): + tdSql.error(f'alter stable {self.stbname} modify column {key} {v}') + # tdSql.error(f'alter stable {self.ntbname} modify column {key} {v}') + for i in range(self.tbnum): + tdSql.error(f'alter stable {self.stbname}_{i} modify column {key} {v}') def run(self): - dbname = self.get_long_name(length=10, mode="letters") - stbname = self.get_long_name(length=5, mode="letters") - tbname = self.get_long_name(length=5, mode="letters") - self.alter_stable_column_check(dbname,stbname,tbname) - self.alter_stable_tag_check(dbname,stbname,tbname) - + self.alter_stable_check() def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/alter_table.py b/tests/system-test/1-insert/alter_table.py index 0f7a830634..a2613c39e7 100644 --- a/tests/system-test/1-insert/alter_table.py +++ b/tests/system-test/1-insert/alter_table.py @@ -126,6 +126,7 @@ class TDTestCase: tdSql.execute(f'alter table {self.ntbname} rename column {key} {rename_str}') tdSql.query(f'select {rename_str} from {self.ntbname}') tdSql.checkRows(1) + tdSql.error(f'select {key} from {self.ntbname}') def alter_check_tb(self): tag_tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX) @@ -277,7 +278,11 @@ class TDTestCase: else: for v in self.column_dict.values(): tdSql.error(f'alter table {self.stbname} modify column {key} {v}') - + for key,values in self.column_dict.items(): + rename_str = f'{tdCom.getLongName(constant.COL_NAME_LENGTH_MAX,"letters")}' + tdSql.error(f'alter table {self.stbname} rename column {key} {rename_str}') + for i in range(self.tbnum): + tdSql.error(f'alter table {self.stbname}_{i} rename column {key} {rename_str}') def run(self): self.alter_check_ntb() self.alter_check_tb() diff --git a/tests/system-test/1-insert/block_wise.py b/tests/system-test/1-insert/block_wise.py new file mode 100644 index 0000000000..6c779c64d7 --- /dev/null +++ b/tests/system-test/1-insert/block_wise.py @@ -0,0 +1,442 @@ +import datetime +import re + +from dataclasses import dataclass, field +from typing import List, Any, Tuple +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.constant import * + +PRIMARY_COL = "ts" + +INT_COL = "c_int" +BINT_COL = "c_bint" +SINT_COL = "c_sint" +TINT_COL = "c_tint" +FLOAT_COL = "c_float" +DOUBLE_COL = "c_double" +BOOL_COL = "c_bool" +TINT_UN_COL = "c_utint" +SINT_UN_COL = "c_usint" +BINT_UN_COL = "c_ubint" +INT_UN_COL = "c_uint" +BINARY_COL = "c_binary" +NCHAR_COL = "c_nchar" +TS_COL = "c_ts" + +NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [BOOL_COL, ] +TS_TYPE_COL = [TS_COL, ] + +INT_TAG = "t_int" + +ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL] +TAG_COL = [INT_TAG] + +# insert data args: +TIME_STEP = 10000 +NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + +# init db/table +DBNAME = "db" +STBNAME = "stb1" +CTBNAME = "ct1" +NTBNAME = "nt1" + + +@dataclass +class DataSet: + ts_data : List[int] = field(default_factory=list) + int_data : List[int] = field(default_factory=list) + bint_data : List[int] = 
field(default_factory=list) + sint_data : List[int] = field(default_factory=list) + tint_data : List[int] = field(default_factory=list) + int_un_data : List[int] = field(default_factory=list) + bint_un_data: List[int] = field(default_factory=list) + sint_un_data: List[int] = field(default_factory=list) + tint_un_data: List[int] = field(default_factory=list) + float_data : List[float] = field(default_factory=list) + double_data : List[float] = field(default_factory=list) + bool_data : List[int] = field(default_factory=list) + binary_data : List[str] = field(default_factory=list) + nchar_data : List[str] = field(default_factory=list) + + +@dataclass +class BSMAschema: + creation : str = "CREATE" + tb_type : str = "stable" + tbname : str = STBNAME + cols : Tuple[str] = None + tags : Tuple[str] = None + sma_flag : str = "SMA" + sma_cols : Tuple[str] = None + create_tabel_sql : str = None + other : Any = None + + drop : str = "DROP" + drop_flag : str = "INDEX" + querySmaOptimize : int = 1 + show : str = "SHOW" + show_msg : str = "INDEXES" + show_oper : str = "FROM" + dbname : str = None + rollup_db : bool = False + + def __post_init__(self): + if isinstance(self.other, dict): + for k,v in self.other.items(): + + if k.lower() == "tbname" and isinstance(v, str) and not self.tbname: + self.tbname = v + del self.other[k] + + if k.lower() == "cols" and (isinstance(v, tuple) or isinstance(v, list)) and not self.cols: + self.cols = v + del self.other[k] + + if k.lower() == "tags" and (isinstance(v, tuple) or isinstance(v, list)) and not self.tags: + self.tags = v + del self.other[k] + + if k.lower() == "sma_flag" and isinstance(v, str) and not self.sma_flag: + self.sma_flag = v + del self.other[k] + + if k.lower() == "sma_cols" and (isinstance(v, tuple) or isinstance(v, list)) and not self.sma_cols: + self.sma_cols = v + del self.other[k] + + if k.lower() == "create_tabel_sql" and isinstance(v, str) and not self.create_tabel_sql: + self.create_tabel_sql = v + del self.other[k] + + # bSma show and drop operator is not completed + if k.lower() == "drop_flag" and isinstance(v, str) and not self.drop_flag: + self.drop_flag = v + del self.other[k] + + if k.lower() == "show_msg" and isinstance(v, str) and not self.show_msg: + self.show_msg = v + del self.other[k] + + if k.lower() == "dbname" and isinstance(v, str) and not self.dbname: + self.dbname = v + del self.other[k] + + if k.lower() == "show_oper" and isinstance(v, str) and not self.show_oper: + self.show_oper = v + del self.other[k] + + if k.lower() == "rollup_db" and isinstance(v, bool) and not self.rollup_db: + self.rollup_db = v + del self.other[k] + + + +# from ...pytest.util.sql import * +# from ...pytest.util.constant import * + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + self.precision = "ms" + self.sma_count = 0 + self.sma_created_index = [] + + def __create_sma_index(self, sma:BSMAschema): + if sma.create_tabel_sql: + sql = sma.create_tabel_sql + else: + sql = f"{sma.creation} {sma.tb_type} {sma.tbname} ({', '.join(sma.cols)}) " + + if sma.tb_type == "stable" or (sma.tb_type=="table" and sma.tags): + sql = f"{sma.creation} {sma.tb_type} {sma.tbname} ({', '.join(sma.cols)}) tags ({', '.join(sma.tags)}) " + + + if sma.sma_flag: + sql += sma.sma_flag + if sma.sma_cols: + sql += f"({', '.join(sma.sma_cols)})" + + if isinstance(sma.other, dict): + for k,v in sma.other.items(): + if isinstance(v,tuple) or isinstance(v, list): + sql += f" {k} ({' '.join(v)})" 
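As a sanity check on the statement assembly above: for the one valid case registered later in __create_sma_sql (stb2 with a timestamp primary key, one int column, one int tag, and SMA on both columns), the pieces should concatenate into a single CREATE statement with the SMA clause appended at the end. A standalone sketch, assuming the same assembly order as __create_sma_index; the helper below is mine, not part of the test class:

    def build_sma_create(tbname, cols, tags, sma_cols):
        # Illustrative mirror of the stable-with-tags branch of __create_sma_index.
        sql = f"CREATE stable {tbname} ({', '.join(cols)}) tags ({', '.join(tags)}) SMA"
        return f"{sql}({', '.join(sma_cols)})"

    print(build_sma_create(
        "stb2",
        ("ts timestamp", "c_int int"),
        ("t_int int",),
        ("ts", "c_int"),
    ))
    # -> CREATE stable stb2 (ts timestamp, c_int int) tags (t_int int) SMA(ts, c_int)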
+ else: + sql += f" {k} {v}" + if isinstance(sma.other, tuple) or isinstance(sma.other, list): + sql += " ".join(sma.other) + if isinstance(sma.other, int) or isinstance(sma.other, float) or isinstance(sma.other, str): + sql += f" {sma.other}" + + return sql + + def __get_bsma_table_col_tag_str(self, sql:str): + p = re.compile(r"[(](.*)[)]", re.S) + + if "tags" in (col_str := sql): + col_str = re.findall(p, sql.split("tags")[0])[0].split(",") + if (tag_str := re.findall(p, sql.split("tags")[1])[0].split(",") ): + col_str.extend(tag_str) + + return col_str + + def __get_bsma_col_tag_names(self, col_tags:list): + return [ col_tag.strip().split(" ")[0] for col_tag in col_tags ] + + @property + def __get_db_tbname(self): + tb_list = [] + tdSql.query("show tables") + for row in tdSql.queryResult: + tb_list.append(row[0]) + tdSql.query("show tables") + for row in tdSql.queryResult: + tb_list.append(row[0]) + + return tb_list + + def __bsma_create_check(self, sma:BSMAschema): + if not sma.creation: + return False + if not sma.create_tabel_sql and (not sma.tbname or not sma.tb_type or not sma.cols): + return False + if not sma.create_tabel_sql and (sma.tb_type == "stable" and not sma.tags): + return False + if not sma.sma_flag or not isinstance(sma.sma_flag, str) or sma.sma_flag.upper() != "SMA": + return False + if sma.tbname in self.__get_db_tbname: + return False + + if sma.create_tabel_sql: + col_tag_list = self.__get_bsma_col_tag_names(self.__get_bsma_table_col_tag_str(sma.create_tabel_sql)) + else: + col_str = list(sma.cols) + if sma.tags: + col_str.extend(list(sma.tags)) + col_tag_list = self.__get_bsma_col_tag_names(col_str) + if not sma.sma_cols: + return False + for col in sma.sma_cols: + if col not in col_tag_list: + return False + + return True + + def bsma_create_check(self, sma:BSMAschema): + if self.__bsma_create_check(sma): + tdSql.query(self.__create_sma_index(sma)) + tdLog.info(f"current sql: {self.__create_sma_index(sma)}") + + else: + tdSql.error(self.__create_sma_index(sma)) + + + def __sma_drop_check(self, sma:BSMAschema): + pass + + def sma_drop_check(self, sma:BSMAschema): + pass + + def __show_sma_index(self, sma:BSMAschema): + pass + + def __sma_show_check(self, sma:BSMAschema): + pass + + def sma_show_check(self, sma:BSMAschema): + pass + + @property + def __create_sma_sql(self): + err_sqls = [] + cur_sqls = [] + # err_set + ### case 1: required fields check + err_sqls.append( BSMAschema(creation="", tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) ) + err_sqls.append( BSMAschema(tbname="", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) ) + err_sqls.append( BSMAschema(tbname="stb2", cols=(), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) ) + err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(), sma_cols=(PRIMARY_COL, INT_COL ) ) ) + err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_flag="", sma_cols=(PRIMARY_COL, INT_COL ) ) ) + err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=() ) ) + ### case 2: + err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=({BINT_COL}) ) ) + + # current_set + cur_sqls.append( BSMAschema(tbname="stb2", 
cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) ) + + return err_sqls, cur_sqls + + def test_create_sma(self): + err_sqls , cur_sqls = self.__create_sma_sql + for err_sql in err_sqls: + self.bsma_create_check(err_sql) + for cur_sql in cur_sqls: + self.bsma_create_check(cur_sql) + + @property + def __drop_sma_sql(self): + err_sqls = [] + cur_sqls = [] + # err_set + ## case 1: required fields check + return err_sqls, cur_sqls + + def test_drop_sma(self): + err_sqls , cur_sqls = self.__drop_sma_sql + for err_sql in err_sqls: + self.sma_drop_check(err_sql) + for cur_sql in cur_sqls: + self.sma_drop_check(cur_sql) + + def all_test(self): + self.test_create_sma() + + def __create_tb(self): + tdLog.printNoPrefix("==========step: create table") + create_stb_sql = f'''create table {STBNAME}( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp, + {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned, + {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned + ) tags ({INT_TAG} int) + ''' + create_ntb_sql = f'''create table {NTBNAME}( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp, + {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned, + {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __data_set(self, rows): + data_set = DataSet() + + for i in range(rows): + data_set.ts_data.append(NOW + 1 * (rows - i)) + data_set.int_data.append(rows - i) + data_set.bint_data.append(11111 * (rows - i)) + data_set.sint_data.append(111 * (rows - i) % 32767) + data_set.tint_data.append(11 * (rows - i) % 127) + data_set.int_un_data.append(rows - i) + data_set.bint_un_data.append(11111 * (rows - i)) + data_set.sint_un_data.append(111 * (rows - i) % 32767) + data_set.tint_un_data.append(11 * (rows - i) % 127) + data_set.float_data.append(1.11 * (rows - i)) + data_set.double_data.append(1100.0011 * (rows - i)) + data_set.bool_data.append((rows - i) % 2) + data_set.binary_data.append(f'binary{(rows - i)}') + data_set.nchar_data.append(f'nchar_测试_{(rows - i)}') + + return data_set + + def __insert_data(self): + tdLog.printNoPrefix("==========step: start inser data into tables now.....") + data = self.__data_set(rows=self.rows) + + # now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + null_data = '''null, null, null, null, null, null, null, null, null, null, null, null, null, null''' + zero_data = "0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0" + + for i in range(self.rows): + row_data = f''' + {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]}, + {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.tint_un_data[i]}, + {data.sint_un_data[i]}, {data.int_un_data[i]}, {data.bint_un_data[i]} + ''' + neg_row_data = f''' + {-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]}, + 
{data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.tint_un_data[i]}, + {1 * data.sint_un_data[i]}, {1 * data.int_un_data[i]}, {1 * data.bint_un_data[i]} + ''' + + tdSql.execute( + f"insert into ct1 values ( {NOW - i * TIME_STEP}, {row_data} )") + tdSql.execute( + f"insert into ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )") + tdSql.execute( + f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )") + tdSql.execute( + f"insert into {NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )") + + tdSql.execute( + f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )") + tdSql.execute( + f"insert into ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )") + tdSql.execute( + f"insert into ct2 values ( {NOW - self.rows * int(TIME_STEP * 0.29) }, {null_data} )") + + tdSql.execute( + f"insert into ct4 values ( {NOW + int(TIME_STEP * 0.8)}, {null_data} )") + tdSql.execute( + f"insert into ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )") + tdSql.execute( + f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )") + + tdSql.execute( + f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )") + tdSql.execute( + f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )") + tdSql.execute( + f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )") + + def run(self): + self.rows = 10 + + tdLog.printNoPrefix("==========step0:all check") + + tdLog.printNoPrefix("==========step1:create table in normal database") + tdSql.prepare() + self.__create_tb() + self.__insert_data() + self.all_test() + + # drop databases, create same name db、stb and sma index + tdSql.prepare() + self.__create_tb() + self.__insert_data() + self.all_test() + + tdLog.printNoPrefix("==========step2:create table in rollup database") + tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m") + tdSql.execute("use db3") + tdSql.query(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL})") + + tdSql.execute("drop database if exists db1 ") + tdSql.execute("drop database if exists db2 ") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + tdSql.prepare() + self.__create_tb() + self.__insert_data() + self.all_test() + + # drop databases, create same name db、stb and sma index + tdSql.prepare() + self.__create_tb() + self.__insert_data() + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/create_retentions.py b/tests/system-test/1-insert/create_retentions.py index 4b37eeb9a5..db902d0031 100644 --- a/tests/system-test/1-insert/create_retentions.py +++ b/tests/system-test/1-insert/create_retentions.py @@ -1,6 +1,6 @@ import datetime -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import List from util.log import * from util.sql import * @@ -36,36 +36,20 @@ NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) @dataclass class DataSet: - ts_data : List[int] = None - int_data : List[int] = None - bint_data : List[int] = None - sint_data : List[int] = None - tint_data : List[int] = None - 
int_un_data : List[int] = None - bint_un_data : List[int] = None - sint_un_data : List[int] = None - tint_un_data : List[int] = None - float_data : List[float] = None - double_data : List[float] = None - bool_data : List[int] = None - binary_data : List[str] = None - nchar_data : List[str] = None - - def __post_init__(self): - self.ts_data = [] - self.int_data = [] - self.bint_data = [] - self.sint_data = [] - self.tint_data = [] - self.int_un_data = [] - self.bint_un_data = [] - self.sint_un_data = [] - self.tint_un_data = [] - self.float_data = [] - self.double_data = [] - self.bool_data = [] - self.binary_data = [] - self.nchar_data = [] + ts_data : List[int] = field(default_factory=list) + int_data : List[int] = field(default_factory=list) + bint_data : List[int] = field(default_factory=list) + sint_data : List[int] = field(default_factory=list) + tint_data : List[int] = field(default_factory=list) + int_un_data : List[int] = field(default_factory=list) + bint_un_data: List[int] = field(default_factory=list) + sint_un_data: List[int] = field(default_factory=list) + tint_un_data: List[int] = field(default_factory=list) + float_data : List[float] = field(default_factory=list) + double_data : List[float] = field(default_factory=list) + bool_data : List[int] = field(default_factory=list) + binary_data : List[str] = field(default_factory=list) + nchar_data : List[str] = field(default_factory=list) class TDTestCase: @@ -107,15 +91,15 @@ class TDTestCase: f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(count) watermark 1min", f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay -1s", f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark -1m", - # f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ", - # f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ", + f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ", + f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ", f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} binary(16)) tags (tag1 int) rollup(avg) watermark 1s", - f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m", - # f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s", - # f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) " , - # f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " , - # f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) " , - # f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " , + f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m", + # f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s", + f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) " , + f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " , + f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) " , + f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " , # 
watermark, max_delay: [0, 900000], [ms, s, m, ?] f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 1u", @@ -136,8 +120,9 @@ class TDTestCase: f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark 5s max_delay 1m", f"create stable stb3 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(max) watermark 5s max_delay 1m", f"create stable stb4 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(sum) watermark 5s max_delay 1m", - # f"create stable stb5 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(last) watermark 5s max_delay 1m", - # f"create stable stb6 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m", + f"create stable stb5 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(last) watermark 5s max_delay 1m", + f"create stable stb6 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m", + f"create stable stb7 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL})", ] def test_create_stb(self): @@ -150,7 +135,7 @@ class TDTestCase: # assert "rollup" in tdSql.description tdSql.checkRows(len(self.create_stable_sql_current)) - # tdSql.execute("use db") # because db is a noraml database, not a rollup database, should not be able to create a rollup database + tdSql.execute("use db") # because db is a noraml database, not a rollup database, should not be able to create a rollup stable # tdSql.error(f"create stable nor_db_rollup_stb ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 5s max_delay 1m") @@ -210,20 +195,6 @@ class TDTestCase: data_set.binary_data.append(f'binary{(rows - i)}') data_set.nchar_data.append(f'nchar_测试_{(rows - i)}') - # neg_data_set.ts_data.append(-1 * i) - # neg_data_set.int_data.append(-i) - # neg_data_set.bint_data.append(-11111 * i) - # neg_data_set.sint_data.append(-111 * i % 32767) - # neg_data_set.tint_data.append(-11 * i % 127) - # neg_data_set.int_un_data.append(-i) - # neg_data_set.bint_un_data.append(-11111 * i) - # neg_data_set.sint_un_data.append(-111 * i % 32767) - # neg_data_set.tint_un_data.append(-11 * i % 127) - # neg_data_set.float_data.append(-1.11 * i) - # neg_data_set.double_data.append(-1100.0011 * i) - # neg_data_set.binary_data.append(f'binary{i}') - # neg_data_set.nchar_data.append(f'nchar_测试_{i}') - return data_set def __insert_data(self): @@ -279,9 +250,14 @@ class TDTestCase: tdLog.printNoPrefix("==========step2:create table in rollup database") tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m") + + tdSql.execute("drop database if exists db1 ") + tdSql.execute("drop database if exists db2 ") + tdSql.execute("use db3") - self.__create_tb() - self.__insert_data() + # self.__create_tb() + # self.__insert_data() + self.all_test() tdSql.execute("drop database if exists db1 ") tdSql.execute("drop database if exists db2 ") diff --git a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py index b540642847..387492c4d6 100644 --- a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py +++ b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py @@ -78,21 +78,29 @@ class TDTestCase: password = "taosdata" port =6030 con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) - print(con) + tdLog.debug(con) return con - + + def 
stmtExe(self,conn,sql,bindStat): + queryStat=conn.statement("%s"%sql) + queryStat.bind_param(bindStat) + queryStat.execute() + result=queryStat.use_result() + rows=result.fetch_all() + return rows + def test_stmt_set_tbname_tag(self,conn): - dbname = "stmt_set_tbname_tag" - + dbname = "stmt_tag" + stablename = 'log' try: conn.execute("drop database if exists %s" % dbname) conn.execute("create database if not exists %s PRECISION 'us' " % dbname) conn.select_db(dbname) - conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + conn.execute("create table if not exists %s(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\ t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \ - t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)") + t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)"%stablename) stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \ values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") @@ -139,142 +147,93 @@ class TDTestCase: assert stmt.affected_rows == 3 #query all - querystmt1=conn.statement("select * from log where bu < ?") - queryparam1=new_bind_params(1) - print(type(queryparam1)) - queryparam1[0].int(10) - querystmt1.bind_param(queryparam1) - querystmt1.execute() - result1=querystmt1.use_result() - rows1=result1.fetch_all() - print(rows1[0]) - print(rows1[1]) - print(rows1[2]) - assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111" - assert rows1[0][10] == 3 - assert rows1[1][10] == 4 + queryparam=new_bind_params(1) + queryparam[0].int(10) + rows=self.stmtExe(conn,"select * from log where bu < ?",queryparam) + tdLog.debug("assert 1st case %s"%rows) + assert str(rows[0][0]) == "2021-07-21 17:56:32.589111" + assert rows[0][10] == 3 , '1st case is failed' + assert rows[1][10] == 4 , '1st case is failed' #query: Numeric Functions - querystmt2=conn.statement("select abs(?) from log where bu < ?") - queryparam2=new_bind_params(2) - print(type(queryparam2)) - queryparam2[0].int(5) - queryparam2[1].int(5) - querystmt2.bind_param(queryparam2) - querystmt2.execute() - result2=querystmt2.use_result() - rows2=result2.fetch_all() - print("2",rows2) - assert rows2[0][0] == 5 - assert rows2[1][0] == 5 + queryparam=new_bind_params(2) + queryparam[0].int(5) + queryparam[1].int(5) + rows=self.stmtExe(conn,"select abs(?) from log where bu < ?",queryparam) + tdLog.debug("assert 2nd case %s"%rows) + assert rows[0][0] == 5 , '2nd case is failed' + assert rows[1][0] == 5 , '2nd case is failed' #query: Numeric Functions and escapes + queryparam=new_bind_params(1) + queryparam[0].int(5) + rows=self.stmtExe(conn,"select abs(?) from log where nn= 'a? long string with 中文字符'",queryparam) + tdLog.debug("assert 3rd case %s"%rows) + assert rows == [] , '3rd case is failed' - querystmt3=conn.statement("select abs(?) from log where nn= 'a? 
long string with 中文字符' ") - queryparam3=new_bind_params(1) - print(type(queryparam3)) - queryparam3[0].int(5) - querystmt3.bind_param(queryparam3) - querystmt3.execute() - result3=querystmt3.use_result() - rows3=result3.fetch_all() - print("3",rows3) - assert rows3 == [] + #query: string Functions + queryparam=new_bind_params(1) + queryparam[0].binary('中文字符') + rows=self.stmtExe(conn,"select CHAR_LENGTH(?) from log ",queryparam) + tdLog.debug("assert 4th case %s"%rows) + assert rows[0][0] == 12, '4th case is failed' + assert rows[1][0] == 12, '4th case is failed' - # #query: string Functions + queryparam=new_bind_params(1) + queryparam[0].binary('123') + rows=self.stmtExe(conn,"select CHAR_LENGTH(?) from log ",queryparam) + tdLog.debug("assert 4th case %s"%rows) + assert rows[0][0] == 3, '4th.1 case is failed' + assert rows[1][0] == 3, '4th.1 case is failed' - # querystmt3=conn.statement("select CHAR_LENGTH(?) from log ") - # queryparam3=new_bind_params(1) - # print(type(queryparam3)) - # queryparam3[0].binary('中文字符') - # querystmt3.bind_param(queryparam3) - # querystmt3.execute() - # result3=querystmt3.use_result() - # rows3=result3.fetch_all() - # print("4",rows3) - # assert rows3[0][0] == 12, 'fourth case is failed' - # assert rows3[1][0] == 12, 'fourth case is failed' + #query: conversion Functions + queryparam=new_bind_params(1) + queryparam[0].binary('1232a') + rows=self.stmtExe(conn,"select cast( ? as bigint) from log",queryparam) + tdLog.debug("assert 5th case %s"%rows) + assert rows[0][0] == 1232, '5th.1 case is failed' + assert rows[1][0] == 1232, '5th.1 case is failed' - # #query: conversion Functions + querystmt4=conn.statement("select cast( ? as binary(10)) from log ") + queryparam=new_bind_params(1) + queryparam[0].int(123) + rows=self.stmtExe(conn,"select cast( ? as bigint) from log",queryparam) + tdLog.debug("assert 6th case %s"%rows) + assert rows[0][0] == 123, '6th.1 case is failed' + assert rows[1][0] == 123, '6th.1 case is failed' - # querystmt4=conn.statement("select cast( ? as bigint) from log ") - # queryparam4=new_bind_params(1) - # print(type(queryparam4)) - # queryparam4[0].binary('1232a') - # querystmt4.bind_param(queryparam4) - # querystmt4.execute() - # result4=querystmt4.use_result() - # rows4=result4.fetch_all() - # print("5",rows4) - # assert rows4[0][0] == 1232 - # assert rows4[1][0] == 1232 - - # querystmt4=conn.statement("select cast( ? 
as binary(10)) from log ") - # queryparam4=new_bind_params(1) - # print(type(queryparam4)) - # queryparam4[0].int(123) - # querystmt4.bind_param(queryparam4) - # querystmt4.execute() - # result4=querystmt4.use_result() - # rows4=result4.fetch_all() - # print("6",rows4) - # assert rows4[0][0] == '123' - # assert rows4[1][0] == '123' - - # #query: datatime Functions - - # querystmt4=conn.statement(" select timediff('2021-07-21 17:56:32.590111',?,1s) from log ") - # queryparam4=new_bind_params(1) - # print(type(queryparam4)) - # queryparam4[0].timestamp(1626861392591111) - # querystmt4.bind_param(queryparam4) - # querystmt4.execute() - # result4=querystmt4.use_result() - # rows4=result4.fetch_all() - # print("7",rows4) - # assert rows4[0][0] == 1, 'seventh case is failed' - # assert rows4[1][0] == 1, 'seventh case is failed' + #query: datatime Functions + queryparam=new_bind_params(1) + queryparam[0].timestamp(1626861392591112) + rows=self.stmtExe(conn,"select timediff('2021-07-21 17:56:32.590111',?,1a) from log",queryparam) + tdLog.debug("assert 7th case %s"%rows) + assert rows[0][0] == 1, '7th case is failed' + assert rows[1][0] == 1, '7th case is failed' #query: aggregate Functions + queryparam=new_bind_params(1) + queryparam[0].int(123) + rows=self.stmtExe(conn,"select count(?) from log ",queryparam) + tdLog.debug("assert 8th case %s"%rows) + assert rows[0][0] == 3, ' 8th case is failed' - querystmt4=conn.statement(" select count(?) from log ") - queryparam4=new_bind_params(1) - print(type(queryparam4)) - queryparam4[0].int(123) - querystmt4.bind_param(queryparam4) - querystmt4.execute() - result4=querystmt4.use_result() - rows4=result4.fetch_all() - print("8",rows4) - assert rows4[0][0] == 3, ' 8 case is failed' - - #query: selector Functions 9 - - querystmt4=conn.statement(" select bottom(bu,?) from log group by bu ; ") - queryparam4=new_bind_params(1) - print(type(queryparam4)) - queryparam4[0].int(2) - querystmt4.bind_param(queryparam4) - querystmt4.execute() - result4=querystmt4.use_result() - rows4=result4.fetch_all() - print("9",rows4) - assert rows4[0][0] == 4, ' 9 case is failed' - assert rows4[1][0] == 3, ' 9 case is failed' + # #query: selector Functions 9 + # queryparam=new_bind_params(1) + # queryparam[0].int(2) + # rows=self.stmtExe(conn,"select bottom(bu,?) from log group by bu ; ",queryparam) + # tdLog.debug("assert 9th case %s"%rows) + # assert rows[0][0] == 4, ' 9 case is failed' + # assert rows[1][0] == 3, ' 9 case is failed' # #query: time-series specific Functions 10 - querystmt4=conn.statement(" select twa(?) from log; ") - queryparam4=new_bind_params(1) - print(type(queryparam4)) - queryparam4[0].int(15) - querystmt4.bind_param(queryparam4) - querystmt4.execute() - result4=querystmt4.use_result() - rows4=result4.fetch_all() - print("10",rows4) - assert rows4[0][0] == 15, ' 10 case is failed' + querystmt=conn.statement(" select twa(?) from log; ") + queryparam=new_bind_params(1) + queryparam[0].int(15) + rows=self.stmtExe(conn," select twa(?) 
from log; ",queryparam) + tdLog.debug("assert 10th case %s"%rows) + assert rows[0][0] == 15, ' 10th case is failed' # conn.execute("drop database if exists %s" % dbname) diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py index d4434987a6..a620a4b51a 100644 --- a/tests/system-test/1-insert/time_range_wise.py +++ b/tests/system-test/1-insert/time_range_wise.py @@ -325,7 +325,7 @@ class TDTestCase: def __sma_create_check(self, sma:SMAschema): if self.updatecfgDict["querySmaOptimize"] == 0: return False - # # TODO: if database is a rollup-db, can not create sma index + # TODO: if database is a rollup-db, can not create sma index # tdSql.query("select database()") # if sma.rollup_db : # return False @@ -493,8 +493,8 @@ class TDTestCase: err_sqls , cur_sqls = self.__drop_sma_sql for err_sql in err_sqls: self.sma_drop_check(err_sql) - # for cur_sql in cur_sqls: - # self.sma_drop_check(cur_sql) + for cur_sql in cur_sqls: + self.sma_drop_check(cur_sql) def all_test(self): self.test_create_sma() @@ -605,24 +605,23 @@ class TDTestCase: tdLog.printNoPrefix("==========step1:create table in normal database") tdSql.prepare() self.__create_tb() - # self.__insert_data() + self.__insert_data() self.all_test() # drop databases, create same name db、stb and sma index - # tdSql.prepare() - # self.__create_tb() - # self.__insert_data() - # self.all_test() - - - - return + tdSql.prepare() + self.__create_tb() + self.__insert_data() + self.all_test() tdLog.printNoPrefix("==========step2:create table in rollup database") tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m") tdSql.execute("use db3") - self.__create_tb() - self.__insert_data() + # self.__create_tb() + tdSql.execute(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL}) ") + self.all_test() + + # self.__insert_data() tdSql.execute("drop database if exists db1 ") tdSql.execute("drop database if exists db2 ") diff --git a/tests/system-test/2-query/Now.py b/tests/system-test/2-query/Now.py index 6785fddc6f..3caf632209 100644 --- a/tests/system-test/2-query/Now.py +++ b/tests/system-test/2-query/Now.py @@ -40,6 +40,7 @@ class TDTestCase: self.time_unit = ['b','u','a','s','m','h','d','w'] self.symbol = ['+','-','*','/'] self.error_values = [1.5,'abc','"abc"','!@','today()'] + self.db_percision = ['ms','us','ns'] def tbtype_check(self,tb_type): if tb_type == 'normal table' or tb_type == 'child table': tdSql.checkRows(len(self.values_list)) @@ -70,23 +71,29 @@ class TDTestCase: tdSql.checkData(i,0,None) def now_check_ntb(self): - tdSql.prepare() - tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) - for value in self.values_list: - tdSql.execute( - f'insert into {self.ntbname} values({value})') - self.data_check(self.ntbname,'normal table') + for time_unit in self.db_percision: + tdSql.execute(f'create database db precision "{time_unit}"') + tdSql.execute('use db') + tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) + for value in self.values_list: + tdSql.execute( + f'insert into {self.ntbname} values({value})') + self.data_check(self.ntbname,'normal table') + tdSql.execute('drop database db') def now_check_stb(self): - tdSql.prepare() - tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) - for i in range(self.tbnum): - tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} 
tags({self.tag_values[0]})") - for value in self.values_list: - tdSql.execute(f'insert into {self.stbname}_{i} values({value})') - for i in range(self.tbnum): - self.data_check(f'{self.stbname}_{i}','child table') - self.data_check(self.stbname,'stable') + for time_unit in self.db_percision: + tdSql.execute(f'create database db precision "{time_unit}"') + tdSql.execute('use db') + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) + for i in range(self.tbnum): + tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[0]})") + for value in self.values_list: + tdSql.execute(f'insert into {self.stbname}_{i} values({value})') + for i in range(self.tbnum): + self.data_check(f'{self.stbname}_{i}','child table') + self.data_check(self.stbname,'stable') + tdSql.execute('drop database db') def run(self): # sourcery skip: extract-duplicate-method self.now_check_ntb() diff --git a/tests/system-test/2-query/Timediff.py b/tests/system-test/2-query/Timediff.py index ad64d29007..b8f3649eff 100644 --- a/tests/system-test/2-query/Timediff.py +++ b/tests/system-test/2-query/Timediff.py @@ -3,7 +3,7 @@ from util.sql import * from util.cases import * class TDTestCase: - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) @@ -33,7 +33,7 @@ class TDTestCase: 'insert into ntb values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())') tdSql.execute( 'insert into stb_1 values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())') - + tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00') from ntb") tdSql.checkRows(3) tdSql.query("select timediff(1,0,1d) from ntb") @@ -72,12 +72,12 @@ class TDTestCase: tdSql.query("select timediff(1,0,1a) from db.ntb") tdSql.checkRows(3) tdSql.checkData(0,0,1000) - tdSql.query("select timediff(1,0,1u) from ntb") - tdSql.checkRows(3) - tdSql.checkData(0,0,1000000) - tdSql.query("select timediff(1,0,1u) from db.ntb") - tdSql.checkRows(3) - tdSql.checkData(0,0,1000000) + tdSql.error("select timediff(1,0,1u) from ntb") + #tdSql.checkRows(3) + #tdSql.checkData(0,0,1000000) + tdSql.error("select timediff(1,0,1u) from db.ntb") + #tdSql.checkRows(3) + #tdSql.checkData(0,0,1000000) tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00') from stb") tdSql.checkRows(3) @@ -116,12 +116,12 @@ class TDTestCase: tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1a) from db.stb") tdSql.checkRows(3) tdSql.checkData(0,0,86400000) - tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1u) from stb") - tdSql.checkRows(3) - tdSql.checkData(0,0,86400000000) - tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1u) from db.stb") - tdSql.checkRows(3) - tdSql.checkData(0,0,86400000000) + tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1u) from stb") + #tdSql.checkRows(3) + #tdSql.checkData(0,0,86400000000) + tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1u) from db.stb") + #tdSql.checkRows(3) + #tdSql.checkData(0,0,86400000000) tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00') from stb_1") @@ -164,12 +164,12 @@ class TDTestCase: tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1a) from db.stb_1") tdSql.checkRows(3) tdSql.checkData(0,0,43200000) - tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 
12:00:00',1u) from stb_1") - tdSql.checkRows(3) - tdSql.checkData(0,0,43200000000) - tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1u) from db.stb_1") - tdSql.checkRows(3) - tdSql.checkData(0,0,43200000000) + tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1u) from stb_1") + #tdSql.checkRows(3) + #tdSql.checkData(0,0,43200000000) + tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1u) from db.stb_1") + #tdSql.checkRows(3) + #tdSql.checkData(0,0,43200000000) tdSql.query("select timediff('a','b') from stb") tdSql.checkRows(3) @@ -202,4 +202,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/Today.py b/tests/system-test/2-query/Today.py index a9c3215a12..e6199d629e 100644 --- a/tests/system-test/2-query/Today.py +++ b/tests/system-test/2-query/Today.py @@ -5,6 +5,7 @@ from util.log import * from util.sql import * from util.cases import * import datetime +import pandas as pd class TDTestCase: @@ -12,8 +13,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - self.today_date = datetime.datetime.strptime( - datetime.datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d") + self.today_date = datetime.datetime.strptime(datetime.datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d") + self.today_ts = datetime.datetime.strptime(datetime.datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d").timestamp() self.time_unit = ['b','u','a','s','m','h','d','w'] self.error_param = ['1.5','abc','!@#','"abc"','today()'] self.arithmetic_operators = ['+','-','*','/'] @@ -41,7 +42,7 @@ class TDTestCase: f'today(),3,3.333,333.333333,now()', f'today()-1d,10,11.11,99.999999,now()', f'today()+1d,1,1.55,100.555555,today()'] - + self.db_percision = ['ms','us','ns'] def set_create_normaltable_sql(self, ntbname, column_dict): column_sql = '' for k, v in column_dict.items(): @@ -57,7 +58,8 @@ class TDTestCase: tag_sql += f"{k} {v}," create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})' return create_stb_sql - def data_check(self,column_dict={},tbname = '',values_list = [],tb_num = 1,tb = 'tb'): + + def data_check(self,column_dict={},tbname = '',values_list = [],tb_num = 1,tb = 'tb',precision = 'ms'): for k,v in column_dict.items(): num_up = 0 num_down = 0 @@ -65,12 +67,27 @@ class TDTestCase: if v.lower() == 'timestamp': tdSql.query(f'select {k} from {tbname}') for i in tdSql.queryResult: - if i[0] > self.today_date: - num_up += 1 - elif i[0] == self.today_date: - num_same += 1 - elif i[0] < self.today_date: - num_down += 1 + if precision == 'ms': + if int(i[0].timestamp())*1000 > int(self.today_ts)*1000: + num_up += 1 + elif int(i[0].timestamp())*1000 == int(self.today_ts)*1000: + num_same += 1 + elif int(i[0].timestamp())*1000 < int(self.today_ts)*1000: + num_down += 1 + elif precision == 'us': + if int(i[0].timestamp())*1000000 > int(self.today_ts)*1000000: + num_up += 1 + elif int(i[0].timestamp())*1000000 == int(self.today_ts)*1000000: + num_same += 1 + elif int(i[0].timestamp())*1000000 < int(self.today_ts)*1000000: + num_down += 1 + elif precision == 'ns': + if i[0] > int(self.today_ts)*1000000000: + num_up += 1 + elif i[0] == int(self.today_ts)*1000000000: + num_same += 1 + elif i[0] < int(self.today_ts)*1000000000: + num_down += 1 tdSql.query(f"select 
today() from {tbname}") tdSql.checkRows(len(values_list)*tb_num) tdSql.checkData(0, 0, str(self.today_date)) @@ -130,32 +147,36 @@ class TDTestCase: for i in range(num_same): tdSql.checkData(i, 0, str(self.today_date)) def today_check_ntb(self): - tdSql.prepare() - tdSql.execute(self.set_create_normaltable_sql(self.ntbname,self.column_dict)) - for i in self.values_list: - tdSql.execute( - f'insert into {self.ntbname} values({i})') - self.data_check(self.column_dict,self.ntbname,self.values_list) - tdSql.execute('drop database db') + for time_unit in self.db_percision: + print(time_unit) + tdSql.execute(f'create database db precision "{time_unit}"') + tdSql.execute('use db') + tdSql.execute(self.set_create_normaltable_sql(self.ntbname,self.column_dict)) + for i in self.values_list: + tdSql.execute( + f'insert into {self.ntbname} values({i})') + self.data_check(self.column_dict,self.ntbname,self.values_list,1,'tb',time_unit) + tdSql.execute('drop database db') def today_check_stb_tb(self): - tdSql.prepare() - tdSql.execute(self.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) - for i in range(self.tbnum): - tdSql.execute(f'create table if not exists {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})') - for j in self.values_list: - tdSql.execute(f'insert into {self.stbname}_{i} values ({j})') - # check child table - for i in range(self.tbnum): - self.data_check(self.column_dict,f'{self.stbname}_{i}',self.values_list) - # check stable - self.data_check(self.column_dict,self.stbname,self.values_list,self.tbnum,'stb') - tdSql.execute('drop database db') + for time_unit in self.db_percision: + print(time_unit) + tdSql.execute(f'create database db precision "{time_unit}"') + tdSql.execute('use db') + tdSql.execute(self.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) + for i in range(self.tbnum): + tdSql.execute(f'create table if not exists {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})') + for j in self.values_list: + tdSql.execute(f'insert into {self.stbname}_{i} values ({j})') + # check child table + for i in range(self.tbnum): + self.data_check(self.column_dict,f'{self.stbname}_{i}',self.values_list,1,'tb',time_unit) + # check stable + self.data_check(self.column_dict,self.stbname,self.values_list,self.tbnum,'stb',time_unit) + tdSql.execute('drop database db') def run(self): # sourcery skip: extract-duplicate-method - tdLog.printNoPrefix("==========check today() for normal table ==========") self.today_check_ntb() - tdLog.printNoPrefix("==========check today() for stable and child table==========") self.today_check_stb_tb() def stop(self): diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py index 961a6446b5..90a1b8f343 100644 --- a/tests/system-test/2-query/abs.py +++ b/tests/system-test/2-query/abs.py @@ -139,7 +139,7 @@ class TDTestCase: ) for i in range(4): tdSql.execute( - f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py index 20ee6df7fc..2e30ac7ea7 100644 --- a/tests/system-test/2-query/avg.py +++ b/tests/system-test/2-query/avg.py @@ -14,7 +14,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") 
tdSql.init(conn.cursor(), True) - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -22,7 +22,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -64,7 +64,7 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_avg(self ,origin_query , check_query): avg_result = tdSql.getResult(origin_query) origin_result = tdSql.getResult(check_query) @@ -73,13 +73,13 @@ class TDTestCase: for row_index , row in enumerate(avg_result): for col_index , elem in enumerate(row): if avg_result[row_index][col_index] != origin_result[row_index][col_index]: - check_status = False + check_status = False if not check_status: tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query ) sys.exit(1) else: tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query ) - + def test_errors(self): error_sql_lists = [ "select avg from t1", @@ -113,42 +113,42 @@ class TDTestCase: ] for error_sql in error_sql_lists: tdSql.error(error_sql) - + def support_types(self): type_error_sql_lists = [ - "select avg(ts) from t1" , + "select avg(ts) from t1" , "select avg(c7) from t1", "select avg(c8) from t1", "select avg(c9) from t1", - "select avg(ts) from ct1" , + "select avg(ts) from ct1" , "select avg(c7) from ct1", "select avg(c8) from ct1", "select avg(c9) from ct1", - "select avg(ts) from ct3" , + "select avg(ts) from ct3" , "select avg(c7) from ct3", "select avg(c8) from ct3", "select avg(c9) from ct3", - "select avg(ts) from ct4" , + "select avg(ts) from ct4" , "select avg(c7) from ct4", "select avg(c8) from ct4", "select avg(c9) from ct4", - "select avg(ts) from stb1" , + "select avg(ts) from stb1" , "select avg(c7) from stb1", "select avg(c8) from stb1", "select avg(c9) from stb1" , - "select avg(ts) from stbbb1" , + "select avg(ts) from stbbb1" , "select avg(c7) from stbbb1", "select avg(ts) from tbname", "select avg(c9) from tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ "select avg(c1) from t1", "select avg(c2) from t1", @@ -178,16 +178,16 @@ class TDTestCase: "select avg(c5) from stb1", "select avg(c6) from stb1", - "select avg(c6) as alisb from stb1", - "select avg(c6) alisb from stb1", + "select avg(c6) as alisb from stb1", + "select avg(c6) alisb from stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - + def basic_avg_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -207,18 +207,18 @@ class TDTestCase: tdSql.query("select avg(c5) from ct3") tdSql.checkRows(0) tdSql.query("select avg(c6) from ct3") - + # used for regular table tdSql.query("select avg(c1) from t1") tdSql.checkData(0, 0, 5.000000000) - - + + tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ") - + # used for sub table tdSql.query("select avg(c1) from ct1") tdSql.checkData(0, 0, 4.846153846) @@ -229,8 +229,8 @@ class TDTestCase: self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ") self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , 
sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ") - # used for stable table - + # used for stable table + tdSql.query("select avg(c1) from stb1") tdSql.checkRows(1) @@ -241,10 +241,10 @@ class TDTestCase: tdSql.error("select avg(c1) from tbname") tdSql.error("select avg(c1) from ct5") - # mix with common col + # mix with common col tdSql.error("select c1, avg(c1) from ct1") tdSql.error("select c1, avg(c1) from ct4") - + # mix with common functions tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ") @@ -278,11 +278,11 @@ class TDTestCase: tdSql.query("select count(*) from stb1 ") tdSql.checkData(0,0,25) - # bug fix for compute + # bug fix for compute tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ") tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4") - # mix with nest query + # mix with nest query self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1") self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1") @@ -297,7 +297,7 @@ class TDTestCase: tdSql.query(" select avg(c1) from stb1 where c1 is null ") tdSql.checkRows(0) - + def avg_func_filter(self): tdSql.execute("use db") tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ") @@ -324,7 +324,7 @@ class TDTestCase: def avg_Arithmetic(self): pass - + def check_boundary_values(self): tdSql.execute("drop database if exists bound_test") @@ -344,11 +344,11 @@ class TDTestCase: tdSql.execute( f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.execute( f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.execute( f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" ) @@ -359,14 +359,14 @@ class TDTestCase: tdSql.execute( f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.error( f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ") - + # check basic elem for table per row tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ") tdSql.checkRows(1) @@ -376,8 +376,8 @@ class TDTestCase: tdSql.checkData(0,3,53.571428571) tdSql.checkData(0,4,5.828571332045761e+37) # tdSql.checkData(0,5,None) - - + + # check + - * / in functions tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ") tdSql.checkData(0,0,920350134.5714285) @@ -386,33 +386,33 @@ class TDTestCase: tdSql.checkData(0,3,26.785714286) tdSql.checkData(0,4,2.9142856660228804e+37) # tdSql.checkData(0,5,None) - - + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test 
errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: avg basic query ============") + tdLog.printNoPrefix("==========step4: avg basic query ============") self.basic_avg_function() - tdLog.printNoPrefix("==========step5: avg boundary query ============") + tdLog.printNoPrefix("==========step5: avg boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step6: avg filter query ============") + tdLog.printNoPrefix("==========step6: avg filter query ============") self.avg_func_filter() diff --git a/tests/system-test/2-query/distribute_agg_apercentile.py b/tests/system-test/2-query/distribute_agg_apercentile.py index fd1455ce16..022d13c5ae 100644 --- a/tests/system-test/2-query/distribute_agg_apercentile.py +++ b/tests/system-test/2-query/distribute_agg_apercentile.py @@ -36,7 +36,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_avg.py b/tests/system-test/2-query/distribute_agg_avg.py index 647a262558..c690a17b4a 100644 --- a/tests/system-test/2-query/distribute_agg_avg.py +++ b/tests/system-test/2-query/distribute_agg_avg.py @@ -53,7 +53,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_count.py b/tests/system-test/2-query/distribute_agg_count.py index 2ac9c86df0..b3638dac4b 100644 --- a/tests/system-test/2-query/distribute_agg_count.py +++ b/tests/system-test/2-query/distribute_agg_count.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_max.py b/tests/system-test/2-query/distribute_agg_max.py index 5c9760cbcf..0924ea16ac 100644 --- a/tests/system-test/2-query/distribute_agg_max.py +++ b/tests/system-test/2-query/distribute_agg_max.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_min.py 
b/tests/system-test/2-query/distribute_agg_min.py index dd20d88229..8d077fd59b 100644 --- a/tests/system-test/2-query/distribute_agg_min.py +++ b/tests/system-test/2-query/distribute_agg_min.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py index 94f1a61d77..c91fd1d30b 100644 --- a/tests/system-test/2-query/distribute_agg_spread.py +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -55,7 +55,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_stddev.py b/tests/system-test/2-query/distribute_agg_stddev.py index 5050e6e940..59ede38983 100644 --- a/tests/system-test/2-query/distribute_agg_stddev.py +++ b/tests/system-test/2-query/distribute_agg_stddev.py @@ -64,7 +64,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/distribute_agg_sum.py b/tests/system-test/2-query/distribute_agg_sum.py index add4d75c61..8dcd902b3d 100644 --- a/tests/system-test/2-query/distribute_agg_sum.py +++ b/tests/system-test/2-query/distribute_agg_sum.py @@ -53,7 +53,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/elapsed.py b/tests/system-test/2-query/elapsed.py index 154d79ff01..a62e946866 100644 --- a/tests/system-test/2-query/elapsed.py +++ b/tests/system-test/2-query/elapsed.py @@ -23,18 +23,18 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record self.num = 10 def caseDescription(self): ''' - case1 : [TD-11804] test case for elapsed function : - - this test case is for aggregate function elapsed , elapsed function can only used for the timestamp primary key column (ts) , + case1 : [TD-11804] test case for elapsed function : + + this test case is for aggregate function elapsed , elapsed function can only used for the timestamp primary key column (ts) , it has two input parameters, the first parameter is necessary, basic SQL as follow: - + 
=================================================================================================================================== SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; =================================================================================================================================== @@ -49,18 +49,18 @@ class TDTestCase: case: select * from table|stable[group by tbname]|regular_table case:select elapsed(ts) from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; - + case:select elapsed(ts) , elapsed(ts,unit_time1)*regular_num1 , elapsed(ts,unit_time1)+regular_num2 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; - + //mixup with all functions only once query (it's different with nest query) case:select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1),min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col)from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; - - //mixup with ordinary col + + //mixup with ordinary col case:select ts ,elapsed(ts)*10 ,col+5 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; - + //nest query case:select elapsed(ts) from (select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1),min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col)from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]) where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; - + //clause about filter condition case:select elapsed(ts) from table|stable[group by tbname] where [ts|col|tag >|<|=|>=|<=|=|<>|!= value] | [between ... and ...] |[in] |[is null|not null] interval (unit_time) ; case:select elapsed(ts) from table|stable[group by tbname] where clause1 and clause 2 and clause3 interval (unit_time) ; @@ -74,7 +74,7 @@ class TDTestCase: // Window aggregation case:select elapsed(ts) from t1 where clause session(ts, time_units) ; - case:select elapsed(ts) from t1 where clause state_window(regular_nums); + case:select elapsed(ts) from t1 where clause state_window(regular_nums); // Continuous query case:create table select elapsed(ts) ,avg(col) from (select elapsed(ts) ts_inter ,avg(col) col from stable|table interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL)][group by tbname]) interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL) sliding(unit_time_windows); @@ -83,13 +83,13 @@ class TDTestCase: this test case notice successful execution and correctness of results. 
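A worked expectation, stated only as an assumption derived from the data layout in prepare_data below: rows are inserted at 10-second intervals, so over a table of N rows elapsed(ts, 10s) across the full range should be N - 1, which is the value the later checkData calls compare against.
num_rows = 10                       # matches self.num
expected_full_range = num_rows - 1  # elapsed(ts, 10s) over all rows -> 9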
- ''' - return + ''' + return def prepare_data(self): tdLog.info (" ====================================== prepare data ==================================================") - + tdSql.execute('drop database if exists testdb ;') tdSql.execute('create database testdb keep 36500;') tdSql.execute('use testdb;') @@ -120,14 +120,14 @@ class TDTestCase: tdSql.execute('create table regular_table_1 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double, bin_chars binary(20)) ;') tdSql.execute('create table regular_table_2 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') tdSql.execute('create table regular_table_3 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') - + tablenames = ["sub_table1_1","sub_table1_2","sub_table1_3","sub_table2_1","sub_table2_2","sub_table2_3","regular_table_1","regular_table_2","regular_table_3"] tdLog.info("insert into records ") for tablename in tablenames: - for i in range(self.num): + for i in range(self.num): sql= 'insert into %s values(%d, %d,%d, %d, %d, %d, %f, %f, "%s")' % (tablename,self.ts + i*10000, self.ts + i*10,2147483647-i, 9223372036854775807-i, 32767-i, 127-i, i, i,("bintest"+str(i))) print(sql) tdSql.execute(sql) @@ -144,7 +144,7 @@ class TDTestCase: "(ts,2d+3m-2s,NULL)","(ts+10d,NULL)" ,"(ts,now -1m%1d)","(ts+10d,_c0)","(ts+10d,)","(ts,%)","(ts, , m)","(ts,abc)","(ts,/)","(ts,*)","(ts,1s,100)", "(ts,1s,abc)","(ts,1s,_c0)","(ts,1s,*)","(ts,1s,NULL)","(ts,,_c0)","(ts,tbname,ts)","(ts,0,tbname)","('2021-11-18 00:00:10')","('2021-11-18 00:00:10', 1s)", "('2021-11-18T00:00:10+0800', '1s')","('2021-11-18T00:00:10Z', '1s')","('2021-11-18T00:00:10+0800', 10000000d,)","('ts', ,2021-11-18T00:00:10+0800, )"] - + for tablename in tablenames: for abnormal_param in abnormal_list: @@ -191,7 +191,7 @@ class TDTestCase: def query_filter(self): tdLog.info (" ====================================== elapsed query filter ==================================================") - + for i in range(self.num): ts_start_time = self.ts + i*10000 ts_col_start_time = self.ts + i*10 @@ -210,7 +210,7 @@ class TDTestCase: tdSql.query(filter_sql) tdSql.checkRows(1) tdSql.checkData(0,0,float(self.num -i-1)) - + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol >= %d and tstag='2015-01-01 00:01:00'group by tbname " %(ts_start_time,ts_col_start_time) tdSql.query(filter_sql) @@ -224,7 +224,7 @@ class TDTestCase: filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol > %d and tstag='2015-01-01 00:01:00' group by tbname" %(ts_start_time,ts_col_start_time) tdSql.query(filter_sql) - + if i == self.num-1: tdSql.checkRows(0) else: @@ -233,7 +233,7 @@ class TDTestCase: filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d and tscol > %d " %(ts_start_time,ts_col_start_time) tdSql.query(filter_sql) - + if i == self.num-1: tdSql.checkRows(0) else: @@ -268,7 +268,7 @@ class TDTestCase: filter_sql = "select elapsed(ts,10s) from stable_1 where ts < %d and tscol <= %d and tstag < '2015-01-01 00:01:00' group by tbname" %(ts_end_time,ts_col_end_time) tdSql.query(filter_sql) - + if i == self.num-1: tdSql.checkRows(0) else: @@ -277,7 +277,7 @@ class TDTestCase: filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts < %d and tscol <= %d 
" %(ts_end_time,ts_col_end_time) tdSql.query(filter_sql) - + if i == self.num-1: tdSql.checkRows(0) else: @@ -303,7 +303,7 @@ class TDTestCase: else: tdSql.checkRows(1) tdSql.checkData(0,0,float(self.num - i - 2)) - + filter_sql = "select elapsed(ts,10s) from stable_1 where ts = %d and tscol < %d group by tbname " %(ts_end_time,ts_col_end_time) tdSql.query(filter_sql) tdSql.checkRows(0) @@ -331,10 +331,10 @@ class TDTestCase: else: tdSql.checkRows(1) tdSql.checkData(0,0,float(self.num -i-2)) - + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint != %d and tscol <= %d group by tbname " %(i,ts_col_end_time) tdSql.query(filter_sql) - + if i == self.num: tdSql.checkRows(0) else: @@ -345,7 +345,7 @@ class TDTestCase: filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint != %d and tscol <= %d " %(i,ts_col_end_time) tdSql.query(filter_sql) - + if i == self.num: tdSql.checkRows(0) else: @@ -374,7 +374,7 @@ class TDTestCase: filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint <> %d and tscol <= %d group by tbname " %(i,ts_col_end_time) tdSql.query(filter_sql) - + if i == self.num: tdSql.checkRows(0) else: @@ -385,14 +385,14 @@ class TDTestCase: filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint <> %d and tscol <= %d " %(i,ts_col_end_time) tdSql.query(filter_sql) - + if i == self.num: tdSql.checkRows(0) else: tdSql.checkRows(1) tdSql.checkData(0,0,float(self.num - i - 1)) - # filter between and + # filter between and tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") tdSql.checkData(0,0,2) tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and \ @@ -401,7 +401,7 @@ class TDTestCase: tdSql.checkData(1,0,2) tdSql.checkData(2,0,2) - # filter in and or + # filter in and or tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") tdSql.checkData(0,0,2) @@ -424,7 +424,7 @@ class TDTestCase: tdSql.checkData(0,0,1) tdSql.checkData(1,0,1) tdSql.checkData(2,0,1) - + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars like 'bintest_' and tscol <= '2015-01-01 00:01:00.000' group by tbname ") tdSql.checkData(0,0,6) tdSql.checkData(1,0,6) @@ -477,16 +477,16 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query("select elapsed(ts,10s)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") tdSql.checkRows(0) - + for i in range(self.num): ts_start_time = self.ts + i*10000 ts_col_start_time = self.ts + i*10 ts_tag_time = "2015-01-01 00:01:00" ts_end_time = self.ts + (self.num-1-i)*10000 ts_col_end_time = self.ts + (self.num-1-i)*10 - - # only interval + + # only interval interval_sql = "select elapsed(ts,10s) from stable_1 where ts <=%d interval(10s) group by tbname " %(ts_start_time) tdSql.query(interval_sql) tdSql.checkRows(3*(i+1)) @@ -499,10 +499,10 @@ class TDTestCase: tdSql.checkData(x,1,0) else : tdSql.checkData(x,1,1) - + # interval and fill , fill_type = ["NULL","value,100","prev","next","linear"] - # interval (10s) and time range is outer records + # interval (10s) and time range is outer records tdSql.query("select 
elapsed(ts,10s)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") tdSql.checkRows(0) @@ -552,8 +552,8 @@ class TDTestCase: tdSql.checkData(59,1,2) tdSql.checkData(60,1,10) tdSql.checkData(61,1,10) - - # interval (20s) and time range is outer records + + # interval (20s) and time range is outer records tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(prev) group by tbname,ind ;") tdSql.checkRows(90) tdSql.checkData(0,1,20) @@ -562,7 +562,7 @@ class TDTestCase: tdSql.checkData(29,1,10) tdSql.checkData(30,1,20) tdSql.checkData(31,1,20) - + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(next) group by tbname,ind ;") tdSql.checkRows(90) tdSql.checkData(0,1,20) @@ -589,7 +589,7 @@ class TDTestCase: tdSql.checkData(29,1,None) tdSql.checkData(30,1,20) tdSql.checkData(31,1,20) - + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(value ,2) group by tbname,ind ;") tdSql.checkRows(90) tdSql.checkData(0,1,20) @@ -599,7 +599,7 @@ class TDTestCase: tdSql.checkData(30,1,20) tdSql.checkData(31,1,20) - # interval (20s) and time range is in records + # interval (20s) and time range is in records tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(prev) group by tbname,ind ;") tdSql.checkRows(9) @@ -689,7 +689,7 @@ class TDTestCase: tdSql.checkData(19,1,10) tdSql.checkData(20,1,20) tdSql.checkData(25,1,0) - + def query_mix_common(self): tdLog.info (" ======================================elapsed mixup with common col, it will not support =======================================") @@ -730,7 +730,7 @@ class TDTestCase: tdSql.checkData(0,0,data[0][index]) tdSql.query("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from stable_1 group by tbname; ") - + # Arithmetic with elapsed for common table operators = ["+" ,"-" , "*" ,"/" ,"%"] @@ -743,9 +743,9 @@ class TDTestCase: sql_common= "select " for index , query in enumerate(querys_oper): - + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) - + query_datas.append(query_data[0][0]) sql_common += " %s %s " %(query,operator) sql_common=sql_common[:-2] + " from sub_table1_1;" @@ -753,7 +753,7 @@ class TDTestCase: tdSql.query(sql_common) results= query_datas[0] if operator == "+": - for data in query_datas[1:]: + for data in query_datas[1:]: results += data tdSql.checkData(0,0,results) @@ -794,9 +794,9 @@ class TDTestCase: sql_common= "select " for index , query in enumerate(querys_oper): - + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) - + query_datas.append(query_data[0][0]) sql_common += " %s %s " %(query,operator) sql_common=sql_common[:-2] + " from stable_1 group by tbname;" @@ -804,7 +804,7 @@ class TDTestCase: tdSql.query(sql_common) results= query_datas[0] if operator == "+": - for data in query_datas[1:]: + for data in query_datas[1:]: results += data tdSql.checkData(0,0,results) tdSql.checkData(1,0,results) @@ -849,9 +849,9 @@ class TDTestCase: querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] - 
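A condensed sketch, illustrative only and reusing names from the surrounding tests, of how the operator-combined sql_common queries above are assembled for the "+" case:
queries = ["count(*)", "avg(q_int)", "elapsed(ts,10s)"]        # any subset of querys_oper
sql_common = "select " + " + ".join(queries) + " from sub_table1_1;"
# the expected value is the sum of each query's standalone result,
# which is what the per-operator accumulation loop above verifies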
+ querys_mix = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(q_int)","last(q_int)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] - + tdSql.query("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") data = tdSql.getResult("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") @@ -873,7 +873,7 @@ class TDTestCase: tdSql.checkData(0,0,data[0][index]) tdSql.checkData(1,0,data[0][index]) tdSql.checkData(2,0,data[0][index]) - + operators = ["+" ,"-" , "*" ,"/" ,"%"] querys_oper = querys_mix @@ -884,9 +884,9 @@ class TDTestCase: sql_common= "select " for index , query in enumerate(querys_oper): - + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) - + query_datas.append(query_data[0][0]) sql_common += " %s %s " %(query,operator) sql_common=sql_common[:-2] + " from sub_table1_1;" @@ -935,9 +935,9 @@ class TDTestCase: sql_common= "select " for index , query in enumerate(querys_oper): - + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) - + query_datas.append(query_data[0][0]) sql_common += " %s %s " %(query,operator) sql_common=sql_common[:-2] + " from stable_1 group by tbname;" @@ -945,7 +945,7 @@ class TDTestCase: tdSql.query(sql_common) results= query_datas[0] if operator == "+": - for data in query_datas[1:]: + for data in query_datas[1:]: results += data tdSql.checkData(0,0,results) tdSql.checkData(1,0,results) @@ -983,7 +983,7 @@ class TDTestCase: tdSql.checkData(0,0,results) tdSql.checkData(1,0,results) tdSql.checkData(2,0,results) - + def query_mix_compute(self): tdLog.info (" ====================================== elapsed mixup with compute function =================================================") @@ -1000,8 +1000,8 @@ class TDTestCase: continue tdSql.query(sql1) tdSql.query(sql2) - - # only support mixup with spread + + # only support mixup with spread sql = "select spread(ts)*10,spread(q_tinyint)-10,elapsed(ts,10s) from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" tdSql.execute(sql) @@ -1016,7 +1016,7 @@ class TDTestCase: for index , query in enumerate(querys_mix): sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) tdSql.query(sql) - + operators = ["+" ,"-" , "*" ,"/" ,"%"] querys_oper = querys_mix @@ -1025,7 +1025,7 @@ class TDTestCase: sql_common= "select " for index , query in enumerate(querys_oper): - + sql_common += " %s %s " %(query,operator) sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" @@ -1034,7 +1034,7 @@ class TDTestCase: for index , query in enumerate(querys_mix): sql = "select %s from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) tdSql.query(sql) - + operators = ["+" ,"-" , "*" ,"/" ,"%"] querys_oper = querys_mix @@ -1043,21 +1043,21 @@ class TDTestCase: sql_common= "select " for index , query in enumerate(querys_oper): - + sql_common += " %s %s " %(query,operator) sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" tdSql.query(sql_common) - + def query_mix_arithmetic(self): - + tdLog.info (" ====================================== elapsed mixup with arithmetic 
=================================================") - + tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") - + # queries = ["elapsed(ts,10s)+1" ,"elapsed(ts,10s)-2","elapsed(ts,10s)*3","elapsed(ts,10s)/4","elapsed(ts,10s)%5" ] - + # for index ,query in enumerate(queries): # sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) ;" % (query) # data = tdSql.getResult(sql) @@ -1067,7 +1067,7 @@ class TDTestCase: def query_with_join(self): tdLog.info (" ====================================== elapsed mixup with join =================================================") - + tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts; ") tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts group by tbname; ") @@ -1102,7 +1102,7 @@ class TDTestCase: tdLog.info (" ====================================== elapsed mixup with union all =================================================") - # union all with empty + # union all with empty tdSql.query("select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from regular_table_2;") @@ -1117,7 +1117,7 @@ class TDTestCase: tdSql.checkRows(600) tdSql.checkData(0,1,0.1) tdSql.checkData(500,0,0) - + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from sub_empty_2;') tdSql.checkRows(0) @@ -1149,11 +1149,11 @@ class TDTestCase: tdSql.checkRows(0) tdSql.error('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from stable_empty group by tbname;') - + tdSql.error('select elapsed(ts,10s) from sub_empty_1 interval(1s) union all select elapsed(ts,10s) from stable_empty interval(1s) group by tbname;') - + # tdSql.error('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) union all select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) group by tbname;') - + tdSql.query("select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_empty group by tbname ;") tdSql.checkRows(0) @@ -1176,7 +1176,7 @@ class TDTestCase: tdSql.checkRows(360) tdSql.checkData(0,1,1) tdSql.checkData(50,1,0) - + #case : TD-12229 tdSql.query('select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_2 group by tbname ;') tdSql.checkRows(3) @@ -1195,7 +1195,7 @@ class TDTestCase: # union all with sub table and regular table - # sub_table with sub_table + # sub_table with sub_table tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ select elapsed(ts,10s) from sub_table2_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') @@ -1232,7 +1232,7 @@ class TDTestCase: tdSql.checkRows(120) tdSql.checkData(0,1,1) tdSql.checkData(12,1,0) - + tdSql.query('select elapsed(ts,10s) from 
sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') tdSql.checkRows(60) @@ -1245,7 +1245,7 @@ class TDTestCase: tdSql.checkData(0,1,1) tdSql.checkData(12,1,0) - # stable with stable + # stable with stable tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname;') @@ -1300,23 +1300,23 @@ class TDTestCase: tdLog.info (" ====================================== elapsed query for nest =================================================") # ===============================================outer nest============================================ - + # regular table # ts can't be used at outer query tdSql.query("select elapsed(ts,10s) from (select ts from regular_table_1 );") - # case : TD-12164 + # case : TD-12164 tdSql.error("select elapsed(ts,10s) from (select qint ts from regular_table_1 );") tdSql.error("select elapsed(tbname ,10s) from (select qint tbname from regular_table_1 );") tdSql.error("select elapsed(tsc ,1s) from (select q_int tsc from regular_table_1) ;") tdSql.error("select elapsed(tsv ,1s) from (select elapsed(ts,1s) tsv from regular_table_1);") tdSql.error("select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1);") - # # bug fix + # # bug fix # tdSql.error("select elapsed(tsc ,1s) from (select tscol tsc from regular_table_1) ;") - + # case TD-12276 # tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts asc );") @@ -1333,11 +1333,11 @@ class TDTestCase: # tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from sub_table1_1 order by ts ) interval(1s);") # tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from sub_table1_1 order by ts ) interval(1s);") - + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,top(q_int,3) from sub_table1_1 ) interval(10s);") - + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,bottom(q_int,3) from sub_table1_1 ) interval(10s);") - + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") @@ -1345,7 +1345,7 @@ class TDTestCase: # tdSql.error("select elapsed(ts,10s) from (select ts ,count(*),tbname from sub_table1_1 order by ts ) interval(1s);") querys = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)","elapsed(ts,10s)"] - + for query in querys: sql1 = "select elapsed(ts,10s) from (select %s from regular_table_1 order by ts ) interval(1s); " % query sql2 = "select elapsed(ts,10s) from (select ts , tbname ,%s from regular_table_1 order by ts ) interval(1s); " % query @@ -1359,16 +1359,16 @@ class TDTestCase: tdSql.error(sql4) tdSql.error(sql5) - + # case TD-12164 tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from regular_table_1) ; " ) tdSql.error( "select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1) ; " ) - + tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from stable_1 group by tbname ) ; " ) tdSql.error( "select elapsed(ts ,1s) from 
(select elapsed(ts,1s) ts from stable_1 group by tbname) ; " ) - # stable + # stable tdSql.error("select elapsed(ts,10s) from (select ts from stable_1 ) group by tbname ;") @@ -1376,7 +1376,7 @@ class TDTestCase: tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from stable_1 order by ts ) interval(1s) group by tbname;") - # mixup with aggregate + # mixup with aggregate querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","top(q_double,1)", "bottom(q_float,1)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)" ,"elapsed(ts,10s)"] @@ -1387,7 +1387,7 @@ class TDTestCase: sql2 = "select elapsed(ts,10s) from (select %s from stable_1 ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) sql3 = "select elapsed(ts,10s) from (select %s from stable_1 group by tbname) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) - if query in ["interp(q_int)" ]: + if query in ["interp(q_int)" ]: # print(sql1 ) # print(sql2) tdSql.query(sql1) @@ -1444,7 +1444,7 @@ class TDTestCase: # tdSql.query("select spread(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") # tdSql.checkRows(1) - + # tdSql.query("select diff(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") # tdSql.checkRows(599) @@ -1474,8 +1474,8 @@ class TDTestCase: # tdSql.checkRows(600) def query_session_windows(self): - - # case TD-12344 + + # case TD-12344 # session not support stable tdSql.error('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts ,10s) group by tbname,ind order by ts asc ') @@ -1488,10 +1488,9 @@ class TDTestCase: tdSql.query('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') - tdSql.query('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') - + tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) session(ts,1w) ; ') - + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ') tdSql.checkRows(0) @@ -1512,25 +1511,25 @@ class TDTestCase: # tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ') # tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) state_window(q_int) ; ') - + # tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int); 
') # tdSql.checkRows(0) - + def continuous_query(self): tdSql.error('create table elapsed_t as select elapsed(ts) from sub_table1_1 interval(1m) sliding(30s);') tdSql.error('create table elapsed_tb as select elapsed(ts) from stable_1 interval(1m) sliding(30s) group by tbname;') tdSql.error('create table elapsed_tc as select elapsed(ts) from stable_1 interval(10s) sliding(5s) interval(1m) sliding(30s) group by tbname;') - + def query_precision(self): def generate_data(precision="ms"): - + tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision)) tdSql.execute("use db_%s;" %precision) tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision) tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision) tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision) - + if precision == "ms": start_ts = self.ts step = 10000 @@ -1594,7 +1593,7 @@ class TDTestCase: self.query_session_windows() self.continuous_query() self.query_precision() - + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/function_null.py b/tests/system-test/2-query/function_null.py index 4de7a7f113..545872b39d 100644 --- a/tests/system-test/2-query/function_null.py +++ b/tests/system-test/2-query/function_null.py @@ -42,7 +42,7 @@ class TDTestCase: ) for i in range(4): tdSql.execute( - f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/function_stateduration.py b/tests/system-test/2-query/function_stateduration.py index 9aa6fdaefa..bdbd92acd6 100644 --- a/tests/system-test/2-query/function_stateduration.py +++ b/tests/system-test/2-query/function_stateduration.py @@ -18,7 +18,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -26,7 +26,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -68,7 +68,7 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def test_errors(self): error_sql_lists = [ # "select stateduration(c1,'GT',5,1s) from t1" @@ -110,35 +110,35 @@ class TDTestCase: for error_sql in error_sql_lists: tdSql.error(error_sql) pass - + def support_types(self): other_no_value_types = [ - "select stateduration(ts,'GT',1,1s) from t1" , + "select stateduration(ts,'GT',1,1s) from t1" , "select stateduration(c7,'GT',1,1s) from t1", "select stateduration(c8,'GT',1,1s) from t1", "select stateduration(c9,'GT',1,1s) from t1", - "select stateduration(ts,'GT',1,1s) from ct1" , + "select stateduration(ts,'GT',1,1s) from ct1" , "select stateduration(c7,'GT',1,1s) from ct1", "select stateduration(c8,'GT',1,1s) from ct1", "select stateduration(c9,'GT',1,1s) from ct1", - "select stateduration(ts,'GT',1,1s) from ct3" , + "select stateduration(ts,'GT',1,1s) from ct3" , "select stateduration(c7,'GT',1,1s) from ct3", "select stateduration(c8,'GT',1,1s) from ct3", "select stateduration(c9,'GT',1,1s) from ct3", - "select stateduration(ts,'GT',1,1s) from ct4" , + "select stateduration(ts,'GT',1,1s) from ct4" , "select stateduration(c7,'GT',1,1s) from ct4", "select 
stateduration(c8,'GT',1,1s) from ct4", "select stateduration(c9,'GT',1,1s) from ct4", - "select stateduration(ts,'GT',1,1s) from stb1 partition by tbname" , + "select stateduration(ts,'GT',1,1s) from stb1 partition by tbname" , "select stateduration(c7,'GT',1,1s) from stb1 partition by tbname", "select stateduration(c8,'GT',1,1s) from stb1 partition by tbname", - "select stateduration(c9,'GT',1,1s) from stb1 partition by tbname" + "select stateduration(c9,'GT',1,1s) from stb1 partition by tbname" ] - + for type_sql in other_no_value_types: tdSql.error(type_sql) tdLog.info("support type ok , sql is : %s"%type_sql) - + type_sql_lists = [ "select stateduration(c1,'GT',1,1s) from t1", "select stateduration(c2,'GT',1,1s) from t1", @@ -168,8 +168,8 @@ class TDTestCase: "select stateduration(c5,'GT',1,1s) from stb1 partition by tbname", "select stateduration(c6,'GT',1,1s) from stb1 partition by tbname", - "select stateduration(c6,'GT',1,1s) as alisb from stb1 partition by tbname", - "select stateduration(c6,'GT',1,1s) alisb from stb1 partition by tbname", + "select stateduration(c6,'GT',1,1s) as alisb from stb1 partition by tbname", + "select stateduration(c6,'GT',1,1s) alisb from stb1 partition by tbname", ] for type_sql in type_sql_lists: @@ -177,7 +177,7 @@ class TDTestCase: def support_opers(self): oper_lists = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] - + oper_errors = [",","*","NULL","tbname","ts","sum","_c0"] for oper in oper_lists: @@ -190,7 +190,7 @@ class TDTestCase: def basic_stateduration_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -211,9 +211,9 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query("select stateduration(c6,'GT',1,1s) from ct3") - # will support _rowts mix with + # will support _rowts mix with # tdSql.query("select (c6,'GT',1,1s),_rowts from ct3") - + # auto check for t1 table # used for regular table tdSql.query("select stateduration(c6,'GT',1,1s) from t1") @@ -229,17 +229,17 @@ class TDTestCase: tdSql.error("select stateduration(c6,'GT',1,1s),tbname from ct1") tdSql.error("select stateduration(c6,'GT',1,1s),t1 from ct1") - # unique with common col + # unique with common col tdSql.error("select stateduration(c6,'GT',1,1s) ,ts from ct1") tdSql.error("select stateduration(c6,'GT',1,1s) ,c1 from ct1") - # unique with scalar function + # unique with scalar function tdSql.error("select stateduration(c6,'GT',1,1s) ,abs(c1) from ct1") tdSql.error("select stateduration(c6,'GT',1,1s) , unique(c2) from ct1") tdSql.error("select stateduration(c6,'GT',1,1s) , abs(c2)+2 from ct1") - - # unique with aggregate function + + # unique with aggregate function tdSql.error("select stateduration(c6,'GT',1,1s) ,sum(c1) from ct1") tdSql.error("select stateduration(c6,'GT',1,1s) ,max(c1) from ct1") tdSql.error("select stateduration(c6,'GT',1,1s) ,csum(c1) from ct1") @@ -262,16 +262,16 @@ class TDTestCase: tdSql.checkData(0, 0, 0) tdSql.checkData(1, 0, 6134400) tdSql.checkData(6, 0, -1) - - # unique with union all + + # unique with union all tdSql.query("select stateduration(c1,'GT',1,1s) from ct4 union all select stateduration(c1,'GT',1,1s) from ct1") tdSql.checkRows(25) tdSql.query("select stateduration(c1,'GT',1,1s) from ct4 union all select distinct(c1) from ct4") tdSql.checkRows(22) - # unique with join - # prepare join datas with same ts + # unique with join + # prepare join datas with same ts tdSql.execute(" use db ") 
tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") @@ -328,7 +328,7 @@ class TDTestCase: tdSql.checkRows(12) tdSql.query("select stateduration(c1+2 ,'GT',1,1s) from t1") tdSql.checkRows(12) - + # bug for stable #partition by tbname @@ -337,21 +337,20 @@ class TDTestCase: # tdSql.query(" select unique(c1) from stb1 partition by tbname ") # tdSql.checkRows(21) - - # group by + + # group by tdSql.error("select stateduration(c1,'GT',1,1s) from ct1 group by c1") tdSql.error("select stateduration(c1,'GT',1,1s) from ct1 group by tbname") # super table - + def check_unit_time(self): tdSql.execute(" use db ") tdSql.error("select stateduration(c1,'GT',1,1b) from ct1") tdSql.error("select stateduration(c1,'GT',1,1u) from ct1") + tdSql.error("select stateduration(c1,'GT',1,1000s) from t1") tdSql.query("select stateduration(c1,'GT',1,1s) from t1") tdSql.checkData(10,0,63072035) - tdSql.query("select stateduration(c1,'GT',1,1000s) from t1") - tdSql.checkData(10,0,int(63072035/1000)) tdSql.query("select stateduration(c1,'GT',1,1m) from t1") tdSql.checkData(10,0,int(63072035/60)) tdSql.query("select stateduration(c1,'GT',1,1h) from t1") @@ -360,8 +359,8 @@ class TDTestCase: tdSql.checkData(10,0,int(63072035/60/24/60)) tdSql.query("select stateduration(c1,'GT',1,1w) from t1") tdSql.checkData(10,0,int(63072035/60/7/24/60)) - - + + def check_boundary_values(self): tdSql.execute("drop database if exists bound_test") diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py index 238924d851..d0573a6bf4 100644 --- a/tests/system-test/2-query/irate.py +++ b/tests/system-test/2-query/irate.py @@ -97,7 +97,7 @@ class TDTestCase: ) for i in range(4): tdSql.execute( - f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py index d03ed5e03a..9e48f7d45a 100644 --- a/tests/system-test/2-query/json_tag.py +++ b/tests/system-test/2-query/json_tag.py @@ -566,7 +566,7 @@ class TDTestCase: tdSql.checkRows(3) tdSql.query("select bottom(dataint,100) from jsons1 where jtag->'tag1'>1") tdSql.checkRows(3) - tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1") + #tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1") tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 1.5) # tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1") @@ -675,7 +675,7 @@ class TDTestCase: tdSql.checkRows(3) tdSql.query("select TO_UNIXTIMESTAMP(datastr) from jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select TIMETRUNCATE(ts,1u) from jsons1 where jtag->'tag1'>1;") + tdSql.query("select TIMETRUNCATE(ts,1s) from jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) tdSql.query("select TIMEDIFF(ts,_c0) from jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) @@ -687,8 +687,8 @@ class TDTestCase: # to_json() tdSql.query("select to_json('{\"abc\":123}') from jsons1_1") tdSql.checkRows(2) - # tdSql.checkData(0, 0, '{"abc":123}') - # tdSql.checkData(1, 0, '{"abc":123}') + tdSql.checkData(0, 0, '{"abc":123}') + tdSql.checkData(1, 0, '{"abc":123}') tdSql.query("select to_json('null') from jsons1_1") tdSql.checkRows(2) tdSql.checkData(0, 0, 
'null') diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index 46426a5a95..3cd023648e 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -109,7 +109,7 @@ class TDTestCase: ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py index ccda55f7ba..fbeb04bc2f 100644 --- a/tests/system-test/2-query/statecount.py +++ b/tests/system-test/2-query/statecount.py @@ -18,7 +18,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - + def prepare_datas(self): tdSql.execute( '''create table stb1 @@ -26,7 +26,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -68,7 +68,7 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def test_errors(self): error_sql_lists = [ # "select statecount(c1,'GT',5) from t1" @@ -110,35 +110,35 @@ class TDTestCase: for error_sql in error_sql_lists: tdSql.error(error_sql) pass - + def support_types(self): other_no_value_types = [ - "select statecount(ts,'GT',1) from t1" , + "select statecount(ts,'GT',1) from t1" , "select statecount(c7,'GT',1) from t1", "select statecount(c8,'GT',1) from t1", "select statecount(c9,'GT',1) from t1", - "select statecount(ts,'GT',1) from ct1" , + "select statecount(ts,'GT',1) from ct1" , "select statecount(c7,'GT',1) from ct1", "select statecount(c8,'GT',1) from ct1", "select statecount(c9,'GT',1) from ct1", - "select statecount(ts,'GT',1) from ct3" , + "select statecount(ts,'GT',1) from ct3" , "select statecount(c7,'GT',1) from ct3", "select statecount(c8,'GT',1) from ct3", "select statecount(c9,'GT',1) from ct3", - "select statecount(ts,'GT',1) from ct4" , + "select statecount(ts,'GT',1) from ct4" , "select statecount(c7,'GT',1) from ct4", "select statecount(c8,'GT',1) from ct4", "select statecount(c9,'GT',1) from ct4", - "select statecount(ts,'GT',1) from stb1 partition by tbname" , + "select statecount(ts,'GT',1) from stb1 partition by tbname" , "select statecount(c7,'GT',1) from stb1 partition by tbname", "select statecount(c8,'GT',1) from stb1 partition by tbname", - "select statecount(c9,'GT',1) from stb1 partition by tbname" + "select statecount(c9,'GT',1) from stb1 partition by tbname" ] - + for type_sql in other_no_value_types: tdSql.error(type_sql) tdLog.info("support type ok , sql is : %s"%type_sql) - + type_sql_lists = [ "select statecount(c1,'GT',1) from t1", "select statecount(c2,'GT',1) from t1", @@ -168,8 +168,8 @@ class TDTestCase: "select statecount(c5,'GT',1) from stb1 partition by tbname", "select statecount(c6,'GT',1) from stb1 partition by tbname", - "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname", - "select statecount(c6,'GT',1) alisb from stb1 partition by tbname", + "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname", + "select statecount(c6,'GT',1) alisb from stb1 partition by tbname", ] for type_sql in type_sql_lists: @@ -177,7 +177,7 @@ class TDTestCase: def support_opers(self): oper_lists = 
['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] - + oper_errors = [",","*","NULL","tbname","ts","sum","_c0"] for oper in oper_lists: @@ -190,7 +190,7 @@ class TDTestCase: def basic_statecount_function(self): - # basic query + # basic query tdSql.query("select c1 from ct3") tdSql.checkRows(0) tdSql.query("select c1 from t1") @@ -211,9 +211,9 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query("select statecount(c6,'GT',1) from ct3") - # will support _rowts mix with + # will support _rowts mix with # tdSql.query("select (c6,'GT',1),_rowts from ct3") - + # auto check for t1 table # used for regular table tdSql.query("select statecount(c6,'GT',1) from t1") @@ -229,17 +229,17 @@ class TDTestCase: tdSql.error("select statecount(c6,'GT',1),tbname from ct1") tdSql.error("select statecount(c6,'GT',1),t1 from ct1") - # unique with common col + # unique with common col tdSql.error("select statecount(c6,'GT',1) ,ts from ct1") tdSql.error("select statecount(c6,'GT',1) ,c1 from ct1") - # unique with scalar function + # unique with scalar function tdSql.error("select statecount(c6,'GT',1) ,abs(c1) from ct1") tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1") tdSql.error("select statecount(c6,'GT',1) , abs(c2)+2 from ct1") - - # unique with aggregate function + + # unique with aggregate function tdSql.error("select statecount(c6,'GT',1) ,sum(c1) from ct1") tdSql.error("select statecount(c6,'GT',1) ,max(c1) from ct1") tdSql.error("select statecount(c6,'GT',1) ,csum(c1) from ct1") @@ -262,16 +262,16 @@ class TDTestCase: tdSql.checkData(0, 0, 1) tdSql.checkData(1, 0, 2) tdSql.checkData(6, 0, -1) - - # unique with union all + + # unique with union all tdSql.query("select statecount(c1,'GT',1) from ct4 union all select statecount(c1,'GT',1) from ct1") tdSql.checkRows(25) tdSql.query("select statecount(c1,'GT',1) from ct4 union all select distinct(c1) from ct4") tdSql.checkRows(22) - # unique with join - # prepare join datas with same ts + # unique with join + # prepare join datas with same ts tdSql.execute(" use db ") tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") @@ -323,7 +323,7 @@ class TDTestCase: tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, 0.000000000) tdSql.checkData(3, 0, -1.000000000) - + # bug for stable #partition by tbname @@ -332,21 +332,20 @@ class TDTestCase: # tdSql.query(" select unique(c1) from stb1 partition by tbname ") # tdSql.checkRows(21) - - # group by + + # group by tdSql.query("select statecount(c1,'GT',1) from ct1 group by c1") tdSql.error("select statecount(c1,'GT',1) from ct1 group by tbname") # super table - + def check_unit_time(self): tdSql.execute(" use db ") tdSql.error("select stateduration(c1,'GT',1,1b) from ct1") tdSql.error("select stateduration(c1,'GT',1,1u) from ct1") + tdSql.error("select stateduration(c1,'GT',1,1000s) from t1") tdSql.query("select stateduration(c1,'GT',1,1s) from t1") tdSql.checkData(10,0,63072035) - tdSql.query("select stateduration(c1,'GT',1,1000s) from t1") - tdSql.checkData(10,0,int(63072035/1000)) tdSql.query("select stateduration(c1,'GT',1,1m) from t1") tdSql.checkData(10,0,int(63072035/60)) tdSql.query("select stateduration(c1,'GT',1,1h) from t1") @@ -355,8 +354,8 @@ class TDTestCase: tdSql.checkData(10,0,int(63072035/60/24/60)) tdSql.query("select stateduration(c1,'GT',1,1w) from t1") tdSql.checkData(10,0,int(63072035/60/7/24/60)) - - + + def check_boundary_values(self): tdSql.execute("drop database if exists bound_test") @@ 
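
For context on the `check_unit_time()` assertions above: they all express the same elapsed span (63072035 seconds in this data set) in the requested unit, which is why the expected values are chained integer divisions. A minimal sketch of that scaling, assuming a flat seconds-per-unit table rather than the chained divisions used in the test:

```python
# Illustrative only -- mirrors the expected values asserted by check_unit_time().
SECONDS_PER_UNIT = {'1s': 1, '1m': 60, '1h': 3600, '1d': 86400, '1w': 604800}

def expected_stateduration(span_seconds: int, unit: str) -> int:
    """Duration reported by stateduration(..., unit) for a span measured in seconds."""
    return int(span_seconds / SECONDS_PER_UNIT[unit])

assert expected_stateduration(63072035, '1m') == int(63072035 / 60)
assert expected_stateduration(63072035, '1w') == int(63072035 / 60 / 7 / 24 / 60)
```
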
-384,11 +383,11 @@ class TDTestCase: tdSql.execute( f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.error( f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.query("select statecount(c1,'GT',1) from sub1_bound") tdSql.checkRows(5) @@ -396,29 +395,29 @@ class TDTestCase: tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4:support opers ============") + tdLog.printNoPrefix("==========step4:support opers ============") self.support_opers() - tdLog.printNoPrefix("==========step5: statecount basic query ============") + tdLog.printNoPrefix("==========step5: statecount basic query ============") self.basic_statecount_function() - tdLog.printNoPrefix("==========step6: statecount boundary query ============") + tdLog.printNoPrefix("==========step6: statecount boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step6: statecount unit time test ============") + tdLog.printNoPrefix("==========step6: statecount unit time test ============") self.check_unit_time() diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py index a48851b251..7fcdee3d60 100644 --- a/tests/system-test/2-query/timetruncate.py +++ b/tests/system-test/2-query/timetruncate.py @@ -3,7 +3,8 @@ from util.log import * from util.cases import * from util.sql import * import numpy as np - +import time +from datetime import datetime class TDTestCase: def init(self, conn, logSql): @@ -12,209 +13,213 @@ class TDTestCase: self.rowNum = 10 self.ts = 1537146000000 # 2018-9-17 09:00:00.000 - + + self.ts_str = [ + '2020-1-1', + '2020-2-1 00:00:01', + '2020-3-1 00:00:00.001', + '2020-4-1 00:00:00.001002', + '2020-5-1 00:00:00.001002001' + + ] + self.db_param_precision = ['ms','us','ns'] + self.time_unit = ['1w','1d','1h','1m','1s','1a','1u'] + self.error_unit = ['1b','2w','2d','2h','2m','2s','2a','2u','1c','#1'] + self.ntbname = 'ntb' + self.stbname = 'stb' + self.ctbname = 'ctb' + def get_ms_timestamp(self,ts_str): + _ts_str = ts_str + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15 : + _ts_str = ts_str[:-3] + if ':' in _ts_str and '.' in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S.%f") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + elif ':' in _ts_str and '.' 
not in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + else: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + return date_time + def get_us_timestamp(self,ts_str): + _ts = self.get_ms_timestamp(ts_str) * 1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 12: + us_ts = p[12:15] + _ts += int(us_ts) + return _ts + def get_ns_timestamp(self,ts_str): + _ts = self.get_us_timestamp(ts_str) *1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15: + us_ts = p[15:] + _ts += int(us_ts) + return _ts + def time_transform(self,ts_str,precision): + date_time = [] + if precision == 'ms': + for i in ts_str: + date_time.append(self.get_ms_timestamp(i)) + elif precision == 'us': + for i in ts_str: + date_time.append(self.get_us_timestamp(i)) + elif precision == 'ns': + for i in ts_str: + date_time.append(self.get_us_timestamp(i)) + return date_time + def check_ms_timestamp(self,unit,date_time): + if unit.lower() == '1a': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i])) + elif unit.lower() == '1s': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000)*1000) + elif unit.lower() == '1m': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60)*60*1000) + elif unit.lower() == '1h': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60)*60*60*1000 ) + elif unit.lower() == '1d': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000) + elif unit.lower() == '1w': + for i in range(len(self.ts_str)): + ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000) + def check_us_timestamp(self,unit,date_time): + if unit.lower() == '1u': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i])) + elif unit.lower() == '1a': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000)*1000) + elif unit.lower() == '1s': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000)*1000*1000) + elif unit.lower() == '1m': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60)*60*1000*1000) + elif unit.lower() == '1h': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60)*60*60*1000*1000 ) + elif unit.lower() == '1d': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + 
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 ) + elif unit.lower() == '1w': + for i in range(len(self.ts_str)): + ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0])) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000) + def check_ns_timestamp(self,unit,date_time): + if unit.lower() == '1u': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000)*1000) + elif unit.lower() == '1a': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000)*1000*1000) + elif unit.lower() == '1s': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000)*1000*1000*1000) + elif unit.lower() == '1m': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60)*60*1000*1000*1000) + elif unit.lower() == '1h': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 ) + elif unit.lower() == '1d': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) + elif unit.lower() == '1w': + for i in range(len(self.ts_str)): + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000) + def data_check(self,date_time,precision,tb_type): + for unit in self.time_unit: + if (unit.lower() == '1u' and precision.lower() == 'ms') or () : + if tb_type.lower() == 'ntb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}') + elif precision.lower() == 'ms': + if tb_type.lower() == 'ntb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') + tdSql.checkRows(len(self.ts_str)) + self.check_ms_timestamp(unit,date_time) + elif precision.lower() == 'us': + if tb_type.lower() == 'ntb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') + tdSql.checkRows(len(self.ts_str)) + self.check_us_timestamp(unit,date_time) + elif precision.lower() == 'ns': + if tb_type.lower() == 'ntb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') + tdSql.checkRows(len(self.ts_str)) + self.check_ns_timestamp(unit,date_time) + for unit in self.error_unit: + if tb_type.lower() == 'ntb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}') + elif tb_type.lower() == 'ctb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') + elif tb_type.lower() == 'stb': + tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}') + 
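
The `check_ms_timestamp` / `check_us_timestamp` / `check_ns_timestamp` helpers above all encode the same rule: TIMETRUNCATE floors the raw timestamp to a multiple of the unit width, expressed in the database's precision. A hedged sketch of that arithmetic for a millisecond-precision database; the unit table and example value are illustrative (the example derives from the 2022-01-01 start timestamp used elsewhere in these tests):

```python
# Sketch only -- reproduces the expected values the check_*_timestamp helpers compute.
MS_PER_UNIT = {'1a': 1, '1s': 1_000, '1m': 60_000, '1h': 3_600_000,
               '1d': 86_400_000, '1w': 604_800_000}

def expected_truncate_ms(ts_ms: int, unit: str) -> int:
    """Expected TIMETRUNCATE(ts, unit) result for a 'ms' precision database."""
    width = MS_PER_UNIT[unit.lower()]
    return (ts_ms // width) * width

# 2022-01-01 00:01:01.234 truncated to one minute drops the trailing 1.234 seconds.
assert expected_truncate_ms(1_640_966_461_234, '1m') == 1_640_966_460_000
```
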
def function_check_ntb(self): + for precision in self.db_param_precision: + tdSql.execute('drop database if exists db') + tdSql.execute(f'create database db precision "{precision}"') + tdSql.execute('use db') + tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)') + for ts in self.ts_str: + tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)') + date_time = self.time_transform(self.ts_str,precision) + self.data_check(date_time,precision,'ntb') + + def function_check_stb(self): + for precision in self.db_param_precision: + tdSql.execute('drop database if exists db') + tdSql.execute(f'create database db precision "{precision}"') + tdSql.execute('use db') + tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)') + tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)') + for ts in self.ts_str: + tdSql.execute(f'insert into {self.ctbname} values("{ts}",1)') + date_time = self.time_transform(self.ts_str,precision) + self.data_check(date_time,precision,'ctb') + self.data_check(date_time,precision,'stb') def run(self): - tdSql.prepare() - - intData = [] - floatData = [] - - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') - for i in range(self.rowNum): - tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - intData.append(i + 1) - floatData.append(i + 0.1) - for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - intData.append(i + 1) - floatData.append(i + 0.1) - - tdSql.query("select timetruncate(1,1d) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1u) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1a) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1m) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1h) from ntb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(ts,1d) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 08:00:00.000") - tdSql.query("select timetruncate(ts,1h) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1m) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1s) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1a) from ntb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.checkData(1,0,"2018-09-17 09:00:00.001") - tdSql.checkData(2,0,"2018-09-17 09:00:00.002") - tdSql.checkData(3,0,"2018-09-17 09:00:00.003") - 
tdSql.checkData(4,0,"2018-09-17 09:00:00.004") - tdSql.checkData(5,0,"2018-09-17 09:00:00.005") - tdSql.checkData(6,0,"2018-09-17 09:00:00.006") - tdSql.checkData(7,0,"2018-09-17 09:00:00.007") - tdSql.checkData(8,0,"2018-09-17 09:00:00.008") - tdSql.checkData(9,0,"2018-09-17 09:00:00.009") - # tdSql.query("select timetruncate(ts,1u) from ntb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000") - # tdSql.query("select timetruncate(ts,1b) from ntb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000") - - - tdSql.query("select timetruncate(1,1d) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1u) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1a) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1m) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1h) from stb") - tdSql.checkRows(10) - tdSql.query("select timetruncate(ts,1d) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 08:00:00.000") - tdSql.query("select timetruncate(ts,1h) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1m) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1s) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1a) from stb") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.checkData(1,0,"2018-09-17 09:00:00.001") - tdSql.checkData(2,0,"2018-09-17 09:00:00.002") - tdSql.checkData(3,0,"2018-09-17 09:00:00.003") - tdSql.checkData(4,0,"2018-09-17 09:00:00.004") - tdSql.checkData(5,0,"2018-09-17 09:00:00.005") - tdSql.checkData(6,0,"2018-09-17 09:00:00.006") - tdSql.checkData(7,0,"2018-09-17 09:00:00.007") - tdSql.checkData(8,0,"2018-09-17 09:00:00.008") - tdSql.checkData(9,0,"2018-09-17 09:00:00.009") - # tdSql.query("select timetruncate(ts,1u) from stb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000") - # tdSql.checkData(8,0,"2018-09-17 
09:00:00.008000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000") - # tdSql.query("select timetruncate(ts,1b) from stb") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000") - - tdSql.query("select timetruncate(1,1d) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1u) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1a) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1m) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(1,1h) from stb_1") - tdSql.checkRows(10) - tdSql.query("select timetruncate(ts,1d) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 08:00:00.000") - tdSql.query("select timetruncate(ts,1h) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1m) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1s) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.query("select timetruncate(ts,1a) from stb_1") - tdSql.checkRows(10) - tdSql.checkData(0,0,"2018-09-17 09:00:00.000") - tdSql.checkData(1,0,"2018-09-17 09:00:00.001") - tdSql.checkData(2,0,"2018-09-17 09:00:00.002") - tdSql.checkData(3,0,"2018-09-17 09:00:00.003") - tdSql.checkData(4,0,"2018-09-17 09:00:00.004") - tdSql.checkData(5,0,"2018-09-17 09:00:00.005") - tdSql.checkData(6,0,"2018-09-17 09:00:00.006") - tdSql.checkData(7,0,"2018-09-17 09:00:00.007") - tdSql.checkData(8,0,"2018-09-17 09:00:00.008") - tdSql.checkData(9,0,"2018-09-17 09:00:00.009") - # tdSql.query("select timetruncate(ts,1u) from stb_1") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000") - # tdSql.query("select timetruncate(ts,1b) from stb_1") - # tdSql.checkRows(10) - # tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000") - # tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000") - # tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000") - # tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000") - # tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000") - # tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000") - # tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000") - # tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000") - # tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000") - # tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000") + self.function_check_ntb() + self.function_check_stb() def stop(self): tdSql.close() tdLog.success("%s 
successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index b400e503d9..9f0e189a5f 100644 --- a/tests/system-test/2-query/twa.py +++ b/tests/system-test/2-query/twa.py @@ -34,7 +34,7 @@ class TDTestCase: ) for i in range(self.tb_nums): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') ts = self.ts for j in range(self.row_nums): ts+=j*self.time_step diff --git a/tests/system-test/7-tmq/stbFilter.py b/tests/system-test/7-tmq/stbFilter.py index 542894574b..7ad3cc99e7 100644 --- a/tests/system-test/7-tmq/stbFilter.py +++ b/tests/system-test/7-tmq/stbFilter.py @@ -20,18 +20,54 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def tmqCase1(self): - tdLog.printNoPrefix("======== test case 1: ") - paraDict = {'dbName': 'db1', + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', 'vgroups': 4, + 'replica': 1, 'stbName': 'stb', 'colPrefix': 'c', 'tagPrefix': 't', - 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}], 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 100, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + + tmqCom.initConsumerTable() + tmqCom.create_database(tsql=tdSql, dbName=paraDict["dbName"],dropFlag=paraDict["dropFlag"], vgroups=paraDict['vgroups'],replica=paraDict['replica']) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], ctbNum=paraDict['ctbNum']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'replica': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, 'ctbNum': 10, 'rowsPerTbl': 10000, 'batchNum': 100, @@ -43,13 +79,6 @@ class TDTestCase: topicNameList = ['topic1', 'topic2', 'topic3'] expectRowsList = [] tmqCom.initConsumerTable() - tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) - tdLog.info("create stb") - tdCom.create_stable(tdSql, 
dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) - tdLog.info("create ctb") - tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) - tdLog.info("insert data") - tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) tdLog.info("create topics from stb with filter") queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 4 == 0" %(paraDict['dbName'], paraDict['stbName']) @@ -122,9 +151,9 @@ class TDTestCase: tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - # if expectRowsList[2] != resultList[0]: - # tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) - # tdLog.exit("2 tmq consume rows error!") + if expectRowsList[2] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) + tdLog.exit("2 tmq consume rows error!") time.sleep(10) for i in range(len(topicNameList)): @@ -134,16 +163,18 @@ class TDTestCase: def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: ") - paraDict = {'dbName': 'db2', + paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', 'vgroups': 4, + 'replica': 1, 'stbName': 'stb', 'colPrefix': 'c', 'tagPrefix': 't', 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}], 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, 'ctbNum': 10, 'rowsPerTbl': 10000, 'batchNum': 100, @@ -155,13 +186,6 @@ class TDTestCase: topicNameList = ['topic1', 'topic2', 'topic3'] expectRowsList = [] tmqCom.initConsumerTable() - tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) - tdLog.info("create stb") - tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) - tdLog.info("create ctb") - tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], ctbNum=paraDict['ctbNum']) - tdLog.info("insert data") - tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) tdLog.info("create topics from stb with filter") # sqlString = "create topic %s as select ts, sin(c1), pow(c2,3) from %s.%s where c2 >= 0" %(topicNameList[0], paraDict['dbName'], paraDict['stbName']) @@ -247,6 +271,7 @@ class TDTestCase: def run(self): tdSql.prepare() + self.prepareTestEnv() self.tmqCase1() self.tmqCase2() diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py index f8488aee49..7f9d36bd26 100644 --- a/tests/system-test/7-tmq/tmqCommon.py +++ b/tests/system-test/7-tmq/tmqCommon.py @@ -170,33 +170,42 @@ class TMQCom: tdLog.debug("complete to create database %s"%(dbName)) return + # self.create_stable() and self.create_ctable() and self.insert_data_interlaceByMultiTbl() : The three functions are matched + # schema: (ts timestamp, c1 int, c2 bigint, c3 double, c4 binary(32), c5 nchar(32), c6 timestamp) tags (t1 int, t2 bigint, t3 double, t4 binary(32), t5 nchar(32)) def create_stable(self,tsql, dbName,stbName): - 
tsql.execute("create table if not exists %s.%s (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbName, stbName)) + schemaString = "(ts timestamp, c1 int, c2 bigint, c3 double, c4 binary(32), c5 nchar(32), c6 timestamp) tags (t1 int, t2 bigint, t3 double, t4 binary(32), t5 nchar(32))" + tsql.execute("create table if not exists %s.%s %s"%(dbName, stbName, schemaString)) tdLog.debug("complete to create %s.%s" %(dbName, stbName)) return def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1,ctbStartIdx=0): - tsql.execute("use %s" %dbName) + # tsql.execute("use %s" %dbName) pre_create = "create table" sql = pre_create #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + batchNum = 10 + tblBatched = 0 for i in range(ctbNum): - tagValue = 'beijing' + tagBinaryValue = 'beijing' if (i % 2 == 0): - tagValue = 'shanghai' + tagBinaryValue = 'shanghai' elif (i % 3 == 0): - tagValue = 'changsha' + tagBinaryValue = 'changsha' - sql += " %s%d using %s tags(%d, '%s')"%(ctbPrefix,i+ctbStartIdx,stbName,i+ctbStartIdx+1, tagValue) - if (i > 0) and (i%100 == 0): + sql += " %s.%s%d using %s.%s tags(%d, %d, %d, '%s', '%s')"%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,tagBinaryValue,tagBinaryValue) + tblBatched += 1 + if (i == ctbNum-1 ) or (tblBatched == batchNum): tsql.execute(sql) + tblBatched = 0 sql = pre_create + if sql != pre_create: tsql.execute(sql) - tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) - return + tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName)) + return + # schema: (ts timestamp, c1 int, c2 binary(16)) def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=None): tdLog.debug("start to insert data ............") tsql.execute("use %s" %dbName) @@ -208,11 +217,14 @@ class TMQCom: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) for i in range(ctbNum): + rowsBatched = 0 sql += " %s%d values "%(stbName,i) for j in range(rowsPerTbl): sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) - if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + rowsBatched += 1 + if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)): tsql.execute(sql) + rowsBatched = 0 if j < rowsPerTbl - 1: sql = "insert into %s%d values " %(stbName,i) else: @@ -224,6 +236,7 @@ class TMQCom: tdLog.debug("insert data ............ [OK]") return + # schema: (ts timestamp, c1 int, c2 int, c3 binary(16)) def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs): tdLog.debug("start to insert data ............") tsql.execute("use %s" %dbName) @@ -234,14 +247,17 @@ class TMQCom: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) for i in range(ctbNum): + rowsBatched = 0 sql += " %s%d values "%(ctbPrefix,i) for j in range(rowsPerTbl): if (j % 2 == 0): sql += "(%d, %d, %d, 'tmqrow_%d') "%(startTs + j, j, j, j) else: sql += "(%d, %d, %d, 'tmqrow_%d') "%(startTs + j, j, -j, j) - if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + rowsBatched += 1 + if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)): tsql.execute(sql) + rowsBatched = 0 if j < rowsPerTbl - 1: sql = "insert into %s%d values " %(ctbPrefix,i) else: @@ -253,6 +269,7 @@ class TMQCom: tdLog.debug("insert data ............ 
[OK]") return + # schema: (ts timestamp, c1 int, c2 int, c3 binary(16), c4 timestamp) def insert_data_2(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs,ctbStartIdx=0): tdLog.debug("start to insert data ............") tsql.execute("use %s" %dbName) @@ -263,14 +280,17 @@ class TMQCom: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) for i in range(ctbNum): + rowsBatched = 0 sql += " %s%d values "%(ctbPrefix,i+ctbStartIdx) for j in range(rowsPerTbl): if (j % 2 == 0): sql += "(%d, %d, %d, 'tmqrow_%d', now) "%(startTs + j, j, j, j) else: sql += "(%d, %d, %d, 'tmqrow_%d', now) "%(startTs + j, j, -j, j) - if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + rowsBatched += 1 + if (rowsBatched == batchNum) or (j == rowsPerTbl - 1): tsql.execute(sql) + rowsBatched = 0 if j < rowsPerTbl - 1: sql = "insert into %s%d values " %(ctbPrefix,i+ctbStartIdx) else: @@ -282,7 +302,8 @@ class TMQCom: tdLog.debug("insert data ............ [OK]") return - def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + # schema: (ts timestamp, c1 int, c2 bigint, c3 double, c4 binary(32), c5 nchar(32), c6 timestamp) tags (t1 int, t2 bigint, t3 double, t4 binary(32), t5 nchar(32)) + def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0,ctbStartIdx=0): tdLog.debug("start to insert data ............") tsql.execute("use %s" %dbName) pre_insert = "insert into " @@ -297,15 +318,22 @@ class TMQCom: ctbDict[i] = 0 #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfCtb = 0 + rowsOfCtb = 0 while rowsOfCtb < rowsPerTbl: for i in range(ctbNum): - sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) + sql += " %s.%s%d values "%(dbName,ctbPrefix,i+ctbStartIdx) + rowsBatched = 0 for k in range(batchNum): - sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i]) + if (k % 2 == 0): + sql += "(%d, %d, %d, %d, 'binary_%d', 'nchar_%d', now) "%(startTs+ctbDict[i], ctbDict[i],ctbDict[i], ctbDict[i],i+ctbStartIdx,k) + else: + sql += "(%d, %d, %d, %d, 'binary_%d', 'nchar_%d', now) "%(startTs+ctbDict[i],-ctbDict[i],ctbDict[i],-ctbDict[i],i+ctbStartIdx,k) + + rowsBatched += 1 ctbDict[i] += 1 - if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl): + if (rowsBatched == batchNum) or (ctbDict[i] == rowsPerTbl): tsql.execute(sql) + rowsBatched = 0 sql = "insert into " break rowsOfCtb = ctbDict[0] @@ -313,7 +341,18 @@ class TMQCom: tdLog.debug("insert data ............ 
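
The rewritten insert helpers above (and `create_ctable`) share one batching pattern: count rows as they are appended to the pending INSERT statement, flush once the counter reaches `batchNum` or the last row has been appended, then reset the counter and the statement. A standalone sketch of that control flow, with hypothetical names (`insert_in_batches`, `_EchoSql`) that are not part of the patch:

```python
class _EchoSql:
    """Stand-in for tdSql/tsql in this sketch; it only records executed statements."""
    def __init__(self): self.executed = []
    def execute(self, sql): self.executed.append(sql)

def insert_in_batches(tsql, ctb_name, rows, batch_num):
    sql = f"insert into {ctb_name} values "
    rows_batched = 0
    for j, (ts, val) in enumerate(rows):
        sql += f"({ts}, {val}) "
        rows_batched += 1
        # flush once the batch is full or the last row has been appended
        if rows_batched == batch_num or j == len(rows) - 1:
            tsql.execute(sql)
            rows_batched = 0
            sql = f"insert into {ctb_name} values "

demo = _EchoSql()
insert_in_batches(demo, "ctb0", [(1640966400000 + i, i) for i in range(25)], batch_num=10)
assert len(demo.executed) == 3   # batches of 10 + 10 + 5 rows
```
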
[OK]") return - def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + def threadFunctionForInsertByInterlace(self, **paraDict): + # create new connector for new tdSql instance in my thread + newTdSql = tdCom.newTdSql() + self.insert_data_interlaceByMultiTbl(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"],paraDict["ctbStartIdx"]) + return + + def asyncInsertDataByInterlace(self, paraDict): + pThread = threading.Thread(target=self.threadFunctionForInsertByInterlace, kwargs=paraDict) + pThread.start() + return pThread + + def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0,ctbStartIdx=0): tdLog.debug("start to insert data wiht auto create child table ............") tsql.execute("use %s" %dbName) pre_insert = "insert into " @@ -324,17 +363,17 @@ class TMQCom: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsBatched = 0 for i in range(ctbNum): - sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) + sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i) for j in range(rowsPerTbl): sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) - rowsOfSql += 1 - if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + rowsBatched += 1 + if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)): tsql.execute(sql) - rowsOfSql = 0 + rowsBatched = 0 if j < rowsPerTbl - 1: - sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i) + sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i) else: sql = "insert into " #end sql diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py new file mode 100644 index 0000000000..bd8531d7c8 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py @@ -0,0 +1,264 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 100000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 
'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], 
resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + firstConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(totalRowsInserted/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], 
showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + secondConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 3 + expectrowcnt = math.ceil(totalRowsInserted/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + thirdConsumeRows = resultList[0] + + if not (totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # total consume + actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... 
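
In `tmqCase2` above, each of the three consumers is asked for roughly a third of the inserted rows; the case only requires each actual count to fall between that request and the total (the last consumer merely must not exceed the total), and the three counts to sum to exactly the total. A small sketch of those checks using a hypothetical helper that is not part of the patch:

```python
import math

def check_three_way_consume(total_inserted, consumed_counts):
    """Bounds and conservation checks mirroring tmqCase2's assertions."""
    expect_each = math.ceil(total_inserted / 3)
    for n in consumed_counts[:2]:                  # first two consumers
        assert expect_each <= n <= total_inserted
    assert consumed_counts[2] <= total_inserted    # third consumer drains the rest
    assert sum(consumed_counts) == total_inserted  # nothing lost or re-delivered

check_three_way_consume(10000, [3334, 3334, 3332])
```
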
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py new file mode 100644 index 0000000000..1d2aaccda5 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py @@ -0,0 +1,263 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 100000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 
'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + firstConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + secondConsumeRows = resultList[0] + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows 
error!"%consumerId) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + thirdConsumeRows = resultList[0] + + if not (totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # total consume + actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py new file mode 100644 index 0000000000..2720b7cdce --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py @@ -0,0 +1,245 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 3000 + self.rowsPerTbl = 150 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 200, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + 
tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + # tdDnodes.start(1) + tdDnodes.starttaosd(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], 
resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 20, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + tdLog.info("select result rows: %d"%totalRowsInserted) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + firstConsumeRows = resultList[0] + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(totalRowsInserted*2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + 
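# Illustrative sketch, not part of the patch: this case runs two consumers of
# the same group (cgrp1) one after the other. The first is expected to fetch at
# least ceil(totalRowsInserted/3) rows before it stops; the second, resuming
# from the offsets committed by the first, should drain the remainder, so the
# two counts must add up to totalRowsInserted. A standalone model of that
# bookkeeping, using made-up row counts:
import math

def check_two_stage_consume(total_rows: int, first_rows: int, second_rows: int) -> bool:
    first_floor = math.ceil(total_rows / 3)        # minimum for the first consumer
    second_cap = math.ceil(total_rows * 2 / 3)     # expectrowcnt of the second consumer
    return (first_floor <= first_rows <= total_rows
            and second_rows <= second_cap
            and first_rows + second_rows == total_rows)

# e.g. 90 matching rows, 40 consumed in stage one and 50 in stage two:
assert check_two_stage_consume(90, 40, 50)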
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = firstConsumeRows + resultList[0] + + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py new file mode 100644 index 0000000000..0b2dddd24a --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py @@ -0,0 +1,242 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 3000 + self.rowsPerTbl = 70 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 100, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + 
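# Illustrative sketch, not part of the patch: insert_data_interlaceByMultiTbl
# (called below), judging by its name, writes rows batch by batch while
# rotating over the child tables, so rows from different tables end up
# interleaved in insert order rather than one table being filled completely
# before the next. A hypothetical pure-Python model of that ordering, assuming
# a 1 ms step between consecutive rows of a table:
def interlaced_batches(ctb_num: int, rows_per_tbl: int, batch_num: int, start_ts: int):
    """Yield (child_table_index, [timestamps]) in interleaved insert order."""
    written = [0] * ctb_num                      # rows emitted so far per child table
    while min(written) < rows_per_tbl:
        for tb in range(ctb_num):                # rotate across all child tables
            if written[tb] >= rows_per_tbl:
                continue
            n = min(batch_num, rows_per_tbl - written[tb])
            ts0 = start_ts + written[tb]         # next timestamp for this table
            yield tb, [ts0 + i for i in range(n)]
            written[tb] += n

# Example: 2 child tables, 5 rows each, batches of 2 -> the tables alternate.
order = [tb for tb, _ in interlaced_batches(2, 5, 2, 1640966400000)]
assert order == [0, 1, 0, 1, 0, 1]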
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + # tdDnodes.start(1) + tdDnodes.starttaosd(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + firstConsumeRows = resultList[0] + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = firstConsumeRows + resultList[0] + + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("and sum of two consume rows: %d 
should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py new file mode 100644 index 0000000000..a4d6648276 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py @@ -0,0 +1,242 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 10 + self.rowsPerTbl = 10000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + # tdDnodes.start(1) + tdDnodes.starttaosd(1) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 
'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + # tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + firstConsumeRows = resultList[0] + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = firstConsumeRows + resultList[0] + + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("and sum of two consume rows: %d 
should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb.py b/tests/system-test/7-tmq/tmqConsFromTsdb.py index ae0e658344..c18474dcc3 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb.py @@ -16,7 +16,7 @@ sys.path.append("./7-tmq") from tmqCommon import * class TDTestCase: - def __int__(self): + def __init__(self): self.vgroups = 1 self.ctbNum = 10 self.rowsPerTbl = 10000 @@ -25,6 +25,49 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'dbt', @@ -34,10 +77,11 @@ class TDTestCase: 'stbName': 'stb', 'colPrefix': 'c', 'tagPrefix': 't', - 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], - 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 
'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], 'ctbPrefix': 'ctb', - 'ctbNum': 1, + 'ctbStartIdx': 0, + 'ctbNum': 10, 'rowsPerTbl': 10000, 'batchNum': 10, 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 @@ -53,16 +97,6 @@ class TDTestCase: topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) - tdLog.info("create stb") - tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) - tdLog.info("create ctb") - tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) - tdLog.info("insert data") - tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - - tdDnodes.stop(1) - tdDnodes.start(1) tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -111,10 +145,11 @@ class TDTestCase: 'stbName': 'stb', 'colPrefix': 'c', 'tagPrefix': 't', - 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], - 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], 'ctbPrefix': 'ctb', - 'ctbNum': 1, + 'ctbStartIdx': 0, + 'ctbNum': 10, 'rowsPerTbl': 10000, 'batchNum': 10, 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 @@ -130,16 +165,6 @@ class TDTestCase: topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) - # tdLog.info("create stb") - # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) - # tdLog.info("create ctb") - # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) - # tdLog.info("insert data") - # tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - - # tdDnodes.stop(1) - # tdDnodes.start(1) tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -200,89 +225,9 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self): - tdLog.printNoPrefix("======== test case 3: ") - paraDict = {'dbName': 'dbt', - 'dropFlag': 1, - 'event': '', - 'vgroups': 1, - 'stbName': 'stb', - 'colPrefix': 'c', - 'tagPrefix': 't', - 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], - 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], - 'ctbPrefix': 'ctb', - 'ctbNum': 1, - 'rowsPerTbl': 10000, - 'batchNum': 10, - 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 - 'pollDelay': -1, - 'showMsg': 1, - 'showRow': 1, - 'snapshot': 1} - - paraDict['vgroups'] = self.vgroups - paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - - topicNameList = ['topic1'] - expectRowsList = [] - tmqCom.initConsumerTable() - # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) - # tdLog.info("create stb") - # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) - # tdLog.info("create ctb") - # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) - # tdLog.info("insert data") - # tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - - # tdDnodes.stop(1) - # tdDnodes.start(1) - - tdLog.info("create topics from stb with filter") - queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) - # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) - sqlString = "create topic %s as %s" %(topicNameList[0], queryString) - tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - tdSql.query(queryString) - expectRowsList.append(tdSql.getRows()) - totalRowsInserted = expectRowsList[0] - - # init consume info, and start tmq_sim, then check consume result - tdLog.info("insert consume info to consume processor") - consumerId = 3 - expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) - topicList = topicNameList[0] - ifcheckdata = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' - tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 4 - expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) - tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor 0") - tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - - expectRows = 2 - resultList = tmqCom.selectConsumeResult(expectRows) - actConsumeTotalRows = resultList[0] + resultList[1] - - if not (totalRowsInserted == actConsumeTotalRows): - tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) - tdLog.exit("%d tmq consume rows error!"%consumerId) - - time.sleep(10) - for i in range(len(topicNameList)): - tdSql.query("drop topic %s"%topicNameList[i]) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - def run(self): tdSql.prepare() + self.prepareTestEnv() self.tmqCase1() self.tmqCase2() diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py new file mode 100644 index 0000000000..540c9dbbe1 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py @@ -0,0 +1,243 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 1000000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 1000, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 
'pollDelay': 15, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(totalRowsInserted * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 25, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(totalRowsInserted) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:500, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic 
%s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py new file mode 100644 index 0000000000..5cb373092b --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py @@ -0,0 +1,241 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 
'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 15, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py new file mode 100644 index 0000000000..fc2552d6f2 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py @@ -0,0 +1,247 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 4000 + self.rowsPerTbl = 150 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 200, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 
'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + tdLog.info("select result rows: %d"%totalRowsInserted) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(totalRowsInserted/3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(totalRowsInserted*2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) + # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + tdLog.info("select result rows: %d"%totalRowsInserted) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(totalRowsInserted) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:300, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + # tdLog.info("wait start consume notify") + # tmqCom.getStartConsumeNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and 
%d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py new file mode 100644 index 0000000000..c1b327e5f1 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py @@ -0,0 +1,241 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 3000 + self.rowsPerTbl = 70 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 100, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 
'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py new file mode 100644 index 0000000000..dd8a0ad33a --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py @@ -0,0 +1,241 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 10 + self.rowsPerTbl = 10000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 
'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1.py b/tests/system-test/7-tmq/tmqConsFromTsdb1.py new file mode 100644 index 0000000000..a183eda1c7 --- /dev/null +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1.py @@ -0,0 +1,241 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 10 + self.rowsPerTbl = 10000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdDnodes.stop(1) + tdDnodes.start(1) + return + + def tmqCase3(self): + tdLog.printNoPrefix("======== test case 3: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 
'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 15, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 3 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 4 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + if not (totalRowsInserted == actConsumeTotalRows): + tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self): + tdLog.printNoPrefix("======== test case 4: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 5 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("wait commit notify") + tmqCom.getStartCommitNotifyFromTmqsim() + + tdLog.info("pkill consume processor") + tdCom.killProcessor("tmq_sim") + + # time.sleep(10) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 6 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = resultList[0] + + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase3() + self.tmqCase4() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 957ee17e0b..467fd67f55 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -18,11 +18,16 @@ python3 ./test.py -f 0-others/fsync.py python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py -# BUG python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py +python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py +python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py python3 ./test.py -f 1-insert/alter_stable.py python3 ./test.py -f 1-insert/alter_table.py python3 ./test.py -f 1-insert/insertWithMoreVgroup.py python3 ./test.py -f 1-insert/table_comment.py +python3 ./test.py -f 1-insert/time_range_wise.py +python3 ./test.py -f 1-insert/block_wise.py +python3 ./test.py -f 1-insert/create_retentions.py + #python3 ./test.py -f 1-insert/table_param_ttl.py python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py @@ -89,7 +94,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py # python3 ./test.py -f 2-query/nestedQuery_str.py python3 ./test.py -f 2-query/avg.py -python3 ./test.py -f 2-query/elapsed.py +#python3 ./test.py -f 2-query/elapsed.py python3 ./test.py -f 2-query/csum.py python3 ./test.py -f 2-query/mavg.py python3 ./test.py -f 2-query/diff.py @@ -113,19 +118,19 @@ python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 2-query/irate.py python3 ./test.py -f 2-query/function_null.py -python3 ./test.py -f 2-query/queryQnode.py +python3 ./test.py -f 2-query/queryQnode.py -python3 ./test.py -f 6-cluster/5dnode1mnode.py -python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3 +#python3 ./test.py -f 6-cluster/5dnode1mnode.py +#python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 +#python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 +#python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3 -# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 +# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3 -# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3 -# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3 +# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3 +# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3 # python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3 -# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py +# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py # python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 # python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3 @@ -154,3 +159,16 @@ python3 ./test.py -f 
7-tmq/tmqUdf.py python3 ./test.py -f 7-tmq/tmqConsumerGroup.py python3 ./test.py -f 7-tmq/tmqShow.py python3 ./test.py -f 7-tmq/tmqAlterSchema.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py + diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 2d7f78661a..9596efdfc8 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -63,8 +63,9 @@ if __name__ == "__main__": mnodeNums = 0 updateCfgDict = {} execCmd = "" - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums']) + queryPolicy = 1 + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -82,6 +83,7 @@ if __name__ == "__main__": tdLog.printNoPrefix('-e eval str to run') tdLog.printNoPrefix('-N create dnodes numbers in clusters') tdLog.printNoPrefix('-M create mnode numbers in clusters') + tdLog.printNoPrefix('-Q set queryPolicy in one dnode') sys.exit(0) @@ -138,6 +140,9 @@ if __name__ == "__main__": if key in ['-M', '--mnodeNums']: mnodeNums = value + if key in ['-Q', '--queryPolicy']: + queryPolicy = value + if not execCmd == "": tdDnodes.init(deployPath) print(execCmd) @@ -276,6 +281,22 @@ if __name__ == "__main__": tdDnodes.deploy(1,updateCfgDict) tdDnodes.start(1) tdCases.logSql(logSql) + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + conn = taos.connect( + host, + config=tdDnodes.getSimCfgPath()) + tdSql.init(conn.cursor()) + tdSql.execute("create qnode on dnode 1") + tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy) + tdSql.query("show local variables;") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "queryPolicy" : + if int(tdSql.queryResult[i][1]) == int(queryPolicy): + tdLog.success('alter queryPolicy to %d successfully'%queryPolicy) + else : + tdLog.debug(tdSql.queryResult) + tdLog.exit("alter queryPolicy to %d failed"%queryPolicy) else : tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums)) dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums) diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index 3f2a81197b..85fe456670 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -90,6 +90,7 @@ typedef struct { int32_t consumeDelay; // unit s int32_t numOfThread; int32_t useSnapshot; + int64_t nowTime; SThreadInfo stThreads[MAX_CONSUMER_THREAD_CNT]; } SConfInfo; @@ -198,6 +199,8 @@ void parseArgument(int32_t argc, char* argv[]) { g_stConfInfo.saveRowFlag = 0; 
g_stConfInfo.consumeDelay = 5; + g_stConfInfo.nowTime = taosGetTimestampMs(); + for (int32_t i = 1; i < argc; i++) { if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) { printHelp(); @@ -510,10 +513,8 @@ static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) {} int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { char sqlStr[1024] = {0}; - int64_t now = taosGetTimestampMs(); - // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int - sprintf(sqlStr, "insert into %s.notifyinfo values (%" PRId64 ", %d, %d)", g_stConfInfo.cdbName, now, cmdId, + sprintf(sqlStr, "insert into %s.notifyinfo values (%" PRId64 ", %d, %d)", g_stConfInfo.cdbName, atomic_fetch_add_64(&g_stConfInfo.nowTime, 1), cmdId, pInfo->consumerId); taos_query_a(pInfo->taos, sqlStr, appNothing, NULL); @@ -525,15 +526,15 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { static int32_t g_once_commit_flag = 0; static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - pError("tmq_commit_cb_print() commit %d\n", code); + pError("tmq_commit_cb_print() commit %d\n", code); - if (0 == g_once_commit_flag) { - g_once_commit_flag = 1; - notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); + if (0 == g_once_commit_flag) { + g_once_commit_flag = 1; + notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); } - char tmpString[128]; - taosFprintfFile(g_fp, "%s tmq_commit_cb_print() be called\n", getCurrentTimeString(tmpString)); + char tmpString[128]; + taosFprintfFile(g_fp, "%s tmq_commit_cb_print() be called\n", getCurrentTimeString(tmpString)); } void build_consumer(SThreadInfo* pInfo) { @@ -588,12 +589,10 @@ void build_topic_list(SThreadInfo* pInfo) { int32_t saveConsumeResult(SThreadInfo* pInfo) { char sqlStr[1024] = {0}; - - int64_t now = taosGetTimestampMs(); - // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int sprintf(sqlStr, "insert into %s.consumeresult values (%" PRId64 ", %d, %" PRId64 ", %" PRId64 ", %d)", - g_stConfInfo.cdbName, now, pInfo->consumerId, pInfo->consumeMsgCnt, pInfo->consumeRowCnt, pInfo->checkresult); + g_stConfInfo.cdbName, atomic_fetch_add_64(&g_stConfInfo.nowTime, 1), pInfo->consumerId, pInfo->consumeMsgCnt, + pInfo->consumeRowCnt, pInfo->checkresult); char tmpString[128]; taosFprintfFile(g_fp, "%s, consume id %d result: %s\n", getCurrentTimeString(tmpString), pInfo->consumerId, sqlStr); @@ -637,9 +636,9 @@ void loop_consume(SThreadInfo* pInfo) { } } - int64_t lastTotalMsgs = 0; - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); + int64_t lastTotalMsgs = 0; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); int32_t consumeDelay = g_stConfInfo.consumeDelay == -1 ? 
-1 : (g_stConfInfo.consumeDelay * 1000); while (running) { @@ -652,16 +651,16 @@ void loop_consume(SThreadInfo* pInfo) { taos_free_result(tmqMsg); totalMsgs++; - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 10 * 1000) { - taosFprintfFile(g_fp, - "consumer id %d has currently poll total msgs: %" PRId64 ", period rate: %.3f msgs/second\n", - pInfo->consumerId, totalMsgs, (totalMsgs - lastTotalMsgs) * 1000.0/(currentPrintTime - lastPrintTime)); - lastPrintTime = currentPrintTime; - lastTotalMsgs = totalMsgs; - } - + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 10 * 1000) { + taosFprintfFile( + g_fp, "consumer id %d has currently poll total msgs: %" PRId64 ", period rate: %.3f msgs/second\n", + pInfo->consumerId, totalMsgs, (totalMsgs - lastTotalMsgs) * 1000.0 / (currentPrintTime - lastPrintTime)); + lastPrintTime = currentPrintTime; + lastTotalMsgs = totalMsgs; + } + if (0 == once_flag) { once_flag = 1; notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM); @@ -678,7 +677,7 @@ void loop_consume(SThreadInfo* pInfo) { break; } } - + if (0 == running) { taosFprintfFile(g_fp, "receive stop signal and not continue consume\n"); } @@ -696,6 +695,7 @@ void* consumeThreadFunc(void* param) { pInfo->taos = taos_connect(NULL, "root", "taosdata", NULL, 0); if (pInfo->taos == NULL) { taosFprintfFile(g_fp, "taos_connect() fail, can not notify and save consume result to main scripte\n"); + ASSERT(0); return NULL; } @@ -888,11 +888,11 @@ int main(int32_t argc, char* argv[]) { int64_t t = end - start; if (0 == t) t = 1; - + double tInMs = (double)t / 1000000.0; taosFprintfFile(g_fp, - "Spent %.3f seconds to poll msgs: %" PRIu64 " with %d thread(s), throughput: %.3f msgs/second\n\n", - tInMs, totalMsgs, g_stConfInfo.numOfThread, (double)(totalMsgs / tInMs)); + "Spent %.3f seconds to poll msgs: %" PRIu64 " with %d thread(s), throughput: %.3f msgs/second\n\n", + tInMs, totalMsgs, g_stConfInfo.numOfThread, (double)(totalMsgs / tInMs)); taosFprintfFile(g_fp, "==== close tmqlog ====\n"); taosCloseFile(&g_fp); diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 2037f67089..41ce1cc280 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -13,6 +13,10 @@ * along with this program. If not, see . */ +#ifdef _TD_DARWIN_64 +#include +#endif + #include "shellInt.h" #define SHELL_HOST "The auth string to use when connecting to the server." diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 96d017ec7a..4500188b95 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#define ALLOW_FORBID_FUNC #define _BSD_SOURCE #define _GNU_SOURCE #define _XOPEN_SOURCE diff --git a/tools/taos-tools b/tools/taos-tools index 5fdd694621..1163c0f60a 160000 --- a/tools/taos-tools +++ b/tools/taos-tools @@ -1 +1 @@ -Subproject commit 5fdd694621fbb7bd2d6102ff4feaec92a7001038 +Subproject commit 1163c0f60aa65d6cc58283247c8bf8c56ba43b92 diff --git a/tools/taosadapter b/tools/taosadapter index c885e967e4..c3815951fc 160000 --- a/tools/taosadapter +++ b/tools/taosadapter @@ -1 +1 @@ -Subproject commit c885e967e490105999b84d009a15168728dfafaf +Subproject commit c3815951fc80617ecd171f3743b8b4a4d0bc712e
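
Note on running the new cases: tests/system-test/test.py above gains a -Q flag which, for any value other than 1, creates a qnode on dnode 1 and alters the local queryPolicy before the selected case runs. A minimal invocation sketch follows; the chosen test file comes from this patch, but the policy value 2 is an illustrative assumption rather than something the patch mandates:

    # assumed example: run one of the new tmq cases with queryPolicy switched to 2
    # (per the test.py change, this first executes "create qnode on dnode 1" and
    #  alter local "queryPolicy" "2", then verifies the value via "show local variables")
    python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py -Q 2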