diff --git a/include/common/tcommon.h b/include/common/tcommon.h index c36dc51220..383429bb5e 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -52,6 +52,8 @@ typedef enum EStreamType { typedef struct { SArray* pTableList; SHashObj* map; // speedup acquire the tableQueryInfo by table uid + void* pTagCond; + void* pTagIndexCond; } STableListInfo; typedef struct SColumnDataAgg { diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 709462a744..92af7208a2 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -71,7 +71,8 @@ SEpSet getEpSet_s(SCorEpSet* pEpSet); #define colDataGetData(p1_, r_) \ ((IS_VAR_DATA_TYPE((p1_)->info.type)) ? colDataGetVarData(p1_, r_) : colDataGetNumData(p1_, r_)) -#define IS_JSON_NULL(type, data) ((type) == TSDB_DATA_TYPE_JSON && *(data) == TSDB_DATA_TYPE_NULL) +#define IS_JSON_NULL(type, data) ((type) == TSDB_DATA_TYPE_JSON && \ + (*(data) == TSDB_DATA_TYPE_NULL || tTagIsJsonNull(data))) static FORCE_INLINE bool colDataIsNull_s(const SColumnInfoData* pColumnInfoData, uint32_t row) { if (!pColumnInfoData->hasNull) { diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 2e69640a06..f9ede63f7f 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -70,6 +70,8 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow); // STag int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag); void tTagFree(STag *pTag); +bool tTagIsJson(const void *pTag); +bool tTagIsJsonNull(void *tagVal); bool tTagGet(const STag *pTag, STagVal *pTagVal); char *tTagValToData(const STagVal *pTagVal, bool isJson); int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag); diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 8c03d3ff42..e083ebcf78 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -137,6 +137,8 @@ extern bool tsSmlDataFormat; // internal extern int32_t tsTransPullupInterval; extern int32_t tsMqRebalanceInterval; +extern int32_t tsTtlUnit; +extern int32_t tsTtlPushInterval; #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index c9307ab912..dd0a81af03 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -834,6 +834,14 @@ typedef struct { int32_t tSerializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq); int32_t tDeserializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq); +typedef struct { + int32_t rowNum; +} SDnodeListReq; + +int32_t tSerializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq); +int32_t tDeserializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq); + + typedef struct SQueryNodeAddr { int32_t nodeId; // vgId or qnodeId SEpSet epSet; @@ -852,6 +860,15 @@ int32_t tSerializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp); int32_t tDeserializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp); void tFreeSQnodeListRsp(SQnodeListRsp* pRsp); +typedef struct { + SArray* dnodeList; // SArray +} SDnodeListRsp; + +int32_t tSerializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp); +int32_t tDeserializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp); +void tFreeSDnodeListRsp(SDnodeListRsp* pRsp); + + typedef struct { SArray* pArray; // Array of SUseDbRsp } SUseDbBatchRsp; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 4105774739..1babb45003 100644 --- a/include/common/tmsgdef.h 
+++ b/include/common/tmsgdef.h @@ -81,6 +81,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_DND_SERVER_STATUS, "server-status", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL) TD_NEW_MSG_SEG(TDMT_MND_MSG) TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL) @@ -101,6 +102,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_ALTER_QNODE, "alter-qnode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_DROP_QNODE, "drop-qnode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_QNODE_LIST, "qnode-list", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DNODE_LIST, "dnode-list", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SNODE, "create-snode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_ALTER_SNODE, "alter-snode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_DROP_SNODE, "drop-snode", NULL, NULL) @@ -145,13 +147,14 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_MQ_TIMER, "mq-tmr", SMTimerReq, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq) TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_TTL_TIMER, "ttl-tmr", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "kill-trans", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "kill-query", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "kill-conn", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_HEARTBEAT, "heartbeat", SClientHbBatchReq, SClientHbBatchRsp) TD_DEF_MSG_TYPE(TDMT_MND_STATUS, "status", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SHOW, "show", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "retrieve", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "mnd-retrieve", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_GRANT, "grant", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_AUTH, "auth", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "mnode-apply", NULL, NULL) diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 18ddcbcd94..4f18cb19c2 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -36,6 +36,10 @@ extern "C" { #define SHOW_CREATE_TB_RESULT_FIELD1_LEN (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE) #define SHOW_CREATE_TB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN + VARSTR_HEADER_SIZE) +#define SHOW_LOCAL_VARIABLES_RESULT_COLS 2 +#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE) +#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE) + #define PRIVILEGE_TYPE_MASK(n) (1 << n) diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 26a100bb1b..5f89d1f665 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -226,6 +226,7 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_EXCHANGE, QUERY_NODE_PHYSICAL_PLAN_MERGE, QUERY_NODE_PHYSICAL_PLAN_SORT, + QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT, QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL, diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index cb09bf6a5f..5559cb8060 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -66,6 +66,7 @@ typedef struct SScanLogicNode { int8_t intervalUnit; int8_t slidingUnit; SNode* pTagCond; + SNode* pTagIndexCond; int8_t triggerType; int64_t watermark; int16_t tsColId; @@ -420,6 +421,8 @@ typedef struct SSortPhysiNode { SNodeList* pTargets; } SSortPhysiNode; +typedef SSortPhysiNode SGroupSortPhysiNode; + typedef struct 
SPartitionPhysiNode { SPhysiNode node; SNodeList* pExprs; // these are expression list of partition_by_clause @@ -466,6 +469,7 @@ typedef struct SSubplan { SPhysiNode* pNode; // physical plan of current subplan SDataSinkNode* pDataSink; // data of the subplan flow into the datasink SNode* pTagCond; + SNode* pTagIndexCond; } SSubplan; typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 67e01e66f7..a6e466e73e 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -288,11 +288,11 @@ typedef enum ESqlClause { } ESqlClause; typedef struct SDeleteStmt { - ENodeType type; // QUERY_NODE_DELETE_STMT - SNode* pFromTable; // FROM clause - SNode* pWhere; // WHERE clause - SNode* pCountFunc; // count the number of rows affected - SNode* pTagIndexCond; // pWhere divided into pTagIndexCond and timeRange + ENodeType type; // QUERY_NODE_DELETE_STMT + SNode* pFromTable; // FROM clause + SNode* pWhere; // WHERE clause + SNode* pCountFunc; // count the number of rows affected + SNode* pTagCond; // pWhere divided into pTagCond and timeRange STimeWindow timeRange; uint8_t precision; bool deleteZeroRows; @@ -397,7 +397,8 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal); char* nodesGetFillModeString(EFillMode mode); int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc); -int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagCond, SNode** pOtherCond); +int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, + SNode** pOtherCond); #ifdef __cplusplus } diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 2cb0c65d35..a0c07f0c09 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -26,8 +26,9 @@ extern "C" { extern bool gRaftDetailLog; -#define SYNC_INDEX_BEGIN 0 +#define SYNC_INDEX_BEGIN 0 #define SYNC_INDEX_INVALID -1 +#define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF typedef uint64_t SyncNodeId; typedef int32_t SyncGroupId; @@ -199,7 +200,7 @@ const char* syncGetMyRoleStr(int64_t rid); SyncTerm syncGetMyTerm(int64_t rid); SyncGroupId syncGetVgId(int64_t rid); void syncGetEpSet(int64_t rid, SEpSet* pEpSet); -int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak); +int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak); bool syncEnvIsStart(); const char* syncStr(ESyncState state); bool syncIsRestoreFinish(int64_t rid); diff --git a/include/libs/sync/syncTools.h b/include/libs/sync/syncTools.h index 37b465e56e..e51b20a404 100644 --- a/include/libs/sync/syncTools.h +++ b/include/libs/sync/syncTools.h @@ -43,7 +43,7 @@ void setElectTimerMS(int64_t rid, int32_t electTimerMS); void setHeartbeatTimerMS(int64_t rid, int32_t hbTimerMS); // for compatibility, the same as syncPropose -int32_t syncForwardToPeer(int64_t rid, const SRpcMsg* pMsg, bool isWeak); +int32_t syncForwardToPeer(int64_t rid, SRpcMsg* pMsg, bool isWeak); // utils const char* syncUtilState2String(ESyncState state); @@ -468,7 +468,7 @@ typedef struct SyncLeaderTransfer { SRaftId destId; */ SNodeInfo newNodeInfo; - SRaftId newLeaderId; + SRaftId newLeaderId; } SyncLeaderTransfer; SyncLeaderTransfer* syncLeaderTransferBuild(int32_t vgId); @@ -489,17 +489,16 @@ void syncLeaderTransferPrint2(char* s, const SyncLeaderTransfer* pMsg); void syncLeaderTransferLog(const SyncLeaderTransfer* pMsg); void syncLeaderTransferLog2(char* s, 
const SyncLeaderTransfer* pMsg); - // --------------------------------------------- typedef struct SyncReconfigFinish { - uint32_t bytes; - int32_t vgId; - uint32_t msgType; - SSyncCfg oldCfg; - SSyncCfg newCfg; + uint32_t bytes; + int32_t vgId; + uint32_t msgType; + SSyncCfg oldCfg; + SSyncCfg newCfg; SyncIndex newCfgIndex; - SyncTerm newCfgTerm; - uint64_t newCfgSeqNum; + SyncTerm newCfgTerm; + uint64_t newCfgSeqNum; } SyncReconfigFinish; @@ -521,8 +520,6 @@ void syncReconfigFinishPrint2(char* s, const SyncReconfigFinish* pMsg); void syncReconfigFinishLog(const SyncReconfigFinish* pMsg); void syncReconfigFinishLog2(char* s, const SyncReconfigFinish* pMsg); - - // on message ---------------------- int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg); int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg); diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index c7d1ccd3de..92701db2ad 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -195,7 +195,6 @@ void walCloseReadHandle(SWalReadHandle *); int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver); // only for tq usage -// int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalReadHead **ppHead); void walSetReaderCapacity(SWalReadHandle *pRead, int32_t capacity); int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead); int32_t walFetchBody(SWalReadHandle *pRead, SWalHead **ppHead); @@ -211,13 +210,8 @@ void walCloseRef(SWalRef *); int32_t walRefVer(SWalRef *, int64_t ver); int32_t walUnrefVer(SWal *); -// deprecated -#if 0 -int32_t walRead(SWal *, SWalHead **, int64_t ver); -int32_t walReadWithFp(SWal *, FWalWrite writeFp, int64_t verStart, int32_t readNum); -#endif - // lifecycle check +bool walIsEmpty(SWal *); int64_t walGetFirstVer(SWal *); int64_t walGetSnapshotVer(SWal *); int64_t walGetLastVer(SWal *); diff --git a/include/os/os.h b/include/os/os.h index 41180ba49e..254c16efbe 100644 --- a/include/os/os.h +++ b/include/os/os.h @@ -104,8 +104,6 @@ extern "C" { #include "osTimezone.h" #include "osEnv.h" -void osDefaultInit(); - #ifdef __cplusplus } #endif diff --git a/include/util/tconfig.h b/include/util/tconfig.h index 06fa9fd9aa..fd6360aaf6 100644 --- a/include/util/tconfig.h +++ b/include/util/tconfig.h @@ -104,6 +104,8 @@ int32_t cfgAddTimezone(SConfig *pCfg, const char *name, const char *defaultVal); const char *cfgStypeStr(ECfgSrcType type); const char *cfgDtypeStr(ECfgDataType type); +void cfgDumpItemValue(SConfigItem *pItem, char* buf, int32_t bufSize, int32_t* pLen); + void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump); int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl); diff --git a/include/util/tdef.h b/include/util/tdef.h index f10bc3c7d2..5befa6a67f 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -443,8 +443,8 @@ enum { #define VNODE_HANDLE -3 #define BNODE_HANDLE -4 -#define TSDB_CONFIG_OPTION_LEN 16 -#define TSDB_CONIIG_VALUE_LEN 48 +#define TSDB_CONFIG_OPTION_LEN 32 +#define TSDB_CONFIG_VALUE_LEN 64 #define TSDB_CONFIG_NUMBER 8 #define QUERY_ID_SIZE 20 diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 37b8866e25..191bfe088a 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1507,7 +1507,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i char* jsonInnerData = data + CHAR_BYTES; if (jsonInnerType == TSDB_DATA_TYPE_NULL) { len += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L)); - } else if 
(jsonInnerType & TD_TAG_JSON) { + } else if (tTagIsJson(data)) { len += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len); } else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value" len += varDataTLen(jsonInnerData) + CHAR_BYTES * 2; @@ -1592,7 +1592,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int if (jsonInnerType == TSDB_DATA_TYPE_NULL) { sprintf(varDataVal(dst), "%s", TSDB_DATA_NULL_STR_L); varDataSetLen(dst, strlen(varDataVal(dst))); - } else if (jsonInnerType & TD_TAG_JSON) { + } else if (tTagIsJson(data)) { char* jsonString = parseTagDatatoJson(data); STR_TO_VARSTR(dst, jsonString); taosMemoryFree(jsonString); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index e7b6342150..455f204542 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -231,7 +231,13 @@ static const SSysDbTableSchema transSchema[] = { static const SSysDbTableSchema configSchema[] = { {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "value", .bytes = TSDB_CONIIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, +}; + +static const SSysDbTableSchema variablesSchema[] = { + {.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; static const SSysTableMeta infosMeta[] = { @@ -253,6 +259,7 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)}, {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)}, {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)}, + {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema)}, }; static const SSysDbTableSchema connectionsSchema[] = { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5594ee9bcf..d5843f699f 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -110,7 +110,7 @@ int32_t getJsonValueLen(const char* data) { dataLen = DOUBLE_BYTES + CHAR_BYTES; } else if (*data == TSDB_DATA_TYPE_BOOL) { dataLen = CHAR_BYTES + CHAR_BYTES; - } else if (*data & TD_TAG_JSON) { // json string + } else if (tTagIsJson(data)) { // json string dataLen = ((STag*)(data))->len; } else { ASSERT(0); diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index 1ddb606ccf..8460a27a0e 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -924,6 +924,18 @@ static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) { return n; } + +bool tTagIsJson(const void *pTag){ + return (((const STag *)pTag)->flags & TD_TAG_JSON); +} + +bool tTagIsJsonNull(void *data){ + STag *pTag = (STag*)data; + int8_t isJson = tTagIsJson(pTag); + if(!isJson) return false; + return ((STag*)data)->nTag == 0; +} + int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag) { int32_t code = 0; uint8_t *p = NULL; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 269c92a670..6cae3a13e6 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -187,6 +187,9 @@ bool tsStartUdfd = true; // internal int32_t tsTransPullupInterval = 2; int32_t 
tsMqRebalanceInterval = 2; +int32_t tsTtlUnit = 86400; +int32_t tsTtlPushInterval = 60; + void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) { tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN); @@ -467,6 +470,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400*365, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 10000, 1) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1; return 0; @@ -619,6 +624,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; + tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32; + tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32; tsStartUdfd = cfgGetItem(pCfg, "udf")->bval; @@ -631,7 +638,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc) { - osDefaultInit(); + if (tsCfg == NULL) osDefaultInit(); SConfig *pCfg = cfgInit(); if (pCfg == NULL) return -1; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 11fe39903b..6bd0dc02e1 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -2194,6 +2194,32 @@ int32_t tDeserializeSQnodeListReq(void *buf, int32_t bufLen, SQnodeListReq *pReq return 0; } +int32_t tSerializeSDnodeListReq(void *buf, int32_t bufLen, SDnodeListReq *pReq) { + SEncoder encoder = {0}; + tEncoderInit(&encoder, buf, bufLen); + + if (tStartEncode(&encoder) < 0) return -1; + if (tEncodeI32(&encoder, pReq->rowNum) < 0) return -1; + tEndEncode(&encoder); + + int32_t tlen = encoder.pos; + tEncoderClear(&encoder); + return tlen; +} + +int32_t tDeserializeSDnodeListReq(void *buf, int32_t bufLen, SDnodeListReq *pReq) { + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + + if (tStartDecode(&decoder) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->rowNum) < 0) return -1; + tEndDecode(&decoder); + + tDecoderClear(&decoder); + return 0; +} + + int32_t tSerializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -2237,6 +2263,50 @@ int32_t tDeserializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp void tFreeSQnodeListRsp(SQnodeListRsp *pRsp) { taosArrayDestroy(pRsp->qnodeList); } +int32_t tSerializeSDnodeListRsp(void *buf, int32_t bufLen, SDnodeListRsp *pRsp) { + SEncoder encoder = {0}; + tEncoderInit(&encoder, buf, bufLen); + + if (tStartEncode(&encoder) < 0) return -1; + int32_t num = taosArrayGetSize(pRsp->dnodeList); + if (tEncodeI32(&encoder, num) < 0) return -1; + for (int32_t i = 0; i < num; ++i) { + SEpSet *pEpSet = taosArrayGet(pRsp->dnodeList, i); + if (tEncodeSEpSet(&encoder, pEpSet) < 0) return -1; + } + tEndEncode(&encoder); + + int32_t tlen = encoder.pos; + tEncoderClear(&encoder); + return tlen; +} + +int32_t tDeserializeSDnodeListRsp(void *buf, int32_t bufLen, SDnodeListRsp *pRsp) { + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + + if (tStartDecode(&decoder) < 0) return -1; + int32_t num = 0; + if (tDecodeI32(&decoder, &num) < 0) return -1; + 
if (NULL == pRsp->dnodeList) { + pRsp->dnodeList = taosArrayInit(num, sizeof(SEpSet)); + if (NULL == pRsp->dnodeList) return -1; + } + + for (int32_t i = 0; i < num; ++i) { + SEpSet epSet = {0}; + if (tDecodeSEpSet(&decoder, &epSet) < 0) return -1; + taosArrayPush(pRsp->dnodeList, &epSet); + } + tEndDecode(&decoder); + + tDecoderClear(&decoder); + return 0; +} + +void tFreeSDnodeListRsp(SDnodeListRsp *pRsp) { taosArrayDestroy(pRsp->dnodeList); } + + int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); diff --git a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h index ee811c0071..4479c06bea 100644 --- a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h +++ b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h @@ -45,6 +45,7 @@ int32_t dmProcessConfigReq(SDnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t dmProcessAuthRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t dmProcessGrantRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t dmProcessServerRunStatus(SDnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg); // dmWorker.c int32_t dmPutNodeMsgToMgmtQueue(SDnodeMgmt *pMgmt, SRpcMsg *pMsg); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 3e55469a4a..9c8918a445 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -15,6 +15,10 @@ #define _DEFAULT_SOURCE #include "dmInt.h" +#include "systable.h" + + +extern SConfig *tsCfg; static void dmUpdateDnodeCfg(SDnodeMgmt *pMgmt, SDnodeCfg *pCfg) { if (pMgmt->pData->dnodeId == 0 || pMgmt->pData->clusterId == 0) { @@ -175,6 +179,130 @@ int32_t dmProcessServerRunStatus(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { return 0; } +SSDataBlock* dmBuildVariablesBlock(void) { + SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + size_t size = 0; + const SSysTableMeta* pMeta = NULL; + getInfosDbMeta(&pMeta, &size); + + int32_t index = 0; + for (int32_t i = 0; i < size; ++i) { + if (strcmp(pMeta[i].name, TSDB_INS_TABLE_DNODE_VARIABLES) == 0) { + index = i; + break; + } + } + + pBlock->pDataBlock = taosArrayInit(pMeta[index].colNum, sizeof(SColumnInfoData)); + + for (int32_t i = 0; i < pMeta[index].colNum; ++i) { + SColumnInfoData colInfoData = {0}; + colInfoData.info.colId = i + 1; + colInfoData.info.type = pMeta[index].schema[i].type; + colInfoData.info.bytes = pMeta[index].schema[i].bytes; + taosArrayPush(pBlock->pDataBlock, &colInfoData); + } + + pBlock->info.numOfCols = pMeta[index].colNum; + pBlock->info.hasVarCol = true; + + return pBlock; +} + +int32_t dmAppendVariablesToBlock(SSDataBlock* pBlock, int32_t dnodeId) { + int32_t numOfCfg = taosArrayGetSize(tsCfg->array); + int32_t numOfRows = 0; + blockDataEnsureCapacity(pBlock, numOfCfg); + + for (int32_t i = 0, c = 0; i < numOfCfg; ++i, c = 0) { + SConfigItem *pItem = taosArrayGet(tsCfg->array, i); + + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, c++); + colDataAppend(pColInfo, i, (const char *)&dnodeId, false); + + char name[TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(name, pItem->name, TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE); + pColInfo = taosArrayGet(pBlock->pDataBlock, c++); + colDataAppend(pColInfo, i, name, false); + + char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0}; + int32_t valueLen = 0; + cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_VALUE_LEN, &valueLen); + 
varDataSetLen(value, valueLen); + pColInfo = taosArrayGet(pBlock->pDataBlock, c++); + colDataAppend(pColInfo, i, value, false); + + numOfRows++; + } + + + pBlock->info.rows = numOfRows; + + return TSDB_CODE_SUCCESS; +} + + + +int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { + int32_t size = 0; + int32_t rowsRead = 0; + + SRetrieveTableReq retrieveReq = {0}; + if (tDeserializeSRetrieveTableReq(pMsg->pCont, pMsg->contLen, &retrieveReq) != 0) { + terrno = TSDB_CODE_INVALID_MSG; + return -1; + } + + if (strcasecmp(retrieveReq.tb, TSDB_INS_TABLE_DNODE_VARIABLES)) { + terrno = TSDB_CODE_INVALID_MSG; + return -1; + } + + SSDataBlock* pBlock = dmBuildVariablesBlock(); + + dmAppendVariablesToBlock(pBlock, pMgmt->pData->dnodeId); + + size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pBlock->info.numOfCols + + blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(pBlock->info.numOfCols); + + SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); + if (pRsp == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + dError("failed to retrieve data since %s", terrstr()); + blockDataDestroy(pBlock); + return -1; + } + + char *pStart = pRsp->data; + *(int32_t *)pStart = htonl(pBlock->info.numOfCols); + pStart += sizeof(int32_t); // number of columns + + for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + SSysTableSchema *pSchema = (SSysTableSchema *)pStart; + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, i); + + pSchema->bytes = htonl(pColInfo->info.bytes); + pSchema->colId = htons(pColInfo->info.colId); + pSchema->type = pColInfo->info.type; + + pStart += sizeof(SSysTableSchema); + } + + int32_t len = 0; + blockCompressEncode(pBlock, pStart, &len, pBlock->info.numOfCols, false); + + pRsp->numOfRows = htonl(pBlock->info.rows); + pRsp->precision = TSDB_TIME_PRECISION_MILLI; // millisecond time precision + pRsp->completed = 1; + pMsg->info.rsp = pRsp; + pMsg->info.rspLen = size; + dDebug("dnode variables retrieve completed"); + + blockDataDestroy(pBlock); + return TSDB_CODE_SUCCESS; +} + + SArray *dmGetMsgHandles() { int32_t code = -1; SArray *pArray = taosArrayInit(16, sizeof(SMgmtHandle)); @@ -191,6 +319,7 @@ SArray *dmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_BNODE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_SERVER_STATUS, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_DND_SYSTABLE_RETRIEVE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER; // Requests handled by MNODE if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT_RSP, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c index 9ffa0e606a..89e8aa976e 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c @@ -141,6 +141,9 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { case TDMT_DND_SERVER_STATUS: code = dmProcessServerRunStatus(pMgmt, pMsg); break; + case TDMT_DND_SYSTABLE_RETRIEVE: + code = dmProcessRetrieve(pMgmt, pMsg); + break; default: terrno = TSDB_CODE_MSG_NOT_PROCESSED; break; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 474e6ab378..d44a7d79bf 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -161,6 +161,7 @@ 
SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_QNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_QNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_QNODE_LIST, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_DNODE_LIST, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_SNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_SNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_BNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 4761e3dc36..63d2a65df1 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -85,6 +85,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { dmProcessNetTestReq(pDnode, pRpc); return; case TDMT_MND_SYSTABLE_RETRIEVE_RSP: + case TDMT_DND_SYSTABLE_RETRIEVE_RSP: case TDMT_VND_FETCH_RSP: qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0); return; diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 8d06868955..78c4ea794f 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -47,6 +47,7 @@ static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw); static int32_t mndDnodeActionInsert(SSdb *pSdb, SDnodeObj *pDnode); static int32_t mndDnodeActionDelete(SSdb *pSdb, SDnodeObj *pDnode); static int32_t mndDnodeActionUpdate(SSdb *pSdb, SDnodeObj *pOld, SDnodeObj *pNew); +static int32_t mndProcessDnodeListReq(SRpcMsg *pReq); static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq); static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq); @@ -76,6 +77,7 @@ int32_t mndInitDnode(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_CONFIG_DNODE, mndProcessConfigDnodeReq); mndSetMsgHandle(pMnode, TDMT_DND_CONFIG_DNODE_RSP, mndProcessConfigDnodeRsp); mndSetMsgHandle(pMnode, TDMT_MND_STATUS, mndProcessStatusReq); + mndSetMsgHandle(pMnode, TDMT_MND_DNODE_LIST, mndProcessDnodeListReq); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndRetrieveConfigs); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndCancelGetNextConfig); @@ -499,6 +501,60 @@ _OVER: return code; } +static int32_t mndProcessDnodeListReq(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; + SDnodeObj *pObj = NULL; + void *pIter = NULL; + SDnodeListRsp rsp = {0}; + int32_t code = -1; + + rsp.dnodeList = taosArrayInit(5, sizeof(SEpSet)); + if (NULL == rsp.dnodeList) { + mError("failed to alloc epSet while process dnode list req"); + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + while (1) { + pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pObj); + if (pIter == NULL) break; + + SEpSet epSet = {0}; + epSet.numOfEps = 1; + tstrncpy(epSet.eps[0].fqdn, pObj->fqdn, TSDB_FQDN_LEN); + epSet.eps[0].port = pObj->port; + + (void)taosArrayPush(rsp.dnodeList, &epSet); + + sdbRelease(pSdb, pObj); + } + + int32_t rspLen = tSerializeSDnodeListRsp(NULL, 0, &rsp); + void *pRsp = rpcMallocCont(rspLen); + if (pRsp == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + tSerializeSDnodeListRsp(pRsp, rspLen, &rsp); + + pReq->info.rspLen = rspLen; + pReq->info.rsp = pRsp; + code = 0; + +_OVER: + + if (code != 0) { + mError("failed to get 
dnode list since %s", terrstr()); + } + + tFreeSDnodeListRsp(&rsp); + + return code; +} + + static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; @@ -580,6 +636,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM mInfo("trans:%d, %d vnodes on dnode:%d will be dropped", pTrans->id, numOfVnodes, pDnode->id); if (mndSetMoveVgroupsInfoToTrans(pMnode, pTrans, pDnode->id) != 0) goto _OVER; } + if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; code = 0; @@ -699,28 +756,28 @@ static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p int32_t totalRows = 0; int32_t numOfRows = 0; char *cfgOpts[TSDB_CONFIG_NUMBER] = {0}; - char cfgVals[TSDB_CONFIG_NUMBER][TSDB_CONIIG_VALUE_LEN + 1] = {0}; + char cfgVals[TSDB_CONFIG_NUMBER][TSDB_CONFIG_VALUE_LEN + 1] = {0}; char *pWrite = NULL; int32_t cols = 0; cfgOpts[totalRows] = "statusInterval"; - snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%d", tsStatusInterval); + snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%d", tsStatusInterval); totalRows++; cfgOpts[totalRows] = "timezone"; - snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%s", tsTimezoneStr); + snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%s", tsTimezoneStr); totalRows++; cfgOpts[totalRows] = "locale"; - snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%s", tsLocale); + snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%s", tsLocale); totalRows++; cfgOpts[totalRows] = "charset"; - snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%s", tsCharset); + snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%s", tsCharset); totalRows++; char buf[TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE] = {0}; - char bufVal[TSDB_CONIIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0}; + char bufVal[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0}; for (int32_t i = 0; i < totalRows; i++) { cols = 0; @@ -729,7 +786,7 @@ static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)buf, false); - STR_WITH_MAXSIZE_TO_VARSTR(bufVal, cfgVals[i], TSDB_CONIIG_VALUE_LEN); + STR_WITH_MAXSIZE_TO_VARSTR(bufVal, cfgVals[i], TSDB_CONFIG_VALUE_LEN); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)bufVal, false); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 675a3aa03f..dede1c45e6 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -65,6 +65,13 @@ static void mndPullupTrans(SMnode *pMnode) { } } +static void mndTtlTimer(SMnode *pMnode) { + int32_t contLen = 0; + void *pReq = mndBuildTimerMsg(&contLen); + SRpcMsg rpcMsg = {.msgType = TDMT_MND_TTL_TIMER, .pCont = pReq, .contLen = contLen}; + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); +} + static void mndCalMqRebalance(SMnode *pMnode) { int32_t contLen = 0; void *pReq = mndBuildTimerMsg(&contLen); @@ -83,41 +90,6 @@ static void mndPullupTelem(SMnode *pMnode) { } } -static void mndPushTtlTime(SMnode *pMnode) { - SSdb *pSdb = pMnode->pSdb; - SVgObj *pVgroup = NULL; - void *pIter = NULL; - - while (1) { - pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); - if (pIter == NULL) break; - - int32_t contLen = sizeof(SMsgHead) + sizeof(int32_t); - SMsgHead *pHead = rpcMallocCont(contLen); - if (pHead == NULL) { - sdbCancelFetch(pSdb, pIter); - 
sdbRelease(pSdb, pVgroup); - continue; - } - - pHead->contLen = htonl(contLen); - pHead->vgId = htonl(pVgroup->vgId); - - int32_t t = taosGetTimestampSec(); - *(int32_t *)(POINTER_SHIFT(pHead, sizeof(SMsgHead))) = htonl(t); - - SRpcMsg rpcMsg = {.msgType = TDMT_VND_DROP_TTL_TABLE, .pCont = pHead, .contLen = contLen}; - SEpSet epSet = mndGetVgroupEpset(pMnode, pVgroup); - int32_t code = tmsgSendReq(&epSet, &rpcMsg); - if (code != 0) { - mError("failed to send ttl time seed msg, code:0x%x", code); - } else { - mInfo("send ttl time seed msg, time:%d", t); - } - sdbRelease(pSdb, pVgroup); - } -} - static void *mndThreadFp(void *param) { SMnode *pMnode = param; int64_t lastTime = 0; @@ -125,14 +97,13 @@ while (1) { lastTime++; - - if (lastTime % (864000) == 0) { // sleep 1 day for ttl - mndPushTtlTime(pMnode); - } - taosMsleep(100); if (mndGetStop(pMnode)) break; + if (lastTime % (tsTransPullupInterval * 10) == 1) { + mndTtlTimer(pMnode); + } + if (lastTime % (tsTransPullupInterval * 10) == 0) { mndPullupTrans(pMnode); } @@ -558,12 +529,12 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { if (!IsReq(pMsg)) return 0; if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER || - pMsg->msgType == TDMT_MND_TRANS_TIMER) { + pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) { return -1; } const STraceId *trace = &pMsg->info.traceId; - mGError("msg:%p, failed to check mnode state since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pMsg->msgType)); + mError("msg:%p, failed to check mnode state since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pMsg->msgType)); SEpSet epSet = {0}; mndGetMnodeEpSet(pMsg->info.node, &epSet); @@ -584,7 +555,7 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { if (!IsReq(pMsg)) return 0; if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0; - + const STraceId *trace = &pMsg->info.traceId; mGError("msg:%p, failed to check msg, cont:%p contLen:%d, app:%p type:%s", pMsg, pMsg->pCont, pMsg->contLen, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index e04662d22b..f1bae14c07 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -37,6 +37,7 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw); static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb); static int32_t mndStbActionDelete(SSdb *pSdb, SStbObj *pStb); static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew); +static int32_t mndProcessTtlTimer(SRpcMsg *pReq); static int32_t mndProcessCreateStbReq(SRpcMsg *pReq); static int32_t mndProcessAlterStbReq(SRpcMsg *pReq); static int32_t mndProcessDropStbReq(SRpcMsg *pReq); @@ -63,6 +64,7 @@ int32_t mndInitStb(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_VND_ALTER_STB_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_VND_DROP_STB_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_MND_TABLE_META, mndProcessTableMetaReq); + mndSetMsgHandle(pMnode, TDMT_MND_TTL_TIMER, mndProcessTtlTimer); mndSetMsgHandle(pMnode, TDMT_MND_TABLE_CFG, mndProcessTableCfgReq); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_STB, mndRetrieveStb); @@ -799,6 +801,43 @@ int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p return 0; } +static int32_t mndProcessTtlTimer(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + SSdb *pSdb =
pMnode->pSdb; + SVgObj *pVgroup = NULL; + void *pIter = NULL; + + while (1) { + pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); + if (pIter == NULL) break; + + int32_t contLen = sizeof(SMsgHead) + sizeof(int32_t); + SMsgHead *pHead = rpcMallocCont(contLen); + if (pHead == NULL) { + sdbCancelFetch(pSdb, pIter); + sdbRelease(pSdb, pVgroup); + continue; + } + pHead->contLen = htonl(contLen); + pHead->vgId = htonl(pVgroup->vgId); + + int32_t t = taosGetTimestampSec(); + *(int32_t *)((char *)pHead + sizeof(SMsgHead)) = htonl(t); + + SRpcMsg rpcMsg = {.msgType = TDMT_VND_DROP_TTL_TABLE, .pCont = pHead, .contLen = contLen}; + SEpSet epSet = mndGetVgroupEpset(pMnode, pVgroup); + int32_t code = tmsgSendReq(&epSet, &rpcMsg); + if (code != 0) { + mError("failed to send ttl time seed, code:0x%x", code); + } else { + mDebug("send ttl time seed success, time:%d", t); + } + sdbRelease(pSdb, pVgroup); + } + + return 0; +} + static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index eec108414e..d1d88fdc90 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -56,6 +56,7 @@ static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->dep static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans); static int32_t mndProcessTransReq(SRpcMsg *pReq); +static int32_t mndProcessTtl(SRpcMsg *pReq); static int32_t mndProcessKillTransReq(SRpcMsg *pReq); static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 696e714a8c..ae13987d25 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -1553,10 +1553,11 @@ static int32_t mndSetBalanceVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SD static int32_t mndBalanceVgroupBetweenDnode(SMnode *pMnode, STrans *pTrans, SDnodeObj *pSrc, SDnodeObj *pDst) { void *pIter = NULL; int32_t code = -1; + SSdb *pSdb = pMnode->pSdb; while (1) { SVgObj *pVgroup = NULL; - pIter = sdbFetch(pMnode->pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); + pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); if (pIter == NULL) break; bool existInSrc = false; @@ -1568,13 +1569,15 @@ } if (!existInSrc || existInDst) { - sdbRelease(pMnode->pSdb, pVgroup); + sdbRelease(pSdb, pVgroup); + continue; } SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName); code = mndSetBalanceVgroupInfoToTrans(pMnode, pTrans, pDb, pVgroup, pSrc, pDst); mndReleaseDb(pMnode, pDb); - sdbRelease(pMnode->pSdb, pVgroup); + sdbRelease(pSdb, pVgroup); + sdbCancelFetch(pSdb, pIter); break; } @@ -1593,15 +1596,25 @@ static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) { while (1) { taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes); - SDnodeObj *pSrc = taosArrayGet(pArray, 0); - SDnodeObj *pDst = taosArrayGet(pArray, taosArrayGetSize(pArray) - 1); + for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { + SDnodeObj *pDnode = taosArrayGet(pArray, i); + mDebug("dnode:%d, equivalent vnodes:%d support:%d, score:%f", pDnode->id, pDnode->numOfVnodes, + pDnode->numOfSupportVnodes, (float)pDnode->numOfVnodes / pDnode->numOfSupportVnodes); + } + + SDnodeObj *pSrc = taosArrayGet(pArray, taosArrayGetSize(pArray) -
1); + SDnodeObj *pDst = taosArrayGet(pArray, 0); float srcScore = (float)(pSrc->numOfVnodes - 1) / pSrc->numOfSupportVnodes; float dstScore = (float)(pDst->numOfVnodes + 1) / pDst->numOfSupportVnodes; - if (srcScore + 0.0001 < dstScore) { - mDebug("trans:%d, balance vgroup from dnode:%d to dnode:%d", pTrans->id, pSrc->id, pDst->id); + mDebug("trans:%d, after balance, src dnode:%d score:%f, dst dnode:%d score:%f", pTrans->id, pSrc->id, srcScore, + pDst->id, dstScore); + + if (srcScore > dstScore - 0.000001) { code = mndBalanceVgroupBetweenDnode(pMnode, pTrans, pSrc, pDst); if (code == 0) { + pSrc->numOfVnodes--; + pDst->numOfVnodes++; numOfVgroups++; continue; } else { @@ -1635,7 +1648,13 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) { void *pIter = NULL; int64_t curMs = taosGetTimestampMs(); - mDebug("start to balance vgroup"); + SBalanceVgroupReq req = {0}; + if (tDeserializeSBalanceVgroupReq(pReq->pCont, pReq->contLen, &req) != 0) { + terrno = TSDB_CODE_INVALID_MSG; + goto _OVER; + } + + mInfo("start to balance vgroup"); if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_BALANCE_VGROUP) != 0) goto _OVER; diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index a5ca90e55f..d655883a76 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -613,9 +613,6 @@ const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) { ASSERT(pEntry->type == TSDB_CHILD_TABLE); STag *tag = (STag *)pEntry->ctbEntry.pTags; if (type == TSDB_DATA_TYPE_JSON) { - if (tag->nTag == 0) { - return NULL; - } return tag; } bool find = tTagGet(tag, val); diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index bf5d5912f9..2bbea593fa 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -400,8 +400,7 @@ static void metaBuildTtlIdxKey(STtlIdxKey *ttlKey, const SMetaEntry *pME){ if (ttlDays <= 0) return; - ttlKey->dtime = ctime / 1000 + ttlDays * 24 * 60 * 60; -// ttlKey->dtime = ctime / 1000 + ttlDays; + ttlKey->dtime = ctime / 1000 + ttlDays * tsTtlUnit; ttlKey->uid = pME->uid; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index c531e9ee91..b23ec13105 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -252,6 +252,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { #if 1 if (pReq->useSnapshot) { + tqInfo("retrieve using snapshot"); int64_t lastVer = walGetCommittedVer(pTq->pWal); if (rsp.reqOffset < lastVer) { tqScanSnapshot(pTq, &pHandle->execHandle, &rsp, workerId); @@ -259,6 +260,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { if (rsp.blockNum != 0) { rsp.withTbName = false; rsp.rspOffset = lastVer; + tqInfo("direct send by snapshot rsp offset %ld", lastVer); goto SEND_RSP; } } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 3dd636e5a8..88bdea3ae7 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -315,7 +315,7 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *p if (tbUids == NULL) return TSDB_CODE_OUT_OF_MEMORY; int32_t t = ntohl(*(int32_t *)pReq); - vError("rec ttl time:%d", t); + vDebug("vgId:%d, recv ttl msg, time:%d", pVnode->config.vgId, t); int32_t ret = metaTtlDropTable(pVnode->pMeta, t, tbUids); if (ret != 0) { goto end; diff --git 
a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 98a03aa39b..384c3f19e5 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -65,6 +65,7 @@ enum { typedef enum { CTG_TASK_GET_QNODE = 0, + CTG_TASK_GET_DNODE, CTG_TASK_GET_DB_VGROUP, CTG_TASK_GET_DB_CFG, CTG_TASK_GET_DB_INFO, @@ -216,6 +217,7 @@ typedef struct SCtgJob { int32_t dbVgNum; int32_t udfNum; int32_t qnodeNum; + int32_t dnodeNum; int32_t dbCfgNum; int32_t indexNum; int32_t userNum; @@ -565,6 +567,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target); int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTask* pTask); int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask); +int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray **out, SCtgTask* pTask); int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *dbFName, SDbCfgInfo *out, SCtgTask* pTask); int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *indexName, SIndexInfo *out, SCtgTask* pTask); int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SName *name, STableIndex* out, SCtgTask* pTask); diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 44730cd3b5..931a944adf 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -1099,8 +1099,19 @@ _return: CTG_API_LEAVE(TSDB_CODE_SUCCESS); } -int32_t catalogGetDnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray** pDnodeList) { - return TSDB_CODE_CTG_INVALID_INPUT; +int32_t catalogGetDnodeList(SCatalog* pCtg, SRequestConnInfo* pConn, SArray** pDnodeList) { + CTG_API_ENTER(); + + int32_t code = 0; + if (NULL == pCtg || NULL == pConn || NULL == pDnodeList) { + CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); + } + + CTG_ERR_JRET(ctgGetDnodeListFromMnode(pCtg, pConn, pDnodeList, NULL)); + +_return: + + CTG_API_LEAVE(code); } int32_t catalogGetExpiredSTables(SCatalog* pCtg, SSTableVersion **stables, uint32_t *num) { diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index df986fd4d6..8928a7e028 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -168,6 +168,21 @@ int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) { return TSDB_CODE_SUCCESS; } +int32_t ctgInitGetDnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_DNODE; + task.taskId = taskIdx; + task.pJob = pJob; + task.taskCtx = NULL; + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type)); + + return TSDB_CODE_SUCCESS; +} + int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) { char *name = (char*)param; SCtgTask task = {0}; @@ -405,6 +420,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6 int32_t tbHashNum = (int32_t)taosArrayGetSize(pReq->pTableHash); int32_t udfNum = (int32_t)taosArrayGetSize(pReq->pUdf); int32_t qnodeNum = pReq->qNodeRequired ? 1 : 0; + int32_t dnodeNum = pReq->dNodeRequired ?
1 : 0; int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg); int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex); int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser); @@ -412,7 +428,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6 int32_t tbIndexNum = (int32_t)taosArrayGetSize(pReq->pTableIndex); int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg); - *taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum; + *taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum; if (*taskNum <= 0) { ctgDebug("Empty input for job, no need to retrieve meta, reqId:0x%" PRIx64, reqId); return TSDB_CODE_SUCCESS; @@ -435,6 +451,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6 pJob->tbMetaNum = tbMetaNum; pJob->tbHashNum = tbHashNum; pJob->qnodeNum = qnodeNum; + pJob->dnodeNum = dnodeNum; pJob->dbVgNum = dbVgNum; pJob->udfNum = udfNum; pJob->dbCfgNum = dbCfgNum; @@ -509,6 +526,10 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6 CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_QNODE, NULL, NULL)); } + if (dnodeNum) { + CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_DNODE, NULL, NULL)); + } + pJob->refId = taosAddRef(gCtgMgmt.jobPool, pJob); if (pJob->refId < 0) { ctgError("add job to ref failed, error: %s", tstrerror(terrno)); @@ -631,6 +652,22 @@ int32_t ctgDumpQnodeRes(SCtgTask* pTask) { return TSDB_CODE_SUCCESS; } +int32_t ctgDumpDnodeRes(SCtgTask* pTask) { + SCtgJob* pJob = pTask->pJob; + if (NULL == pJob->jobRes.pDnodeList) { + pJob->jobRes.pDnodeList = taosArrayInit(1, sizeof(SMetaRes)); + if (NULL == pJob->jobRes.pDnodeList) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + } + + SMetaRes res = {.code = pTask->code, .pRes = pTask->res}; + taosArrayPush(pJob->jobRes.pDnodeList, &res); + + return TSDB_CODE_SUCCESS; +} + + int32_t ctgDumpDbCfgRes(SCtgTask* pTask) { SCtgJob* pJob = pTask->pJob; if (NULL == pJob->jobRes.pDbCfg) { @@ -1036,6 +1073,19 @@ _return: CTG_RET(code); } +int32_t ctgHandleGetDnodeRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + TSWAP(pTask->res, pTask->msgCtx.out); + +_return: + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + int32_t ctgHandleGetIndexRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { int32_t code = 0; CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); @@ -1311,6 +1361,15 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) { return TSDB_CODE_SUCCESS; } +int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + SRequestConnInfo* pConn = &pTask->pJob->conn; + + CTG_ERR_RET(ctgGetDnodeListFromMnode(pCtg, pConn, NULL, pTask)); + return TSDB_CODE_SUCCESS; +} + + int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; @@ -1462,6 +1521,7 @@ int32_t ctgCloneDbVg(SCtgTask* pTask, void** pRes) { SCtgAsyncFps gCtgAsyncFps[] = { {ctgInitGetQnodeTask, ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes, NULL, NULL}, + {ctgInitGetDnodeTask, ctgLaunchGetDnodeTask, ctgHandleGetDnodeRsp, ctgDumpDnodeRes, NULL, NULL}, 
{ctgInitGetDbVgTask, ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes, ctgCompDbVgTasks, ctgCloneDbVg}, {ctgInitGetDbCfgTask, ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes, NULL, NULL}, {ctgInitGetDbInfoTask, ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes, NULL, NULL}, diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c index 81dabffb4e..97edb1b837 100644 --- a/source/libs/catalog/src/ctgRemote.c +++ b/source/libs/catalog/src/ctgRemote.c @@ -40,6 +40,21 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, qDebug("Got qnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(out)); break; } + case TDMT_MND_DNODE_LIST: { + if (TSDB_CODE_SUCCESS != rspCode) { + qError("error rsp for dnode list, error:%s", tstrerror(rspCode)); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process dnode list rsp failed, error:%s", tstrerror(rspCode)); + CTG_ERR_RET(code); + } + + qDebug("Got dnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(*(SArray**)out)); + break; + } case TDMT_MND_USE_DB: { if (TSDB_CODE_SUCCESS != rspCode) { qError("error rsp for use db, error:%s, dbFName:%s", tstrerror(rspCode), target); @@ -309,9 +324,6 @@ _return: CTG_RET(code); } - - - int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask) { char *msg = NULL; int32_t msgLen = 0; @@ -349,6 +361,39 @@ int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray return TSDB_CODE_SUCCESS; } +int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray **out, SCtgTask* pTask) { + char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_DNODE_LIST; + void*(*mallocFp)(int32_t) = pTask ? 
taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get dnode list from mnode, mgmtEpInUse:%d", pConn->mgmtEps.inUse); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](NULL, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build dnode list msg failed, error:%s", tstrerror(code)); + CTG_ERR_RET(code); + } + + if (pTask) { + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, NULL)); + CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL)); + + return TSDB_CODE_SUCCESS; +} + int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTask* pTask) { char *msg = NULL; diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 8635457dfe..cc823adad0 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -23,6 +23,8 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) { switch (type) { case CTG_TASK_GET_QNODE: return "[get qnode list]"; + case CTG_TASK_GET_DNODE: + return "[get dnode list]"; case CTG_TASK_GET_DB_VGROUP: return "[get db vgroup]"; case CTG_TASK_GET_DB_CFG: @@ -349,6 +351,11 @@ void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) { *pRes = NULL; break; } + case CTG_TASK_GET_DNODE: { + taosArrayDestroy((SArray*)*pRes); + *pRes = NULL; + break; + } case CTG_TASK_GET_TB_META: { taosMemoryFreeClear(*pRes); break; @@ -413,6 +420,11 @@ void ctgFreeSubTaskRes(CTG_TASK_TYPE type, void **pRes) { *pRes = NULL; break; } + case CTG_TASK_GET_DNODE: { + taosArrayDestroy((SArray*)*pRes); + *pRes = NULL; + break; + } case CTG_TASK_GET_TB_META: { taosMemoryFreeClear(*pRes); break; diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 778742b0fa..6b0a84a37e 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -18,6 +18,7 @@ #include "tdatablock.h" #include "tglobal.h" +extern SConfig *tsCfg; static int32_t getSchemaBytes(const SSchema* pSchema) { switch (pSchema->type) { case TSDB_DATA_TYPE_BINARY: @@ -551,7 +552,85 @@ static int32_t execShowCreateSTable(SShowCreateTableStmt* pStmt, SRetrieveTableR static int32_t execAlterLocal(SAlterLocalStmt* pStmt) { return TSDB_CODE_FAILED; } -static int32_t execShowLocalVariables() { return TSDB_CODE_FAILED; } +static SSDataBlock* buildLocalVariablesResultDataBlock() { + SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + pBlock->info.numOfCols = SHOW_LOCAL_VARIABLES_RESULT_COLS; + pBlock->info.hasVarCol = true; + + pBlock->pDataBlock = taosArrayInit(pBlock->info.numOfCols, sizeof(SColumnInfoData)); + + SColumnInfoData infoData = {0}; + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + infoData.info.bytes = SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN; + + taosArrayPush(pBlock->pDataBlock, &infoData); + + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + infoData.info.bytes = SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN; + taosArrayPush(pBlock->pDataBlock, &infoData); + + return pBlock; +} + + +int32_t setLocalVariablesResultIntoDataBlock(SSDataBlock* pBlock) { + int32_t numOfCfg = taosArrayGetSize(tsCfg->array); + int32_t numOfRows = 0; + blockDataEnsureCapacity(pBlock, numOfCfg); + + for (int32_t i = 0, c = 0; i < numOfCfg; ++i, c = 0) { + SConfigItem *pItem = 
taosArrayGet(tsCfg->array, i); + + char name[TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(name, pItem->name, TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE); + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, c++); + colDataAppend(pColInfo, i, name, false); + + char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0}; + int32_t valueLen = 0; + cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_VALUE_LEN, &valueLen); + varDataSetLen(value, valueLen); + pColInfo = taosArrayGet(pBlock->pDataBlock, c++); + colDataAppend(pColInfo, i, value, false); + + numOfRows++; + } + + + pBlock->info.rows = numOfRows; + + return TSDB_CODE_SUCCESS; +} + + +static int32_t execShowLocalVariables(SRetrieveTableRsp** pRsp) { + SSDataBlock* pBlock = buildLocalVariablesResultDataBlock(); + int32_t code = setLocalVariablesResultIntoDataBlock(pBlock); + if (code) { + return code; + } + + size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock); + *pRsp = taosMemoryCalloc(1, rspSize); + if (NULL == *pRsp) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + (*pRsp)->useconds = 0; + (*pRsp)->completed = 1; + (*pRsp)->precision = 0; + (*pRsp)->compressed = 0; + (*pRsp)->compLen = 0; + (*pRsp)->numOfRows = htonl(pBlock->info.rows); + (*pRsp)->numOfCols = htonl(SHOW_LOCAL_VARIABLES_RESULT_COLS); + + int32_t len = 0; + blockCompressEncode(pBlock, (*pRsp)->data, &len, SHOW_LOCAL_VARIABLES_RESULT_COLS, false); + ASSERT(len == rspSize - sizeof(SRetrieveTableRsp)); + + blockDataDestroy(pBlock); + return TSDB_CODE_SUCCESS; +} int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) { switch (nodeType(pStmt)) { @@ -568,7 +647,7 @@ int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) { case QUERY_NODE_ALTER_LOCAL_STMT: return execAlterLocal((SAlterLocalStmt*)pStmt); case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT: - return execShowLocalVariables(); + return execShowLocalVariables(pRsp); default: break; } diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 4ecf2ee719..f3e1eb47e8 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -106,7 +106,8 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo); SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode); -int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond); +EDealRes doTranslateTagExpr(SNode** pNode, void* pContext); +int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo); SArray* createSortInfo(SNodeList* pNodeList); SArray* extractPartitionColInfo(SNodeList* pNodeList); SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols, int32_t type); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 654cf681e6..286bcea820 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -282,7 +282,6 @@ typedef struct STagScanInfo { int32_t curPos; SReadHandle readHandle; STableListInfo *pTableList; - SNode* pFilterNode; // filter info, } STagScanInfo; typedef enum EStreamScanMode { @@ -839,13 +838,11 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo); int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, - 
SNode* pTagCond); -int32_t doCreateMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId, - uint64_t taskId); -SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SArray* dataReaders, - SReadHandle* readHandle, SExecTaskInfo* pTaskInfo); + STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId); +SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode, + SExecTaskInfo* pTaskInfo); +SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo *pTableListInfo, + SReadHandle* readHandle, SExecTaskInfo* pTaskInfo, uint64_t queryId, uint64_t taskId); void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 891ffec09b..0282d9cccb 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -214,28 +214,111 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) { return pBlock; } -int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond) { +EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) { + SMetaReader* mr = (SMetaReader*)pContext; + if(nodeType(*pNode) == QUERY_NODE_COLUMN){ + SColumnNode* pSColumnNode = *(SColumnNode**)pNode; + + SValueNode *res = (SValueNode *)nodesMakeNode(QUERY_NODE_VALUE); + if (NULL == res) { + return DEAL_RES_ERROR; + } + + res->translate = true; + res->node.resType = pSColumnNode->node.resType; + + STagVal tagVal = {0}; + tagVal.cid = pSColumnNode->colId; + const char* p = metaGetTableTagVal(&mr->me, pSColumnNode->node.resType.type, &tagVal); + if (p == NULL) { + res->node.resType.type = TSDB_DATA_TYPE_NULL; + }else if (pSColumnNode->node.resType.type == TSDB_DATA_TYPE_JSON) { + int32_t len = ((const STag*)p) -> len; + res->datum.p = taosMemoryCalloc(len + 1, 1); + memcpy(res->datum.p, p, len); + } else if (IS_VAR_DATA_TYPE(pSColumnNode->node.resType.type)) { + res->datum.p = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + memcpy(varDataVal(res->datum.p), tagVal.pData, tagVal.nData); + varDataSetLen(res->datum.p, tagVal.nData); + } else { + nodesSetValueNodeValue(res, &(tagVal.i64)); + } + nodesDestroyNode(*pNode); + *pNode = (SNode*)res; + }else if (nodeType(*pNode) == QUERY_NODE_FUNCTION){ + SFunctionNode * pFuncNode = *(SFunctionNode**)pNode; + if(pFuncNode->funcType == FUNCTION_TYPE_TBNAME){ + SValueNode *res = (SValueNode *)nodesMakeNode(QUERY_NODE_VALUE); + if (NULL == res) { + return DEAL_RES_ERROR; + } + + res->translate = true; + res->node.resType = pFuncNode->node.resType; + + int32_t len = strlen(mr->me.name); + res->datum.p = taosMemoryCalloc(len + VARSTR_HEADER_SIZE + 1, 1); + memcpy(varDataVal(res->datum.p), mr->me.name, len); + varDataSetLen(res->datum.p, len); + nodesDestroyNode(*pNode); + *pNode = (SNode*)res; + } + } + + return DEAL_RES_CONTINUE; +} + +static bool isTableOk(STableKeyInfo* info, SNode *pTagCond, SMeta *metaHandle){ + SMetaReader mr = {0}; + metaReaderInit(&mr, metaHandle, 0); + metaGetTableEntryByUid(&mr, info->uid); + + SNode *pTagCondTmp = nodesCloneNode(pTagCond); + + nodesRewriteExprPostOrder(&pTagCondTmp, doTranslateTagExpr, &mr); + metaReaderClear(&mr); + + SNode* pNew = NULL; + int32_t code = scalarCalculateConstants(pTagCondTmp, &pNew); + if (TSDB_CODE_SUCCESS != code) 
{ + nodesDestroyNode(pTagCondTmp); + return false; + } + + ASSERT(nodeType(pNew) == QUERY_NODE_VALUE); + SValueNode *pValue = (SValueNode *)pNew; + + ASSERT(pValue->node.resType.type == TSDB_DATA_TYPE_BOOL); + bool result = pValue->datum.b; + nodesDestroyNode(pNew); + return result; +} + +int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo) { int32_t code = TSDB_CODE_SUCCESS; pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo)); uint64_t tableUid = pScanNode->uid; + SNode* pTagCond = (SNode*)pListInfo->pTagCond; + SNode* pTagIndexCond = (SNode*)pListInfo->pTagIndexCond; if (pScanNode->tableType == TSDB_SUPER_TABLE) { - if (pTagCond) { + if (pTagIndexCond) { SIndexMetaArg metaArg = { .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; SArray* res = taosArrayInit(8, sizeof(uint64_t)); - code = doFilterTag(pTagCond, &metaArg, res); - if (code == TSDB_CODE_INDEX_REBUILDING) { // todo - // doFilter(); + //code = doFilterTag(pTagIndexCond, &metaArg, res); + code = TSDB_CODE_INDEX_REBUILDING; + if (code == TSDB_CODE_INDEX_REBUILDING) { + code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList); } else if (code != TSDB_CODE_SUCCESS) { qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid); taosArrayDestroy(res); terrno = code; return code; } else { - qDebug("success to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid); + qDebug("success to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid); } for (int i = 0; i < taosArrayGetSize(res); i++) { @@ -246,7 +329,20 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo } else { code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList); } - } else { // Create one table group. + + if(pTagCond){ + int32_t i = 0; + while(i < taosArrayGetSize(pListInfo->pTableList)) { + STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i); + bool isOk = isTableOk(info, pTagCond, metaHandle); + if(!isOk){ + taosArrayRemove(pListInfo->pTableList, i); + continue; + } + i++; + } + } + }else { // Create one table group. 
STableKeyInfo info = {.lastKey = 0, .uid = tableUid, .groupId = 0}; taosArrayPush(pListInfo->pTableList, &info); } diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 39ce19ed90..39b9ac91bd 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1340,7 +1340,7 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowR } if (rowRes != NULL) { - int32_t totalRows = pBlock->info.rows; + int32_t totalRows = pBlock->info.rows; SSDataBlock* px = createOneDataBlock(pBlock, true); for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { @@ -3872,8 +3872,7 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT } static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, - SNode* pTagCond); + STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId); static SArray* extractColumnInfo(SNodeList* pNodeList); @@ -3968,7 +3967,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, } } } - int32_t len = (int32_t)(pStart - (char*)keyBuf); + int32_t len = (int32_t)(pStart - (char*)keyBuf); uint64_t* pGroupId = taosHashGet(pTableListInfo->map, keyBuf, len); @@ -3987,7 +3986,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, } SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, - uint64_t queryId, uint64_t taskId, STableListInfo* pTableListInfo, SNode* pTagCond) { + uint64_t queryId, uint64_t taskId, STableListInfo* pTableListInfo) { int32_t type = nodeType(pPhyNode); if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) { @@ -3995,7 +3994,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; tsdbReaderT pDataReader = - doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); + doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId); if (pDataReader == NULL && terrno != 0) { pTaskInfo->code = terrno; return NULL; @@ -4023,14 +4022,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return pOperator; } else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) { STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode; - - SArray* dataReaders = taosArrayInit(8, POINTER_BYTES); - createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId, pTagCond); - doCreateMultipleDataReaders(pTableScanNode, pHandle, pTableListInfo, dataReaders, queryId, taskId); - + createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId); extractTableSchemaVersion(pHandle, pTableScanNode->scan.uid, pTaskInfo); - - SOperatorInfo* pOperator = createTableMergeScanOperatorInfo(pTableScanNode, dataReaders, pHandle, pTaskInfo); + SOperatorInfo* pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pTableListInfo, pHandle, pTaskInfo, queryId, taskId); STableScanInfo* pScanInfo = pOperator->info; pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder; return pOperator; @@ -4051,14 +4045,13 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo // for stream ASSERT(pHandle->vnode); pDataReader = - doCreateDataReader(pTableScanNode, pHandle, 
pTableListInfo, (uint64_t)queryId, taskId, pTagCond); + doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId); } else { // for tq ASSERT(pHandle->meta); - getTableList(pHandle->meta, pScanPhyNode, pTableListInfo, pTagCond); + getTableList(pHandle->meta, pScanPhyNode, pTableListInfo); } } - if (pDataReader == NULL && terrno != 0) { qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo)); // return NULL; @@ -4083,7 +4076,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode; - int32_t code = getTableList(pHandle->meta, pScanPhyNode, pTableListInfo, pScanPhyNode->node.pConditions); + int32_t code = getTableList(pHandle->meta, pScanPhyNode, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { pTaskInfo->code = terrno; return NULL; @@ -4141,7 +4134,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES); for (int32_t i = 0; i < size; ++i) { SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i); - ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableListInfo, pTagCond); + ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableListInfo); if (ops[i] == NULL) { return NULL; } @@ -4232,6 +4225,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children); } else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) { pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo); + } else if (QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT == type) { + pOptr = createGroupSortOperatorInfo(ops[0], (SGroupSortPhysiNode*)pPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == type) { SMergePhysiNode* pMergePhyNode = (SMergePhysiNode*)pPhyNode; @@ -4343,8 +4338,8 @@ SArray* extractColumnInfo(SNodeList* pNodeList) { } tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { - int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond); + STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId) { + int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -4502,14 +4497,20 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead } (*pTaskInfo)->sql = sql; + (*pTaskInfo)->tableqinfoList.pTagCond = pPlan->pTagCond; + (*pTaskInfo)->tableqinfoList.pTagIndexCond = pPlan->pTagIndexCond; (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, - &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); - + &(*pTaskInfo)->tableqinfoList); if (NULL == (*pTaskInfo)->pRoot) { code = (*pTaskInfo)->code; goto _complete; } + if ((*pTaskInfo)->pRoot == NULL) { + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _complete; + } + return code; _complete: diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 1d1e0d6bb8..a703be86e4 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -337,7 +337,7 @@ void addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_ } for (int32_t 
i = 0; i < pBlock->info.rows; ++i) { - colDataAppend(pColInfoData, i, data, (data == NULL)); + colDataAppend(pColInfoData, i, data, (data == NULL) || (pColInfoData->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); } if (data && (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) && p != NULL && @@ -1557,11 +1557,14 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { return NULL; } + int32_t msgType = (strcasecmp(name, TSDB_INS_TABLE_DNODE_VARIABLES) == 0) ? TDMT_DND_SYSTABLE_RETRIEVE : TDMT_MND_SYSTABLE_RETRIEVE; + pMsgSendInfo->param = pOperator; pMsgSendInfo->msgInfo.pData = buf1; pMsgSendInfo->msgInfo.len = contLen; - pMsgSendInfo->msgType = TDMT_MND_SYSTABLE_RETRIEVE; + pMsgSendInfo->msgType = msgType; pMsgSendInfo->fp = loadSysTableCallback; + pMsgSendInfo->requestId = pTaskInfo->id.queryId; int64_t transporterId = 0; int32_t code = @@ -1596,6 +1599,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { taosMemoryFree(pRsp); if (pInfo->pRes->info.rows > 0) { return pInfo->pRes; + } else if (pOperator->status == OP_EXEC_DONE) { + return NULL; } } } @@ -1838,7 +1843,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { } else { data = (char*)p; } - colDataAppend(pDst, count, data, (data == NULL)); + colDataAppend(pDst, count, data, (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && data != NULL) { @@ -1861,9 +1866,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { } pRes->info.rows = count; - doFilter(pInfo->pFilterNode, pRes); - - pOperator->resultInfo.totalRows += pRes->info.rows; + pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } @@ -1893,13 +1896,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi goto _error; } - pInfo->pTableList = pTableListInfo; - pInfo->pColMatchInfo = colList; - pInfo->pRes = createResDataBlock(pDescNode); - pInfo->readHandle = *pReadHandle; - pInfo->curPos = 0; - pInfo->pFilterNode = pPhyNode->node.pConditions; - + pInfo->pTableList = pTableListInfo; + pInfo->pColMatchInfo = colList; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->readHandle = *pReadHandle; + pInfo->curPos = 0; pOperator->name = "TagScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN; @@ -1924,6 +1925,12 @@ _error: } typedef struct STableMergeScanInfo { + STableListInfo* tableListInfo; + int32_t tableStartIndex; + int32_t tableEndIndex; + bool hasGroupId; + uint64_t groupId; + SArray* dataReaders; // array of tsdbReaderT* SReadHandle readHandle; @@ -1936,11 +1943,9 @@ typedef struct STableMergeScanInfo { SSDataBlock* pSortInputBlock; int64_t startTs; // sort start time - bool hasGroupId; - uint64_t groupId; - STupleHandle* prefetchedTuple; - - SArray* sortSourceParams; + SArray* sortSourceParams; + uint64_t queryId; + uint64_t taskId; SFileBlockLoadRecorder readRecorder; int64_t numOfRows; @@ -1968,8 +1973,6 @@ typedef struct STableMergeScanInfo { // window to check if current data block needs to be loaded. 
SSampleExecInfo sample; // sample execution info - int32_t curTWinIdx; - } STableMergeScanInfo; int32_t compareTableKeyInfoByGid(const void* p1, const void* p2) { @@ -1979,8 +1982,8 @@ int32_t compareTableKeyInfoByGid(const void* p1, const void* p2) { } int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { - int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond); + STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId) { + int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -2025,6 +2028,24 @@ _error: return code; } +int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, STableListInfo* pTableListInfo, + int32_t tableStartIdx, int32_t tableEndIdx, SArray* arrayReader, uint64_t queryId, + uint64_t taskId) { + for (int32_t i = tableStartIdx; i <= tableEndIdx; ++i) { + STableListInfo* subListInfo = taosMemoryCalloc(1, sizeof(STableListInfo)); + subListInfo->pTableList = taosArrayInit(1, sizeof(STableKeyInfo)); + taosArrayPush(subListInfo->pTableList, taosArrayGet(pTableListInfo->pTableList, i)); + + tsdbReaderT* pReader = tsdbReaderOpen(pHandle->vnode, pQueryCond, subListInfo, queryId, taskId); + taosArrayPush(arrayReader, &pReader); + + taosArrayDestroy(subListInfo->pTableList); + taosMemoryFree(subListInfo); + } + + return TSDB_CODE_SUCCESS; +} + static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo, int32_t readerIdx, SSDataBlock* pBlock, uint32_t* status) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -2204,22 +2225,47 @@ SArray* generateSortByTsInfo(int32_t order) { return pList; } -int32_t doOpenTableMergeScanOperator(SOperatorInfo* pOperator) { +int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { STableMergeScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - if (OPTR_IS_OPENED(pOperator)) { - return TSDB_CODE_SUCCESS; + { + size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList); + int32_t i = pInfo->tableStartIndex + 1; + for (; i < tableListSize; ++i) { + STableKeyInfo* tableKeyInfo = taosArrayGet(pInfo->tableListInfo->pTableList, i); + if (tableKeyInfo->groupId != pInfo->groupId) { + break; + } + } + pInfo->tableEndIndex = i - 1; } - int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; + int32_t tableStartIdx = pInfo->tableStartIndex; + int32_t tableEndIdx = pInfo->tableEndIndex; + STableListInfo* tableListInfo = pInfo->tableListInfo; + createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableListInfo, tableStartIdx, tableEndIdx, + pInfo->dataReaders, pInfo->queryId, pInfo->taskId); + + // todo the total available buffer should be determined by total capacity of buffer of this task. 
+ // the additional one is reserved for merge result + pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1); + int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str); tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlock, NULL, NULL); size_t numReaders = taosArrayGetSize(pInfo->dataReaders); + for (int32_t i = 0; i < numReaders; ++i) { + STableMergeScanSortSourceParam param = {0}; + param.readerIdx = i; + param.pOperator = pOperator; + param.inputBlock = createOneDataBlock(pInfo->pResBlock, false); + taosArrayPush(pInfo->sortSourceParams, &param); + } + for (int32_t i = 0; i < numReaders; ++i) { SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource)); STableMergeScanSortSourceParam* param = taosArrayGet(pInfo->sortSourceParams, i); @@ -2233,9 +2279,22 @@ int32_t doOpenTableMergeScanOperator(SOperatorInfo* pOperator) { longjmp(pTaskInfo->env, terrno); } - pOperator->status = OP_RES_TO_RETURN; + return TSDB_CODE_SUCCESS; +} + +int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) { + STableMergeScanInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + tsortDestroySortHandle(pInfo->pSortHandle); + taosArrayClear(pInfo->sortSourceParams); + + for (int32_t i = 0; i < taosArrayGetSize(pInfo->dataReaders); ++i) { + tsdbReaderT* reader = taosArrayGetP(pInfo->dataReaders, i); + tsdbCleanupReadHandle(reader); + } + taosArrayDestroy(pInfo->dataReaders); - OPTR_SET_OPENED(pOperator); return TSDB_CODE_SUCCESS; } @@ -2278,14 +2337,38 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, code); } + size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList); + if (!pInfo->hasGroupId) { + pInfo->hasGroupId = true; - SSDataBlock* pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator); - - if (pBlock != NULL) { - pOperator->resultInfo.totalRows += pBlock->info.rows; - } else { - doSetOperatorCompleted(pOperator); + if (tableListSize == 0) { + doSetOperatorCompleted(pOperator); + return NULL; + } + pInfo->tableStartIndex = 0; + pInfo->groupId = ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId; + startGroupTableMergeScan(pOperator); } + SSDataBlock* pBlock = NULL; + while (pInfo->tableStartIndex < tableListSize) { + pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator); + if (pBlock != NULL) { + pBlock->info.groupId = pInfo->groupId; + pOperator->resultInfo.totalRows += pBlock->info.rows; + return pBlock; + } else { + stopGroupTableMergeScan(pOperator); + if (pInfo->tableEndIndex >= tableListSize - 1) { + doSetOperatorCompleted(pOperator); + break; + } + pInfo->tableStartIndex = pInfo->tableEndIndex + 1; + pInfo->groupId = + ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId; + startGroupTableMergeScan(pOperator); + } + } + return pBlock; } @@ -2293,17 +2376,10 @@ void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) { STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param; cleanupQueryTableDataCond(&pTableScanInfo->cond); - for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->dataReaders); ++i) { - tsdbReaderT* reader = taosArrayGetP(pTableScanInfo->dataReaders, i); - 
tsdbCleanupReadHandle(reader); - } - taosArrayDestroy(pTableScanInfo->dataReaders); - if (pTableScanInfo->pColMatchInfo != NULL) { taosArrayDestroy(pTableScanInfo->pColMatchInfo); } - taosArrayDestroy(pTableScanInfo->sortSourceParams); pTableScanInfo->pResBlock = blockDataDestroy(pTableScanInfo->pResBlock); pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock); @@ -2329,8 +2405,9 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla return TSDB_CODE_SUCCESS; } -SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SArray* dataReaders, - SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) { +SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo, + SReadHandle* readHandle, SExecTaskInfo* pTaskInfo, uint64_t queryId, + uint64_t taskId) { STableMergeScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableMergeScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -2360,22 +2437,16 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN pInfo->sample.seed = taosGetTimestampSec(); pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired; pInfo->pFilterNode = pTableScanNode->scan.node.pConditions; - pInfo->dataReaders = dataReaders; + pInfo->tableListInfo = pTableListInfo; pInfo->scanFlag = MAIN_SCAN; pInfo->pColMatchInfo = pColList; - pInfo->curTWinIdx = 0; pInfo->pResBlock = createResDataBlock(pDescNode); + pInfo->dataReaders = taosArrayInit(64, POINTER_BYTES); + pInfo->queryId = queryId; + pInfo->taskId = taskId; - pInfo->sortSourceParams = taosArrayInit(taosArrayGetSize(dataReaders), sizeof(STableMergeScanSortSourceParam)); - for (int32_t i = 0; i < taosArrayGetSize(dataReaders); ++i) { - STableMergeScanSortSourceParam* param = taosMemoryCalloc(1, sizeof(STableMergeScanSortSourceParam)); - param->readerIdx = i; - param->pOperator = pOperator; - param->inputBlock = createOneDataBlock(pInfo->pResBlock, false); - taosArrayPush(pInfo->sortSourceParams, param); - taosMemoryFree(param); - } + pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam)); pInfo->pSortInfo = generateSortByTsInfo(pInfo->cond.order); pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false); @@ -2383,14 +2454,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN int32_t rowSize = pInfo->pResBlock->info.rowSize; pInfo->bufPageSize = getProperSortPageSize(rowSize); - // todo the total available buffer should be determined by total capacity of buffer of this task. 
- // the additional one is reserved for merge result - pInfo->sortBufSize = pInfo->bufPageSize * (taosArrayGetSize(dataReaders) + 1); - pInfo->hasGroupId = false; - pInfo->prefetchedTuple = NULL; - pOperator->name = "TableMergeScanOperator"; - // TODO : change it pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN; pOperator->blocking = false; pOperator->status = OP_NOT_OPENED; @@ -2400,8 +2464,8 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN initResultSizeInfo(pOperator, 1024); pOperator->fpSet = - createOperatorFpSet(doOpenTableMergeScanOperator, doTableMergeScan, NULL, NULL, destroyTableMergeScanOperatorInfo, - NULL, NULL, getTableMergeScanExplainExecInfo); + createOperatorFpSet(operatorDummyOpenFn, doTableMergeScan, NULL, NULL, destroyTableMergeScanOperatorInfo, NULL, + NULL, getTableMergeScanExplainExecInfo); pOperator->cost.openCost = 0; return pOperator; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 97be4645a8..4b5ad7b123 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -424,10 +424,17 @@ int32_t getGroupSortExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, u return TSDB_CODE_SUCCESS; } -// TODO: -SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortPhyNode, +void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) { + SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param; + pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); + + taosArrayDestroy(pInfo->pSortInfo); + taosArrayDestroy(pInfo->pColMatchInfo); +} + +SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo) { - SSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortOperatorInfo)); + SGroupSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SGroupSortOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL /* || rowSize > 100 * 1024 * 1024*/) { goto _error; @@ -452,8 +459,7 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SSortPhysi ; pInfo->pColMatchInfo = pColMatchColInfo; pOperator->name = "GroupSortOperator"; - // TODO - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SORT; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->info = pInfo; @@ -461,7 +467,7 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SSortPhysi pOperator->exprSupp.numOfExprs = numOfCols; pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doGroupSort, NULL, NULL, destroyOrderOperatorInfo, NULL, + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doGroupSort, NULL, NULL, destroyGroupSortOperatorInfo, NULL, NULL, getGroupSortExplainExecInfo); int32_t code = appendDownstream(pOperator, &downstream, 1); @@ -478,18 +484,6 @@ _error: return NULL; } -void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) { - SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param; - pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); - - taosArrayDestroy(pInfo->pSortInfo); - taosArrayDestroy(pInfo->pColMatchInfo); -} - -// TODO: sort group -// TODO: msortCompare compare group id in multiway merge sort. 
-// TODO: table merge scan, group first, then for each group, multiple readers - //===================================================================================== // Multiway Sort Merge operator typedef struct SMultiwaySortMergeOperatorInfo { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 3ff45d7237..19f0fd4ea7 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1656,10 +1656,9 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); - return NULL; } - return pBInfo->pRes; + return pBInfo->pRes->info.rows > 0 ? pBInfo->pRes : NULL; } int64_t st = taosGetTimestampUs(); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 0bde0d0581..05eaa88a83 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -351,6 +351,7 @@ static SNode* logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) { COPY_SCALAR_FIELD(intervalUnit); COPY_SCALAR_FIELD(slidingUnit); CLONE_NODE_FIELD(pTagCond); + CLONE_NODE_FIELD(pTagIndexCond); COPY_SCALAR_FIELD(triggerType); COPY_SCALAR_FIELD(watermark); COPY_SCALAR_FIELD(tsColId); diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 10a933b00b..5c5c62d915 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2326,6 +2326,7 @@ static const char* jkSubplanNodeAddr = "NodeAddr"; static const char* jkSubplanRootNode = "RootNode"; static const char* jkSubplanDataSink = "DataSink"; static const char* jkSubplanTagCond = "TagCond"; +static const char* jkSubplanTagIndexCond = "TagIndexCond"; static int32_t subplanToJson(const void* pObj, SJson* pJson) { const SSubplan* pNode = (const SSubplan*)pObj; @@ -2355,6 +2356,9 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkSubplanTagCond, nodeToJson, pNode->pTagCond); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkSubplanTagIndexCond, nodeToJson, pNode->pTagIndexCond); + } return code; } @@ -2388,6 +2392,9 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkSubplanTagCond, (SNode**)&pNode->pTagCond); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkSubplanTagIndexCond, (SNode**)&pNode->pTagIndexCond); + } return code; } @@ -3954,7 +3961,7 @@ static int32_t deleteStmtToJson(const void* pObj, SJson* pJson) { code = tjsonAddObject(pJson, jkDeleteStmtCountFunc, nodeToJson, pNode->pCountFunc); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddObject(pJson, jkDeleteStmtTagIndexCond, nodeToJson, pNode->pTagIndexCond); + code = tjsonAddObject(pJson, jkDeleteStmtTagIndexCond, nodeToJson, pNode->pTagCond); } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkDeleteStmtTimeRangeStartKey, pNode->timeRange.skey); @@ -3983,7 +3990,7 @@ static int32_t jsonToDeleteStmt(const SJson* pJson, void* pObj) { code = jsonToNodeObject(pJson, jkDeleteStmtCountFunc, &pNode->pCountFunc); } if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeObject(pJson, jkDeleteStmtTagIndexCond, 
&pNode->pTagIndexCond); + code = jsonToNodeObject(pJson, jkDeleteStmtTagIndexCond, &pNode->pTagCond); } if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBigIntValue(pJson, jkDeleteStmtTimeRangeStartKey, &pNode->timeRange.skey); diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index ff55195bee..13628f85db 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -669,7 +669,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pStmt->pFromTable); nodesDestroyNode(pStmt->pWhere); nodesDestroyNode(pStmt->pCountFunc); - nodesDestroyNode(pStmt->pTagIndexCond); + nodesDestroyNode(pStmt->pTagCond); break; } case QUERY_NODE_QUERY: { @@ -688,7 +688,13 @@ void nodesDestroyNode(SNode* pNode) { SScanLogicNode* pLogicNode = (SScanLogicNode*)pNode; destroyLogicNode((SLogicNode*)pLogicNode); nodesDestroyList(pLogicNode->pScanCols); + nodesDestroyList(pLogicNode->pScanPseudoCols); taosMemoryFreeClear(pLogicNode->pVgroupList); + nodesDestroyList(pLogicNode->pDynamicScanFuncs); + nodesDestroyNode(pLogicNode->pTagCond); + nodesDestroyNode(pLogicNode->pTagIndexCond); + taosArrayDestroy(pLogicNode->pSmaIndexes); + nodesDestroyList(pLogicNode->pPartTags); break; } case QUERY_NODE_LOGIC_PLAN_JOIN: { @@ -897,6 +903,8 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyList(pSubplan->pChildren); nodesDestroyNode((SNode*)pSubplan->pNode); nodesDestroyNode((SNode*)pSubplan->pDataSink); + nodesDestroyNode((SNode*)pSubplan->pTagCond); + nodesDestroyNode((SNode*)pSubplan->pTagIndexCond); nodesClearList(pSubplan->pParents); break; } @@ -1130,6 +1138,7 @@ void* nodesGetValueFromNode(SValueNode* pNode) { case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: + case TSDB_DATA_TYPE_JSON: return (void*)pNode->datum.p; default: break; @@ -1659,6 +1668,7 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc) { typedef struct SClassifyConditionCxt { bool hasPrimaryKey; bool hasTagIndexCol; + bool hasTagCol; bool hasOtherCol; } SClassifyConditionCxt; @@ -1670,6 +1680,9 @@ static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) { pCxt->hasPrimaryKey = true; } else if (pCol->hasIndex) { pCxt->hasTagIndexCol = true; + pCxt->hasTagCol = true; + } else if (COLUMN_TYPE_TAG == pCol->colType) { + pCxt->hasTagCol = true; } else { pCxt->hasOtherCol = true; } @@ -1678,23 +1691,31 @@ static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) { return DEAL_RES_CONTINUE; } -typedef enum EConditionType { COND_TYPE_PRIMARY_KEY = 1, COND_TYPE_TAG_INDEX, COND_TYPE_NORMAL } EConditionType; +typedef enum EConditionType { + COND_TYPE_PRIMARY_KEY = 1, + COND_TYPE_TAG_INDEX, + COND_TYPE_TAG, + COND_TYPE_NORMAL +} EConditionType; static EConditionType classifyCondition(SNode* pNode) { SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false}; nodesWalkExpr(pNode, classifyConditionImpl, &cxt); return cxt.hasOtherCol ? COND_TYPE_NORMAL - : (cxt.hasPrimaryKey && cxt.hasTagIndexCol + : (cxt.hasPrimaryKey && cxt.hasTagCol ? COND_TYPE_NORMAL - : (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY : COND_TYPE_TAG_INDEX)); + : (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY + : (cxt.hasTagIndexCol ? 
COND_TYPE_TAG_INDEX : COND_TYPE_TAG))); } -static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagCond, SNode** pOtherCond) { +static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, + SNode** pOtherCond) { SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition); int32_t code = TSDB_CODE_SUCCESS; SNodeList* pPrimaryKeyConds = NULL; + SNodeList* pTagIndexConds = NULL; SNodeList* pTagConds = NULL; SNodeList* pOtherConds = NULL; SNode* pCond = NULL; @@ -1706,6 +1727,14 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S } break; case COND_TYPE_TAG_INDEX: + if (NULL != pTagIndexCond) { + code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond)); + } + if (NULL != pTagCond) { + code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); + } + break; + case COND_TYPE_TAG: if (NULL != pTagCond) { code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); } @@ -1723,11 +1752,15 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S } SNode* pTempPrimaryKeyCond = NULL; + SNode* pTempTagIndexCond = NULL; SNode* pTempTagCond = NULL; SNode* pTempOtherCond = NULL; if (TSDB_CODE_SUCCESS == code) { code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds); } + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds); + } if (TSDB_CODE_SUCCESS == code) { code = nodesMergeConds(&pTempTagCond, &pTagConds); } @@ -1739,6 +1772,9 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S if (NULL != pPrimaryKeyCond) { *pPrimaryKeyCond = pTempPrimaryKeyCond; } + if (NULL != pTagIndexCond) { + *pTagIndexCond = pTempTagIndexCond; + } if (NULL != pTagCond) { *pTagCond = pTempTagCond; } @@ -1749,9 +1785,11 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S *pCondition = NULL; } else { nodesDestroyList(pPrimaryKeyConds); + nodesDestroyList(pTagIndexConds); nodesDestroyList(pTagConds); nodesDestroyList(pOtherConds); nodesDestroyNode(pTempPrimaryKeyCond); + nodesDestroyNode(pTempTagIndexCond); nodesDestroyNode(pTempTagCond); nodesDestroyNode(pTempOtherCond); } @@ -1759,10 +1797,11 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S return code; } -int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagCond, SNode** pOtherCond) { +int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, + SNode** pOtherCond) { if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) && LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCondition)->condType) { - return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagCond, pOtherCond); + return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond); } switch (classifyCondition(*pCondition)) { @@ -1772,6 +1811,21 @@ int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** } break; case COND_TYPE_TAG_INDEX: + if (NULL != pTagIndexCond) { + *pTagIndexCond = *pCondition; + } + if (NULL != pTagCond) { + SNode* pTempCond = *pCondition; + if (NULL != pTagIndexCond) { + pTempCond = nodesCloneNode(*pCondition); + if (NULL == pTempCond) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + *pTagCond = pTempCond; + } + break; + case COND_TYPE_TAG: if (NULL != pTagCond) { *pTagCond = *pCondition; } diff --git a/source/libs/parser/src/parAstParser.c 
b/source/libs/parser/src/parAstParser.c index 46d2ae8383..d9c4551400 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -137,6 +137,9 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, SRealTa code = reserveTableIndexInCache(pCxt->pParseCxt->acctId, pRealTable->table.dbName, pRealTable->table.tableName, pCxt->pMetaCache); } + if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_DNODE_VARIABLES))) { + code = reserveDnodeRequiredInCache(pCxt->pMetaCache); + } return code; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 43a4cf0ed6..7b5c30d3cb 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2013,7 +2013,7 @@ static int32_t getFillTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWin } SNode* pPrimaryKeyCond = NULL; - nodesPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL); + nodesPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL); int32_t code = TSDB_CODE_SUCCESS; if (NULL != pPrimaryKeyCond) { @@ -2161,9 +2161,6 @@ static EDealRes checkStateExpr(SNode* pNode, void* pContext) { if (COLUMN_TYPE_TAG == pCol->colType) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_STATE_WIN_COL); } - if (TSDB_SUPER_TABLE == pCol->tableType) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_STATE_WIN_TABLE); - } } return DEAL_RES_CONTINUE; } @@ -2685,7 +2682,7 @@ static int32_t partitionDeleteWhere(STranslateContext* pCxt, SDeleteStmt* pDelet SNode* pPrimaryKeyCond = NULL; SNode* pOtherCond = NULL; - int32_t code = nodesPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, &pDelete->pTagIndexCond, &pOtherCond); + int32_t code = nodesPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond); if (TSDB_CODE_SUCCESS == code && NULL != pOtherCond) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DELETE_WHERE); } @@ -4614,6 +4611,25 @@ static int32_t extractShowCreateTableResultSchema(int32_t* numOfCols, SSchema** return TSDB_CODE_SUCCESS; } +static int32_t extractShowLocalVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) { + *numOfCols = 2; + *pSchema = taosMemoryCalloc((*numOfCols), sizeof(SSchema)); + if (NULL == (*pSchema)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + (*pSchema)[0].type = TSDB_DATA_TYPE_BINARY; + (*pSchema)[0].bytes = TSDB_CONFIG_OPTION_LEN; + strcpy((*pSchema)[0].name, "name"); + + (*pSchema)[1].type = TSDB_DATA_TYPE_BINARY; + (*pSchema)[1].bytes = TSDB_CONFIG_VALUE_LEN; + strcpy((*pSchema)[1].name, "value"); + + return TSDB_CODE_SUCCESS; +} + + int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema) { if (NULL == pRoot) { return TSDB_CODE_SUCCESS; @@ -4632,6 +4648,8 @@ int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pS case QUERY_NODE_SHOW_CREATE_TABLE_STMT: case QUERY_NODE_SHOW_CREATE_STABLE_STMT: return extractShowCreateTableResultSchema(numOfCols, pSchema); + case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT: + return extractShowLocalVariablesResultSchema(numOfCols, pSchema); default: break; } @@ -5948,12 +5966,12 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: case QUERY_NODE_SHOW_CREATE_TABLE_STMT: case QUERY_NODE_SHOW_CREATE_STABLE_STMT: + case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT: pQuery->execMode = QUERY_EXEC_MODE_LOCAL; pQuery->haveResultSet = true; break; case 
QUERY_NODE_RESET_QUERY_CACHE_STMT: case QUERY_NODE_ALTER_LOCAL_STMT: - case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT: pQuery->execMode = QUERY_EXEC_MODE_LOCAL; break; default: diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index b474d95b3c..689ff08ab1 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -928,7 +928,12 @@ int32_t reserveDnodeRequiredInCache(SParseMetaCache* pMetaCache) { } int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes) { - *pDnodes = taosArrayDup(pMetaCache->pDnodes); + SMetaRes* pRes = taosArrayGet(pMetaCache->pDnodes, 0); + if (pRes->code) { + return pRes->code; + } + + *pDnodes = taosArrayDup((SArray*)pRes->pRes); if (NULL == *pDnodes) { return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp index 1f7657146b..4263b59cc3 100644 --- a/source/libs/parser/test/mockCatalogService.cpp +++ b/source/libs/parser/test/mockCatalogService.cpp @@ -166,10 +166,13 @@ class MockCatalogServiceImpl { } int32_t catalogGetDnodeList(SArray** pDnodes) const { - *pDnodes = taosArrayInit(dnode_.size(), sizeof(SEpSet)); + SMetaRes res = {0}; + res.pRes = taosArrayInit(dnode_.size(), sizeof(SEpSet)); for (const auto& dnode : dnode_) { - taosArrayPush(*pDnodes, &dnode.second); + taosArrayPush((SArray*)res.pRes, &dnode.second); } + *pDnodes = taosArrayInit(1, sizeof(SMetaRes)); + taosArrayPush(*pDnodes, &res); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index eafc6ba37a..9dbd49e66c 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1160,8 +1160,8 @@ static int32_t createDeleteScanLogicNode(SLogicPlanContext* pCxt, SDeleteStmt* p } } - if (TSDB_CODE_SUCCESS == code && NULL != pDelete->pTagIndexCond) { - pScan->pTagCond = nodesCloneNode(pDelete->pTagIndexCond); + if (TSDB_CODE_SUCCESS == code && NULL != pDelete->pTagCond) { + pScan->pTagCond = nodesCloneNode(pDelete->pTagCond); if (NULL == pScan->pTagCond) { code = TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 6a71b1370e..5b9dbe8388 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -15,7 +15,6 @@ #include "filter.h" #include "functionMgt.h" -#include "index.h" #include "planInt.h" #include "ttime.h" @@ -309,32 +308,6 @@ static int32_t cpdCalcTimeRange(SOptimizeContext* pCxt, SScanLogicNode* pScan, S return code; } -static int32_t cpdApplyTagIndex(SScanLogicNode* pScan, SNode** pTagCond, SNode** pOtherCond) { - int32_t code = TSDB_CODE_SUCCESS; - SIdxFltStatus idxStatus = idxGetFltStatus(*pTagCond); - switch (idxStatus) { - case SFLT_NOT_INDEX: - code = cpdCondAppend(pOtherCond, pTagCond); - break; - case SFLT_COARSE_INDEX: - pScan->pTagCond = nodesCloneNode(*pTagCond); - if (NULL == pScan->pTagCond) { - code = TSDB_CODE_OUT_OF_MEMORY; - break; - } - code = cpdCondAppend(pOtherCond, pTagCond); - break; - case SFLT_ACCURATE_INDEX: - pScan->pTagCond = *pTagCond; - *pTagCond = NULL; - break; - default: - code = TSDB_CODE_FAILED; - break; - } - return code; -} - static int32_t cpdOptimizeScanCondition(SOptimizeContext* pCxt, SScanLogicNode* pScan) { if (NULL == pScan->node.pConditions || OPTIMIZE_FLAG_TEST_MASK(pScan->node.optimizedFlag, OPTIMIZE_FLAG_CPD) || TSDB_SYSTEM_TABLE == pScan->tableType) { @@ 
-342,15 +315,12 @@ static int32_t cpdOptimizeScanCondition(SOptimizeContext* pCxt, SScanLogicNode* } SNode* pPrimaryKeyCond = NULL; - SNode* pTagCond = NULL; SNode* pOtherCond = NULL; - int32_t code = nodesPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pTagCond, &pOtherCond); + int32_t code = nodesPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond, + &pOtherCond); if (TSDB_CODE_SUCCESS == code && NULL != pPrimaryKeyCond) { code = cpdCalcTimeRange(pCxt, pScan, &pPrimaryKeyCond, &pOtherCond); } - if (TSDB_CODE_SUCCESS == code && NULL != pTagCond) { - code = cpdApplyTagIndex(pScan, &pTagCond, &pOtherCond); - } if (TSDB_CODE_SUCCESS == code) { pScan->node.pConditions = pOtherCond; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 37765edfc5..5f8fd4ac4e 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -436,6 +436,15 @@ static int32_t createScanPhysiNodeFinalize(SPhysiPlanContext* pCxt, SSubplan* pS } } + if (TSDB_CODE_SUCCESS == code) { + if (NULL != pScanLogicNode->pTagIndexCond) { + pSubplan->pTagIndexCond = nodesCloneNode(pScanLogicNode->pTagIndexCond); + if (NULL == pSubplan->pTagIndexCond) { + code = TSDB_CODE_OUT_OF_MEMORY; + } + } + } + if (TSDB_CODE_SUCCESS == code) { *pPhyNode = (SPhysiNode*)pScanPhysiNode; } else { diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index f0e0e84bd9..9847755a7a 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -166,6 +166,31 @@ static bool stbSplHasMultiTbScan(bool streamQuery, SLogicNode* pNode) { return (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pChild) && stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pChild)); } +static bool stbSplNeedSplitWindow(bool streamQuery, SLogicNode* pNode) { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode; + if (WINDOW_TYPE_INTERVAL == pWindow->winType) { + return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); + } + + if (WINDOW_TYPE_SESSION == pWindow->winType) { + if (!streamQuery) { + return stbSplHasMultiTbScan(streamQuery, pNode); + } else { + return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); + } + } + + if (WINDOW_TYPE_STATE == pWindow->winType) { + if (!streamQuery) { + return stbSplHasMultiTbScan(streamQuery, pNode); + } else { + return false; + } + } + + return false; +} + static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_SCAN: @@ -174,13 +199,8 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { return !(((SJoinLogicNode*)pNode)->isSingleTableJoin); case QUERY_NODE_LOGIC_PLAN_AGG: return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); - case QUERY_NODE_LOGIC_PLAN_WINDOW: { - SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode; - if (WINDOW_TYPE_STATE == pWindow->winType || (!streamQuery && WINDOW_TYPE_SESSION == pWindow->winType)) { - return false; - } - return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); - } + case QUERY_NODE_LOGIC_PLAN_WINDOW: + return stbSplNeedSplitWindow(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_SORT: return stbSplHasMultiTbScan(streamQuery, pNode); default: @@ -477,11 +497,64 @@ static int32_t stbSplSplitSessionForStream(SSplitContext* pCxt, 
SStableSplitInfo return code; } +static void splSetTableScanType(SLogicNode* pNode, EScanType scanType) { + if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) { + ((SScanLogicNode*)pNode)->scanType = scanType; + } else { + if (1 == LIST_LENGTH(pNode->pChildren)) { + splSetTableScanType((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), scanType); + } + } +} + +static int32_t stbSplSplitSessionOrStateForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + SLogicNode* pWindow = pInfo->pSplitNode; + SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pWindow->pChildren, 0); + + SNodeList* pMergeKeys = NULL; + int32_t code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pWindow)->pTspk, &pMergeKeys); + + if (TSDB_CODE_SUCCESS == code) { + code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pChild, pMergeKeys, (SLogicNode*)pChild); + } + + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, + (SNode*)splCreateScanSubplan(pCxt, pChild, SPLIT_FLAG_STABLE_SPLIT)); + } + + if (TSDB_CODE_SUCCESS == code) { + splSetTableScanType(pChild, SCAN_TYPE_TABLE_MERGE); + ++(pCxt->groupId); + } + + if (TSDB_CODE_SUCCESS == code) { + pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE; + SPLIT_FLAG_SET_MASK(pInfo->pSubplan->splitFlag, SPLIT_FLAG_STABLE_SPLIT); + } else { + nodesDestroyList(pMergeKeys); + } + + return code; +} + static int32_t stbSplSplitSession(SSplitContext* pCxt, SStableSplitInfo* pInfo) { if (pCxt->pPlanCxt->streamQuery) { return stbSplSplitSessionForStream(pCxt, pInfo); } else { - return TSDB_CODE_PLAN_INTERNAL_ERROR; + return stbSplSplitSessionOrStateForBatch(pCxt, pInfo); + } +} + +static int32_t stbSplSplitStateForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + return TSDB_CODE_PLAN_INTERNAL_ERROR; +} + +static int32_t stbSplSplitState(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + if (pCxt->pPlanCxt->streamQuery) { + return stbSplSplitStateForStream(pCxt, pInfo); + } else { + return stbSplSplitSessionOrStateForBatch(pCxt, pInfo); } } @@ -511,6 +584,8 @@ static int32_t stbSplSplitWindowForMergeTable(SSplitContext* pCxt, SStableSplitI return stbSplSplitInterval(pCxt, pInfo); case WINDOW_TYPE_SESSION: return stbSplSplitSession(pCxt, pInfo); + case WINDOW_TYPE_STATE: + return stbSplSplitState(pCxt, pInfo); default: break; } diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp index 1bee43aa49..8b3d263d66 100644 --- a/source/libs/planner/test/planOptimizeTest.cpp +++ b/source/libs/planner/test/planOptimizeTest.cpp @@ -40,6 +40,8 @@ TEST_F(PlanOptimizeTest, ConditionPushDown) { run("SELECT ts, c1 FROM st1 WHERE tag1 > 4 or tag1 < 2"); run("SELECT ts, c1 FROM st1 WHERE tag1 > 4 AND tag2 = 'hello'"); + + run("SELECT ts, c1 FROM st1 WHERE tag1 > 4 AND tag2 = 'hello' AND c1 > 10"); } TEST_F(PlanOptimizeTest, orderByPrimaryKey) { diff --git a/source/libs/planner/test/planSessionTest.cpp b/source/libs/planner/test/planSessionTest.cpp index 7d5d826925..f445bb5ffc 100644 --- a/source/libs/planner/test/planSessionTest.cpp +++ b/source/libs/planner/test/planSessionTest.cpp @@ -34,3 +34,13 @@ TEST_F(PlanSessionTest, selectFunc) { // select function along with the columns of select row, and with SESSION clause run("SELECT MAX(c1), c2 FROM t1 SESSION(ts, 10s)"); } + +TEST_F(PlanSessionTest, stable) { + useDb("root", "test"); + + // select function for SESSION clause + run("SELECT MAX(c1), MIN(c1) FROM st1 SESSION(ts, 10s)"); + // select function along with the columns of select row, and 
with SESSION clause + run("SELECT MAX(c1), c2 FROM st1 SESSION(ts, 10s)"); + run("SELECT count(ts) FROM st1 PARTITION BY c1 SESSION(ts, 10s)"); +} diff --git a/source/libs/planner/test/planStateTest.cpp b/source/libs/planner/test/planStateTest.cpp index 9ff035e148..6985bc8807 100644 --- a/source/libs/planner/test/planStateTest.cpp +++ b/source/libs/planner/test/planStateTest.cpp @@ -40,3 +40,12 @@ TEST_F(PlanStateTest, selectFunc) { // select function along with the columns of select row, and with STATE_WINDOW clause run("SELECT MAX(c1), c2 FROM t1 STATE_WINDOW(c3)"); } + +TEST_F(PlanStateTest, stable) { + useDb("root", "test"); + + // select function for STATE_WINDOW clause + run("SELECT MAX(c1), MIN(c1) FROM st1 STATE_WINDOW(c2)"); + // select function along with the columns of select row, and with STATE_WINDOW clause + run("SELECT MAX(c1), c2 FROM st1 STATE_WINDOW(c2)"); +} diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c index e2dfd9682b..df608412b0 100644 --- a/source/libs/qcom/src/querymsg.c +++ b/source/libs/qcom/src/querymsg.c @@ -126,6 +126,25 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t return TSDB_CODE_SUCCESS; } +int32_t queryBuildDnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { + if (NULL == msg || NULL == msgLen) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + + SDnodeListReq dnodeListReq = {0}; + dnodeListReq.rowNum = -1; + + int32_t bufLen = tSerializeSDnodeListReq(NULL, 0, &dnodeListReq); + void *pBuf = (*mallcFp)(bufLen); + tSerializeSDnodeListReq(pBuf, bufLen, &dnodeListReq); + + *msg = pBuf; + *msgLen = bufLen; + + return TSDB_CODE_SUCCESS; +} + + int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { if (NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; @@ -428,6 +447,27 @@ int32_t queryProcessQnodeListRsp(void *output, char *msg, int32_t msgSize) { return code; } +int32_t queryProcessDnodeListRsp(void *output, char *msg, int32_t msgSize) { + SDnodeListRsp out = {0}; + int32_t code = 0; + + if (NULL == output || NULL == msg || msgSize <= 0) { + code = TSDB_CODE_TSC_INVALID_INPUT; + return code; + } + + if (tDeserializeSDnodeListRsp(msg, msgSize, &out) != 0) { + qError("invalid dnode list rsp msg, msgSize:%d", msgSize); + code = TSDB_CODE_INVALID_MSG; + return code; + } + + *(SArray**)output = out.dnodeList; + + return code; +} + + int32_t queryProcessGetDbCfgRsp(void *output, char *msg, int32_t msgSize) { SDbCfgRsp out = {0}; @@ -535,6 +575,7 @@ void initQueryModuleMsgHandle() { queryBuildMsg[TMSG_INDEX(TDMT_MND_TABLE_META)] = queryBuildTableMetaReqMsg; queryBuildMsg[TMSG_INDEX(TDMT_MND_USE_DB)] = queryBuildUseDbMsg; queryBuildMsg[TMSG_INDEX(TDMT_MND_QNODE_LIST)] = queryBuildQnodeListMsg; + queryBuildMsg[TMSG_INDEX(TDMT_MND_DNODE_LIST)] = queryBuildDnodeListMsg; queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_DB_CFG)] = queryBuildGetDBCfgMsg; queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_INDEX)] = queryBuildGetIndexMsg; queryBuildMsg[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)] = queryBuildRetrieveFuncMsg; @@ -547,6 +588,7 @@ void initQueryModuleMsgHandle() { queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_TABLE_META)] = queryProcessTableMetaRsp; queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_USE_DB)] = queryProcessUseDBRsp; queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_QNODE_LIST)] = queryProcessQnodeListRsp; + queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_DNODE_LIST)] = queryProcessDnodeListRsp; 
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_DB_CFG)] = queryProcessGetDbCfgRsp; queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_INDEX)] = queryProcessGetIndexRsp; queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)] = queryProcessRetrieveFuncRsp; diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index bff49c2ca9..50f2fd59d8 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -23,6 +23,7 @@ #include "sclvector.h" #include "tcompare.h" #include "tdatablock.h" +#include "tdataformat.h" #include "ttypes.h" #include "ttime.h" @@ -506,6 +507,16 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t } } + // if types can not comparable + if((IS_NUMERIC_TYPE(typeLeft) && !IS_NUMERIC_TYPE(typeRight)) || + (IS_NUMERIC_TYPE(typeRight) && !IS_NUMERIC_TYPE(typeLeft)) || + (IS_VAR_DATA_TYPE(typeLeft) && !IS_VAR_DATA_TYPE(typeRight)) || + (IS_VAR_DATA_TYPE(typeRight) && !IS_VAR_DATA_TYPE(typeLeft)) || + ((typeLeft == TSDB_DATA_TYPE_BOOL) && (typeRight != TSDB_DATA_TYPE_BOOL)) || + ((typeRight == TSDB_DATA_TYPE_BOOL) && (typeLeft != TSDB_DATA_TYPE_BOOL))) + return false; + + if(typeLeft == TSDB_DATA_TYPE_NULL || typeRight == TSDB_DATA_TYPE_NULL){ *isNull = true; return true; @@ -519,24 +530,28 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t *fp = filterGetCompFunc(type, optr); - if(IS_NUMERIC_TYPE(type) || IS_FLOAT_TYPE(type)){ + if(IS_NUMERIC_TYPE(type)){ if(typeLeft == TSDB_DATA_TYPE_NCHAR) { - convertNcharToDouble(*pLeftData, pLeftOut); - *pLeftData = pLeftOut; + ASSERT(0); +// convertNcharToDouble(*pLeftData, pLeftOut); +// *pLeftData = pLeftOut; } else if(typeLeft == TSDB_DATA_TYPE_BINARY) { - convertBinaryToDouble(*pLeftData, pLeftOut); - *pLeftData = pLeftOut; + ASSERT(0); +// convertBinaryToDouble(*pLeftData, pLeftOut); +// *pLeftData = pLeftOut; } else if(typeLeft != type) { convertNumberToNumber(*pLeftData, pLeftOut, typeLeft, type); *pLeftData = pLeftOut; } if(typeRight == TSDB_DATA_TYPE_NCHAR) { - convertNcharToDouble(*pRightData, pRightOut); - *pRightData = pRightOut; + ASSERT(0); +// convertNcharToDouble(*pRightData, pRightOut); +// *pRightData = pRightOut; } else if(typeRight == TSDB_DATA_TYPE_BINARY) { - convertBinaryToDouble(*pRightData, pRightOut); - *pRightData = pRightOut; + ASSERT(0); +// convertBinaryToDouble(*pRightData, pRightOut); +// *pRightData = pRightOut; } else if(typeRight != type) { convertNumberToNumber(*pRightData, pRightOut, typeRight, type); *pRightData = pRightOut; @@ -1693,6 +1708,13 @@ void vectorIsTrue(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, STagVal getJsonValue(char *json, char *key, bool *isExist) { STagVal val = {.pKey = key}; + if (tTagIsJson((const STag *)json) == false){ + if(isExist){ + *isExist = false; + } + return val; + } + bool find = tTagGet(((const STag *)json), &val); // json value is null and not exist is different if(isExist){ *isExist = find; diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp index d879150200..bfd1e60d26 100644 --- a/source/libs/scalar/test/scalar/scalarTests.cpp +++ b/source/libs/scalar/test/scalar/scalarTests.cpp @@ -1310,11 +1310,11 @@ TEST(columnTest, json_column_logic_op) { printf("--------------------json string--0 {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n"); key = "k2"; - bool eRes1[len+len1] = {false, false, true, true, false, false, false, true, false, true, false, true, true}; + bool eRes1[len+len1] = 
{false, false, false, false, false, false, false, true, false, true, false, true, true}; for(int i = 0; i < len; i++){ makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes1[i], op[i], false); } - bool eRes_1[len0] = {true, true, false, false, false, false}; + bool eRes_1[len0] = {false, false, false, false, false, false}; for(int i = 0; i < len0; i++){ makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_1[i], op[i], true); } @@ -1346,11 +1346,11 @@ TEST(columnTest, json_column_logic_op) { printf("--------------------json bool--1 {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n"); key = "k4"; - bool eRes3[len+len1] = {false, false, true, true, false, true, false, true, true, false, false, false, false}; + bool eRes3[len+len1] = {false, false, false, false, false, false, false, true, true, false, false, false, false}; for(int i = 0; i < len; i++){ makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes3[i], op[i], false); } - bool eRes_3[len0] = {false, true, false, false, false, true}; + bool eRes_3[len0] = {false, false, false, false, false, false}; for(int i = 0; i < len0; i++){ makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_3[i], op[i], true); } @@ -1419,11 +1419,11 @@ TEST(columnTest, json_column_logic_op) { printf("--------------------json bool-- 0 {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n"); key = "k8"; - bool eRes7[len+len1] = {false, false, true, true, false, false, false, true, false, false, false, false, false}; + bool eRes7[len+len1] = {false, false, false, false, false, false, false, true, false, false, false, false, false}; for(int i = 0; i < len; i++){ makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes7[i], op[i], false); } - bool eRes_7[len0] = {true, true, false, false, false, false}; + bool eRes_7[len0] = {false, false, false, false, false, false}; for(int i = 0; i < len0; i++) { makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_7[i], op[i], true); } @@ -1438,11 +1438,11 @@ TEST(columnTest, json_column_logic_op) { printf("--------------------json string-- 6.6hello {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n"); key = "k9"; - bool eRes8[len+len1] = {true, false, false, false, false, true, false, true, true, false, true, false, true}; + bool eRes8[len+len1] = {false, false, false, false, false, false, false, true, true, false, true, false, true}; for(int i = 0; i < len; i++){ makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes8[i], op[i], false); } - bool eRes_8[len0] = {false, true, true, true, false, true}; + bool eRes_8[len0] = {false, false, false, false, false, false}; for(int i = 0; i < len0; i++) { makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_8[i], op[i], true); } diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index c51cbc0513..bc5342cc7e 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -436,6 +436,11 @@ static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries } SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1); + if (myPreLogTerm == SYNC_TERM_INVALID) { + sError("vgId:%d sync get pre term error, preindex:%ld", pSyncNode->vgId, pMsg->prevLogIndex); + return false; + } + if (pMsg->prevLogIndex <= myLastIndex && pMsg->prevLogTerm == myPreLogTerm) { if (gRaftDetailLog) { sTrace( diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index da6d869611..732f450f4c 100644 --- a/source/libs/sync/src/syncMain.c +++ 
b/source/libs/sync/src/syncMain.c @@ -345,7 +345,7 @@ bool syncCanLeaderTransfer(int64_t rid) { return matchOK; } -int32_t syncForwardToPeer(int64_t rid, const SRpcMsg* pMsg, bool isWeak) { +int32_t syncForwardToPeer(int64_t rid, SRpcMsg* pMsg, bool isWeak) { int32_t ret = syncPropose(rid, pMsg, isWeak); return ret; } @@ -584,7 +584,7 @@ void setHeartbeatTimerMS(int64_t rid, int32_t hbTimerMS) { taosReleaseRef(tsNodeRefId, pSyncNode->rid); } -int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) { +int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) { int32_t ret = 0; SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid); @@ -1309,40 +1309,44 @@ void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) { SyncIndex logLastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); SyncIndex logBeginIndex = pSyncNode->pLogStore->syncLogBeginIndex(pSyncNode->pLogStore); + char* pCfgStr = syncCfg2SimpleStr(&(pSyncNode->pRaftCfg->cfg)); + if (userStrLen < 256) { - char logBuf[128 + 256]; + char logBuf[256 + 256]; if (pSyncNode != NULL && pSyncNode->pRaftCfg != NULL && pSyncNode->pRaftStore != NULL) { snprintf(logBuf, sizeof(logBuf), "vgId:%d, sync %s %s, term:%lu, commit:%ld, beginlog:%ld, lastlog:%ld, lastsnapshot:%ld, standby:%d, " "replica-num:%d, " - "lconfig:%ld, changing:%d", + "lconfig:%ld, changing:%d, %s", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, pSyncNode->pRaftCfg->isStandBy, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, - pSyncNode->changing); + pSyncNode->changing, pCfgStr); } else { snprintf(logBuf, sizeof(logBuf), "%s", str); } sDebug("%s", logBuf); } else { - int len = 128 + userStrLen; + int len = 256 + userStrLen; char* s = (char*)taosMemoryMalloc(len); if (pSyncNode != NULL && pSyncNode->pRaftCfg != NULL && pSyncNode->pRaftStore != NULL) { snprintf(s, len, "vgId:%d, sync %s %s, term:%lu, commit:%ld, beginlog:%ld, lastlog:%ld, lastsnapshot:%ld, standby:%d, " "replica-num:%d, " - "lconfig:%ld, changing:%d", + "lconfig:%ld, changing:%d, %s", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, pSyncNode->pRaftCfg->isStandBy, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, - pSyncNode->changing); + pSyncNode->changing, pCfgStr); } else { snprintf(s, len, "%s", str); } sDebug("%s", s); taosMemoryFree(s); } + + taosMemoryFree(pCfgStr); } void syncNodeErrorLog(const SSyncNode* pSyncNode, char* str) { @@ -1455,6 +1459,17 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde isAdd = false; } + // log begin config change + do { + char eventLog[256]; + char* pOldCfgStr = syncCfg2SimpleStr(&oldConfig); + char* pNewCfgStr = syncCfg2SimpleStr(pNewConfig); + snprintf(eventLog, sizeof(eventLog), "begin do config change, from %s to %s", pOldCfgStr, pNewCfgStr); + syncNodeEventLog(pSyncNode, eventLog); + taosMemoryFree(pOldCfgStr); + taosMemoryFree(pNewCfgStr); + } while (0); + if (IamInNew) { pSyncNode->pRaftCfg->isStandBy = 0; // change isStandBy to normal } @@ -1613,6 +1628,17 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde } _END: + + // log end config change + do { + char eventLog[256]; + char* pOldCfgStr = syncCfg2SimpleStr(&oldConfig); + char* pNewCfgStr = syncCfg2SimpleStr(pNewConfig); + 
snprintf(eventLog, sizeof(eventLog), "end do config change, from %s to %s", pOldCfgStr, pNewCfgStr); + syncNodeEventLog(pSyncNode, eventLog); + taosMemoryFree(pOldCfgStr); + taosMemoryFree(pNewCfgStr); + } while (0); return; } @@ -1888,6 +1914,16 @@ SyncIndex syncNodeSyncStartIndex(SSyncNode* pSyncNode) { return syncStartIndex; } +SyncIndex syncNodeGetPreIndex(SSyncNode* pSyncNode, SyncIndex index) { + SyncIndex preIndex = index - 1; + if (preIndex < SYNC_INDEX_INVALID) { + preIndex = SYNC_INDEX_INVALID; + } + + return preIndex; +} + +/* SyncIndex syncNodeGetPreIndex(SSyncNode* pSyncNode, SyncIndex index) { ASSERT(index >= SYNC_INDEX_BEGIN); @@ -1900,7 +1936,42 @@ SyncIndex syncNodeGetPreIndex(SSyncNode* pSyncNode, SyncIndex index) { SyncIndex preIndex = index - 1; return preIndex; } +*/ +SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) { + if (index < SYNC_INDEX_BEGIN) { + return SYNC_TERM_INVALID; + } + + if (index == SYNC_INDEX_BEGIN) { + return 0; + } + + SyncTerm preTerm = 0; + SyncIndex preIndex = index - 1; + SSyncRaftEntry* pPreEntry = NULL; + int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, preIndex, &pPreEntry); + if (code == 0) { + ASSERT(pPreEntry != NULL); + preTerm = pPreEntry->term; + taosMemoryFree(pPreEntry); + return preTerm; + } else { + if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { + SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0, .lastConfigIndex = -1}; + if (pSyncNode->pFsm->FpGetSnapshotInfo != NULL) { + pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot); + if (snapshot.lastApplyIndex == preIndex) { + return snapshot.lastApplyTerm; + } + } + } + } + + return SYNC_TERM_INVALID; +} + +#if 0 SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) { ASSERT(index >= SYNC_INDEX_BEGIN); @@ -1938,6 +2009,7 @@ SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) { ASSERT(0); return -1; } +#endif #if 0 SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) { diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index 0f6e8a28d9..701b4a7b93 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -144,7 +144,8 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr SyncIndex writeIndex = raftLogWriteIndex(pLogStore); if (pEntry->index != writeIndex) { - sError("raftLogAppendEntry error, pEntry->index:%ld update to writeIndex:%ld", pEntry->index, writeIndex); + sError("vgId:%d wal write index error, entry-index:%ld update to %ld", pData->pSyncNode->vgId, pEntry->index, + writeIndex); pEntry->index = writeIndex; } @@ -157,10 +158,10 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); - int32_t linuxErr = errno; - const char* linuxErrMsg = strerror(errno); - sError("raftLogAppendEntry error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, - linuxErrMsg); + int32_t sysErr = errno; + const char* sysErrStr = strerror(errno); + sError("vgId:%d wal write error, index:%ld, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", pData->pSyncNode->vgId, + pEntry->index, err, err, errStr, sysErr, sysErrStr); ASSERT(0); } @@ -237,12 +238,15 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); - int32_t linuxErr = errno; - const char* linuxErrMsg = 
strerror(errno); - sError("raftLogGetEntry error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, - linuxErrMsg); + int32_t sysErr = errno; + const char* sysErrStr = strerror(errno); + sError("vgId:%d wal read error, index:%ld, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", pData->pSyncNode->vgId, index, + err, err, errStr, sysErr, sysErrStr); + int32_t saveErr = terrno; walCloseReadHandle(pWalHandle); + terrno = saveErr; + return code; } @@ -257,8 +261,9 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, ASSERT((*ppEntry)->dataLen == pWalHandle->pHead->head.bodyLen); memcpy((*ppEntry)->data, pWalHandle->pHead->head.body, pWalHandle->pHead->head.bodyLen); - // need to hold, do not new every time!! + int32_t saveErr = terrno; walCloseReadHandle(pWalHandle); + terrno = saveErr; return code; } @@ -270,10 +275,11 @@ static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIn if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); - int32_t linuxErr = errno; - const char* linuxErrMsg = strerror(errno); - sError("raftLogTruncate error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, - linuxErrMsg); + int32_t sysErr = errno; + const char* sysErrStr = strerror(errno); + sError("vgId:%d wal truncate error, from-index:%ld, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", + pData->pSyncNode->vgId, fromIndex, err, err, errStr, sysErr, sysErrStr); + ASSERT(0); } return code; @@ -360,10 +366,11 @@ int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) { if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); - int32_t linuxErr = errno; - const char* linuxErrMsg = strerror(errno); - sError("walWriteWithSyncInfo error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, - linuxErrMsg); + int32_t sysErr = errno; + const char* sysErrStr = strerror(errno); + sError("vgId:%d wal write error, index:%ld, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", pData->pSyncNode->vgId, + pEntry->index, err, err, errStr, sysErr, sysErrStr); + ASSERT(0); } @@ -389,10 +396,11 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) { if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); - int32_t linuxErr = errno; - const char* linuxErrMsg = strerror(errno); - sError("walReadWithHandle error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, - linuxErrMsg); + int32_t sysErr = errno; + const char* sysErrStr = strerror(errno); + sError("vgId:%d wal read error, index:%ld, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", pData->pSyncNode->vgId, + index, err, err, errStr, sysErr, sysErrStr); + ASSERT(0); } // ASSERT(walReadWithHandle(pWalHandle, index) == 0); @@ -409,8 +417,10 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) { ASSERT(pEntry->dataLen == pWalHandle->pHead->head.bodyLen); memcpy(pEntry->data, pWalHandle->pHead->head.body, pWalHandle->pHead->head.bodyLen); - // need to hold, do not new every time!! 
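/*
 * The lines added below (and the matching changes in the error path above and
 * in logStoreGetEntry further down) snapshot terrno before walCloseReadHandle()
 * and restore it afterwards, so that a WAL read error recorded in terrno is not
 * overwritten by the close/cleanup call before the caller can inspect it.
 */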
+ int32_t saveErr = terrno; walCloseReadHandle(pWalHandle); + terrno = saveErr; + return pEntry; } else { @@ -426,10 +436,11 @@ int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex) { if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); - int32_t linuxErr = errno; - const char* linuxErrMsg = strerror(errno); - sError("walRollback error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, - linuxErrMsg); + int32_t sysErr = errno; + const char* sysErrStr = strerror(errno); + sError("vgId:%d wal truncate error, from-index:%ld, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", + pData->pSyncNode->vgId, fromIndex, err, err, errStr, sysErr, sysErrStr); + ASSERT(0); } return 0; @@ -460,9 +471,11 @@ int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index) { if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); - int32_t linuxErr = errno; - const char* linuxErrMsg = strerror(errno); - sError("walCommit error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, linuxErrMsg); + int32_t sysErr = errno; + const char* sysErrStr = strerror(errno); + sError("vgId:%d wal update commit index error, index:%ld, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", + pData->pSyncNode->vgId, index, err, err, errStr, sysErr, sysErrStr); + ASSERT(0); } return 0; diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index d6e6fbe522..c378926e28 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -139,6 +139,15 @@ int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) { // pre index, pre term SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex); SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex); + if (preLogTerm == SYNC_TERM_INVALID) { + SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1; + syncIndexMgrSetIndex(pSyncNode->pNextIndex, pDestId, newNextIndex); + syncIndexMgrSetIndex(pSyncNode->pMatchIndex, pDestId, SYNC_INDEX_INVALID); + sError("vgId:%d sync get pre term error, nextIndex:%ld, update next-index:%ld, match-index:%d, raftid:%ld", + pSyncNode->vgId, nextIndex, newNextIndex, SYNC_INDEX_INVALID, pDestId->addr); + + return -1; + } // batch optimized // SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex); diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index ff3dc56f71..ed9d62fd14 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -113,8 +113,16 @@ void snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void } if (!getLastConfig) { - syncNodeLog3("", pSender->pSyncNode); - ASSERT(0); + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "snapshot sender update lcindex from %ld to -1", + pSender->snapshot.lastConfigIndex); + pSender->snapshot.lastConfigIndex = -1; + + char *eventLog = snapshotSender2SimpleStr(pSender, logBuf); + syncNodeEventLog(pSender->pSyncNode, eventLog); + taosMemoryFree(eventLog); + + memset(&(pSender->lastConfig), 0, sizeof(SSyncCfg)); } } else { diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 1c924fa636..05fe62762a 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -857,20 +857,20 @@ static int tdbBtreeBalance(SBTC *pBtc) { } // TDB_BTREE_BALANCE -static int tdbFetchOvflPage(SPager *pPager, SPgno *pPgno, SPage 
**ppOfp, TXN *pTxn, SBTree *pBt) { +static int tdbFetchOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) { int ret = 0; *pPgno = 0; SBtreeInitPageArg iArg; iArg.pBt = pBt; iArg.flags = TDB_FLAG_ADD(0, TDB_BTREE_OVFL); - ret = tdbPagerFetchPage(pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn); + ret = tdbPagerFetchPage(pBt->pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn); if (ret < 0) { return -1; } // mark dirty - ret = tdbPagerWrite(pPager, *ppOfp); + ret = tdbPagerWrite(pBt->pPager, *ppOfp); if (ret < 0) { ASSERT(0); return -1; @@ -879,13 +879,13 @@ static int tdbFetchOvflPage(SPager *pPager, SPgno *pPgno, SPage **ppOfp, TXN *pT return ret; } -static int tdbLoadOvflPage(SPager *pPager, SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) { +static int tdbLoadOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) { int ret = 0; SBtreeInitPageArg iArg; iArg.pBt = pBt; iArg.flags = TDB_FLAG_ADD(0, TDB_BTREE_OVFL); - ret = tdbPagerFetchPage(pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn); + ret = tdbPagerFetchPage(pBt->pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn); if (ret < 0) { return -1; } @@ -922,7 +922,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const SPgno pgno = 0; SPage *ofp, *nextOfp; - ret = tdbFetchOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt); + ret = tdbFetchOvflPage(&pgno, &ofp, pTxn, pBt); if (ret < 0) { return -1; } @@ -962,7 +962,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const // fetch next ofp if not last page if (!lastPage) { // fetch a new ofp and make it dirty - ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt); + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); if (ret < 0) { tdbFree(pBuf); return -1; @@ -1019,14 +1019,14 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const nLeft -= lastKeyPageSpace; // fetch next ofp, a new ofp and make it dirty - ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt); + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); if (ret < 0) { return -1; } } } else { // fetch next ofp, a new ofp and make it dirty - ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt); + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); if (ret < 0) { return -1; } @@ -1057,7 +1057,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const // fetch next ofp if not last page if (!lastPage) { // fetch a new ofp and make it dirty - ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt); + ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt); if (ret < 0) { tdbFree(pBuf); return -1; @@ -1198,7 +1198,7 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, // unpack left val data from ovpages while (pgno != 0) { - ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt); + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); if (ret < 0) { return -1; } @@ -1235,7 +1235,7 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, int lastKeyPageSpace = 0; // load left key & val to ovpages while (pgno != 0) { - ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt); + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); if (ret < 0) { return -1; } @@ -1280,7 +1280,7 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, } while (nLeft > 0) { - ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt); + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); if (ret < 0) { 
return -1; } @@ -1411,7 +1411,7 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN * int bytes; while (pgno != 0) { - ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt); + ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt); if (ret < 0) { return -1; } @@ -2023,7 +2023,7 @@ int tdbBtcMoveTo(SBTC *pBtc, const void *pKey, int kLen, int *pCRst) { // check if key <= current position if (idx < nCells) { pCell = tdbPageGetCell(pPage, idx); - tdbBtreeDecodeCell(pPage, pCell, &cd, pBtc->pTxn, pBtc->pBt); + tdbBtreeDecodeCell(pPage, pCell, &cd); c = pBt->kcmpr(pKey, kLen, cd.pKey, cd.kLen); if (c > 0) break; } diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 8e9cb3a84b..4150fe6d1b 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -19,6 +19,8 @@ #include "tref.h" #include "walInt.h" +bool FORCE_INLINE walIsEmpty(SWal* pWal) { return pWal->vers.firstVer == -1; } + int64_t FORCE_INLINE walGetFirstVer(SWal* pWal) { return pWal->vers.firstVer; } int64_t FORCE_INLINE walGetSnaphostVer(SWal* pWal) { return pWal->vers.snapshotVer; } diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 9cbc9a3b02..031289f93e 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -141,7 +141,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) { // validate offset SWalHead head; ASSERT(taosValidFile(pLogTFile)); - int size = taosReadFile(pLogTFile, &head, sizeof(SWalHead)); + int64_t size = taosReadFile(pLogTFile, &head, sizeof(SWalHead)); if (size != sizeof(SWalHead)) { return -1; } @@ -149,22 +149,33 @@ int32_t walRollback(SWal *pWal, int64_t ver) { ASSERT(code == 0); if (code != 0) { + terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } if (head.head.version != ver) { - // TODO + ASSERT(0); + terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } + // truncate old files code = taosFtruncateFile(pLogTFile, entry.offset); if (code < 0) { + ASSERT(0); + terrno = TAOS_SYSTEM_ERROR(errno); return -1; } code = taosFtruncateFile(pIdxTFile, idxOff); if (code < 0) { + ASSERT(0); + terrno = TAOS_SYSTEM_ERROR(errno); return -1; } pWal->vers.lastVer = ver - 1; + if (pWal->vers.lastVer < pWal->vers.firstVer) { + ASSERT(pWal->vers.lastVer == pWal->vers.firstVer - 1); + pWal->vers.firstVer = -1; + } ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->lastVer = ver - 1; ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize = entry.offset; taosCloseFile(&pIdxTFile); diff --git a/source/os/src/osLocale.c b/source/os/src/osLocale.c index d2369ea2a2..c2d10f3668 100644 --- a/source/os/src/osLocale.c +++ b/source/os/src/osLocale.c @@ -88,11 +88,11 @@ void taosSetSystemLocale(const char *inLocale, const char *inCharSet) { void taosGetSystemLocale(char *outLocale, char *outCharset) { #ifdef WINDOWS - char *locale = setlocale(LC_CTYPE, "chs"); + char *locale = setlocale(LC_CTYPE, "en_US.UTF-8"); if (locale != NULL) { tstrncpy(outLocale, locale, TD_LOCALE_LEN); } - strcpy(outCharset, "cp936"); + strcpy(outCharset, "UTF-8"); #elif defined(_TD_DARWIN_64) /* diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index 11d7d9831a..db1207f057 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -503,6 +503,38 @@ const char *cfgDtypeStr(ECfgDataType type) { } } +void cfgDumpItemValue(SConfigItem *pItem, char* buf, int32_t bufSize, int32_t* pLen) { + int32_t len = 0; + switch (pItem->dtype) { + case CFG_DTYPE_BOOL: + len = snprintf(buf, bufSize, "%u", 
pItem->bval); + break; + case CFG_DTYPE_INT32: + len = snprintf(buf, bufSize, "%d", pItem->i32); + break; + case CFG_DTYPE_INT64: + len = snprintf(buf, bufSize, "%" PRId64, pItem->i64); + break; + case CFG_DTYPE_FLOAT: + len = snprintf(buf, bufSize, "%f", pItem->fval); + break; + case CFG_DTYPE_STRING: + case CFG_DTYPE_DIR: + case CFG_DTYPE_LOCALE: + case CFG_DTYPE_CHARSET: + case CFG_DTYPE_TIMEZONE: + case CFG_DTYPE_NONE: + len = snprintf(buf, bufSize, "%s", pItem->str); + break; + } + + if (len > bufSize) { + len = bufSize; + } + + *pLen = len; +} + void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) { if (dump) { printf(" global config"); @@ -996,4 +1028,4 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl uInfo("fail get apollo url from cmd env file"); return -1; -} \ No newline at end of file +} diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 1ab6491736..cc28b19de9 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -12,6 +12,8 @@ ## ---- db ./test.sh -f tsim/db/create_all_options.sim ./test.sh -f tsim/db/alter_option.sim +./test.sh -f tsim/db/alter_replica_13.sim +#./test.sh -f tsim/db/alter_replica_31.sim ./test.sh -f tsim/db/basic1.sim ./test.sh -f tsim/db/basic2.sim ./test.sh -f tsim/db/basic3.sim @@ -21,6 +23,7 @@ ./test.sh -f tsim/db/taosdlog.sim # ---- dnode +./test.sh -f tsim/dnode/balance_replica1.sim ./test.sh -f tsim/dnode/create_dnode.sim ./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim ./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim @@ -28,6 +31,8 @@ #./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim #./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim #./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim +#./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim +./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim # ---- insert ./test.sh -f tsim/insert/basic0.sim @@ -104,6 +109,7 @@ ./test.sh -f tsim/tmq/basic4Of2Cons.sim ./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim ./test.sh -f tsim/tmq/topic.sim +./test.sh -f tsim/tmq/snapshot.sim # --- stable ./test.sh -f tsim/stable/disk.sim diff --git a/tests/script/tsim/dnode/balance_replica1.sim b/tests/script/tsim/dnode/balance_replica1.sim new file mode 100644 index 0000000000..14f3f130fb --- /dev/null +++ b/tests/script/tsim/dnode/balance_replica1.sim @@ -0,0 +1,123 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +sql connect + +print =============== step1 create dnode2 +# no enough vnodes +sql balance vgroup + +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 + +$x = 0 +step1: + $ = $x + 1 + sleep 1000 + if $x == 10 then + print ---> dnode not online! 
+ return -1 + endi +sql show dnodes +print ---> $data00 $data01 $data02 $data03 $data04 $data05 +print ---> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 3 then + return -1 +endi +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi +if $data(3)[4] != offline then + goto step1 +endi + +print =============== step2 create database +sql create database d1 vgroups 2 +sql use d1 +sql create table d1.st (ts timestamp, i int) tags (j int) +sql create table d1.c1 using st tags(1) +sql create table d1.c2 using st tags(1) +sql create table d1.c3 using st tags(1) +sql create table d1.c4 using st tags(1) +sql create table d1.c5 using st tags(1) +sql create table d1.c6 using st tags(1) +sql show d1.tables +if $rows != 6 then + return -1 +endi + +sql show d1.vgroups +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] +if $rows != 2 then + return -1 +endi +if $data(2)[3] != 2 then + return -1 +endi +if $data(3)[3] != 2 then + return -1 +endi + +print =============== step3: balance vgroup +# has offline dnode +sql_error balance vgroup + +system sh/exec.sh -n dnode3 -s start +$x = 0 +step3: + $ = $x + 1 + sleep 1000 + if $x == 10 then + print ---> dnode not online! + return -1 + endi +sql show dnodes +print ---> $data00 $data01 $data02 $data03 $data04 $data05 +print ---> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 3 then + return -1 +endi +if $data(1)[4] != ready then + goto step3 +endi +if $data(2)[4] != ready then + goto step3 +endi +if $data(3)[4] != ready then + goto step3 +endi + +print =============== step4: balance +sql balance vgroup + +print show d1.vgroups +sql show d1.vgroups +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] +if $rows != 2 then + return -1 +endi +if $data(2)[3] != 3 then + return -1 +endi +if $data(3)[3] != 2 then + return -1 +endi + +print =============== step7: select data +sql show d1.tables +print rows $rows +if $rows != 6 then + return -1 +endi + +return +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT diff --git a/tests/script/tsim/dnode/drop_dnode_has_vnode_replica3.sim b/tests/script/tsim/dnode/drop_dnode_has_vnode_replica3.sim index 91679a7e81..3ea351f7a7 100644 --- a/tests/script/tsim/dnode/drop_dnode_has_vnode_replica3.sim +++ b/tests/script/tsim/dnode/drop_dnode_has_vnode_replica3.sim @@ -28,7 +28,7 @@ step1: sql show dnodes print ===> $data00 $data01 $data02 $data03 $data04 $data05 print ===> $data10 $data11 $data12 $data13 $data14 $data15 -if $rows != 3 then +if $rows != 5 then return -1 endi if $data(1)[4] != ready then @@ -72,6 +72,13 @@ if $data(2)[7] != 4 then return -1 endi +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT + + +return + + print =============== step4: drop dnode 2 system sh/exec.sh -n dnode5 -s start $x = 0 diff --git a/tests/script/tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim b/tests/script/tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim index 3532332174..788a23991a 100644 --- a/tests/script/tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim +++ b/tests/script/tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim @@ -176,6 +176,8 @@ if $rows != 1 then return -1 endi +return + print =============== step33: move follower1 print redistribute vgroup 2 dnode $leaderVnode dnode $follower1 dnode 5 sql redistribute vgroup 2 dnode $leaderVnode dnode $follower1 dnode 5 diff --git 
a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim index 94ca2f6550..8f9362fc69 100644 --- a/tests/script/tsim/show/basic.sim +++ b/tests/script/tsim/show/basic.sim @@ -99,7 +99,7 @@ if $rows != 1 then endi #sql select * from information_schema.`streams` sql select * from information_schema.user_tables -if $rows != 30 then +if $rows != 31 then return -1 endi #sql select * from information_schema.user_table_distributed @@ -197,7 +197,7 @@ if $rows != 1 then endi #sql select * from performance_schema.`streams` sql select * from information_schema.user_tables -if $rows != 30 then +if $rows != 31 then return -1 endi #sql select * from information_schema.user_table_distributed @@ -227,5 +227,20 @@ endi sql_error show create stable t0; +sql show variables; +if $rows != 4 then + return -1 +endi + +sql show dnode 1 variables; +if $rows != 114 then + return -1 +endi + +sql show local variables; +if $rows != 50 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/tmq/consume.sh b/tests/script/tsim/tmq/consume.sh index 3fa71d6edd..001ce6ae49 100755 --- a/tests/script/tsim/tmq/consume.sh +++ b/tests/script/tsim/tmq/consume.sh @@ -17,8 +17,9 @@ VALGRIND=0 SIGNAL=SIGINT SHOW_MSG=0 SHOW_ROW=0 +EXP_USE_SNAPSHOT=0 -while getopts "d:s:v:y:x:g:r:w:" arg +while getopts "d:s:v:y:x:g:r:w:e:" arg do case $arg in d) @@ -45,6 +46,9 @@ do w) CDB_NAME=$OPTARG ;; + e) + EXP_USE_SNAPSHOT=$OPTARG + ;; ?) echo "unkown argument" ;; @@ -91,8 +95,8 @@ if [ "$EXEC_OPTON" = "start" ]; then echo nohup valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${LOG_DIR}/valgrind-tmq_sim.log $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW > /dev/null 2>&1 & nohup valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${LOG_DIR}/valgrind-tmq_sim.log $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW > /dev/null 2>&1 & else - echo "nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME > /dev/null 2>&1 &" - nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME > /dev/null 2>&1 & + echo "nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME -e $EXP_USE_SNAPSHOT > /dev/null 2>&1 &" + nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME -e $EXP_USE_SNAPSHOT > /dev/null 2>&1 & fi else PID=`ps -ef|grep tmq_sim | grep -v grep | awk '{print $2}'` diff --git a/tests/script/tsim/tmq/snapshot.sim b/tests/script/tsim/tmq/snapshot.sim new file mode 100644 index 0000000000..5683aaa559 --- /dev/null +++ b/tests/script/tsim/tmq/snapshot.sim @@ -0,0 +1,289 @@ +#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406 +#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics +#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics +#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics +#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. 
Include six topics + +# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN +# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5; +# +# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval). +# + +run tsim/tmq/prepareBasicEnv-1vgrp.sim + +#---- global parameters start ----# +$dbName = db +$vgroups = 1 +$stbPrefix = stb +$ctbPrefix = ctb +$ntbPrefix = ntb +$stbNum = 1 +$ctbNum = 10 +$ntbNum = 10 +$rowsPerCtb = 10 +$tstart = 1640966400000 # 2022-01-01 00:00:00.000 +#---- global parameters end ----# + +$pullDelay = 3 +$ifcheckdata = 1 +$ifmanualcommit = 1 +$showMsg = 1 +$showRow = 0 + +sql connect +sql use $dbName + +print == create topics from super table +sql create topic topic_stb_column as select ts, c3 from stb +sql create topic topic_stb_all as select ts, c1, c2, c3 from stb +sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb + +print == create topics from child table +sql create topic topic_ctb_column as select ts, c3 from ctb0 +sql create topic topic_ctb_all as select * from ctb0 +sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0 + +print == create topics from normal table +sql create topic topic_ntb_column as select ts, c3 from ntb0 +sql create topic topic_ntb_all as select * from ntb0 +sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0 + +#sql show topics +#if $rows != 9 then +# return -1 +#endi + +#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest' +$keyList = ' . group.id:cgrp1 +$keyList = $keyList . , +$keyList = $keyList . enable.auto.commit:false +#$keyList = $keyList . , +#$keyList = $keyList . auto.commit.interval.ms:6000 +#$keyList = $keyList . , +#$keyList = $keyList . auto.offset.reset:earliest +$keyList = $keyList . ' +print ========== key list: $keyList + + +$cdb_index = 0 +#=============================== start consume =============================# + +print ================ test consume from stb +$loop_cnt = 0 +loop_consume_diff_topic_from_stb: + +####################################################################################### +# clear consume info and consume result +#run tsim/tmq/clearConsume.sim +# because drop table function no stable, so by create new db for consume info and result. Modify it later +$cdb_index = $cdb_index + 1 +$cdbName = cdb . $cdb_index +sql create database $cdbName vgroups 1 +sleep 500 +sql use $cdbName + +print == create consume info table and consume result table +sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) +sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) + +sql show tables +if $rows != 2 then + return -1 +endi +####################################################################################### + +if $loop_cnt == 0 then + print == scenario 1: topic_stb_column + $topicList = ' . topic_stb_column + $topicList = $topicList . ' +elif $loop_cnt == 1 then + print == scenario 2: topic_stb_all + $topicList = ' . topic_stb_all + $topicList = $topicList . ' +elif $loop_cnt == 2 then + print == scenario 3: topic_stb_function + $topicList = ' . topic_stb_function + $topicList = $topicList . 
' +else + goto loop_consume_diff_topic_from_stb_end +endi + +$consumerId = 0 +$totalMsgOfStb = $ctbNum * $rowsPerCtb +$expectmsgcnt = 1 +$expectrowcnt = 100 +sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit ) + +print == start consumer to pull msgs from stb +print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -e 1 -s start +system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -e 1 -s start + +print == check consume result +wait_consumer_end_from_stb: +sql select * from consumeresult +print ==> rows: $rows +print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + sleep 1000 + goto wait_consumer_end_from_stb +endi +if $data[0][1] != $consumerId then + return -1 +endi +if $data[0][2] != $expectmsgcnt then + return -1 +endi +if $data[0][3] != $expectrowcnt then + return -1 +endi +$loop_cnt = $loop_cnt + 1 +goto loop_consume_diff_topic_from_stb +loop_consume_diff_topic_from_stb_end: + +print ================ test consume from ctb +$loop_cnt = 0 +loop_consume_diff_topic_from_ctb: + +####################################################################################### +# clear consume info and consume result +#run tsim/tmq/clearConsume.sim +# because drop table function no stable, so by create new db for consume info and result. Modify it later +$cdb_index = $cdb_index + 1 +$cdbName = cdb . $cdb_index +sql create database $cdbName vgroups 1 +sleep 500 +sql use $cdbName + +print == create consume info table and consume result table +sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) +sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) + +sql show tables +if $rows != 2 then + return -1 +endi +####################################################################################### + +if $loop_cnt == 0 then + print == scenario 1: topic_ctb_column + $topicList = ' . topic_ctb_column + $topicList = $topicList . ' +elif $loop_cnt == 1 then + print == scenario 2: topic_ctb_all + $topicList = ' . topic_ctb_all + $topicList = $topicList . ' +elif $loop_cnt == 2 then + print == scenario 3: topic_ctb_function + $topicList = ' . topic_ctb_function + $topicList = $topicList . 
' +else + goto loop_consume_diff_topic_from_ctb_end +endi + +$consumerId = 0 +$totalMsgOfCtb = $rowsPerCtb +$expectmsgcnt = 1 +sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit ) + +print == start consumer to pull msgs from ctb +print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start -e 1 +system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start -e 1 + +print == check consume result +wait_consumer_end_from_ctb: +sql select * from consumeresult +print ==> rows: $rows +print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + sleep 1000 + goto wait_consumer_end_from_ctb +endi +if $data[0][1] != $consumerId then + return -1 +endi +if $data[0][2] != 1 then + return -1 +endi +if $data[0][3] != 10 then + return -1 +endi +$loop_cnt = $loop_cnt + 1 +goto loop_consume_diff_topic_from_ctb +loop_consume_diff_topic_from_ctb_end: + +print ================ test consume from ntb +$loop_cnt = 0 +loop_consume_diff_topic_from_ntb: + +####################################################################################### +# clear consume info and consume result +#run tsim/tmq/clearConsume.sim +# because drop table function no stable, so by create new db for consume info and result. Modify it later +$cdb_index = $cdb_index + 1 +$cdbName = cdb . $cdb_index +sql create database $cdbName vgroups 1 +sleep 500 +sql use $cdbName + +print == create consume info table and consume result table +sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int) +sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int) + +sql show tables +if $rows != 2 then + return -1 +endi +####################################################################################### + +if $loop_cnt == 0 then + print == scenario 1: topic_ntb_column + $topicList = ' . topic_ntb_column + $topicList = $topicList . ' +elif $loop_cnt == 1 then + print == scenario 2: topic_ntb_all + $topicList = ' . topic_ntb_all + $topicList = $topicList . ' +elif $loop_cnt == 2 then + print == scenario 3: topic_ntb_function + $topicList = ' . topic_ntb_function + $topicList = $topicList . 
' +else + goto loop_consume_diff_topic_from_ntb_end +endi + +$consumerId = 0 +$totalMsgOfNtb = $rowsPerCtb +$expectmsgcnt = $totalMsgOfNtb +sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit ) + +print == start consumer to pull msgs from ntb +print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start -e 1 +system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start -e 1 + +print == check consume result from ntb +wait_consumer_end_from_ntb: +sql select * from consumeresult +print ==> rows: $rows +print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + sleep 1000 + goto wait_consumer_end_from_ntb +endi +if $data[0][1] != $consumerId then + return -1 +endi +if $data[0][2] != 1 then + return -1 +endi +if $data[0][3] != $totalMsgOfNtb then + return -1 +endi +$loop_cnt = $loop_cnt + 1 +goto loop_consume_diff_topic_from_ntb +loop_consume_diff_topic_from_ntb_end: + +#------ not need stop consumer, because it exit after pull msg overthan expect msg +#system tsim/tmq/consume.sh -s stop -x SIGINT + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat index 1aa27202c6..1f089c9d86 100644 --- a/tests/script/wtest.bat +++ b/tests/script/wtest.bat @@ -52,7 +52,7 @@ echo wal 0 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% echo enableCoreFile 1 >> %TAOS_CFG% -echo charset cp65001 >> %TAOS_CFG% +echo charset UTF-8 >> %TAOS_CFG% set "FILE_NAME=testSuite.sim" if "%1" == "-f" set "FILE_NAME=%2" diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py index 052ff923ac..0c649f2008 100644 --- a/tests/system-test/2-query/json_tag.py +++ b/tests/system-test/2-query/json_tag.py @@ -68,7 +68,7 @@ class TDTestCase: tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')") tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')") tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')") - # + # test invalidate json key, key must can be printed assic char tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')") tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')") @@ -79,7 +79,7 @@ class TDTestCase: # test invalidate json value, value number can not be inf,nan TD-12166 tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":1.8e308}')") tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":-1.8e308}')") - # + #test length limit char1= ''.join(['abcd']*64) char3= ''.join(['abcd']*1021) @@ -87,15 +87,15 @@ class TDTestCase: tdSql.error("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s1\":5}')" % char1) # len(key)=257 tdSql.execute("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s\":5}')" % char1) # len(key)=256 tdSql.error("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSSS\":\"%s\"}')" % char3) # len(object)=4096 - #tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095 + tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095 tdSql.execute("drop table if exists jsons1_15") tdSql.execute("drop table if exists jsons1_16") - # + print("============== 
STEP 2 ===== alter table json tag") tdSql.error("ALTER STABLE jsons1 add tag tag2 nchar(20)") tdSql.error("ALTER STABLE jsons1 drop tag jtag") tdSql.error("ALTER TABLE jsons1 MODIFY TAG jtag nchar(128)") - # + tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'") tdSql.query("select jtag from jsons1_1") tdSql.checkData(0, 0, '{"tag1":"femail","tag2":35,"tag3":true}') @@ -105,9 +105,9 @@ class TDTestCase: tdSql.execute("create table st(ts timestamp, i int) tags(t int)") tdSql.error("ALTER STABLE st add tag jtag json") tdSql.error("ALTER STABLE st add column jtag json") - # - # print("============== STEP 3 ===== query table") - # # test error syntax + + print("============== STEP 3 ===== query table") + # test error syntax tdSql.error("select * from jsons1 where jtag->tag1='beijing'") tdSql.error("select -> from jsons1") tdSql.error("select * from jsons1 where contains") @@ -115,17 +115,17 @@ class TDTestCase: tdSql.error("select jtag->location from jsons1") tdSql.error("select jtag contains location from jsons1") tdSql.error("select * from jsons1 where jtag contains location") - #tdSql.error("select * from jsons1 where jtag contains''") + tdSql.query("select * from jsons1 where jtag contains''") tdSql.error("select * from jsons1 where jtag contains 'location'='beijing'") - # - # # test function error + + # test function error tdSql.error("select avg(jtag->'tag1') from jsons1") tdSql.error("select avg(jtag) from jsons1") tdSql.error("select min(jtag->'tag1') from jsons1") tdSql.error("select min(jtag) from jsons1") tdSql.error("select ceil(jtag->'tag1') from jsons1") tdSql.error("select ceil(jtag) from jsons1") - # + #test scalar operation tdSql.query("select jtag contains 'tag1',jtag->'tag1' from jsons1 order by jtag->'tag1'") @@ -158,10 +158,11 @@ class TDTestCase: tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, False) tdSql.checkData(7, 0, "false") - tdSql.checkData(7, 1, True) + tdSql.checkData(7, 1, False) + tdSql.checkData(8, 1, False) tdSql.checkData(12, 1, True) - # # test select normal column + # test select normal column tdSql.query("select dataint from jsons1 order by dataint") tdSql.checkRows(9) tdSql.checkData(1, 0, 1) @@ -180,7 +181,7 @@ class TDTestCase: tdSql.query("select jtag from jsons1_9") tdSql.checkData(0, 0, None) - # # test select json tag->'key', value is string + # test select json tag->'key', value is string tdSql.query("select jtag->'tag1' from jsons1_1") tdSql.checkData(0, 0, '"femail"') tdSql.query("select jtag->'tag2' from jsons1_6") @@ -200,7 +201,7 @@ class TDTestCase: # test select json tag->'key', key is not exist tdSql.query("select jtag->'tag10' from jsons1_4") tdSql.checkData(0, 0, None) - # + tdSql.query("select jtag->'tag1' from jsons1") tdSql.checkRows(13) # test header name @@ -210,24 +211,25 @@ class TDTestCase: tdSql.checkColNameList(res, cname_list) - # # test where with json tag + # test where with json tag tdSql.query("select * from jsons1_1 where jtag is not null") - # tdSql.error("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'") - # tdSql.error("select * from jsons1 where jtag->'tag1'={}") - # - # # where json value is string + # tdSql.query("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'") + tdSql.error("select * from jsons1 where jtag->'tag1'={}") + + # where json value is string tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'") tdSql.checkRows(2) - tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing'") + 
tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing' order by dataint") tdSql.checkRows(2) - # out of order, cannot compare value - #tdSql.checkData(0, 0, 2) - #tdSql.checkData(0, 1, 'jsons1_2') - #tdSql.checkData(0, 2, 5) - #tdSql.checkData(0, 3, '{"tag1":5,"tag2":"beijing"}') - #tdSql.checkData(1, 0, 3) - #tdSql.checkData(1, 1, 'jsons1_3') - #tdSql.checkData(1, 2, 'false') + tdSql.checkData(0, 0, 2) + tdSql.checkData(0, 1, 'jsons1_2') + tdSql.checkData(0, 2, "5.000000000") + tdSql.checkData(0, 3, '{"tag1":5,"tag2":"beijing"}') + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'jsons1_3') + tdSql.checkData(1, 2, 'false') + + tdSql.query("select * from jsons1 where jtag->'tag1'='beijing'") tdSql.checkRows(0) tdSql.query("select * from jsons1 where jtag->'tag1'='收到货'") @@ -236,72 +238,73 @@ class TDTestCase: tdSql.checkRows(1) tdSql.query("select * from jsons1 where jtag->'tag2'>='beijing'") tdSql.checkRows(3) - # open - #tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'") - #tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'") + tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'") tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'") + tdSql.checkRows(4) tdSql.query("select * from jsons1 where jtag->'tag2'!='beijing'") - tdSql.checkRows(5) + tdSql.checkRows(3) tdSql.query("select * from jsons1 where jtag->'tag2'=''") tdSql.checkRows(2) - # - # # where json value is int + + # where json value is int tdSql.query("select * from jsons1 where jtag->'tag1'=5") tdSql.checkRows(1) tdSql.checkData(0, 1, 2) tdSql.query("select * from jsons1 where jtag->'tag1'=10") tdSql.checkRows(0) tdSql.query("select * from jsons1 where jtag->'tag1'<54") - tdSql.checkRows(4) + tdSql.checkRows(3) tdSql.query("select * from jsons1 where jtag->'tag1'<=11") - tdSql.checkRows(4) + tdSql.checkRows(3) tdSql.query("select * from jsons1 where jtag->'tag1'>4") tdSql.checkRows(2) tdSql.query("select * from jsons1 where jtag->'tag1'>=5") tdSql.checkRows(2) tdSql.query("select * from jsons1 where jtag->'tag1'!=5") - tdSql.checkRows(6) + tdSql.checkRows(2) tdSql.query("select * from jsons1 where jtag->'tag1'!=55") - tdSql.checkRows(7) - # - # # where json value is double + tdSql.checkRows(3) + + # where json value is double tdSql.query("select * from jsons1 where jtag->'tag1'=1.232") tdSql.checkRows(1) tdSql.query("select * from jsons1 where jtag->'tag1'<1.232") - tdSql.checkRows(1) + tdSql.checkRows(0) tdSql.query("select * from jsons1 where jtag->'tag1'<=1.232") - tdSql.checkRows(2) + tdSql.checkRows(1) tdSql.query("select * from jsons1 where jtag->'tag1'>1.23") tdSql.checkRows(3) tdSql.query("select * from jsons1 where jtag->'tag1'>=1.232") tdSql.checkRows(3) tdSql.query("select * from jsons1 where jtag->'tag1'!=1.232") - tdSql.checkRows(6) + tdSql.checkRows(2) tdSql.query("select * from jsons1 where jtag->'tag1'!=3.232") - tdSql.checkRows(7) - #tdSql.error("select * from jsons1 where jtag->'tag1'/0=3") - #tdSql.error("select * from jsons1 where jtag->'tag1'/5=1") - # - # # where json value is bool + tdSql.checkRows(3) + tdSql.query("select * from jsons1 where jtag->'tag1'/0=3") + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'/5=1") + tdSql.checkRows(1) + + # where json value is bool tdSql.query("select * from jsons1 where jtag->'tag1'=true") tdSql.checkRows(0) - #tdSql.query("select * from jsons1 where jtag->'tag1'=false") - #tdSql.checkRows(1) + tdSql.query("select 
* from jsons1 where jtag->'tag1'=false") + tdSql.checkRows(1) tdSql.query("select * from jsons1 where jtag->'tag1'!=false") - tdSql.checkRows(3) - #tdSql.error("select * from jsons1 where jtag->'tag1'>false") - # - # # where json value is null - # open - #tdSql.query("select * from jsons1 where jtag->'tag1'=null") # only json suport =null. This synatx will change later. - #tdSql.checkRows(1) - # - # # where json key is null + tdSql.checkRows(0) + tdSql.query("select * from jsons1 where jtag->'tag1'>false") + tdSql.checkRows(0) + + # where json value is null + tdSql.query("select * from jsons1 where jtag->'tag1'=null") + tdSql.checkRows(0) + + # where json key is null tdSql.query("select * from jsons1 where jtag->'tag_no_exist'=3") tdSql.checkRows(0) - # - # # where json value is not exist + + # where json value is not exist tdSql.query("select * from jsons1 where jtag->'tag1' is null") tdSql.checkData(0, 0, 'jsons1_9') tdSql.checkRows(2) @@ -309,16 +312,16 @@ class TDTestCase: tdSql.checkRows(9) tdSql.query("select * from jsons1 where jtag->'tag3' is not null") tdSql.checkRows(3) - # - # # test contains + + # test contains tdSql.query("select * from jsons1 where jtag contains 'tag1'") - tdSql.checkRows(7) + tdSql.checkRows(8) tdSql.query("select * from jsons1 where jtag contains 'tag3'") - tdSql.checkRows(3) + tdSql.checkRows(4) tdSql.query("select * from jsons1 where jtag contains 'tag_no_exist'") tdSql.checkRows(0) - # - # # test json tag in where condition with and/or + + # test json tag in where condition with and/or tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'") tdSql.checkRows(1) tdSql.query("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'") @@ -335,15 +338,15 @@ class TDTestCase: tdSql.checkRows(3) tdSql.query("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'") tdSql.checkRows(2) - # - # - # # test with between and + + + # test with between and tdSql.query("select * from jsons1 where jtag->'tag1' between 1 and 30") tdSql.checkRows(3) tdSql.query("select * from jsons1 where jtag->'tag1' between 'femail' and 'beijing'") tdSql.checkRows(2) - # - # # test with tbname/normal column + + # test with tbname/normal column tdSql.query("select * from jsons1 where tbname = 'jsons1_1'") tdSql.checkRows(2) tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'") @@ -352,20 +355,18 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23") tdSql.checkRows(1) - # - # - # # test where condition like - # open - # syntax error - #tdSql.query("select *,tbname from jsons1 where jtag->'tag2' like 'bei%'") - #tdSql.checkRows(2) - #tdSql.query("select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null") - #tdSql.checkRows(2) - # - # # test where condition in no support in + + + # test where condition like + tdSql.query("select * from jsons1 where jtag->'tag2' like 'bei%'") + tdSql.checkRows(2) + tdSql.query("select * from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null") + tdSql.checkRows(2) + + # test where condition in no support in # tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')") - # - # # test where condition match/nmath + + # test where condition match/nmath tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'") tdSql.checkRows(2) tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma$'") @@ -376,23 +377,22 @@ 
class TDTestCase: tdSql.checkRows(1) tdSql.query("select * from jsons1 where jtag->'tag1' nmatch 'ma'") tdSql.checkRows(1) - # - # # test distinct + + # test distinct tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')") tdSql.query("select distinct jtag->'tag1' from jsons1") tdSql.checkRows(8) tdSql.query("select distinct jtag from jsons1") tdSql.checkRows(9) - # - # #test dumplicate key with normal colomn + + #test dumplicate key with normal colomn tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")") - #tdSql.query("select *,tbname,jtag from jsons1 where jtag->'datastr' match '是' and datastr match 'js'") - #tdSql.checkRows(1) - # open - #tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'") - #tdSql.checkRows(0) - # - # # test join + tdSql.query("select * from jsons1 where jtag->'datastr' match '是' and datastr match 'js'") + tdSql.checkRows(1) + # tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'") + # tdSql.checkRows(1) + + # test join tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") tdSql.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')") tdSql.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')") @@ -460,19 +460,18 @@ class TDTestCase: tdSql.checkColNameList(res, cname_list) # test top/bottom with group by json tag - # random failure - #tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'") - #tdSql.checkRows(11) - #tdSql.checkData(0, 1, None) - #tdSql.checkData(2, 0, 4) - #tdSql.checkData(3, 0, 3) - #tdSql.checkData(3, 1, "false") - #tdSql.checkData(10, 0, 23) - #tdSql.checkData(10, 1, '"femail"') + # tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'") + # tdSql.checkRows(11) + # tdSql.checkData(0, 1, None) + # tdSql.checkData(2, 0, 4) + # tdSql.checkData(3, 0, 3) + # tdSql.checkData(3, 1, "false") + # tdSql.checkData(8, 0, 2) + # tdSql.checkData(10, 1, '"femail"') # test having - #tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1") - #tdSql.checkRows(3) + tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1") + tdSql.checkRows(3) # subquery with json tag tdSql.query("select * from (select jtag, dataint from jsons1) order by dataint") @@ -480,22 +479,13 @@ class TDTestCase: tdSql.checkData(1, 1, 1) tdSql.checkData(5, 0, '{"tag1":false,"tag2":"beijing"}') - # tdSql.query("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)") - # tdSql.checkRows(11) - # tdSql.checkData(1, 0, '"femail"') - # tdSql.checkData(2, 0, 5) - # - # res = tdSql.getColNameList("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)") - # cname_list = [] - # cname_list.append("jtag->'tag1'") - # tdSql.checkColNameList(res, cname_list) - # - # tdSql.query("select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)") + tdSql.error("select jtag->'tag1' from (select jtag->'tag1', dataint 
from jsons1)") + # tdSql.query("select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)") # tdSql.checkRows(11) # tdSql.checkData(1, 1, "jsons1_1") # tdSql.checkData(1, 2, '"femail"') - # - # # union all + + # union all tdSql.query("select jtag->'tag1' from jsons1 union all select jtag->'tag2' from jsons2") tdSql.checkRows(17) tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag2' from jsons2_1") @@ -508,24 +498,25 @@ class TDTestCase: tdSql.query("select dataint,jtag,tbname from jsons1 union all select dataint,jtag,tbname from jsons2") tdSql.checkRows(13) - # #show create table - # tdSql.query("show create table jsons1") - # tdSql.checkData(0, 1, 'CREATE TABLE `jsons1` (`ts` TIMESTAMP,`dataint` INT,`databool` BOOL,`datastr` NCHAR(50),`datastrbin` BINARY(150)) TAGS (`jtag` JSON)') - # - # #test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares + #show create table + tdSql.query("show create table jsons1") + tdSql.checkData(0, 1, 'CREATE STABLE `jsons1` (`ts` TIMESTAMP, `dataint` INT, `databool` BOOL, `datastr` NCHAR(50), `datastrbin` VARCHAR(150)) TAGS (`jtag` JSON) WATERMARK 5000a, 5000a') + + #test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares tdSql.query("select count(*) from jsons1 where jtag is not null") tdSql.checkData(0, 0, 10) tdSql.query("select avg(dataint) from jsons1 where jtag is not null") tdSql.checkData(0, 0, 5.3) - #tdSql.error("select twa(dataint) from jsons1 where jtag is not null") - tdSql.query("select irate(dataint) from jsons1 where jtag is not null") - #tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null") - #tdSql.checkData(0, 0, 49) + # tdSql.query("select twa(dataint) from jsons1 where jtag is not null") + # tdSql.checkData(0, 0, 36) + # tdSql.error("select irate(dataint) from jsons1 where jtag is not null") + tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null") + tdSql.checkData(0, 0, 45) tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 4.496912521) - #tdSql.error("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null") - # - # #test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp + tdSql.query("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null") + + #test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp tdSql.query("select min(dataint) from jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 1) tdSql.query("select max(dataint) from jsons1 where jtag->'tag1'>1") @@ -541,13 +532,16 @@ class TDTestCase: tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1") tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 1.5) - #tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1") - #tdSql.checkData(0, 0, 11) - #tdSql.error("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1") - # - # #test calculation function:diff/derivative/spread/ceil/floor/round/ - #tdSql.error("select diff(dataint) from jsons1 where jtag->'tag1'>1") - #tdSql.error("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1") + # tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.query("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1") + + #test calculation function:diff/derivative/spread/ceil/floor/round/ + 
tdSql.query("select diff(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkRows(2) + # tdSql.checkData(0, 0, -1) + # tdSql.checkData(1, 0, 10) + tdSql.query("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1") + tdSql.checkData(0, 0, -2) tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 10) tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1") diff --git a/tests/system-test/7-tmq/tmqCheckData.py b/tests/system-test/7-tmq/tmqCheckData.py new file mode 100644 index 0000000000..0e55dfa19d --- /dev/null +++ b/tests/system-test/7-tmq/tmqCheckData.py @@ -0,0 +1,182 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def checkFileContent(self, consumerId, queryString): + buildPath = tdCom.getBuildPath() + cfgPath = tdCom.getClientCfgPath() + dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) + cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) + tdLog.info(cmdStr) + os.system(cmdStr) + + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) + tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) + + consumeFile = open(consumeRowsFile, mode='r') + queryFile = open(dstFile, mode='r') + + # skip first line for it is schema + queryFile.readline() + + while True: + dst = queryFile.readline() + src = consumeFile.readline() + + if dst: + if dst != src: + tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) + else: + break + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + + topicNameList = ['topic1', 'topic2', 'topic3'] + expectRowsList = [] + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + tdLog.info("insert data") + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + tdLog.info("create topics from stb with filter") + queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create 
topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("0 tmq consume rows error!") + + self.checkFileContent(consumerId, queryString) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + + queryString = "select ts, log(c1), cos(c1) from %s.%s where c1 > 3169" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[1], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + consumerId = 1 + topicList = topicNameList[1] + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + if expectRowsList[1] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) + tdLog.exit("1 tmq consume rows error!") + + self.checkFileContent(consumerId, queryString) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + + queryString = "select ts, log(c1), atan(c1) from %s.%s where ts >= %d" %(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+6137) + sqlString = "create topic %s as %s" %(topicNameList[2], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + consumerId = 2 + topicList = topicNameList[2] + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + # if expectRowsList[2] != resultList[0]: + # tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) + # tdLog.exit("2 tmq consume rows error!") + + # self.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def run(self): + tdSql.prepare() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqUdf.py b/tests/system-test/7-tmq/tmqUdf.py new file mode 100644 index 0000000000..f1c451de85 --- /dev/null +++ b/tests/system-test/7-tmq/tmqUdf.py @@ -0,0 +1,194 @@ +from distutils.log import error +import taos +import sys +import time +import socket +import os +import threading +import subprocess +import platform + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def prepare_udf_so(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + print(projPath) + + if platform.system().lower() == 'windows': + self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + if (not tdDnodes.dnodes[0].remoteIP == ""): + tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\") + self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so') + else: + self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + self.libudf1 = self.libudf1.replace('\r','').replace('\n','') + return + + def create_udf_function(self): + # create scalar functions + tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1) + + functions = tdSql.getResult("show functions") + function_nums = len(functions) + if function_nums == 1: + tdLog.info("create one udf functions success ") + else: + tdLog.exit("create udf functions fail") + return + + def checkFileContent(self, consumerId, queryString): + buildPath = tdCom.getBuildPath() + cfgPath = tdCom.getClientCfgPath() + dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) + cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) + tdLog.info(cmdStr) + os.system(cmdStr) + + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) + tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) + + consumeFile = open(consumeRowsFile, mode='r') + queryFile = open(dstFile, mode='r') + + # skip first line for it is schema + queryFile.readline() + + while True: + dst = queryFile.readline() + src = consumeFile.readline() + + if dst: + if dst != src: + tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) + else: + break + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 
'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 1000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + + topicNameList = ['topic1', 'topic2'] + expectRowsList = [] + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + tdLog.info("insert data") + tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + tdLog.info("create topics from stb with filter") + queryString = "select ts, c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("0 tmq consume rows error!") + + self.checkFileContent(consumerId, queryString) + tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) + + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + + queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[1], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + consumerId = 1 + topicList = topicNameList[1] + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + if expectRowsList[1] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) + tdLog.exit("1 tmq consume rows error!") + + self.checkFileContent(consumerId, queryString) + 
tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def run(self): + tdSql.prepare() + self.prepare_udf_so() + self.create_udf_function() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 64ba0183c5..19a67e924c 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -132,3 +132,5 @@ python3 ./test.py -f 7-tmq/db.py python3 ./test.py -f 7-tmq/tmqError.py python3 ./test.py -f 7-tmq/schema.py python3 ./test.py -f 7-tmq/stbFilter.py +python3 ./test.py -f 7-tmq/tmqCheckData.py +python3 ./test.py -f 7-tmq/tmqUdf.py diff --git a/tests/system-test/simpletest.bat b/tests/system-test/simpletest.bat index c8084c3ad5..74f2cdfcba 100644 --- a/tests/system-test/simpletest.bat +++ b/tests/system-test/simpletest.bat @@ -4,8 +4,8 @@ python3 .\test.py -f 0-others\taosShellNetChk.py python3 .\test.py -f 0-others\telemetry.py python3 .\test.py -f 0-others\taosdMonitor.py -python3 .\test.py -f 0-others\udfTest.py -python3 .\test.py -f 0-others\udf_create.py +@REM python3 .\test.py -f 0-others\udfTest.py +@REM python3 .\test.py -f 0-others\udf_create.py @REM python3 .\test.py -f 0-others\udf_restart_taosd.py @REM python3 .\test.py -f 0-others\cachelast.py diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index 948df3a40a..053cd79432 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -22,8 +22,10 @@ #include #include "taos.h" +#include "taosdef.h" #include "taoserror.h" #include "tlog.h" +#include "types.h" #define GREEN "\033[1;32m" #define NC "\033[0m" @@ -34,11 +36,7 @@ #define MAX_CONSUMER_THREAD_CNT (16) #define MAX_VGROUP_CNT (32) -typedef enum { - NOTIFY_CMD_START_CONSUM, - NOTIFY_CMD_START_COMMIT, - NOTIFY_CMD_ID_BUTT -}NOTIFY_CMD_ID; +typedef enum { NOTIFY_CMD_START_CONSUM, NOTIFY_CMD_START_COMMIT, NOTIFY_CMD_ID_BUTT } NOTIFY_CMD_ID; typedef struct { TdThread thread; @@ -49,8 +47,9 @@ typedef struct { // char autoCommit[8]; // true, false // char autoOffsetRest[16]; // none, earliest, latest - int32_t ifCheckData; - int64_t expectMsgCnt; + TdFilePtr pConsumeRowsFile; + int32_t ifCheckData; + int64_t expectMsgCnt; int64_t consumeMsgCnt; int64_t consumeRowCnt; @@ -86,6 +85,7 @@ typedef struct { int32_t saveRowFlag; int32_t consumeDelay; // unit s int32_t numOfThread; + int32_t useSnapshot; SThreadInfo stThreads[MAX_CONSUMER_THREAD_CNT]; } SConfInfo; @@ -93,6 +93,8 @@ static SConfInfo g_stConfInfo; TdFilePtr g_fp = NULL; static int running = 1; +int8_t useSnapshot = 0; + // char* g_pRowValue = NULL; // TdFilePtr g_fp = NULL; @@ -129,7 +131,6 @@ void initLogFile() { char tmpString[128]; sprintf(filename, "%s/../log/tmqlog_%s.txt", configDir, getCurrentTimeString(tmpString)); - // sprintf(filename, "%s/../log/tmqlog.txt", configDir); #ifdef WINDOWS for (int i = 2; i < sizeof(filename); i++) { if (filename[i] == ':') filename[i] = '-'; @@ -203,6 +204,8 @@ void parseArgument(int32_t argc, char* argv[]) { g_stConfInfo.saveRowFlag = atol(argv[++i]); } else if (strcmp(argv[i], "-y") == 0) { g_stConfInfo.consumeDelay = atol(argv[++i]); + } else if (strcmp(argv[i], "-e") == 0) { + useSnapshot = (int8_t)atol(argv[++i]); } else { 
pError("%s unknow para: %s %s", GREEN, argv[++i], NC); exit(-1); @@ -296,17 +299,148 @@ int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) { return 0; } +static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { + // if (shell.args.is_raw_time) { + // sprintf(buf, "%" PRId64, val); + // return buf; + // } + + time_t tt; + int32_t ms = 0; + if (precision == TSDB_TIME_PRECISION_NANO) { + tt = (time_t)(val / 1000000000); + ms = val % 1000000000; + } else if (precision == TSDB_TIME_PRECISION_MICRO) { + tt = (time_t)(val / 1000000); + ms = val % 1000000; + } else { + tt = (time_t)(val / 1000); + ms = val % 1000; + } + + /* + comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. + if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + if (tt <= 0 && ms < 0) { + tt--; + if (precision == TSDB_TIME_PRECISION_NANO) { + ms += 1000000000; + } else if (precision == TSDB_TIME_PRECISION_MICRO) { + ms += 1000000; + } else { + ms += 1000; + } + } + + struct tm* ptm = taosLocalTime(&tt, NULL); + size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm); + + if (precision == TSDB_TIME_PRECISION_NANO) { + sprintf(buf + pos, ".%09d", ms); + } else if (precision == TSDB_TIME_PRECISION_MICRO) { + sprintf(buf + pos, ".%06d", ms); + } else { + sprintf(buf + pos, ".%03d", ms); + } + + return buf; +} + +static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length, + int32_t precision) { + if (val == NULL) { + taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); + return; + } + + int n; + char buf[TSDB_MAX_BYTES_PER_ROW]; + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: + taosFprintfFile(pFile, "%d", ((((int32_t)(*((char*)val))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + taosFprintfFile(pFile, "%d", *((int8_t*)val)); + break; + case TSDB_DATA_TYPE_UTINYINT: + taosFprintfFile(pFile, "%u", *((uint8_t*)val)); + break; + case TSDB_DATA_TYPE_SMALLINT: + taosFprintfFile(pFile, "%d", *((int16_t*)val)); + break; + case TSDB_DATA_TYPE_USMALLINT: + taosFprintfFile(pFile, "%u", *((uint16_t*)val)); + break; + case TSDB_DATA_TYPE_INT: + taosFprintfFile(pFile, "%d", *((int32_t*)val)); + break; + case TSDB_DATA_TYPE_UINT: + taosFprintfFile(pFile, "%u", *((uint32_t*)val)); + break; + case TSDB_DATA_TYPE_BIGINT: + taosFprintfFile(pFile, "%" PRId64, *((int64_t*)val)); + break; + case TSDB_DATA_TYPE_UBIGINT: + taosFprintfFile(pFile, "%" PRIu64, *((uint64_t*)val)); + break; + case TSDB_DATA_TYPE_FLOAT: + taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val)); + break; + case TSDB_DATA_TYPE_DOUBLE: + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val)); + if (n > TMAX(25, length)) { + taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val)); + } else { + taosFprintfFile(pFile, "%s", buf); + } + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON: + memcpy(buf, val, length); + buf[length] = 0; + taosFprintfFile(pFile, "\'%s\'", buf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + shellFormatTimestamp(buf, *(int64_t*)val, precision); + taosFprintfFile(pFile, "'%s'", buf); + break; + default: + break; + } +} + +static void dumpToFileForCheck(TdFilePtr pFile, TAOS_ROW row, TAOS_FIELD* fields, int32_t* length, int32_t num_fields, + int32_t precision) { + for (int32_t i = 0; i < num_fields; i++) { + if (i > 0) { + taosFprintfFile(pFile, "\n"); + } + shellDumpFieldToFile(pFile, (const char*)row[i], fields + i, length[i], precision); + } + taosFprintfFile(pFile, "\n"); +} + static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) { char buf[1024]; int32_t totalRows = 0; // printf("topic: %s\n", tmq_get_topic_name(msg)); - int32_t vgroupId = tmq_get_vgroup_id(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); + const char* dbName = tmq_get_db_name(msg); - taosFprintfFile(g_fp, "msg index:%" PRId64 ", consumerId: %d\n", msgIndex, pInfo->consumerId); - // taosFprintfFile(g_fp, "topic: %s, vgroupId: %d, tableName: %s\n", tmq_get_topic_name(msg), vgroupId, - // tmq_get_table_name(msg)); - taosFprintfFile(g_fp, "topic: %s, vgroupId: %d\n", tmq_get_topic_name(msg), vgroupId); + taosFprintfFile(g_fp, "consumerId: %d, msg index:%" PRId64 "\n", pInfo->consumerId, msgIndex); + taosFprintfFile(g_fp, "dbName: %s, topic: %s, vgroupId: %d\n", dbName != NULL ? dbName : "invalid table", + tmq_get_topic_name(msg), vgroupId); while (1) { TAOS_ROW row = taos_fetch_row(msg); @@ -315,16 +449,18 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) TAOS_FIELD* fields = taos_fetch_fields(msg); int32_t numOfFields = taos_field_count(msg); - - taos_print_row(buf, row, fields, numOfFields); - + int32_t* length = taos_fetch_lengths(msg); + int32_t precision = taos_result_precision(msg); const char* tbName = tmq_get_table_name(msg); + dumpToFileForCheck(pInfo->pConsumeRowsFile, row, fields, length, numOfFields, precision); + taos_print_row(buf, row, fields, numOfFields); + if (0 != g_stConfInfo.showRowFlag) { taosFprintfFile(g_fp, "tbname:%s, rows[%d]: %s\n", (tbName != NULL ? 
tbName : "null table"), totalRows, buf); - if (0 != g_stConfInfo.saveRowFlag) { - saveConsumeContentToTbl(pInfo, buf); - } + // if (0 != g_stConfInfo.saveRowFlag) { + // saveConsumeContentToTbl(pInfo, buf); + // } } totalRows++; @@ -347,8 +483,7 @@ int queryDB(TAOS* taos, char* command) { return 0; } -static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) { -} +static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) {} int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { char sqlStr[1024] = {0}; @@ -356,11 +491,8 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { int64_t now = taosGetTimestampMs(); // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int - sprintf(sqlStr, "insert into %s.notifyinfo values (%"PRId64", %d, %d)", - g_stConfInfo.cdbName, - now, - cmdId, - pInfo->consumerId); + sprintf(sqlStr, "insert into %s.notifyinfo values (%" PRId64 ", %d, %d)", g_stConfInfo.cdbName, now, cmdId, + pInfo->consumerId); taos_query_a(pInfo->taos, sqlStr, appNothing, NULL); @@ -370,12 +502,12 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) { } static int32_t g_once_commit_flag = 0; -static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - pError("tmq_commit_cb_print() commit %d\n", code); +static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + pError("tmq_commit_cb_print() commit %d\n", code); if (0 == g_once_commit_flag) { g_once_commit_flag = 1; - notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); + notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT); } taosFprintfFile(g_fp, "tmq_commit_cb_print() be called\n"); } @@ -409,6 +541,10 @@ void build_consumer(SThreadInfo* pInfo) { // tmq_conf_set(conf, "auto.offset.reset", "none"); // tmq_conf_set(conf, "auto.offset.reset", "earliest"); // tmq_conf_set(conf, "auto.offset.reset", "latest"); + // + if (useSnapshot) { + tmq_conf_set(conf, "experiment.use.snapshot", "true"); + } pInfo->tmq = tmq_consumer_new(conf, NULL, 0); @@ -464,6 +600,19 @@ void loop_consume(SThreadInfo* pInfo) { pInfo->ts = taosGetTimestampMs(); + if (pInfo->ifCheckData) { + char filename[256] = {0}; + char tmpString[128]; + // sprintf(filename, "%s/../log/consumerid_%d_%s.txt", configDir, pInfo->consumerId, + // getCurrentTimeString(tmpString)); + sprintf(filename, "%s/../log/consumerid_%d.txt", configDir, pInfo->consumerId); + pInfo->pConsumeRowsFile = taosOpenFile(filename, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); + if (pInfo->pConsumeRowsFile == NULL) { + taosFprintfFile(g_fp, "%s create file fail for save rows content\n", getCurrentTimeString(tmpString)); + return; + } + } + while (running) { TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, g_stConfInfo.consumeDelay * 1000); if (tmqMsg) { @@ -475,10 +624,10 @@ void loop_consume(SThreadInfo* pInfo) { totalMsgs++; - if (0 == once_flag) { + if (0 == once_flag) { once_flag = 1; - notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM); - } + notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM); + } if (totalRows >= pInfo->expectMsgCnt) { char tmpString[128]; @@ -507,7 +656,7 @@ void* consumeThreadFunc(void* param) { pInfo->taos = taos_connect(NULL, "root", "taosdata", NULL, 0); if (pInfo->taos == NULL) { taosFprintfFile(g_fp, "taos_connect() fail, can not notify and save consume result to main scripte\n"); - exit(-1); + exit(-1); } build_consumer(pInfo); diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c index 
ef71f3fce6..f236c1eb88 100644
--- a/tools/shell/src/shellCommand.c
+++ b/tools/shell/src/shellCommand.c
@@ -21,6 +21,7 @@
 #define UP 3
 #define DOWN 4
 #define PSIZE shell.info.promptSize
+#define SHELL_INPUT_MAX_COMMAND_SIZE 10000
 
 typedef struct {
   char *buffer;
@@ -227,6 +228,7 @@ void shellPrintChar(char c, int32_t times) {
 }
 
 void shellPositionCursor(int32_t step, int32_t direction) {
+#ifndef WINDOWS
   if (step > 0) {
     if (direction == LEFT) {
       fprintf(stdout, "\033[%dD", step);
@@ -239,6 +241,7 @@
     }
     fflush(stdout);
   }
+#endif
 }
 
 void shellUpdateBuffer(SShellCmd *cmd) {
@@ -330,10 +333,14 @@ void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos) {
   int32_t command_x = ecmd_pos / ws_col;
   shellPositionCursor(cursor_y, LEFT);
   shellPositionCursor(command_x - cursor_x, DOWN);
+#ifndef WINDOWS
   fprintf(stdout, "\033[2K");
+#endif
   for (int32_t i = 0; i < command_x; i++) {
     shellPositionCursor(1, UP);
+  #ifndef WINDOWS
     fprintf(stdout, "\033[2K");
+  #endif
   }
   fflush(stdout);
 }
@@ -394,6 +401,41 @@ void shellShowOnScreen(SShellCmd *cmd) {
   fflush(stdout);
 }
 
+char taosGetConsoleChar() {
+#ifdef WINDOWS
+  static void *console = NULL;
+  if (console == NULL) {
+    console = GetStdHandle(STD_INPUT_HANDLE);
+  }
+  static TdWchar buf[SHELL_INPUT_MAX_COMMAND_SIZE];
+  static char mbStr[5];
+  static unsigned long bufLen = 0;
+  static uint16_t bufIndex = 0, mbStrIndex = 0, mbStrLen = 0;
+  if (bufLen == 0) {
+    ReadConsoleW(console, buf, SHELL_INPUT_MAX_COMMAND_SIZE, &bufLen, NULL);
+    bufIndex = 0;
+  }
+  if (mbStrLen == 0){
+    if (buf[bufIndex] == '\r') {
+      bufIndex++;
+    }
+    mbStrLen = WideCharToMultiByte(CP_UTF8, 0, &buf[bufIndex], 1, mbStr, sizeof(mbStr), NULL, NULL);
+    mbStrIndex = 0;
+    bufIndex++;
+  }
+  mbStrIndex++;
+  if (mbStrIndex == mbStrLen) {
+    mbStrLen = 0;
+    if (bufIndex == bufLen) {
+      bufLen = 0;
+    }
+  }
+  return mbStr[mbStrIndex-1];
+#else
+  return (char)getchar();  // getchar() return an 'int32_t' value
+#endif
+}
+
 int32_t shellReadCommand(char *command) {
   SShellHistory *pHistory = &shell.history;
   SShellCmd cmd = {0};
@@ -407,7 +449,7 @@
   // Read input.
   char c;
   while (1) {
-    c = (char)getchar();  // getchar() return an 'int32_t' value
+    c = taosGetConsoleChar();
 
     if (c == EOF) {
       return c;
@@ -417,7 +459,7 @@
       int32_t count = shellCountPrefixOnes(c);
       utf8_array[0] = c;
       for (int32_t k = 1; k < count; k++) {
-        c = (char)getchar();
+        c = taosGetConsoleChar();
        utf8_array[k] = c;
      }
      shellInsertChar(&cmd, utf8_array, count);
@@ -472,10 +514,10 @@
        break;
      }
    } else if (c == '\033') {
-      c = (char)getchar();
+      c = taosGetConsoleChar();
      switch (c) {
        case '[':
-          c = (char)getchar();
+          c = taosGetConsoleChar();
          switch (c) {
            case 'A':  // Up arrow
              if (hist_counter != pHistory->hstart) {
@@ -502,35 +544,35 @@
              shellMoveCursorLeft(&cmd);
              break;
            case '1':
-              if ((c = (char)getchar()) == '~') {
+              if ((c = taosGetConsoleChar()) == '~') {
                // Home key
                shellPositionCursorHome(&cmd);
              }
              break;
            case '2':
-              if ((c = (char)getchar()) == '~') {
+              if ((c = taosGetConsoleChar()) == '~') {
                // Insert key
              }
              break;
            case '3':
-              if ((c = (char)getchar()) == '~') {
+              if ((c = taosGetConsoleChar()) == '~') {
                // Delete key
                shellDeleteChar(&cmd);
              }
              break;
            case '4':
-              if ((c = (char)getchar()) == '~') {
+              if ((c = taosGetConsoleChar()) == '~') {
                // End key
                shellPositionCursorEnd(&cmd);
              }
              break;
            case '5':
-              if ((c = (char)getchar()) == '~') {
+              if ((c = taosGetConsoleChar()) == '~') {
                // Page up key
              }
              break;
            case '6':
-              if ((c = (char)getchar()) == '~') {
+              if ((c = taosGetConsoleChar()) == '~') {
                // Page down key
              }
              break;
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 1f29237d38..8a017d378d 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -393,15 +393,11 @@ void shellPrintNChar(const char *str, int32_t length, int32_t width) {
      break;
    }
    int w = 0;
-#ifdef WINDOWS
-    w = bytes;
-#else
    if(*(str + pos) == '\t' || *(str + pos) == '\n' || *(str + pos) == '\r'){
      w = bytes;
    }else{
      w = taosWcharWidth(wc);
    }
-#endif
    pos += bytes;
 
    if (w <= 0) {
@@ -524,6 +520,16 @@ bool shellIsLimitQuery(const char *sql) {
  return false;
}
 
+bool shellIsShowQuery(const char *sql) {
+  //todo refactor
+  if (taosStrCaseStr(sql, "show ") != NULL) {
+    return true;
+  }
+
+  return false;
+}
+
+
 int32_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql) {
  TAOS_ROW row = taos_fetch_row(tres);
  if (row == NULL) {
@@ -686,7 +692,7 @@ int32_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql) {
  uint64_t resShowMaxNum = UINT64_MAX;
 
-  if (shell.args.commands == NULL && shell.args.file[0] == 0 && !shellIsLimitQuery(sql)) {
+  if (shell.args.commands == NULL && shell.args.file[0] == 0 && !shellIsLimitQuery(sql) && !shellIsShowQuery(sql)) {
    resShowMaxNum = SHELL_DEFAULT_RES_SHOW_NUM;
  }
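Note on the final shellEngine.c hunk: shellIsShowQuery() is only a case-insensitive substring test on the SQL text, and its result keeps "show" commands from being truncated to SHELL_DEFAULT_RES_SHOW_NUM rows in interactive output. The standalone sketch below illustrates that detection logic under one assumption: a local containsIgnoreCase() helper stands in for TDengine's taosStrCaseStr(), which is taken to behave like a case-insensitive strstr(). The isShowQuery() name and the sample statements are hypothetical; this is not the shell's actual code path.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for taosStrCaseStr(): case-insensitive substring search (assumption). */
static bool startsWithIgnoreCase(const char *s, const char *prefix) {
  while (*prefix != '\0') {
    if (tolower((unsigned char)*s) != tolower((unsigned char)*prefix)) return false;
    s++;
    prefix++;
  }
  return true;
}

static bool containsIgnoreCase(const char *haystack, const char *needle) {
  for (; *haystack != '\0'; haystack++) {
    if (startsWithIgnoreCase(haystack, needle)) return true;
  }
  return *needle == '\0';
}

/* Mirrors the patch: a statement counts as a "show" query if it contains "show ". */
static bool isShowQuery(const char *sql) { return containsIgnoreCase(sql, "show "); }

int main(void) {
  const char *samples[] = {"show databases", "SHOW TABLES", "select * from jsons1 limit 5"};
  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
    /* Only statements not flagged here (and not limit queries) keep the default row-display cap. */
    printf("%-32s -> %s\n", samples[i], isShowQuery(samples[i]) ? "show query: cap removed" : "cap kept");
  }
  return 0;
}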