From 96b3603bfda19844cd9f3e06438ddb402b1e731b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 25 Jun 2022 09:09:33 +0800 Subject: [PATCH 01/29] refactor: rename auth to privilege --- source/dnode/mnode/impl/inc/mndAuth.h | 13 ++++-- source/dnode/mnode/impl/src/mndAcct.c | 13 ++++++ source/dnode/mnode/impl/src/mndAuth.c | 10 ++-- source/dnode/mnode/impl/src/mndBnode.c | 4 +- source/dnode/mnode/impl/src/mndDb.c | 15 +++--- source/dnode/mnode/impl/src/mndDnode.c | 6 +-- source/dnode/mnode/impl/src/mndFunc.c | 4 +- source/dnode/mnode/impl/src/mndMnode.c | 4 +- source/dnode/mnode/impl/src/mndOffset.c | 16 ++++--- source/dnode/mnode/impl/src/mndProfile.c | 48 ++++++++------------ source/dnode/mnode/impl/src/mndQnode.c | 4 +- source/dnode/mnode/impl/src/mndShow.c | 2 +- source/dnode/mnode/impl/src/mndSma.c | 4 +- source/dnode/mnode/impl/src/mndSnode.c | 4 +- source/dnode/mnode/impl/src/mndStb.c | 6 +-- source/dnode/mnode/impl/src/mndStream.c | 4 +- source/dnode/mnode/impl/src/mndTopic.c | 2 +- source/dnode/mnode/impl/src/mndTrans.c | 3 +- source/dnode/mnode/impl/src/mndUser.c | 18 ++++---- source/dnode/mnode/impl/src/mndVgroup.c | 6 +-- tests/script/tsim/user/privilege_sysinfo.sim | 21 +++++++++ 21 files changed, 117 insertions(+), 90 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndAuth.h b/source/dnode/mnode/impl/inc/mndAuth.h index 81a776b652..c6f337b44e 100644 --- a/source/dnode/mnode/impl/inc/mndAuth.h +++ b/source/dnode/mnode/impl/inc/mndAuth.h @@ -24,6 +24,9 @@ extern "C" { typedef enum { MND_OPER_CONNECT = 1, + MND_OPER_CREATE_ACCT, + MND_OPER_DROP_ACCT, + MND_OPER_ALTER_ACCT, MND_OPER_CREATE_USER, MND_OPER_DROP_USER, MND_OPER_ALTER_USER, @@ -45,6 +48,8 @@ typedef enum { MND_OPER_CREATE_FUNC, MND_OPER_DROP_FUNC, MND_OPER_KILL_TRANS, + MND_OPER_KILL_CONN, + MND_OPER_KILL_QUERY, MND_OPER_CREATE_DB, MND_OPER_ALTER_DB, MND_OPER_DROP_DB, @@ -57,10 +62,10 @@ typedef enum { int32_t mndInitAuth(SMnode *pMnode); void mndCleanupAuth(SMnode *pMnode); -int32_t mndCheckOperAuth(SMnode *pMnode, const char *user, EOperType operType); -int32_t mndCheckDbAuth(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb); -int32_t mndCheckShowAuth(SMnode *pMnode, const char *user, int32_t showType); -int32_t mndCheckAlterUserAuth(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter); +int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType); +int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb); +int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, int32_t showType); +int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/src/mndAcct.c b/source/dnode/mnode/impl/src/mndAcct.c index 0ce4a8c76e..da78abf5c0 100644 --- a/source/dnode/mnode/impl/src/mndAcct.c +++ b/source/dnode/mnode/impl/src/mndAcct.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "mndAcct.h" +#include "mndAuth.h" #include "mndShow.h" #include "mndTrans.h" @@ -212,18 +213,30 @@ static int32_t mndAcctActionUpdate(SSdb *pSdb, SAcctObj *pOld, SAcctObj *pNew) { } static int32_t mndProcessCreateAcctReq(SRpcMsg *pReq) { + if (mndCheckOperPrivilege(pReq->info.node, pReq->info.conn.user, MND_OPER_CREATE_ACCT) != 0) { + return -1; + } + terrno = TSDB_CODE_MSG_NOT_PROCESSED; mError("failed to process create acct request since %s", terrstr()); return -1; } static int32_t mndProcessAlterAcctReq(SRpcMsg *pReq) { + if 
(mndCheckOperPrivilege(pReq->info.node, pReq->info.conn.user, MND_OPER_ALTER_ACCT) != 0) { + return -1; + } + terrno = TSDB_CODE_MSG_NOT_PROCESSED; mError("failed to process create acct request since %s", terrstr()); return -1; } static int32_t mndProcessDropAcctReq(SRpcMsg *pReq) { + if (mndCheckOperPrivilege(pReq->info.node, pReq->info.conn.user, MND_OPER_DROP_ACCT) != 0) { + return -1; + } + terrno = TSDB_CODE_MSG_NOT_PROCESSED; mError("failed to process create acct request since %s", terrstr()); return -1; diff --git a/source/dnode/mnode/impl/src/mndAuth.c b/source/dnode/mnode/impl/src/mndAuth.c index 4445e3b9f7..f1542f5d42 100644 --- a/source/dnode/mnode/impl/src/mndAuth.c +++ b/source/dnode/mnode/impl/src/mndAuth.c @@ -73,7 +73,7 @@ static int32_t mndProcessAuthReq(SRpcMsg *pReq) { return code; } -int32_t mndCheckOperAuth(SMnode *pMnode, const char *user, EOperType operType) { +int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType) { int32_t code = 0; SUserObj *pUser = mndAcquireUser(pMnode, user); @@ -95,6 +95,8 @@ int32_t mndCheckOperAuth(SMnode *pMnode, const char *user, EOperType operType) { switch (operType) { case MND_OPER_CONNECT: + case MND_OPER_CREATE_FUNC: + case MND_OPER_DROP_FUNC: break; default: terrno = TSDB_CODE_MND_NO_RIGHTS; @@ -106,7 +108,7 @@ _OVER: return code; } -int32_t mndCheckAlterUserAuth(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter) { +int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter) { if (pUser->superUser && pAlter->alterType != TSDB_ALTER_USER_PASSWD) { terrno = TSDB_CODE_MND_NO_RIGHTS; return -1; @@ -129,7 +131,7 @@ int32_t mndCheckAlterUserAuth(SUserObj *pOperUser, SUserObj *pUser, SAlterUserRe return -1; } -int32_t mndCheckShowAuth(SMnode *pMnode, const char *user, int32_t showType) { +int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, int32_t showType) { int32_t code = 0; SUserObj *pUser = mndAcquireUser(pMnode, user); @@ -162,7 +164,7 @@ _OVER: return code; } -int32_t mndCheckDbAuth(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb) { +int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb) { int32_t code = 0; SUserObj *pUser = mndAcquireUser(pMnode, user); diff --git a/source/dnode/mnode/impl/src/mndBnode.c b/source/dnode/mnode/impl/src/mndBnode.c index e2b1aad008..debd7cbcc0 100644 --- a/source/dnode/mnode/impl/src/mndBnode.c +++ b/source/dnode/mnode/impl/src/mndBnode.c @@ -277,7 +277,7 @@ static int32_t mndProcessCreateBnodeReq(SRpcMsg *pReq) { } mDebug("bnode:%d, start to create", createReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_BNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_BNODE) != 0) { goto _OVER; } @@ -382,7 +382,7 @@ static int32_t mndProcessDropBnodeReq(SRpcMsg *pReq) { } mDebug("bnode:%d, start to drop", dropReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_DROP_BNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_BNODE) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 345464399e..00a5e609d7 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -506,6 +506,9 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { } mDebug("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); + if (mndCheckDbPrivilege(pMnode, 
pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) { + goto _OVER; + } pDb = mndAcquireDb(pMnode, createReq.db); if (pDb != NULL) { @@ -526,10 +529,6 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) { - goto _OVER; - } - code = mndCreateDb(pMnode, pReq, &createReq, pUser); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; @@ -700,7 +699,7 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_ALTER_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_ALTER_DB, pDb) != 0) { goto _OVER; } @@ -980,7 +979,7 @@ static int32_t mndProcessDropDbReq(SRpcMsg *pReq) { } } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_DROP_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_DB, pDb) != 0) { goto _OVER; } @@ -1127,7 +1126,7 @@ static int32_t mndProcessUseDbReq(SRpcMsg *pReq) { mError("db:%s, failed to process use db req since %s", usedbReq.db, terrstr()); } else { - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_USE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_USE_DB, pDb) != 0) { goto _OVER; } @@ -1252,7 +1251,7 @@ static int32_t mndProcessCompactDbReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_COMPACT_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_COMPACT_DB, pDb) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 113777bc1f..6cf80ffe47 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -621,7 +621,7 @@ static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq) { } mInfo("dnode:%s:%d, start to create", createReq.fqdn, createReq.port); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DNODE) != 0) { goto _OVER; } @@ -715,7 +715,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) { } mInfo("dnode:%d, start to drop, ep:%s:%d", dropReq.dnodeId, dropReq.fqdn, dropReq.port); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) { goto _OVER; } @@ -779,7 +779,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { } mInfo("dnode:%d, start to config, option:%s, value:%s", cfgReq.dnodeId, cfgReq.config, cfgReq.value); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CONFIG_DNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONFIG_DNODE) != 0) { return -1; } diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 37e0a719dd..7baa641aa7 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -283,7 +283,7 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { } mDebug("func:%s, start to create", createReq.name); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { goto _OVER; } @@ -346,7 +346,7 @@ static int32_t mndProcessDropFuncReq(SRpcMsg *pReq) { } mDebug("func:%s, start to drop", dropReq.name); - if (mndCheckOperAuth(pMnode, 
pReq->info.conn.user, MND_OPER_DROP_FUNC) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_FUNC) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index bc3d23282c..1919340250 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -389,7 +389,7 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) { } mDebug("mnode:%d, start to create", createReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_MNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_MNODE) != 0) { goto _OVER; } @@ -594,7 +594,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) { } mDebug("mnode:%d, start to drop", dropReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c index 18f2e993b2..8b5a1401f0 100644 --- a/source/dnode/mnode/impl/src/mndOffset.c +++ b/source/dnode/mnode/impl/src/mndOffset.c @@ -36,13 +36,15 @@ static int32_t mndOffsetActionUpdate(SSdb *pSdb, SMqOffsetObj *pOffset, SMqOffse static int32_t mndProcessCommitOffsetReq(SRpcMsg *pReq); int32_t mndInitOffset(SMnode *pMnode) { - SSdbTable table = {.sdbType = SDB_OFFSET, - .keyType = SDB_KEY_BINARY, - .encodeFp = (SdbEncodeFp)mndOffsetActionEncode, - .decodeFp = (SdbDecodeFp)mndOffsetActionDecode, - .insertFp = (SdbInsertFp)mndOffsetActionInsert, - .updateFp = (SdbUpdateFp)mndOffsetActionUpdate, - .deleteFp = (SdbDeleteFp)mndOffsetActionDelete}; + SSdbTable table = { + .sdbType = SDB_OFFSET, + .keyType = SDB_KEY_BINARY, + .encodeFp = (SdbEncodeFp)mndOffsetActionEncode, + .decodeFp = (SdbDecodeFp)mndOffsetActionDecode, + .insertFp = (SdbInsertFp)mndOffsetActionInsert, + .updateFp = (SdbUpdateFp)mndOffsetActionUpdate, + .deleteFp = (SdbDeleteFp)mndOffsetActionDelete, + }; mndSetMsgHandle(pMnode, TDMT_MND_MQ_COMMIT_OFFSET, mndProcessCommitOffsetReq); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index e9df4ae1d0..5e3fa3c1e3 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -227,6 +227,10 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } taosIp2String(pReq->info.conn.clientIp, ip); + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONNECT) != 0) { + mGError("user:%s, failed to login from %s since %s", pReq->info.conn.user, ip, terrstr()); + goto _OVER; + } pUser = mndAcquireUser(pMnode, pReq->info.conn.user); if (pUser == NULL) { @@ -240,11 +244,6 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CONNECT) != 0) { - mGError("user:%s, failed to login from %s since %s", pReq->info.conn.user, ip, terrstr()); - goto _OVER; - } - if (connReq.db[0]) { char db[TSDB_DB_FNAME_LEN] = {0}; snprintf(db, TSDB_DB_FNAME_LEN, "%d%s%s", pUser->acctId, TS_PATH_DELIMITER, connReq.db); @@ -271,7 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { connectRsp.connId = pConn->id; connectRsp.connType = connReq.connType; connectRsp.dnodeNum = mndGetDnodeSize(pMnode); - + strcpy(connectRsp.sVer, version); snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version, buildinfo, gitinfo); 
@@ -475,16 +474,16 @@ static int32_t mndGetOnlineDnodeNum(SMnode *pMnode, int32_t *num) { SDnodeObj *pDnode = NULL; int64_t curMs = taosGetTimestampMs(); void *pIter = NULL; - + while (true) { pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode); if (pIter == NULL) break; - + bool online = mndIsDnodeOnline(pDnode, curMs); if (online) { (*num)++; } - + sdbRelease(pSdb, pDnode); } @@ -652,15 +651,6 @@ static int32_t mndProcessKillQueryReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SProfileMgmt *pMgmt = &pMnode->profileMgmt; - SUserObj *pUser = mndAcquireUser(pMnode, pReq->info.conn.user); - if (pUser == NULL) return 0; - if (!pUser->superUser) { - mndReleaseUser(pMnode, pUser); - terrno = TSDB_CODE_MND_NO_RIGHTS; - return -1; - } - mndReleaseUser(pMnode, pUser); - SKillQueryReq killReq = {0}; if (tDeserializeSKillQueryReq(pReq->pCont, pReq->contLen, &killReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -668,6 +658,10 @@ static int32_t mndProcessKillQueryReq(SRpcMsg *pReq) { } mInfo("kill query msg is received, queryId:%s", killReq.queryStrId); + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_KILL_QUERY) != 0) { + return -1; + } + int32_t connId = 0; uint64_t queryId = 0; char *p = strchr(killReq.queryStrId, ':'); @@ -697,21 +691,16 @@ static int32_t mndProcessKillConnReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SProfileMgmt *pMgmt = &pMnode->profileMgmt; - SUserObj *pUser = mndAcquireUser(pMnode, pReq->info.conn.user); - if (pUser == NULL) return 0; - if (!pUser->superUser) { - mndReleaseUser(pMnode, pUser); - terrno = TSDB_CODE_MND_NO_RIGHTS; - return -1; - } - mndReleaseUser(pMnode, pUser); - SKillConnReq killReq = {0}; if (tDeserializeSKillConnReq(pReq->pCont, pReq->contLen, &killReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; return -1; } + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_KILL_CONN) != 0) { + return -1; + } + SConnObj *pConn = taosCacheAcquireByKey(pMgmt->connCache, &killReq.connId, sizeof(uint32_t)); if (pConn == NULL) { mError("connId:%u, failed to kill connection, conn not exist", killReq.connId); @@ -726,10 +715,10 @@ static int32_t mndProcessKillConnReq(SRpcMsg *pReq) { } static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) { - int32_t code = -1; + int32_t code = -1; SServerVerRsp rsp = {0}; strcpy(rsp.ver, version); - + int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp); if (contLen < 0) goto _over; void *pRsp = rpcMallocCont(contLen); @@ -746,7 +735,6 @@ _over: return code; } - static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; diff --git a/source/dnode/mnode/impl/src/mndQnode.c b/source/dnode/mnode/impl/src/mndQnode.c index 9f1eb4ee24..4992a99c5c 100644 --- a/source/dnode/mnode/impl/src/mndQnode.c +++ b/source/dnode/mnode/impl/src/mndQnode.c @@ -279,7 +279,7 @@ static int32_t mndProcessCreateQnodeReq(SRpcMsg *pReq) { } mDebug("qnode:%d, start to create", createReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_QNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_QNODE) != 0) { goto _OVER; } @@ -390,7 +390,7 @@ static int32_t mndProcessDropQnodeReq(SRpcMsg *pReq) { } mDebug("qnode:%d, start to drop", dropReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_DROP_QNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_QNODE) != 0) { goto _OVER; } diff --git 
a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 27de3883e9..bd129a8045 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -231,7 +231,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { mDebug("show:0x%" PRIx64 ", start retrieve data, type:%d", pShow->id, pShow->type); - // if (mndCheckShowAuth(pMnode, pReq->info.conn.user, pShow->type) != 0) return -1; + // if (mndCheckShowPrivilege(pMnode, pReq->info.conn.user, pShow->type) != 0) return -1; int32_t numOfCols = pShow->pMeta->numOfColumns; SSDataBlock *pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 05603f8554..5445c1e09f 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -713,7 +713,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } @@ -974,7 +974,7 @@ static int32_t mndProcessDropSmaReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndSnode.c b/source/dnode/mnode/impl/src/mndSnode.c index a638bdf61f..ea5011a659 100644 --- a/source/dnode/mnode/impl/src/mndSnode.c +++ b/source/dnode/mnode/impl/src/mndSnode.c @@ -285,7 +285,7 @@ static int32_t mndProcessCreateSnodeReq(SRpcMsg *pReq) { } mDebug("snode:%d, start to create", createReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_SNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_SNODE) != 0) { goto _OVER; } @@ -397,7 +397,7 @@ static int32_t mndProcessDropSnodeReq(SRpcMsg *pReq) { } mDebug("snode:%d, start to drop", dropReq.dnodeId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_DROP_SNODE) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_SNODE) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index f1bae14c07..026ce97776 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -876,7 +876,7 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } @@ -1607,7 +1607,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } @@ -1737,7 +1737,7 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 5e2f5bc2dd..8847344493 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -437,7 +437,7 @@ static 
int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre goto _OVER; } - if (mndCheckDbAuth(pMnode, user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } @@ -550,7 +550,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } #endif diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index a650ed29f1..d3c51d18fc 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -480,7 +480,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbAuth(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index d1d88fdc90..963efc9233 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1384,8 +1384,7 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq) { } mInfo("trans:%d, start to kill", killReq.transId); - - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_KILL_TRANS) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_KILL_TRANS) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 03c9647bfe..95e011db28 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -295,7 +295,7 @@ static int32_t mndCreateUser(SMnode *pMnode, char *acct, SCreateUserReq *pCreate tstrncpy(userObj.acct, acct, TSDB_USER_LEN); userObj.createdTime = taosGetTimestampMs(); userObj.updateTime = userObj.createdTime; - userObj.superUser = 0;//pCreate->superUser; + userObj.superUser = 0; // pCreate->superUser; userObj.sysInfo = pCreate->sysInfo; userObj.enable = pCreate->enable; @@ -337,6 +337,9 @@ static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) { } mDebug("user:%s, start to create", createReq.user); + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_USER) != 0) { + goto _OVER; + } if (createReq.user[0] == 0) { terrno = TSDB_CODE_MND_INVALID_USER_FORMAT; @@ -360,10 +363,6 @@ static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_CREATE_USER) != 0) { - goto _OVER; - } - code = mndCreateUser(pMnode, pOperUser->acct, &createReq, pReq); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; @@ -466,7 +465,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckAlterUserAuth(pOperUser, pUser, &alterReq) != 0) { + if (mndCheckAlterUserPrivilege(pOperUser, pUser, &alterReq) != 0) { goto _OVER; } @@ -631,6 +630,9 @@ static int32_t mndProcessDropUserReq(SRpcMsg *pReq) { } mDebug("user:%s, start to drop", dropReq.user); + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_USER) != 0) { + goto _OVER; + } if (dropReq.user[0] == 0) { terrno = TSDB_CODE_MND_INVALID_USER_FORMAT; @@ -643,10 +645,6 @@ static int32_t mndProcessDropUserReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_DROP_USER) != 0) { - goto _OVER; - } - code = mndDropUser(pMnode, pReq, pUser); 
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 1c5d73031f..ccdeb2626e 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -1212,7 +1212,7 @@ static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) { } mInfo("vgId:%d, start to redistribute vgroup to dnode %d:%d:%d", req.vgId, req.dnodeId1, req.dnodeId2, req.dnodeId3); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_REDISTRIBUTE_VGROUP) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_REDISTRIBUTE_VGROUP) != 0) { goto _OVER; } @@ -1507,7 +1507,7 @@ static int32_t mndProcessSplitVgroupMsg(SRpcMsg *pReq) { SDbObj *pDb = NULL; mDebug("vgId:%d, start to split", vgId); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_SPLIT_VGROUP) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_SPLIT_VGROUP) != 0) { goto _OVER; } @@ -1657,7 +1657,7 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) { } mInfo("start to balance vgroup"); - if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_BALANCE_VGROUP) != 0) { + if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_BALANCE_VGROUP) != 0) { goto _OVER; } diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim index 9ddfce8a97..ea3294765c 100644 --- a/tests/script/tsim/user/privilege_sysinfo.sim +++ b/tests/script/tsim/user/privilege_sysinfo.sim @@ -22,5 +22,26 @@ sql_error drop user sysinfo1 sql_error alter user sysinfo1 pass '1' sql_error alter user sysinfo0 pass '1' +sql_error create dnode $hostname port 7200 +sql_error drop dnode 1 + +sql_error create qnode on dnode 1 +sql_error drop qnode on dnode 1 + +sql_error create mnode on dnode 1 +sql_error drop mnode on dnode 1 + +sql_error create snode on dnode 1 +sql_error drop snode on dnode 1 + +sql_error redistribute vgroup 2 dnode 1 dnode 2 +sql_error balance vgroup + +sql_error kill transaction 1 +sql_error kill connection 1 +sql_error kill query 1 + +print =============== check db +sql_error create database db system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From 23653fd8430f8221e642d8bf7a15178603376ff0 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 25 Jun 2022 11:22:34 +0800 Subject: [PATCH 02/29] enh: privilege for topic and stream --- source/dnode/mnode/impl/inc/mndPrivilege.h | 1 + source/dnode/mnode/impl/src/mndConsumer.c | 4 +++ source/dnode/mnode/impl/src/mndPrivilege.c | 20 ++++++----- source/dnode/mnode/impl/src/mndStream.c | 35 +++++++------------- source/dnode/mnode/impl/src/mndTopic.c | 8 +++-- tests/script/tsim/user/privilege_sysinfo.sim | 3 -- 6 files changed, 34 insertions(+), 37 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndPrivilege.h b/source/dnode/mnode/impl/inc/mndPrivilege.h index 15f9e4e6b5..a1bec69790 100644 --- a/source/dnode/mnode/impl/inc/mndPrivilege.h +++ b/source/dnode/mnode/impl/inc/mndPrivilege.h @@ -64,6 +64,7 @@ void mndCleanupPrivilege(SMnode *pMnode); int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType); int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb); +int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *name); int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, int32_t showType); int32_t mndCheckAlterUserPrivilege(SUserObj 
*pOperUser, SUserObj *pUser, SAlterUserReq *pAlter); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 7dc5ee1ea1..69f58f33cf 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -431,6 +431,10 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { goto SUBSCRIBE_OVER; } + if (mndCheckDbPrivilege(pMnode, pMsg->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) { + goto SUBSCRIBE_OVER; + } + #if 0 // ref topic to prevent drop // TODO make topic complete diff --git a/source/dnode/mnode/impl/src/mndPrivilege.c b/source/dnode/mnode/impl/src/mndPrivilege.c index 478ba2bee4..752b11540d 100644 --- a/source/dnode/mnode/impl/src/mndPrivilege.c +++ b/source/dnode/mnode/impl/src/mndPrivilege.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "mndPrivilege.h" #include "mndUser.h" +#include "mndDb.h" int32_t mndInitPrivilege(SMnode *pMnode) { return 0; } @@ -133,15 +134,7 @@ int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType if (pUser->sysInfo) goto _OVER; } - if (operType == MND_OPER_ALTER_DB) { - if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER; - } - - if (operType == MND_OPER_DROP_DB) { - if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER; - } - - if (operType == MND_OPER_COMPACT_DB) { + if (operType == MND_OPER_ALTER_DB || operType == MND_OPER_DROP_DB || operType == MND_OPER_COMPACT_DB) { if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER; } @@ -168,3 +161,12 @@ _OVER: mndReleaseUser(pMnode, pUser); return code; } + +int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *name) { + SDbObj *pDb = mndAcquireDb(pMnode, name); + if (pDb == NULL) return -1; + + int32_t code = mndCheckDbPrivilege(pMnode, user, operType, pDb); + mndReleaseDb(pMnode, pDb); + return code; +} \ No newline at end of file diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index e49756c837..c45421baa9 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -437,10 +437,6 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre goto _OVER; } - if (mndCheckDbPrivilege(pMnode, user, MND_OPER_WRITE_DB, pDb) != 0) { - goto _OVER; - } - int32_t numOfStbs = -1; if (mndGetNumOfStbs(pMnode, pDb->name, &numOfStbs) != 0) { goto _OVER; @@ -542,19 +538,6 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } - // TODO check read auth for source and write auth for target -#if 0 - pDb = mndAcquireDb(pMnode, createStreamReq.sourceDB); - if (pDb == NULL) { - terrno = TSDB_CODE_MND_DB_NOT_SELECTED; - goto _OVER; - } - - if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { - goto _OVER; - } -#endif - // build stream obj from request SStreamObj streamObj = {0}; if (mndBuildStreamObjFromCreateReq(pMnode, &streamObj, &createStreamReq) < 0) { @@ -592,6 +575,16 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } + if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, streamObj.sourceDb) != 0) { + mndTransDrop(pTrans); + goto _OVER; + } + + if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, streamObj.targetDb) != 0) { + mndTransDrop(pTrans); + goto _OVER; + } + // execute creation if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to 
prepare since %s", pTrans->id, terrstr()); @@ -641,13 +634,9 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { } } -#if 0 - // todo check auth - pUser = mndAcquireUser(pMnode, pReq->info.conn.user); - if (pUser == NULL) { - goto DROP_STREAM_OVER; + if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { + return -1; } -#endif STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) { diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index b8c17378c4..90c4e96517 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -480,7 +480,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { goto _OVER; } - if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) { + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, pDb) != 0) { goto _OVER; } @@ -571,6 +571,10 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { } #endif + if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) { + return -1; + } + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq); mndTransSetDbName(pTrans, pTopic->db, NULL); if (pTrans == NULL) { @@ -579,7 +583,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { } mDebug("trans:%d, used to drop topic:%s", pTrans->id, pTopic->name); - + if (mndDropOffsetByTopic(pMnode, pTrans, dropReq.name) < 0) { ASSERT(0); return -1; diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim index ea3294765c..35760d45fd 100644 --- a/tests/script/tsim/user/privilege_sysinfo.sim +++ b/tests/script/tsim/user/privilege_sysinfo.sim @@ -13,9 +13,6 @@ print user sysinfo0 login sql close sql connect sysinfo0 -system sh/exec.sh -n dnode1 -s stop -return - print =============== check oper sql_error create user u1 pass 'u1' sql_error drop user sysinfo1 From 43f1d411220ad559ab1c795893e24c3b3c10b504 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 25 Jun 2022 11:23:44 +0800 Subject: [PATCH 03/29] fix: compile error --- source/dnode/mnode/impl/src/mndConsumer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 69f58f33cf..5b5de10fba 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -431,7 +431,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { goto SUBSCRIBE_OVER; } - if (mndCheckDbPrivilege(pMnode, pMsg->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) { + if (mndCheckDbPrivilegeByName(pMnode, pMsg->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) { goto SUBSCRIBE_OVER; } From d08835d51821a31eee196b8c16562a5201714580 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 25 Jun 2022 12:03:15 +0800 Subject: [PATCH 04/29] fix: privilege for create topic --- source/dnode/mnode/impl/src/mndTopic.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 078bbc1db7..f881f237c2 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -14,12 +14,12 @@ */ #include "mndTopic.h" -#include "mndPrivilege.h" #include "mndConsumer.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" #include 
"mndOffset.h" +#include "mndPrivilege.h" #include "mndShow.h" #include "mndStb.h" #include "mndSubscribe.h" @@ -480,6 +480,11 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { goto _OVER; } + if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, pDb) != 0) { + goto _OVER; + } + + code = mndCreateTopic(pMnode, pReq, &createTopicReq, pDb); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; _OVER: @@ -578,7 +583,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { } mDebug("trans:%d, used to drop topic:%s", pTrans->id, pTopic->name); - + if (mndDropOffsetByTopic(pMnode, pTrans, dropReq.name) < 0) { ASSERT(0); return -1; From a9f094613a47047acab797feec6d8eb58798e718 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 25 Jun 2022 13:52:52 +0800 Subject: [PATCH 05/29] fix: restore table merge scan operator --- source/dnode/vnode/inc/vnode.h | 1 + source/dnode/vnode/src/tsdb/tsdbRead.c | 2 +- source/libs/executor/src/executil.c | 4 +- source/libs/executor/src/executorimpl.c | 3 +- source/libs/executor/src/scanoperator.c | 122 ++++++++++++++---------- 5 files changed, 77 insertions(+), 55 deletions(-) diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index a32bf0ecdb..b97d4605e7 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -196,6 +196,7 @@ struct SVnodeCfg { typedef struct { TSKEY lastKey; uint64_t uid; + uint64_t groupId; } STableKeyInfo; struct SMetaEntry { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 540810f876..ab31d65d68 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2852,7 +2852,7 @@ int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { break; } - STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, uid = id}; + STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, uid = id, .groupId = 0}; taosArrayPush(list, &info); } diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 5ac5957f2b..374a3a736d 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -315,7 +315,7 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo } for (int i = 0; i < taosArrayGetSize(res); i++) { - STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)}; + STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0}; taosArrayPush(pListInfo->pTableList, &info); } taosArrayDestroy(res); @@ -336,7 +336,7 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo } } }else { // Create one table group. 
- STableKeyInfo info = {.lastKey = 0, .uid = tableUid}; + STableKeyInfo info = {.lastKey = 0, .uid = tableUid, .groupId = 0}; taosArrayPush(pListInfo->pTableList, &info); } pListInfo->pGroupList = taosArrayInit(4, POINTER_BYTES); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index c23d9a5040..b4d87a53b1 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4028,6 +4028,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, int32_t len = (int32_t)(pStart - (char*)keyBuf); uint64_t groupId = calcGroupId(keyBuf, len); taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &groupId, sizeof(uint64_t)); + info->groupId = groupId; groupNum++; nodesClearList(groupNew); @@ -4127,7 +4128,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return NULL; } } else { // Create one table group. - STableKeyInfo info = {.lastKey = 0, .uid = pBlockNode->uid}; + STableKeyInfo info = {.lastKey = 0, .uid = pBlockNode->uid, .groupId = 0}; taosArrayPush(pTableListInfo->pTableList, &info); } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 9c0ed40c30..dedf6c0707 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1965,7 +1965,10 @@ _error: typedef struct STableMergeScanInfo { STableListInfo* tableListInfo; - int32_t currentGroupId; + int32_t tableStartIndex; + int32_t tableEndIndex; + bool hasGroupId; + uint64_t groupId; SArray* dataReaders; // array of tsdbReaderT* SReadHandle readHandle; @@ -2006,7 +2009,7 @@ typedef struct STableMergeScanInfo { int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan int32_t dataBlockLoadFlag; SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time - // window to check if current data block needs to be loaded. + // window to check if current data block needs to be loaded. 
SSampleExecInfo sample; // sample execution info } STableMergeScanInfo; @@ -2030,6 +2033,22 @@ int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle return TSDB_CODE_SUCCESS; } +int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, STableListInfo* pTableListInfo, + int32_t tableStartIdx, int32_t tableEndIdx, SArray* arrayReader, uint64_t queryId, + uint64_t taskId) { + for (int32_t i = tableStartIdx; i <= tableEndIdx; ++i) { + SArray* subTableList = taosArrayInit(1, sizeof(STableKeyInfo)); + taosArrayPush(subTableList, taosArrayGet(pTableListInfo->pTableList, i)); + + tsdbReaderT* pReader = tsdbReaderOpen(pHandle->vnode, pQueryCond, subTableList, queryId, taskId); + taosArrayPush(arrayReader, &pReader); + + taosArrayDestroy(subTableList); + } + + return TSDB_CODE_SUCCESS; +} + // todo refactor static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo, int32_t readerIdx, SSDataBlock* pBlock, uint32_t* status) { @@ -2216,34 +2235,32 @@ SArray* generateSortByTsInfo(int32_t order) { return pList; } -static int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, SArray* tableList, SArray* arrayReader, uint64_t queryId, - uint64_t taskId) { - for (int32_t i = 0; i < taosArrayGetSize(tableList); ++i) { - SArray* tmp = taosArrayInit(1, sizeof(STableKeyInfo)); - taosArrayPush(tmp, taosArrayGet(tableList, i)); - - tsdbReaderT* pReader = tsdbReaderOpen(pHandle->vnode, pQueryCond, tmp, queryId, taskId); - taosArrayPush(arrayReader, &pReader); - - taosArrayDestroy(tmp); - } - - return TSDB_CODE_SUCCESS; -} - int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { STableMergeScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SArray* tableList = taosArrayGetP(pInfo->tableListInfo->pGroupList, pInfo->currentGroupId); + { + size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList); + int32_t i = pInfo->tableStartIndex + 1; + for (; i < tableListSize; ++i) { + STableKeyInfo* tableKeyInfo = taosArrayGet(pInfo->tableListInfo->pTableList, i); + if (tableKeyInfo->groupId != pInfo->groupId) { + break; + } + } + pInfo->tableEndIndex = i - 1; + } - createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableList, + int32_t tableStartIdx = pInfo->tableStartIndex; + int32_t tableEndIdx = pInfo->tableEndIndex; + + STableListInfo* tableListInfo = pInfo->tableListInfo; + createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableListInfo, tableStartIdx, tableEndIdx, pInfo->dataReaders, pInfo->queryId, pInfo->taskId); // todo the total available buffer should be determined by total capacity of buffer of this task. 
// the additional one is reserved for merge result - int32_t tableLen = taosArrayGetSize(tableList); - pInfo->sortBufSize = pInfo->bufPageSize * ((tableLen==0?1:tableLen) + 1); + pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1); int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str); @@ -2330,43 +2347,38 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, code); } + size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList); + if (!pInfo->hasGroupId) { + pInfo->hasGroupId = true; - if (pInfo->currentGroupId == -1) { - pInfo->currentGroupId++; - if (pInfo->currentGroupId >= taosArrayGetSize(pInfo->tableListInfo->pGroupList)) { + if (tableListSize == 0) { doSetOperatorCompleted(pOperator); return NULL; } + pInfo->tableStartIndex = 0; + pInfo->groupId = ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId; startGroupTableMergeScan(pOperator); } - SSDataBlock* pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator); - if (pBlock != NULL) { - uint64_t* groupId = taosHashGet(pInfo->tableListInfo->map, &(pBlock->info.uid), sizeof(uint64_t)); - if(groupId) pBlock->info.groupId = *groupId; - - pOperator->resultInfo.totalRows += pBlock->info.rows; - return pBlock; + SSDataBlock* pBlock = NULL; + while (pInfo->tableStartIndex < tableListSize) { + pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator); + if (pBlock != NULL) { + pBlock->info.groupId = pInfo->groupId; + pOperator->resultInfo.totalRows += pBlock->info.rows; + return pBlock; + } else { + stopGroupTableMergeScan(pOperator); + if (pInfo->tableEndIndex >= tableListSize - 1) { + doSetOperatorCompleted(pOperator); + break; + } + pInfo->tableStartIndex = pInfo->tableEndIndex + 1; + pInfo->groupId = + ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId; + startGroupTableMergeScan(pOperator); + } } - stopGroupTableMergeScan(pOperator); - pInfo->currentGroupId++; - if (pInfo->currentGroupId >= taosArrayGetSize(pInfo->tableListInfo->pGroupList)) { - doSetOperatorCompleted(pOperator); - return NULL; - } - startGroupTableMergeScan(pOperator); - - pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator); - if (pBlock != NULL) { - uint64_t* groupId = taosHashGet(pInfo->tableListInfo->map, &(pBlock->info.uid), sizeof(uint64_t)); - if(groupId) pBlock->info.groupId = *groupId; - - pOperator->resultInfo.totalRows += pBlock->info.rows; - return pBlock; - } - - doSetOperatorCompleted(pOperator); - return pBlock; } @@ -2403,6 +2415,12 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla return TSDB_CODE_SUCCESS; } +int32_t compareTableKeyInfoByGid(const void* p1, const void* p2) { + const STableKeyInfo* info1 = p1; + const STableKeyInfo* info2 = p2; + return info1->groupId - info2->groupId; +} + SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo, uint64_t queryId, uint64_t taskId) { @@ -2411,6 +2429,9 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN if (pInfo == NULL || 
pOperator == NULL) { goto _error; } + if (pTableScanNode->pPartitionTags) { + taosArraySort(pTableListInfo->pTableList, compareTableKeyInfoByGid); + } SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc; @@ -2443,7 +2464,6 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN pInfo->dataReaders = taosArrayInit(64, POINTER_BYTES); pInfo->queryId = queryId; pInfo->taskId = taskId; - pInfo->currentGroupId = -1; pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam)); From 7752a7e4fd067a7452b977bf09f4bb94e7304a7b Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 25 Jun 2022 14:14:01 +0800 Subject: [PATCH 06/29] add test case for param ttl --- tests/system-test/1-insert/table_param_ttl.py | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 tests/system-test/1-insert/table_param_ttl.py diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py new file mode 100644 index 0000000000..f93ebb98cb --- /dev/null +++ b/tests/system-test/1-insert/table_param_ttl.py @@ -0,0 +1,61 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * + +class TDTestCase: + updatecfgDict = {'ttlUnit':10,'ttlPushInterval':5} + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.ntbname = 'ntb' + self.stbname = 'stb' + self.tbnum = 10 + self.ttl_param = 1 + def ttl_check(self): + tdSql.prepare() + for i in range(self.tbnum): + tdSql.execute(f'create table {self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.ttl_param}') + tdSql.query(f'show tables') + tdSql.checkRows(self.tbnum) + tdSql.execute('reset query cache') + sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+5) + tdSql.query(f'show tables') + tdSql.checkRows(0) + + tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)') + for i in range(self.tbnum): + tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({i}) ttl {self.ttl_param}') + tdSql.query(f'show tables') + tdSql.checkRows(self.tbnum) + sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+5) + tdSql.query(f'show tables') + tdSql.checkRows(0) + + def run(self): + + self.ttl_check() + + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From c0564bd6395595e0ebf9ff87ccfb58005d90d4e7 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 25 Jun 2022 14:16:07 +0800 Subject: [PATCH 07/29] update --- tests/system-test/1-insert/table_param_ttl.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py index f93ebb98cb..65be3cdcf3 100644 --- a/tests/system-test/1-insert/table_param_ttl.py +++ b/tests/system-test/1-insert/table_param_ttl.py @@ -46,12 +46,7 @@ class TDTestCase: tdSql.checkRows(0) def 
run(self): - self.ttl_check() - - - - def stop(self): tdSql.close() From fa84d68f9846f5f3449e1e99ebf598683c82abf1 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 25 Jun 2022 14:20:57 +0800 Subject: [PATCH 08/29] add case into ci --- tests/system-test/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index ef217b828f..22dc829a23 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -23,6 +23,7 @@ python3 ./test.py -f 1-insert/alter_stable.py python3 ./test.py -f 1-insert/alter_table.py python3 ./test.py -f 1-insert/insertWithMoreVgroup.py python3 ./test.py -f 1-insert/table_comment.py +python3 ./test.py -f 1-insert/table_param_ttl.py python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py python3 ./test.py -f 2-query/varchar.py From 4eae779c85b4407fb70d4d5ef1a7aa7d88c58af6 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 25 Jun 2022 14:24:57 +0800 Subject: [PATCH 09/29] update test case --- tests/system-test/1-insert/table_param_ttl.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py index 65be3cdcf3..850c323be0 100644 --- a/tests/system-test/1-insert/table_param_ttl.py +++ b/tests/system-test/1-insert/table_param_ttl.py @@ -17,7 +17,7 @@ from util.sql import * from util.common import * class TDTestCase: - updatecfgDict = {'ttlUnit':10,'ttlPushInterval':5} + updatecfgDict = {'ttlUnit':5,'ttlPushInterval':3} def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) @@ -32,7 +32,7 @@ class TDTestCase: tdSql.query(f'show tables') tdSql.checkRows(self.tbnum) tdSql.execute('reset query cache') - sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+5) + sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval']) tdSql.query(f'show tables') tdSql.checkRows(0) @@ -41,7 +41,7 @@ class TDTestCase: tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({i}) ttl {self.ttl_param}') tdSql.query(f'show tables') tdSql.checkRows(self.tbnum) - sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+5) + sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval']) tdSql.query(f'show tables') tdSql.checkRows(0) From e70ec32c118b0eb2ff77698631a140009e0836b5 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 25 Jun 2022 14:41:25 +0800 Subject: [PATCH 10/29] update --- tests/system-test/1-insert/table_param_ttl.py | 31 ++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py index 850c323be0..49d6476d9c 100644 --- a/tests/system-test/1-insert/table_param_ttl.py +++ b/tests/system-test/1-insert/table_param_ttl.py @@ -25,18 +25,30 @@ class TDTestCase: self.stbname = 'stb' self.tbnum = 10 self.ttl_param = 1 - def ttl_check(self): + self.default_ttl = 100 + self.modify_ttl = 1 + def ttl_check_ntb(self): tdSql.prepare() + for i in range(self.tbnum): tdSql.execute(f'create table {self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.ttl_param}') tdSql.query(f'show tables') tdSql.checkRows(self.tbnum) - tdSql.execute('reset query cache') sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval']) tdSql.query(f'show tables') tdSql.checkRows(0) - + for i in range(self.tbnum): + tdSql.execute(f'create table 
{self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.default_ttl}') + for i in range(int(self.tbnum/2)): + tdSql.execute(f'alter table {self.ntbname}_{i} ttl {self.modify_ttl}') + sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval']) + tdSql.query(f'show tables') + tdSql.checkRows(self.tbnum - int(self.tbnum/2)) + tdSql.execute('drop database db') + def ttl_check_ctb(self): + tdSql.prepare() tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)') + for i in range(self.tbnum): tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({i}) ttl {self.ttl_param}') tdSql.query(f'show tables') @@ -44,9 +56,20 @@ class TDTestCase: sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval']) tdSql.query(f'show tables') tdSql.checkRows(0) + for i in range(self.tbnum): + tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({i}) ttl {self.default_ttl}') + tdSql.query(f'show tables') + tdSql.checkRows(self.tbnum) + for i in range(int(self.tbnum/2)): + tdSql.execute(f'alter table {self.stbname}_{i} ttl {self.modify_ttl}') + sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval']) + tdSql.query(f'show tables') + tdSql.checkRows(self.tbnum - int(self.tbnum/2)) + tdSql.execute('drop database db') def run(self): - self.ttl_check() + self.ttl_check_ntb() + self.ttl_check_ctb() def stop(self): tdSql.close() From 91735042b74cc415be1d60aa264f045ffe79e914 Mon Sep 17 00:00:00 2001 From: jiacy-jcy Date: Sat, 25 Jun 2022 15:17:25 +0800 Subject: [PATCH 11/29] update bottom.py --- tests/system-test/2-query/bottom.py | 180 +++++++++++++++++----------- 1 file changed, 110 insertions(+), 70 deletions(-) diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py index 1037b0a8f3..a95daf22f4 100644 --- a/tests/system-test/2-query/bottom.py +++ b/tests/system-test/2-query/bottom.py @@ -17,100 +17,140 @@ from util.log import * from util.cases import * from util.sql import * from util.common import * - +from util.sqlset import * class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) - + self.dbname = 'db_test' + self.setsql = TDSetSql() + self.ntbname = 'ntb' self.rowNum = 10 self.tbnum = 20 self.ts = 1537146000000 self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - def bottom_check_base(self): - tdSql.prepare() - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, - col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb tags('beijing')") - column_list = ['col1','col2','col3','col4','col5','col6','col7','col8'] - error_column_list = ['col11','col12','col13'] - error_param_list = [0,101] - for i in range(self.rowNum): - tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + self.column_dict = { + 'ts' : 'timestamp', + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 
'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } - for i in column_list: - tdSql.query(f'select bottom({i},2) from stb_1') - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - for j in error_param_list: - tdSql.error(f'select bottom({i},{j}) from stb_1') - for i in error_column_list: - tdSql.error(f'select bottom({i},10) from stb_1') - tdSql.query("select ts,bottom(col1, 2),ts from stb_1 group by tbname") - tdSql.checkRows(2) - tdSql.query('select bottom(col2,1) from stb_1 interval(1y) order by col2') - tdSql.checkData(0,0,1) - - tdSql.error('select * from stb_1 where bottom(col2,1)=1') - tdSql.execute('drop database db') - def bottom_check_distribute(self): - # prepare data for vgroup 4 - dbname = tdCom.getLongName(5, "letters") + self.param_list = [1,100] + def insert_data(self,column_dict,tbname,row_num): + sql = '' + for k, v in column_dict.items(): + if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \ + v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 'bool': + sql += '%d,' + elif v.lower() == 'float' or v.lower() == 'double': + sql += '%f,' + elif 'binary' in v.lower(): + sql += f'"{self.binary_str}%d",' + elif 'nchar' in v.lower(): + sql += f'"{self.nchar_str}%d",' + insert_sql = f'insert into {tbname} values({sql[:-1]})' + for i in range(row_num): + insert_list = [] + for k, v in column_dict.items(): + if v.lower() in[ 'tinyint' , 'smallint' , 'int', 'bigint' , 'tinyint unsigned' , 'smallint unsigned' , 'int unsigned' , 'bigint unsigned'] or\ + 'binary' in v.lower() or 'nchar' in v.lower(): + insert_list.append(0 + i) + elif v.lower() == 'float' or v.lower() == 'double': + insert_list.append(0.1 + i) + elif v.lower() == 'bool': + insert_list.append(i % 2) + elif v.lower() == 'timestamp': + insert_list.append(self.ts + i) + tdSql.execute(insert_sql%(tuple(insert_list))) + def bottom_check_data(self,tbname,tb_type): + new_column_dict = {} + for param in self.param_list: + for k,v in self.column_dict.items(): + if v.lower() in ['tinyint','smallint','int','bigint','tinyint unsigned','smallint unsigned','int unsigned','bigint unsigned']: + tdSql.query(f'select bottom({k},{param}) from {tbname} order by {k}') + if param >= self.rowNum: + if tb_type in ['normal_table','child_table']: + tdSql.checkRows(self.rowNum) + values_list = [] + for i in range(self.rowNum): + tp = (i,) + values_list.append(tp) + tdSql.checkEqual(tdSql.queryResult,values_list) + elif tb_type == 'stable': + tdSql.checkRows(param) + elif param < self.rowNum: + if tb_type in ['normal_table','child_table']: + tdSql.checkRows(param) + values_list = [] + for i in range(param): + tp = (i,) + values_list.append(tp) + tdSql.checkEqual(tdSql.queryResult,values_list) + elif tb_type == 'stable': + tdSql.checkRows(param) + for i in [self.param_list[0]-1,self.param_list[-1]+1]: + tdSql.error(f'select top({k},{i}) from {tbname}') + new_column_dict.update({k:v}) + elif v.lower() == 'bool' or 'binary' in v.lower() or 'nchar' in v.lower(): + tdSql.error(f'select top({k},{param}) from {tbname}') + tdSql.error(f'select * from {tbname} where top({k},{param})=1') + pass + def bottom_check_ntb(self): + tdSql.execute(f'create database if not exists {self.dbname} vgroups 1') + tdSql.execute(f'use {self.dbname}') + tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict)) + 
self.insert_data(self.column_dict,self.ntbname,self.rowNum) + self.bottom_check_data(self.ntbname,'normal_table') + tdSql.execute(f'drop database {self.dbname}') + def bottom_check_stb(self): stbname = tdCom.getLongName(5, "letters") - vgroup_num = 2 - child_table_num = 20 - tdSql.execute(f"create database if not exists {dbname} vgroups {vgroup_num}") - tdSql.execute(f'use {dbname}') - # build 20 child tables,every table insert 10 rows - tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, - col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') - for i in range(child_table_num): - tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')") - tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i)) - column_list = ['col1','col2','col3','col4','col5','col6','col7','col8'] - error_column_list = ['col11','col12','col13'] - error_param_list = [0,101] - for i in [f'{stbname}', f'{dbname}.{stbname}']: - for j in column_list: - tdSql.query(f"select bottom({j},1) from {i}") - tdSql.checkRows(0) + tag_dict = { + 't0':'int' + } + tag_values = [ + f'1' + ] + tdSql.execute(f"create database if not exists {self.dbname} vgroups 2") + tdSql.execute(f'use {self.dbname}') + tdSql.execute(self.setsql.set_create_stable_sql(stbname,self.column_dict,tag_dict)) + for i in range(self.tbnum): + tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})") + tdSql.execute(self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum)) tdSql.query('show tables') vgroup_list = [] for i in range(len(tdSql.queryResult)): vgroup_list.append(tdSql.queryResult[i][6]) vgroup_list_set = set(vgroup_list) - for i in vgroup_list_set: vgroups_num = vgroup_list.count(i) - if vgroups_num >=2: + if vgroups_num >= 2: tdLog.info(f'This scene with {vgroups_num} vgroups is ok!') - continue else: - tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n') - for i in range(self.rowNum): - for j in range(child_table_num): - tdSql.execute(f"insert into {stbname}_{j} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) - for i in column_list: - tdSql.query(f'select bottom({i},2) from {stbname}') - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(1,),(1,)]) - for j in error_param_list: - tdSql.error(f'select bottom({i},{j}) from {stbname}') - for i in error_column_list: - tdSql.error(f'select bottom({i},10) from {stbname}') - - tdSql.execute(f'drop database {dbname}') + tdLog.exit( + 'This scene does not meet the requirements with {vgroups_num} vgroup!\n') + for i in range(self.tbnum): + self.bottom_check_data(f'{stbname}_{i}','child_table') + self.bottom_check_data(f'{stbname}','stable') + tdSql.execute(f'drop database {self.dbname}') + def run(self): - - self.bottom_check_base() - self.bottom_check_distribute() + self.bottom_check_ntb() + self.bottom_check_stb() - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From 6adc19d4437705552669c7459e80b763c7344de5 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 25 Jun 2022 15:20:11 +0800 Subject: [PATCH 12/29] fix: overlapping intervals problem --- source/libs/executor/src/timewindowoperator.c | 51 +++++++++++-------- 1 file 
changed, 30 insertions(+), 21 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 48b0b1c071..0ba898e4f7 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3900,18 +3900,22 @@ _error: // merge interval operator typedef struct SMergeIntervalAggOperatorInfo { SIntervalAggOperatorInfo intervalAggOperatorInfo; - - SHashObj* groupIntervalHash; - void* groupIntervalIter; + SList* groupIntervals; + SListIter groupIntervalsIter; bool hasGroupId; uint64_t groupId; SSDataBlock* prefetchedBlock; bool inputBlocksFinished; } SMergeIntervalAggOperatorInfo; +typedef struct SGroupTimeWindow { + uint64_t groupId; + STimeWindow window; +} SGroupTimeWindow; + void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) { SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param; - taosHashCleanup(miaInfo->groupIntervalHash); + tdListFree(miaInfo->groupIntervals); destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput); } @@ -3940,15 +3944,22 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t bool ascScan = (iaInfo->order == TSDB_ORDER_ASC); SExprSupp* pExprSup = &pOperatorInfo->exprSupp; - STimeWindow* prevWin = taosHashGet(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId)); - if (prevWin == NULL) { - taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow)); - return 0; - } + SGroupTimeWindow groupTimeWindow = {.groupId = tableGroupId, .window = *newWin}; + tdListAppend(miaInfo->groupIntervals, &groupTimeWindow); - if ((ascScan && newWin->skey > prevWin->skey || (!ascScan) && newWin->skey < prevWin->skey)) { - finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock); - taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow)); + SListIter iter = {0}; + tdListInitIter(miaInfo->groupIntervals, &iter, TD_LIST_FORWARD); + SListNode* listNode = NULL; + while ((listNode = tdListNext(&iter)) != NULL) { + SGroupTimeWindow* prevGrpWin = (SGroupTimeWindow*)listNode->data; + if (prevGrpWin->groupId != tableGroupId ) { + continue; + } + STimeWindow* prevWin = &prevGrpWin->window; + if ((ascScan && newWin->skey > prevWin->ekey || (!ascScan) && newWin->skey < prevWin->ekey)) { + finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock); + tdListPopNode(miaInfo->groupIntervals, listNode); + } } return 0; @@ -4075,6 +4086,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { } if (pBlock == NULL) { + tdListInitIter(miaInfo->groupIntervals, &miaInfo->groupIntervalsIter, TD_LIST_FORWARD); miaInfo->inputBlocksFinished = true; break; } @@ -4100,14 +4112,12 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { } if (miaInfo->inputBlocksFinished) { - void* win = taosHashIterate(miaInfo->groupIntervalHash, miaInfo->groupIntervalIter); - if (win != NULL) { - miaInfo->groupIntervalIter = win; + SListNode* listNode = tdListNext(&miaInfo->groupIntervalsIter); - size_t len = 0; - uint64_t* pTableGroupId = taosHashGetKey(win, &len); - finalizeWindowResult(pOperator, *pTableGroupId, win, pRes); - pRes->info.groupId = *pTableGroupId; + if (listNode != NULL) { + SGroupTimeWindow* grpWin = (SGroupTimeWindow*)(listNode->data); + finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes); + pRes->info.groupId = grpWin->groupId; } } @@ 
-4129,8 +4139,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI goto _error; } - miaInfo->groupIntervalHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_NO_LOCK); - miaInfo->groupIntervalIter = NULL; + miaInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow)); SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo; From 90121ae43982ccb2214e742c5b8922723c0ad5ad Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sat, 25 Jun 2022 15:26:29 +0800 Subject: [PATCH 13/29] fix(tmq): check stb existence when subscribing stb --- source/dnode/mnode/impl/src/mndTopic.c | 4 ++++ source/libs/stream/src/streamExec.c | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index a650ed29f1..8fcd345544 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -401,6 +401,10 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * } } else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) { SStbObj *pStb = mndAcquireStb(pMnode, pCreate->subStbName); + if (pStb == NULL) { + terrno = TSDB_CODE_MND_STB_NOT_EXIST; + return -1; + } topicObj.stbUid = pStb->uid; } /*} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {*/ diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 8e7cac03a2..fe0f406f8d 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -109,7 +109,7 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) { if (type == STREAM_INPUT__TRIGGER) { blockDataDestroy(((SStreamTrigger*)data)->pBlock); taosFreeQitem(data); - } else if (type == STREAM_INPUT__DATA_BLOCK) { + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE) { taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock); taosFreeQitem(data); } else if (type == STREAM_INPUT__DATA_SUBMIT) { From 1ba41e03ff12133ea1a60dd2e65013d37a026bd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Chappyguoxy=E2=80=9D?= <“happy_guoxy@163.com”> Date: Sat, 25 Jun 2022 15:48:25 +0800 Subject: [PATCH 14/29] test: refine query cases --- tests/system-test/2-query/json_tag.py | 102 ++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py index 2ef1b8dad2..9b96b6ebd0 100644 --- a/tests/system-test/2-query/json_tag.py +++ b/tests/system-test/2-query/json_tag.py @@ -11,12 +11,14 @@ # -*- coding: utf-8 -*- +import imp import sys import taos from util.log import tdLog from util.cases import tdCases from util.sql import tdSql import json +import os class TDTestCase: @@ -29,6 +31,9 @@ class TDTestCase: return def init(self, conn, logSql): + self.testcasePath = os.path.split(__file__)[0] + self.testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) @@ -557,6 +562,103 @@ class TDTestCase: tdSql.checkRows(3) tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1") tdSql.checkRows(3) + + #math function + tdSql.query("select sin(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select cos(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select tan(dataint) from jsons1 where 
jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select asin(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select acos(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select atan(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select abs(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select pow(dataint,5) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select log(dataint,10) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select sqrt(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select csum(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select mavg(dataint,1) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select statecount(dataint,'GE',10) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select stateduration(dataint,'GE',0) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select sample(dataint,3) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select HYPERLOGLOG(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(1) + tdSql.query("select twa(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(1) + + # function not ready + # tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;") + # tdSql.checkRows(3) + # tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;") + # tdSql.checkRows(3) + # tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;") + # tdSql.checkRows(3) + # tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;") + # tdSql.checkRows(1) + + #str function + tdSql.query("select upper(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select ltrim(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select lower(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select rtrim(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select CHAR_LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select SUBSTR(dataStr,5) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select CONCAT(dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select CAST(dataStr as bigint) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + + #time function + tdSql.query("select now() from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select today() from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select TIMEZONE() from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select 
TO_ISO8601(ts) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select TO_UNIXTIMESTAMP(datastr) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select TIMETRUNCATE(ts,1u) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select TIMEDIFF(ts,_c0) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select TIMEDIFF(ts,1u) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(3) + tdSql.query("select ELAPSED(ts,1h) from jsons1 where jtag->'tag1'>1;") + tdSql.checkRows(1) + # # #test TD-12077 tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')") From 592d8e488f5c877f41b7ea3bed67956f3d94a4c4 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Sat, 25 Jun 2022 16:14:54 +0800 Subject: [PATCH 15/29] test: add sim full test --- tests/pytest/util/dnodes.py | 24 +++++---- tests/script/test-all.bat | 58 +++++++++++++++++++++ tests/system-test/6-cluster/5dnode1mnode.py | 6 +-- tests/system-test/6-cluster/5dnode2mnode.py | 5 +- tests/system-test/7-tmq/tmqCommon.py | 8 ++- tests/system-test/7-tmq/tmqError.py | 5 +- tests/system-test/test-all.bat | 2 +- tests/system-test/test.py | 46 +++++++++++++--- tools/shell/src/shellEngine.c | 1 + 9 files changed, 128 insertions(+), 27 deletions(-) create mode 100644 tests/script/test-all.bat diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index be3454f78f..a38b14a52d 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -508,7 +508,6 @@ class TDDnode: def stoptaosd(self): if (not self.remoteIP == ""): - print("123") self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1)) tdLog.info("stop dnode%d"%self.index) return @@ -518,18 +517,21 @@ class TDDnode: toBeKilled = "valgrind.bin" if self.running != 0: - psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}'" % (toBeKilled,self.index) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") - - while(processID): - killCmd = "kill -INT %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) + if platform.system().lower() == 'windows': + os.system("wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId | xargs echo | awk '{print $2}' | xargs taskkill -f -pid"%self.index) + else: + psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}'" % (toBeKilled,self.index) processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - if self.valgrind: - time.sleep(2) + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + if self.valgrind: + time.sleep(2) self.running = 0 tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) diff --git a/tests/script/test-all.bat b/tests/script/test-all.bat new file mode 100644 index 0000000000..7a1a4bc7fa --- /dev/null +++ b/tests/script/test-all.bat @@ -0,0 +1,58 @@ +@echo off +SETLOCAL EnableDelayedExpansion +for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a") +set /a a=0 +echo Windows Taosd Full Test +set /a exitNum=0 +rm -rf failed.txt +set caseFile="jenkins\\basic.txt" +if not "%2" == "" ( + set caseFile="%2" +) +for /F "usebackq tokens=*" %%i in (!caseFile!) 
do ( + set line=%%i + if "!line:~,9!" == "./test.sh" ( + set /a a+=1 + echo !a! Processing %%i + call :GetTimeSeconds !time! + set time1=!_timeTemp! + echo Start at !time! + call !line:./test.sh=wtest.bat! > result_!a!.txt 2>error_!a!.txt + if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && set /a exitNum=8 && echo %%i >>failed.txt ) else ( call :colorEcho 0a "Success" &echo. ) + ) +) +exit !exitNum! + +:colorEcho +set timeNow=%time% +call :GetTimeSeconds %timeNow% +set time2=%_timeTemp% +set /a interTime=%time2% - %time1% +echo End at %timeNow% , cast %interTime%s +echo off + "%~2" +findstr /v /a:%1 /R "^$" "%~2" nul +del "%~2" > nul 2>&1i +goto :eof + +:GetTimeSeconds +set tt=%1 +set tt=%tt:.= % +set tt=%tt::= % +set tt=%tt: 0= % +set /a index=1 +for %%a in (%tt%) do ( + if !index! EQU 1 ( + set /a hh=%%a + )^ + else if !index! EQU 2 ( + set /a mm=%%a + + )^ + else if !index! EQU 3 ( + set /a ss=%%a + ) + set /a index=index+1 +) +set /a _timeTemp=(%hh%*60+%mm%)*60+%ss% +goto :eof diff --git a/tests/system-test/6-cluster/5dnode1mnode.py b/tests/system-test/6-cluster/5dnode1mnode.py index 75134224db..5f4ab7357b 100644 --- a/tests/system-test/6-cluster/5dnode1mnode.py +++ b/tests/system-test/6-cluster/5dnode1mnode.py @@ -20,7 +20,7 @@ class MyDnodes(TDDnodes): self.simDeployed = False class TDTestCase: - + noConn = True def init(self,conn ,logSql): tdLog.debug(f"start to excute {__file__}") self.TDDnodes = None @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -81,7 +81,7 @@ class TDTestCase: dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] - cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" + cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\"" print(cmd) os.system(cmd) diff --git a/tests/system-test/6-cluster/5dnode2mnode.py b/tests/system-test/6-cluster/5dnode2mnode.py index d3cde987c6..e08e738be6 100644 --- a/tests/system-test/6-cluster/5dnode2mnode.py +++ b/tests/system-test/6-cluster/5dnode2mnode.py @@ -20,6 +20,7 @@ class MyDnodes(TDDnodes): self.simDeployed = False class TDTestCase: + noConn = True def init(self,conn ,logSql): tdLog.debug(f"start to excute {__file__}") @@ -40,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -85,7 +86,7 @@ class TDTestCase: dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] - cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" + cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\"" print(cmd) os.system(cmd) diff --git a/tests/system-test/7-tmq/tmqCommon.py 
b/tests/system-test/7-tmq/tmqCommon.py index b8aa78e3ac..788ae3474c 100644 --- a/tests/system-test/7-tmq/tmqCommon.py +++ b/tests/system-test/7-tmq/tmqCommon.py @@ -86,7 +86,13 @@ class TMQCom: shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' if (platform.system().lower() == 'windows'): - shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath + processorName = buildPath + '\\build\\bin\\tmq_sim.exe' + if alias != 0: + processorNameNew = buildPath + '\\build\\bin\\tmq_sim_new.exe' + shellCmd = 'cp %s %s'%(processorName, processorNameNew) + os.system(shellCmd) + processorName = processorNameNew + shellCmd = 'mintty -h never ' + processorName + ' -c ' + cfgPath shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> nul 2>&1 &" else: diff --git a/tests/system-test/7-tmq/tmqError.py b/tests/system-test/7-tmq/tmqError.py index 5b5658d528..bd8ec565d8 100644 --- a/tests/system-test/7-tmq/tmqError.py +++ b/tests/system-test/7-tmq/tmqError.py @@ -288,7 +288,10 @@ class TDTestCase: tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) - os.system('pkill tmq_sim') + if (platform.system().lower() == 'windows'): + os.system("TASKKILL /F /IM tmq_sim.exe") + else: + os.system('pkill tmq_sim') tdLog.printNoPrefix("======== test case 1 end ...... ") diff --git a/tests/system-test/test-all.bat b/tests/system-test/test-all.bat index 275cbeebbb..adc9e0ce28 100644 --- a/tests/system-test/test-all.bat +++ b/tests/system-test/test-all.bat @@ -2,7 +2,7 @@ SETLOCAL EnableDelayedExpansion for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a") set /a a=0 -if %1 == full ( +if "%1" == "full" ( echo Windows Taosd Full Test set /a exitNum=0 del /Q /F failed.txt diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 8a8356449c..35f8ea953c 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -23,6 +23,7 @@ import platform import socket import threading from distutils.log import warn as printf +from tkinter import N from fabric2 import Connection sys.path.append("../pytest") from util.log import * @@ -187,9 +188,9 @@ if __name__ == "__main__": tdLog.info("Procedures for tdengine deployed in %s" % (host)) if platform.system().lower() == 'windows': + fileName = fileName.replace("/", os.sep) if (masterIp == "" and not fileName[0:12] == "0-others\\udf"): threading.Thread(target=checkRunTimeError,daemon=True).start() - tdCases.logSql(logSql) tdLog.info("Procedures for testing self-deployment") tdDnodes.init(deployPath, masterIp) tdDnodes.setTestCluster(testCluster) @@ -208,18 +209,46 @@ if __name__ == "__main__": uModule = importlib.import_module(moduleName) try: ucase = uModule.TDTestCase() - if ((json.dumps(updateCfgDict) == '{}') and (ucase.updatecfgDict is not None)): + if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')): updateCfgDict = ucase.updatecfgDict updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode() except Exception as r: print(r) else: pass - tdDnodes.deploy(1,updateCfgDict) - tdDnodes.start(1) - conn = taos.connect( - host="%s"%(host), - config=tdDnodes.sim.getCfgDir()) + if dnodeNums == 1 : + tdDnodes.deploy(1,updateCfgDict) + tdDnodes.start(1) + tdCases.logSql(logSql) + else : + tdLog.debug("create an cluster with %s nodes and make 
%s dnode as independent mnode"%(dnodeNums,mnodeNums)) + dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums) + tdDnodes = ClusterDnodes(dnodeslist) + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + for dnode in tdDnodes.dnodes: + tdDnodes.deploy(dnode.index,{}) + for dnode in tdDnodes.dnodes: + tdDnodes.starttaosd(dnode.index) + tdCases.logSql(logSql) + conn = taos.connect( + host, + config=tdDnodes.getSimCfgPath()) + print(tdDnodes.getSimCfgPath(),host) + cluster.create_dnode(conn) + try: + if cluster.check_dnode(conn) : + print("check dnode ready") + except Exception as r: + print(r) + if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True: + conn = None + else: + conn = taos.connect( + host="%s"%(host), + config=tdDnodes.sim.getCfgDir()) if is_test_framework: tdCases.runOneWindows(conn, fileName) else: @@ -307,4 +336,5 @@ if __name__ == "__main__": tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py") else: tdLog.info("not need to query") - conn.close() + if conn is not None: + conn.close() diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 8a017d378d..8bc99a2665 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -156,6 +156,7 @@ void shellRunSingleCommandImp(char *command) { } fname = sptr + 2; + while (*fname == ' ') fname++; *sptr = '\0'; } From 77b365f0ccd6cf68c58e24d2e8afecc4c6a63678 Mon Sep 17 00:00:00 2001 From: Minghao Li Date: Sat, 25 Jun 2022 16:27:05 +0800 Subject: [PATCH 16/29] refactor(sync): do not replicate when one replica --- source/libs/sync/src/syncMain.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index c5af72c971..3100d0525c 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -914,6 +914,9 @@ void syncNodeStart(SSyncNode* pSyncNode) { syncNodeBecomeLeader(pSyncNode, "one replica start"); // Raft 3.6.2 Committing entries from previous terms + syncNodeAppendNoop(pSyncNode); + syncMaybeAdvanceCommitIndex(pSyncNode); + return; } @@ -1662,6 +1665,12 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde // change isStandBy to normal (election timeout) if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { syncNodeBecomeLeader(pSyncNode, tmpbuf); + + // Raft 3.6.2 Committing entries from previous terms + syncNodeReplicate(pSyncNode); + syncNodeAppendNoop(pSyncNode); + syncMaybeAdvanceCommitIndex(pSyncNode); + } else { syncNodeBecomeFollower(pSyncNode, tmpbuf); } @@ -1807,16 +1816,9 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { // stop elect timer syncNodeStopElectTimer(pSyncNode); - // start replicate right now! 
- syncNodeReplicate(pSyncNode); - // start heartbeat timer syncNodeStartHeartbeatTimer(pSyncNode); - // append noop - syncNodeAppendNoop(pSyncNode); - syncMaybeAdvanceCommitIndex(pSyncNode); // maybe only one replica - // trace log do { int32_t debugStrLen = strlen(debugStr); @@ -1841,9 +1843,9 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) { syncNodeLog2("==state change syncNodeCandidate2Leader==", pSyncNode); // Raft 3.6.2 Committing entries from previous terms - - // do not use this - // syncNodeEqNoop(pSyncNode); + syncNodeReplicate(pSyncNode); + syncNodeAppendNoop(pSyncNode); + syncMaybeAdvanceCommitIndex(pSyncNode); } void syncNodeFollower2Candidate(SSyncNode* pSyncNode) { From 10e90ce973830815b616e0ba28b626766a1d4e96 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Sat, 25 Jun 2022 16:12:00 +0800 Subject: [PATCH 17/29] feat(stream): auto pull data --- include/common/tcommon.h | 3 +- include/common/tdatablock.h | 3 + include/libs/stream/tstream.h | 2 +- include/libs/stream/tstreamUpdate.h | 3 + source/common/src/tdatablock.c | 78 +++- source/dnode/snode/src/snode.c | 2 +- source/dnode/vnode/src/tq/tqRead.c | 6 +- source/libs/executor/inc/executorimpl.h | 11 +- source/libs/executor/src/scanoperator.c | 59 ++- source/libs/executor/src/timewindowoperator.c | 382 ++++++++++++++---- source/libs/stream/src/streamDispatch.c | 3 +- source/libs/stream/src/streamExec.c | 15 +- source/libs/stream/src/streamUpdate.c | 20 + .../tsim/stream/distributeInterval0.sim | 8 +- tests/script/tsim/stream/partitionby.sim | 40 +- tests/script/tsim/stream/schedSnode.sim | 6 +- 16 files changed, 519 insertions(+), 122 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index a14f7eff8a..928fe0aa0e 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -42,12 +42,13 @@ enum { typedef enum EStreamType { STREAM_NORMAL = 1, STREAM_INVERT, - STREAM_REPROCESS, + STREAM_CLEAR, STREAM_INVALID, STREAM_GET_ALL, STREAM_DELETE, STREAM_RETRIEVE, STREAM_PUSH_DATA, + STREAM_PUSH_EMPTY, } EStreamType; typedef struct { diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 2a0d4e7ff6..2e2c7d1700 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -224,6 +224,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize); int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n); int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src); +int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src); SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData); SSDataBlock* createDataBlock(); int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData); @@ -236,6 +237,8 @@ void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData); void blockDebugShowData(const SArray* dataBlocks, const char* flag); +// for debug +char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf); int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, tb_uid_t suid); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 594344ba8a..db928f194c 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -319,7 +319,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem return -1; } 
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone); - } else if (pItem->type == STREAM_INPUT__DATA_BLOCK) { + } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { taosWriteQitem(pTask->inputQueue->queue, pItem); } else if (pItem->type == STREAM_INPUT__CHECKPOINT) { taosWriteQitem(pTask->inputQueue->queue, pItem); diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h index 398851a09f..21a1515d8f 100644 --- a/include/libs/stream/tstreamUpdate.h +++ b/include/libs/stream/tstreamUpdate.h @@ -32,12 +32,15 @@ typedef struct SUpdateInfo { int64_t interval; int64_t watermark; TSKEY minTS; + SScalableBf* pCloseWinSBF; } SUpdateInfo; SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark); SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark); bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts); void updateInfoDestroy(SUpdateInfo *pInfo); +void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo); +void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo); #ifdef __cplusplus } diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 593f8c5c0b..cc995c4d64 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1164,7 +1164,7 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows) int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows) { int32_t code = 0; - ASSERT(numOfRows > 0); + //ASSERT(numOfRows > 0); if (numOfRows == 0) { return TSDB_CODE_SUCCESS; @@ -1230,6 +1230,32 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) { return 0; } +int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) { + ASSERT(src != NULL && dst != NULL); + + blockDataCleanup(dst); + int32_t code = blockDataEnsureCapacity(dst, src->info.rows); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return code; + } + + size_t numOfCols = taosArrayGetSize(src->pDataBlock); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pDst = taosArrayGet(dst->pDataBlock, i); + SColumnInfoData* pSrc = taosArrayGet(src->pDataBlock, i); + if (pSrc->pData == NULL) { + continue; + } + + colDataAssign(pDst, pSrc, src->info.rows, &src->info); + } + + dst->info.rows = src->info.rows; + dst->info.window = src->info.window; + return TSDB_CODE_SUCCESS; +} + SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) { if (pDataBlock == NULL) { return NULL; @@ -1627,6 +1653,56 @@ void blockDebugShowData(const SArray* dataBlocks, const char* flag) { } } +// for debug +char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) { + int32_t size = 2048; + *pDataBuf = taosMemoryCalloc(size, 1); + char* dumpBuf = *pDataBuf; + char pBuf[128] = {0}; + int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); + int32_t rows = pDataBlock->info.rows; + int32_t len = 0; + len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId); + for (int32_t j = 0; j < rows; j++) { + len += snprintf(dumpBuf + len, size - len, "%s |", flag); + for (int32_t k = 0; k < colNum; k++) { + SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); + void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); + if (colDataIsNull(pColInfoData, rows, j, NULL)) { + len += snprintf(dumpBuf + len, size - len, " %15s |", "NULL"); + continue; + } + switch 
(pColInfoData->info.type) { + case TSDB_DATA_TYPE_TIMESTAMP: + formatTimestamp(pBuf, *(uint64_t*)var, TSDB_TIME_PRECISION_MILLI); + len += snprintf(dumpBuf + len, size - len, " %25s |", pBuf); + break; + case TSDB_DATA_TYPE_INT: + len += snprintf(dumpBuf + len, size - len, " %15d |", *(int32_t*)var); + break; + case TSDB_DATA_TYPE_UINT: + len += snprintf(dumpBuf + len, size - len, " %15u |", *(uint32_t*)var); + break; + case TSDB_DATA_TYPE_BIGINT: + len += snprintf(dumpBuf + len, size - len, " %15ld |", *(int64_t*)var); + break; + case TSDB_DATA_TYPE_UBIGINT: + len += snprintf(dumpBuf + len, size - len, " %15lu |", *(uint64_t*)var); + break; + case TSDB_DATA_TYPE_FLOAT: + len += snprintf(dumpBuf + len, size - len, " %15f |", *(float*)var); + break; + case TSDB_DATA_TYPE_DOUBLE: + len += snprintf(dumpBuf + len, size - len, " %15lf |", *(double*)var); + break; + } + } + len += snprintf(dumpBuf + len, size - len, "\n"); + } + len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag); + return dumpBuf; +} + /** * @brief TODO: Assume that the final generated result it less than 3M * diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 3a92cba773..b13e654caf 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -257,7 +257,7 @@ int32_t sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg) { case TDMT_STREAM_TASK_RECOVER_RSP: return sndProcessTaskRecoverRsp(pSnode, pMsg); case TDMT_STREAM_RETRIEVE_RSP: - return sndProcessTaskRecoverRsp(pSnode, pMsg); + return sndProcessTaskRetrieveRsp(pSnode, pMsg); default: ASSERT(0); } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 6184d8f810..96f4eb3fd9 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -109,11 +109,15 @@ int32_t tqReadHandleSetMsg(STqReadHandle* pReadHandle, SSubmitReq* pMsg, int64_t } bool tqNextDataBlock(STqReadHandle* pHandle) { + if (pHandle->pMsg == NULL) return false; while (1) { if (tGetSubmitMsgNext(&pHandle->msgIter, &pHandle->pBlock) < 0) { return false; } - if (pHandle->pBlock == NULL) return false; + if (pHandle->pBlock == NULL) { + pHandle->pMsg = NULL; + return false; + } if (pHandle->tbIdHash == NULL) { return true; diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index d8d231e952..36f81e86ff 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -293,6 +293,7 @@ typedef enum EStreamScanMode { STREAM_SCAN_FROM_RES, STREAM_SCAN_FROM_UPDATERES, STREAM_SCAN_FROM_DATAREADER, + STREAM_SCAN_FROM_DATAREADER_RETRIEVE, } EStreamScanMode; typedef struct SCatchSupporter { @@ -348,7 +349,9 @@ typedef struct SStreamBlockScanInfo { SArray* childIds; SessionWindowSupporter sessionSup; bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. 
- int32_t scanWinIndex; + int32_t scanWinIndex; // for state operator + int32_t pullDataResIndex; + SSDataBlock* pPullDataRes; // pull data SSDataBlock } SStreamBlockScanInfo; typedef struct SSysTableScanInfo { @@ -427,8 +430,13 @@ typedef struct SStreamFinalIntervalOperatorInfo { STimeWindowAggSupp twAggSup; SArray* pChildren; SSDataBlock* pUpdateRes; + bool returnUpdate; SPhysiNode* pPhyNode; // create new child bool isFinal; + SHashObj* pPullDataMap; + SArray* pPullWins; // SPullWindowInfo + int32_t pullIndex; + SSDataBlock* pPullDataRes; } SStreamFinalIntervalOperatorInfo; typedef struct SAggOperatorInfo { @@ -851,6 +859,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex); int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* groupKey); +SSDataBlock* createPullDataBlock(); #ifdef __cplusplus } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 7affac76d2..edf98deed4 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -776,15 +776,14 @@ static bool isStateWindow(SStreamBlockScanInfo* pInfo) { return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE; } -static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { - SSDataBlock* pSDB = pInfo->pUpdateRes; +static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) { STimeWindow win = { .skey = INT64_MIN, .ekey = INT64_MAX, }; bool needRead = false; - if (!isStateWindow(pInfo) && pInfo->updateResIndex < pSDB->info.rows) { - SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, pInfo->primaryTsIndex); + if (!isStateWindow(pInfo) && (*pRowIndex) < pSDB->info.rows) { + SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, tsColIndex); TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; @@ -793,14 +792,14 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { int64_t gap = pInfo->sessionSup.gap; int32_t winIndex = 0; SResultWindowInfo* pCurWin = - getSessionTimeWindow(pAggSup, tsCols[pInfo->updateResIndex], INT64_MIN, pSDB->info.groupId, gap, &winIndex); + getSessionTimeWindow(pAggSup, tsCols[(*pRowIndex)], INT64_MIN, pSDB->info.groupId, gap, &winIndex); win = pCurWin->win; - pInfo->updateResIndex += - updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, pInfo->updateResIndex, gap, NULL); + (*pRowIndex) += + updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, (*pRowIndex), gap, NULL); } else { - win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[pInfo->updateResIndex], &pInfo->interval, + win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[(*pRowIndex)], &pInfo->interval, pInfo->interval.precision, NULL); - pInfo->updateResIndex += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, pInfo->updateResIndex, win.ekey, + (*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, (*pRowIndex), win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); } needRead = true; @@ -823,6 +822,9 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { pTableScanInfo->cond.twindows[0] = win; pTableScanInfo->curTWinIdx = 0; // tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + // if (!pTableScanInfo->dataReader) { + // return false; + // } pTableScanInfo->scanTimes = 0; pTableScanInfo->currentGroupId = -1; return true; @@ 
-862,12 +864,12 @@ static uint64_t getGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_ */ } -static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo) { +static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) { while (1) { SSDataBlock* pResult = NULL; pResult = doTableScan(pInfo->pSnapshotReadOp); if (pResult == NULL) { - if (prepareDataScan(pInfo)) { + if (prepareDataScan(pInfo, pSDB, tsColIndex, pRowIndex)) { // scan next window data pResult = doTableScan(pInfo->pSnapshotReadOp); } @@ -916,7 +918,7 @@ static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDa pUpdateBlock->info.rows = i; pInfo->tsArrayIndex += i; pUpdateBlock->info.groupId = pInfo->groupId; - pUpdateBlock->info.type = STREAM_REPROCESS; + pUpdateBlock->info.type = STREAM_CLEAR; blockDataUpdateTsWindow(pUpdateBlock, 0); } // all rows have same group id @@ -970,6 +972,14 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { int32_t current = pInfo->validBlockIndex++; SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current); blockDataUpdateTsWindow(pBlock, 0); + if (pBlock->info.type == STREAM_RETRIEVE) { + pInfo->blockType = STREAM_DATA_TYPE_SUBMIT_BLOCK; + pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE; + copyDataBlock(pInfo->pPullDataRes, pBlock); + pInfo->pullDataResIndex = 0; + prepareDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex); + updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo); + } return pBlock; } else if (pInfo->blockType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { if (pInfo->scanMode == STREAM_SCAN_FROM_RES) { @@ -979,28 +989,39 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } else if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) { pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; if (!isStateWindow(pInfo)) { - prepareDataScan(pInfo); + prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); } return pInfo->pUpdateRes; + } else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RETRIEVE) { + SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex); + if (pSDB != NULL) { + getUpdateDataBlock(pInfo, true, pSDB, NULL); + pSDB->info.type = STREAM_PUSH_DATA; + return pSDB; + } + pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; } else { if (isStateWindow(pInfo) && taosArrayGetSize(pInfo->sessionSup.pStreamAggSup->pScanWindow) > 0) { pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; pInfo->updateResIndex = pInfo->pUpdateRes->info.rows; - prepareDataScan(pInfo); + prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); } if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) { - SSDataBlock* pSDB = doDataScan(pInfo); + SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); if (pSDB == NULL) { setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes); if (pInfo->pUpdateRes->info.rows > 0) { if (!isStateWindow(pInfo)) { - prepareDataScan(pInfo); + // Todo(liuyao) mybe can delete this. 
+ bool test = prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); + ASSERT(test == false); } return pInfo->pUpdateRes; } else { pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; } } else { + pSDB->info.type = STREAM_NORMAL; getUpdateDataBlock(pInfo, true, pSDB, NULL); return pSDB; } @@ -1070,10 +1091,12 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { taosArrayDestroy(block.pDataBlock); if (pInfo->pRes->pDataBlock == NULL) { // TODO add log + updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo); pOperator->status = OP_EXEC_DONE; pTaskInfo->code = terrno; return NULL; } + // currently only the tbname pseudo column if (pInfo->numOfPseudoExpr > 0) { addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes); @@ -1091,12 +1114,13 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pOperator->resultInfo.totalRows += pBlockInfo->rows; if (pBlockInfo->rows == 0) { + updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo); pOperator->status = OP_EXEC_DONE; } else if (pInfo->pUpdateInfo) { pInfo->tsArrayIndex = 0; getUpdateDataBlock(pInfo, true, pInfo->pRes, pInfo->pUpdateRes); if (pInfo->pUpdateRes->info.rows > 0) { - if (pInfo->pUpdateRes->info.type == STREAM_REPROCESS) { + if (pInfo->pUpdateRes->info.type == STREAM_CLEAR) { pInfo->updateResIndex = 0; pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES; } else if (pInfo->pUpdateRes->info.type == STREAM_INVERT) { @@ -1209,6 +1233,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; pInfo->groupId = 0; + pInfo->pPullDataRes = createPullDataBlock(); pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 0ba898e4f7..b7d39207d8 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -15,6 +15,7 @@ #include "executorimpl.h" #include "function.h" #include "functionMgt.h" +#include "tcompare.h" #include "tdatablock.h" #include "tfill.h" #include "ttime.h" @@ -26,6 +27,16 @@ typedef enum SResultTsInterpType { #define IS_FINAL_OP(op) ((op)->isFinal) +typedef struct SWinRes { + TSKEY ts; + uint64_t groupId; +} SWinRes; + +typedef struct SPullWindowInfo { + STimeWindow window; + uint64_t groupId; +} SPullWindowInfo; + static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator); static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo); @@ -684,11 +695,13 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num } void printDataBlock(SSDataBlock* pBlock, const char* flag) { - if (pBlock == NULL) return; - SArray* blocks = taosArrayInit(1, sizeof(SSDataBlock)); - taosArrayPush(blocks, pBlock); - blockDebugShowData(blocks, flag); - taosArrayDestroy(blocks); + if (pBlock == NULL){ + qDebug("======printDataBlock Block is Null"); + return; + } + char *pBuf = NULL; + qDebug("%s", dumpBlockData(pBlock, flag, &pBuf)); + taosMemoryFree(pBuf); } typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); @@ -1217,30 +1230,40 @@ void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprS } } -void doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t bytes, uint64_t groupId, +bool 
doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t bytes, uint64_t groupId, int32_t numOfOutput) { SET_RES_WINDOW_KEY(pAggSup->keyBuf, pData, bytes, groupId); SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); if (!p1) { // window has been closed - return; + return false; } doClearWindowImpl(p1, pAggSup->pResultBuf, pSup, numOfOutput); + return true; } static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* pInterval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, SArray* pUpWins) { - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); - TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; + SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, tsIndex); + TSKEY* tsCols = (TSKEY*)pTsCol->pData; + uint64_t* pGpDatas = NULL; + if (pBlock->info.type == STREAM_RETRIEVE) { + SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, 2); + pGpDatas = (uint64_t*)pGpCol->pData; + } int32_t step = 0; for (int32_t i = 0; i < pBlock->info.rows; i += step) { SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, pInterval->precision, NULL); step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); - doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput); - if (pUpWins) { + uint64_t groupId = pBlock->info.groupId; + if (pGpDatas) { + groupId = pGpDatas[i]; + } + bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), groupId, numOfOutput); + if (pUpWins && res) { taosArrayPush(pUpWins, &win); } } @@ -1268,8 +1291,8 @@ bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup) { return pSup->maxTs != INT64_MIN && pWin->ekey < pSup->maxTs - pSup->waterMark; } -static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval, - SArray* closeWins) { +static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, + SInterval* pInterval, SHashObj* pPullDataMap, SArray* closeWins) { void* pIte = NULL; size_t keyLen = 0; while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { @@ -1280,10 +1303,20 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, pInterval->precision, NULL); + SWinRes winRe = {.ts = win.skey, .groupId = groupId,}; + void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinRes)); if (isCloseWindow(&win, pSup)) { - char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; - SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); - taosHashRemove(pHashMap, keyBuf, keyLen); + if (chIds && pPullDataMap) { + SArray* chAy = *(SArray**) chIds; + int32_t size = taosArrayGetSize(chAy); + qInfo("======window %ld wait child size:%d", win.skey ,size); + for (int32_t i = 0; i < size; i++) { + qInfo("======window %ld wait chid id:%d", win.skey ,*(int32_t*)taosArrayGet(chAy, i)); + } + continue; + } else if (pPullDataMap) { + qInfo("======close window %ld", win.skey); + } SResultRowPosition* pPos = (SResultRowPosition*)pIte; if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { int32_t code = saveResult(ts, pPos->pageId, pPos->offset, groupId, closeWins); @@ -1291,11 +1324,25 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* 
pSup, return code; } } + char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; + SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); + taosHashRemove(pHashMap, keyBuf, keyLen); } } return TSDB_CODE_SUCCESS; } +static void closeChildIntervalWindow(SArray* pChildren, TSKEY maxTs) { + int32_t size = taosArrayGetSize(pChildren); + for (int32_t i = 0; i < size; i++) { + SOperatorInfo* pChildOp = taosArrayGetP(pChildren, i); + SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info; + ASSERT(pChInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE); + pChInfo->twAggSup.maxTs = TMAX(pChInfo->twAggSup.maxTs, maxTs); + closeIntervalWindow(pChInfo->aggSup.pResultRowHashTable, &pChInfo->twAggSup, &pChInfo->interval, NULL, NULL); + } +} + static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SIntervalAggOperatorInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1324,7 +1371,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { break; } - if (pBlock->info.type == STREAM_REPROCESS) { + if (pBlock->info.type == STREAM_CLEAR) { doClearWindows(&pInfo->aggSup, &pOperator->exprSupp, &pInfo->interval, 0, pOperator->exprSupp.numOfExprs, pBlock, NULL); qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo)); @@ -1345,7 +1392,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated); } - closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pUpdated); + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdated); finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); @@ -1373,6 +1420,11 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); cleanupAggSup(&pInfo->aggSup); + //it should be empty. 
+ taosHashCleanup(pInfo->pPullDataMap); + taosArrayDestroy(pInfo->pPullWins); + blockDataDestroy(pInfo->pPullDataRes); + if (pInfo->pChildren) { int32_t size = taosArrayGetSize(pInfo->pChildren); for (int32_t i = 0; i < size; i++) { @@ -2164,6 +2216,24 @@ bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup) { return p1 == NULL; } +int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* tsCols, + int32_t startPos, TSKEY eKey, STimeWindow* pNextWin) { + int32_t forwardRows = getNumOfRowsInTimeWindow(pBlockInfo, tsCols, startPos, + eKey, binarySearchForKey, NULL, TSDB_ORDER_ASC); + int32_t prevEndPos = forwardRows - 1 + startPos; + return getNextQualifiedWindow(pInterval, pNextWin, pBlockInfo, tsCols, prevEndPos, TSDB_ORDER_ASC); +} + +void addPullWindow(SHashObj* pMap, SWinRes* pWinRes, int32_t size) { + SArray* childIds = taosArrayInit(8, sizeof(int32_t)); + for (int32_t i = 0; i < size; i++) { + taosArrayPush(childIds, &i); + } + taosHashPut(pMap, pWinRes, sizeof(SWinRes), &childIds, sizeof(void*)); +} + +static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; } + static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId, SArray* pUpdated) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info; @@ -2177,35 +2247,59 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc SResultRow* pResult = NULL; int32_t forwardRows = 0; - if (pSDataBlock->pDataBlock != NULL) { - SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); - tsCols = (int64_t*)pColDataInfo->pData; - } else { - return; - } + ASSERT(pSDataBlock->pDataBlock != NULL); + SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); + tsCols = (int64_t*)pColDataInfo->pData; int32_t startPos = ascScan ? 
0 : (pSDataBlock->info.rows - 1); TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols); STimeWindow nextWin = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->interval.precision, NULL); while (1) { - if (IS_FINAL_OP(pInfo) && isCloseWindow(&nextWin, &pInfo->twAggSup) && - isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup)) { - SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow)); - taosArrayPush(pUpWins, &nextWin); - rebuildIntervalWindow(pInfo, pSup, pUpWins, pInfo->binfo.pRes->info.groupId, pSup->numOfExprs, - pOperatorInfo->pTaskInfo); - taosArrayDestroy(pUpWins); + if (IS_FINAL_OP(pInfo) && isCloseWindow(&nextWin, &pInfo->twAggSup) && pInfo->pChildren) { + bool ignore = true; + SWinRes winRes = {.ts = nextWin.skey, .groupId = tableGroupId,}; + void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinRes)); + if (isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup) && !chIds) { + SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId}; + // add pull data request + taosArrayPush(pInfo->pPullWins, &pull); + addPullWindow(pInfo->pPullDataMap, &winRes, taosArrayGetSize(pInfo->pChildren)); + } else { + int32_t index = -1; + SArray* chArray = NULL; + if (chIds) { + chArray = *(void**) chIds; + int32_t chId = getChildIndex(pSDataBlock); + index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ); + } + if (index != -1 && pSDataBlock->info.type == STREAM_PUSH_DATA) { + taosArrayRemove(chArray, index); + if (taosArrayGetSize(chArray) == 0) { + // pull data is over + taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes)); + } + } + if ( index == -1 || pSDataBlock->info.type == STREAM_PUSH_DATA) { + ignore = false; + } + } + + if (ignore) { + startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin); + if (startPos < 0) { + break; + } + continue; + } } + int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, true, &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; + forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) { @@ -2230,14 +2324,17 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) initResultRowInfo(&pInfo->binfo.resultRowInfo); } -static void clearUpdateDataBlock(SSDataBlock* pBlock) { +static void clearSpecialDataBlock(SSDataBlock* pBlock) { + if (pBlock->info.rows <= 0) { + return; + } blockDataCleanup(pBlock); } void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex) { // ASSERT(pDest->info.capacity >= pSource->info.rows); blockDataEnsureCapacity(pDest, pSource->info.rows); - clearUpdateDataBlock(pDest); + clearSpecialDataBlock(pDest); SColumnInfoData* pDestCol = taosArrayGet(pDest->pDataBlock, 0); SColumnInfoData* pSourceCol = taosArrayGet(pSource->pDataBlock, tsColIndex); @@ -2254,7 +2351,63 @@ void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsCol blockDataUpdateTsWindow(pDest, 0); } -static int32_t 
getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; } +static bool needBreak(SStreamFinalIntervalOperatorInfo* pInfo) { + int32_t size = taosArrayGetSize(pInfo->pPullWins); + if (pInfo->pullIndex < size) { + return true; + } + return false; +} + +static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pBlock) { + clearSpecialDataBlock(pBlock); + int32_t size = taosArrayGetSize(array); + if (size - (*pIndex) == 0) { + return; + } + blockDataEnsureCapacity(pBlock, size - (*pIndex) ); + ASSERT(3 <= taosArrayGetSize(pBlock->pDataBlock)); + for (; (*pIndex) < size; (*pIndex)++) { + SPullWindowInfo* pWin = taosArrayGet(array, (*pIndex) ); + SColumnInfoData* pStartTs = (SColumnInfoData*) taosArrayGet(pBlock->pDataBlock, 0); + colDataAppend(pStartTs, pBlock->info.rows, (const char*)&pWin->window.skey, false); + + SColumnInfoData* pEndTs = (SColumnInfoData*) taosArrayGet(pBlock->pDataBlock, 1); + colDataAppend(pEndTs, pBlock->info.rows, (const char*)&pWin->window.ekey, false); + + SColumnInfoData* pGroupId = (SColumnInfoData*) taosArrayGet(pBlock->pDataBlock, 2); + colDataAppend(pGroupId, pBlock->info.rows, (const char*)&pWin->groupId, false); + pBlock->info.rows++; + } + if ((*pIndex) == size) { + *pIndex = 0; + taosArrayClear(array); + } + blockDataUpdateTsWindow(pBlock, 0); +} + +void processPushEmpty(SSDataBlock* pBlock, SHashObj* pMap) { + SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, 0); + TSKEY* tsData = (TSKEY*)pStartCol->pData; + SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, 2); + uint64_t* groupIdData = (uint64_t*)pGroupCol->pData; + int32_t chId = getChildIndex(pBlock); + for (int32_t i = 0; i < pBlock->info.rows; i++) { + SWinRes winRes = {.ts = tsData[i], .groupId = groupIdData[i]}; + void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinRes)); + if (chIds) { + SArray* chArray = *(SArray**) chIds; + int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ); + if (index != -1) { + taosArrayRemove(chArray, index); + if (taosArrayGetSize(chArray) == 0) { + // pull data is over + taosHashRemove(pMap, &winRes, sizeof(SWinRes)); + } + } + } + } +} static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info; @@ -2270,28 +2423,50 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pInfo->binfo.pRes->info.rows == 0) { pOperator->status = OP_EXEC_DONE; - if (IS_FINAL_OP(pInfo) || pInfo->pUpdateRes->info.rows == 0) { - if (!IS_FINAL_OP(pInfo)) { - // semi interval operator clear disk buffer - clearStreamIntervalOperator(pInfo); - } - return NULL; + if (!IS_FINAL_OP(pInfo)) { + // semi interval operator clear disk buffer + clearStreamIntervalOperator(pInfo); } + return NULL; + } + printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + return pInfo->binfo.pRes; + } else { + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); + if (pInfo->binfo.pRes->info.rows != 0) { + printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + return pInfo->binfo.pRes; + } + if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) { + pInfo->returnUpdate = false; + ASSERT(!IS_FINAL_OP(pInfo)); + printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? 
"interval Final" : "interval Semi"); // process the rest of the data - pOperator->status = OP_OPENED; return pInfo->pUpdateRes; } - return pInfo->binfo.pRes; + doBuildPullDataBlock(pInfo->pPullWins, &pInfo->pullIndex, pInfo->pPullDataRes); + if (pInfo->pPullDataRes->info.rows != 0) { + // process the rest of the data + ASSERT(IS_FINAL_OP(pInfo)); + printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + return pInfo->pPullDataRes; + } } while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { - clearUpdateDataBlock(pInfo->pUpdateRes); + clearSpecialDataBlock(pInfo->pUpdateRes); + pOperator->status = OP_RES_TO_RETURN; + qInfo("Stream Final Interval return data"); break; } + printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv"); + maxTs = TMAX(maxTs, pBlock->info.window.ekey); - if (pBlock->info.type == STREAM_REPROCESS) { + if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PUSH_DATA || pBlock->info.type == STREAM_INVALID) { + pInfo->binfo.pRes->info.type = pBlock->info.type; + } else if (pBlock->info.type == STREAM_CLEAR) { SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pInfo->primaryTsIndex, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); @@ -2310,11 +2485,25 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } removeResults(pUpWins, pUpdated); copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex); + pInfo->returnUpdate = true; taosArrayDestroy(pUpWins); break; } else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) { getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated); continue; + } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { + SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow)); + doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, 0, pOperator->exprSupp.numOfExprs, + pBlock, pUpWins); + removeResults(pUpWins, pUpdated); + taosArrayDestroy(pUpWins); + if (taosArrayGetSize(pUpdated) > 0) { + break; + } + continue; + } else if (pBlock->info.type == STREAM_PUSH_EMPTY && IS_FINAL_OP(pInfo)) { + processPushEmpty(pBlock, pInfo->pPullDataMap); + continue; } setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true); @@ -2334,31 +2523,70 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info; setInputDataBlock(pChildOp, pChildOp->exprSupp.pCtx, pBlock, pChInfo->order, MAIN_SCAN, true); doHashInterval(pChildOp, pBlock, pBlock->info.groupId, NULL); - pChInfo->twAggSup.maxTs = TMAX(pChInfo->twAggSup.maxTs, pBlock->info.window.ekey); + + if (needBreak(pInfo)) { + break; + } } - maxTs = TMAX(maxTs, pBlock->info.window.ekey); } pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); if (IS_FINAL_OP(pInfo)) { - closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pUpdated); + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, + &pInfo->interval, pInfo->pPullDataMap, pUpdated); + closeChildIntervalWindow(pInfo->pChildren, pInfo->twAggSup.maxTs); } finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildResultDatablock(pOperator, 
&pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); - pOperator->status = OP_RES_TO_RETURN; - if (pInfo->binfo.pRes->info.rows == 0) { - pOperator->status = OP_EXEC_DONE; - if (pInfo->pUpdateRes->info.rows == 0) { - return NULL; - } + if (pInfo->binfo.pRes->info.rows != 0) { + printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + return pInfo->binfo.pRes; + } + + if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) { + pInfo->returnUpdate = false; + ASSERT(!IS_FINAL_OP(pInfo)); + printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); // process the rest of the data - pOperator->status = OP_OPENED; return pInfo->pUpdateRes; } - return pInfo->binfo.pRes; + + doBuildPullDataBlock(pInfo->pPullWins, &pInfo->pullIndex, pInfo->pPullDataRes); + if (pInfo->pPullDataRes->info.rows != 0) { + // process the rest of the data + ASSERT(IS_FINAL_OP(pInfo)); + printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + return pInfo->pPullDataRes; + } + // ASSERT(false); + return NULL; +} + +SSDataBlock* createPullDataBlock() { + SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + pBlock->info.hasVarCol = false; + pBlock->info.groupId = 0; + pBlock->info.rows = 0; + pBlock->info.type = STREAM_RETRIEVE; + pBlock->info.rowSize = sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t); + + pBlock->pDataBlock = taosArrayInit(3, sizeof(SColumnInfoData)); + SColumnInfoData infoData = {0}; + infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP; + infoData.info.bytes = sizeof(TSKEY); + // window start ts + taosArrayPush(pBlock->pDataBlock, &infoData); + // window end ts + taosArrayPush(pBlock->pDataBlock, &infoData); + + infoData.info.type = TSDB_DATA_TYPE_UBIGINT; + infoData.info.bytes = sizeof(uint64_t); + taosArrayPush(pBlock->pDataBlock, &infoData); + + return pBlock; } SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, @@ -2412,23 +2640,30 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, goto _error; } } - // semi interval operator does not catch result pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc); - pInfo->pUpdateRes->info.type = STREAM_REPROCESS; + pInfo->pUpdateRes->info.type = STREAM_CLEAR; blockDataEnsureCapacity(pInfo->pUpdateRes, 128); + pInfo->returnUpdate = false; + pInfo->pPhyNode = (SPhysiNode*)nodesCloneNode((SNode*)pPhyNode); if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) { pInfo->isFinal = true; pOperator->name = "StreamFinalIntervalOperator"; } else { + // semi interval operator does not catch result pInfo->isFinal = false; pOperator->name = "StreamSemiIntervalOperator"; } - if (!IS_FINAL_OP(pInfo)) { + if (!IS_FINAL_OP(pInfo) || numOfChild == 0) { pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; } + pInfo->pPullWins = taosArrayInit(8, sizeof(SPullWindowInfo)); + pInfo->pullIndex = 0; + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK); + pInfo->pPullDataRes = createPullDataBlock(); pOperator->operatorType = pPhyNode->type; pOperator->blocking = true; @@ -2811,11 +3046,6 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex, } } -typedef struct SWinRes { - TSKEY ts; - uint64_t groupId; -} SWinRes; - static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SHashObj* pStUpdated, 
SHashObj* pStDeleted, bool hasEndTs) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -3027,14 +3257,14 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { } else if (pOperator->status == OP_RES_TO_RETURN) { doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0) { - /*printDataBlock(pInfo->pDelRes, "session del");*/ + printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session"); return pInfo->pDelRes; } doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } - /*printDataBlock(pBInfo->pRes, "session insert");*/ + printDataBlock(pBInfo->pRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session"); return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes; } @@ -3048,7 +3278,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { break; } - if (pBlock->info.type == STREAM_REPROCESS) { + if (pBlock->info.type == STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, 0, pOperator->exprSupp.numOfExprs, pInfo->gap, pWins); @@ -3102,11 +3332,11 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0) { - /*printDataBlock(pInfo->pDelRes, "session del");*/ + printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session"); return pInfo->pDelRes; } doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); - /*printDataBlock(pBInfo->pRes, "session insert");*/ + printDataBlock(pBInfo->pRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session"); return pBInfo->pRes->info.rows == 0 ? 
NULL : pBInfo->pRes; } @@ -3169,11 +3399,11 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { - clearUpdateDataBlock(pInfo->pUpdateRes); + clearSpecialDataBlock(pInfo->pUpdateRes); break; } - if (pBlock->info.type == STREAM_REPROCESS) { + if (pBlock->info.type == STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, 0, pSup->numOfExprs, pInfo->gap, pWins); removeSessionResults(pStUpdated, pWins); @@ -3236,7 +3466,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream } else { pInfo->isFinal = false; pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc); - pInfo->pUpdateRes->info.type = STREAM_REPROCESS; + pInfo->pUpdateRes->info.type = STREAM_CLEAR; blockDataEnsureCapacity(pInfo->pUpdateRes, 128); pOperator->name = "StreamSessionSemiAggOperator"; pOperator->fpSet = @@ -3554,7 +3784,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { break; } - if (pBlock->info.type == STREAM_REPROCESS) { + if (pBlock->info.type == STREAM_CLEAR) { doClearStateWindows(&pInfo->streamAggSup, pBlock, pInfo->primaryTsIndex, &pInfo->stateCol, pInfo->stateCol.slotId, pSeUpdated, pInfo->pSeDeleted); continue; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 0c9d46f055..a5e9b8edd9 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -115,6 +115,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) .srcNodeId = pTask->nodeId, .srcTaskId = pTask->taskId, .pRetrieve = pRetrieve, + .retrieveLen = dataStrLen, }; int32_t sz = taosArrayGetSize(pTask->childEpInfo); @@ -146,7 +147,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) .code = 0, .msgType = TDMT_STREAM_RETRIEVE, .pCont = buf, - .contLen = len, + .contLen = sizeof(SMsgHead) + len, }; if (tmsgSendReq(&pEpInfo->epSet, &rpcMsg) < 0) { diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index fe0f406f8d..c75e6c004a 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -45,11 +45,16 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) ASSERT(false); } if (output == NULL) { - if (pItem->type == STREAM_INPUT__DATA_RETRIEVE && !hasData) { - SSDataBlock block = {0}; - block.info.type = STREAM_PUSH_DATA; - block.info.childId = pTask->selfChildId; - taosArrayPush(pRes, &block); + if (pItem->type == STREAM_INPUT__DATA_RETRIEVE) { + //SSDataBlock block = {0}; + //block.info.type = STREAM_PUSH_EMPTY; + //block.info.childId = pTask->selfChildId; + SStreamDataBlock* pRetrieveBlock = (SStreamDataBlock*)data; + ASSERT(taosArrayGetSize(pRetrieveBlock->blocks) == 1); + SSDataBlock* pBlock = createOneDataBlock(taosArrayGet(pRetrieveBlock->blocks, 0), true); + pBlock->info.type = STREAM_PUSH_EMPTY; + pBlock->info.childId = pTask->selfChildId; + taosArrayPush(pRes, pBlock); } break; } diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index ada391b40a..3d64cec8d8 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -119,6 +119,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma taosArrayPush(pInfo->pTsBuckets, &dumy); } pInfo->numBuckets 
= DEFAULT_BUCKET_SIZE; + pInfo->pCloseWinSBF = NULL; return pInfo; } @@ -154,6 +155,9 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) { TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); if (ts < maxTs - pInfo->watermark) { // this window has been closed. + if (pInfo->pCloseWinSBF) { + return tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY)); + } return true; } @@ -193,3 +197,19 @@ void updateInfoDestroy(SUpdateInfo *pInfo) { taosArrayDestroy(pInfo->pTsSBFs); taosMemoryFree(pInfo); } + +void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo) { + if (pInfo->pCloseWinSBF) { + return; + } + int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND); + pInfo->pCloseWinSBF = tScalableBfInit(rows, DEFAULT_FALSE_POSITIVE); +} + +void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo) { + if (!pInfo || !pInfo->pCloseWinSBF) { + return; + } + tScalableBfDestroy(pInfo->pCloseWinSBF); + pInfo->pCloseWinSBF = NULL; +} diff --git a/tests/script/tsim/stream/distributeInterval0.sim b/tests/script/tsim/stream/distributeInterval0.sim index 91ce49bc8c..3e38df2c89 100644 --- a/tests/script/tsim/stream/distributeInterval0.sim +++ b/tests/script/tsim/stream/distributeInterval0.sim @@ -80,17 +80,17 @@ endi if $data03 != 4 then print ======$data03 - return -1 + goto loop1 endi if $data04 != 52 then print ======$data04 - return -1 + goto loop1 endi if $data05 != 13 then print ======$data05 - return -1 + goto loop1 endi # row 1 @@ -179,7 +179,7 @@ sql use test1; sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int); sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); -sql create stream stream_t2 trigger at_once into streamtST1 as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ; +sql create stream stream_t2 trigger at_once watermark 20s into streamtST1 as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ; sql insert into ts1 values(1648791211000,1,2,3); sql insert into ts1 values(1648791222001,2,2,3); diff --git a/tests/script/tsim/stream/partitionby.sim b/tests/script/tsim/stream/partitionby.sim index b84a01eb4a..c634ad85ee 100644 --- a/tests/script/tsim/stream/partitionby.sim +++ b/tests/script/tsim/stream/partitionby.sim @@ -74,7 +74,7 @@ sql create stable st(ts timestamp,a int,b int,c int,id int) tags(ta int,tb int,t sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); -sql create stream stream_t2 trigger at_once into streamtST as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by ta interval(10s) ; +sql create stream stream_t2 trigger at_once watermark 20s into streamtST as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by ta interval(10s) ; sql insert into ts1 values(1648791211000,1,2,3,1); sql insert into ts1 values(1648791222001,2,2,3,2); sql insert into ts2 values(1648791211000,1,2,3,3); @@ -83,16 +83,16 @@ sql insert into ts2 values(1648791222001,2,2,3,4); sql insert into ts2 values(1648791222002,2,2,3,5); sql insert into ts2 values(1648791222002,2,2,3,6); -sql insert into ts1 values(1648791211000,1,2,3,1); -sql insert into ts1 values(1648791222001,2,2,3,2); -sql insert into ts2 values(1648791211000,1,2,3,3); -sql insert into ts2 values(1648791222001,2,2,3,4); +sql insert into ts1 values(1648791211000,1,2,3,7); +sql insert into ts1 
values(1648791222001,2,2,3,8); +sql insert into ts2 values(1648791211000,1,2,3,9); +sql insert into ts2 values(1648791222001,2,2,3,10); $loop_count = 0 loop2: sleep 300 -sql select * from streamtST; +sql select * from streamtST order by c7 asc; $loop_count = $loop_count + 1 if $loop_count == 10 then @@ -104,8 +104,18 @@ print =====data01=$data01 goto loop2 endi -if $data02 != 1 then -print =====data02=$data02 +if $data11 != 1 then +print =====data11=$data11 +goto loop2 +endi + +if $data21 != 1 then +print =====data21=$data21 +goto loop2 +endi + +if $data31 != 2 then +print =====data31=$data31 goto loop2 endi @@ -114,8 +124,18 @@ print =====data03=$data03 goto loop2 endi -if $data04 != 2 then -print =====data04=$data04 +if $data13 != 2 then +print =====data13=$data13 +goto loop2 +endi + +if $data23 != 1 then +print =====data23=$data23 +goto loop2 +endi + +if $data33 != 4 then +print =====data33=$data33 goto loop2 endi diff --git a/tests/script/tsim/stream/schedSnode.sim b/tests/script/tsim/stream/schedSnode.sim index dbf714a96f..dbdaaf65d0 100644 --- a/tests/script/tsim/stream/schedSnode.sim +++ b/tests/script/tsim/stream/schedSnode.sim @@ -79,17 +79,17 @@ endi if $data03 != 4 then print ======$data03 - return -1 + goto loop1 endi if $data04 != 52 then print ======$data04 - return -1 + goto loop1 endi if $data05 != 13 then print ======$data05 - return -1 + goto loop1 endi # row 1 From 814b3caabf3af2ce115e59398529e07025729b35 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sat, 25 Jun 2022 17:14:50 +0800 Subject: [PATCH 18/29] enh(stream): generate schema only once --- source/dnode/vnode/src/tq/tqSink.c | 133 ++++++++++++++--------------- source/os/src/osFile.c | 2 +- 2 files changed, 63 insertions(+), 72 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index ef3b205b3e..9abc2f639b 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -18,57 +18,87 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid, const char* stbFullName, int32_t vgId) { SSubmitReq* ret = NULL; + SArray* schemaReqs = NULL; + SArray* schemaReqSz = NULL; SArray* tagArray = taosArrayInit(1, sizeof(STagVal)); if (!tagArray) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - // cal size - int32_t cap = sizeof(SSubmitReq); int32_t sz = taosArrayGetSize(pBlocks); - for (int32_t i = 0; i < sz; i++) { - SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); - int32_t rows = pDataBlock->info.rows; - // TODO min - int32_t rowSize = pDataBlock->info.rowSize; - int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema); - int32_t schemaLen = 0; - if (createTb) { - SVCreateTbReq createTbReq = {0}; - char* cname = buildCtbNameByGroupId(stbFullName, pDataBlock->info.groupId); - createTbReq.name = cname; - createTbReq.flags = 0; - createTbReq.type = TSDB_CHILD_TABLE; - createTbReq.ctb.suid = suid; - - STagVal tagVal = { - .cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1, - .type = TSDB_DATA_TYPE_UBIGINT, - .i64 = (int64_t)pDataBlock->info.groupId, + if (createTb) { + schemaReqs = taosArrayInit(sz, sizeof(void*)); + schemaReqSz = taosArrayInit(sz, sizeof(int32_t)); + for (int32_t i = 0; i < sz; i++) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + STagVal tagVal = { + .cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1, + .type = TSDB_DATA_TYPE_UBIGINT, + .i64 = (int64_t)pDataBlock->info.groupId, }; STag* pTag = NULL; taosArrayClear(tagArray); taosArrayPush(tagArray, &tagVal); 
tTagNew(tagArray, 1, false, &pTag); if (pTag == NULL) { - tdDestroySVCreateTbReq(&createTbReq); + taosArrayDestroy(schemaReqs); taosArrayDestroy(tagArray); return NULL; } + + SVCreateTbReq createTbReq = {0}; + createTbReq.name = buildCtbNameByGroupId(stbFullName, pDataBlock->info.groupId); + createTbReq.flags = 0; + createTbReq.type = TSDB_CHILD_TABLE; + createTbReq.ctb.suid = suid; createTbReq.ctb.pTag = (uint8_t*)pTag; int32_t code; + int32_t schemaLen; tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code); - - tdDestroySVCreateTbReq(&createTbReq); if (code < 0) { + tdDestroySVCreateTbReq(&createTbReq); taosArrayDestroy(tagArray); + taosMemoryFreeClear(ret); return NULL; } - } + void* schemaStr = taosMemoryMalloc(schemaLen); + if (schemaStr == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + taosArrayPush(schemaReqs, &schemaStr); + taosArrayPush(schemaReqSz, &schemaLen); + + SEncoder encoder = {0}; + tEncoderInit(&encoder, schemaStr, schemaLen); + code = tEncodeSVCreateTbReq(&encoder, &createTbReq); + if (code < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + tEncoderClear(&encoder); + tdDestroySVCreateTbReq(&createTbReq); + } + } + taosArrayDestroy(tagArray); + + // cal size + int32_t cap = sizeof(SSubmitReq); + for (int32_t i = 0; i < sz; i++) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + int32_t rows = pDataBlock->info.rows; + // TODO min + int32_t rowSize = pDataBlock->info.rowSize; + int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema); + + int32_t schemaLen = 0; + if (createTb) { + schemaLen = *(int32_t*)taosArrayGet(schemaReqSz, i); + } cap += sizeof(SSubmitBlk) + schemaLen + rows * maxLen; } @@ -99,55 +129,13 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo int32_t schemaLen = 0; if (createTb) { - SVCreateTbReq createTbReq = {0}; - char* cname = buildCtbNameByGroupId(stbFullName, pDataBlock->info.groupId); - createTbReq.name = cname; - createTbReq.flags = 0; - createTbReq.type = TSDB_CHILD_TABLE; - createTbReq.ctb.suid = suid; - - STagVal tagVal = { - .cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1, - .type = TSDB_DATA_TYPE_UBIGINT, - .i64 = (int64_t)pDataBlock->info.groupId, - }; - taosArrayClear(tagArray); - taosArrayPush(tagArray, &tagVal); - STag* pTag = NULL; - tTagNew(tagArray, 1, false, &pTag); - if (pTag == NULL) { - tdDestroySVCreateTbReq(&createTbReq); - taosArrayDestroy(tagArray); - taosMemoryFreeClear(ret); - return NULL; - } - createTbReq.ctb.pTag = (uint8_t*)pTag; - - int32_t code; - tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code); - if (code < 0) { - tdDestroySVCreateTbReq(&createTbReq); - taosArrayDestroy(tagArray); - taosMemoryFreeClear(ret); - return NULL; - } - - SEncoder encoder = {0}; - tEncoderInit(&encoder, blkSchema, schemaLen); - code = tEncodeSVCreateTbReq(&encoder, &createTbReq); - tEncoderClear(&encoder); - tdDestroySVCreateTbReq(&createTbReq); - - if (code < 0) { - taosArrayDestroy(tagArray); - taosMemoryFreeClear(ret); - return NULL; - } + schemaLen = *(int32_t*)taosArrayGet(schemaReqSz, i); + void* schemaStr = taosArrayGetP(schemaReqs, i); + memcpy(blkSchema, schemaStr, schemaLen); } blkHead->schemaLen = htonl(schemaLen); STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen); - for (int32_t j = 0; j < rows; j++) { SRowBuilder rb = {0}; tdSRowInit(&rb, pTSchema->version); @@ -175,7 +163,10 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo } ret->length = htonl(ret->length); - taosArrayDestroy(tagArray); + + 
if (schemaReqs) taosArrayDestroyP(schemaReqs, taosMemoryFree); + taosArrayDestroy(schemaReqSz); + return ret; } diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 05b7498cc0..0c6cd80f44 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -671,7 +671,7 @@ void taosFprintfFile(TdFilePtr pFile, const char *format, ...) { fflush(pFile->fp); } -bool taosValidFile(TdFilePtr pFile) { return pFile != NULL; } +bool taosValidFile(TdFilePtr pFile) { return pFile != NULL && pFile->fd > 0; } int32_t taosUmaskFile(int32_t maskVal) { #ifdef WINDOWS From 35336d7964015ef19aa3f50a2e65a38f43aa4863 Mon Sep 17 00:00:00 2001 From: tangfangzhi Date: Sat, 25 Jun 2022 17:33:24 +0800 Subject: [PATCH 19/29] ci: keep change branch in log --- Jenkinsfile2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 861478160f..f582461fb2 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -79,6 +79,7 @@ def pre_test(){ git pull >/dev/null git log -5 echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log + echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log echo "community log: `git log -5`" >>${WKDIR}/jenkins.log git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -95,6 +96,7 @@ def pre_test(){ git pull >/dev/null git log -5 echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log + echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log echo "tdinternal log: `git log -5`" >>${WKDIR}/jenkins.log git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD From 8526b2ff89b7e7fee235cbdd7a122bbedab846ea Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Sat, 25 Jun 2022 17:43:26 +0800 Subject: [PATCH 20/29] test: add test case for tmq --- tests/system-test/7-tmq/tmqShow.py | 158 +++++++++++++++++++++++++++++ tests/test/c/tmqSim.c | 3 +- 2 files changed, 160 insertions(+), 1 deletion(-) create mode 100644 tests/system-test/7-tmq/tmqShow.py diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py new file mode 100644 index 0000000000..6b7e7375ff --- /dev/null +++ b/tests/system-test/7-tmq/tmqShow.py @@ -0,0 +1,158 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 10, + 'rowsPerTbl': 4000, + 'batchNum': 15, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 20, + 'showMsg': 1, + 'showRow': 1} + + topicNameList = ['topic1', 'topic2', 'topic3', 'topic4'] + consumeGroupIdList = ['cgrp1', 'cgrp1', 'cgrp3', 'cgrp4'] + consumerIdList = [0, 1, 2, 3] + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, 
paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict['vgroups'],replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + # tdLog.info("insert data") + # tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + tdLog.info("create 4 topics") + sqlString = "create topic %s as database %s" %(topicNameList[0], paraDict['dbName']) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + sqlString = "create topic %s as stable %s.%s" %(topicNameList[1], paraDict['dbName'], paraDict['stbName']) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + queryString = "select * from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[2], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s " %(topicNameList[3], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + tdSql.query("show topics") + tdLog.debug(tdSql.queryResult) + rows = tdSql.getRows() + if rows != len(consumerIdList): + tdLog.exit("topic rows error") + + for i in range (rows): + topicName = tdSql.getData(i,0) + matchFlag = 0 + while matchFlag == 0: + for j in range(len(topicNameList)): + if topicName == topicNameList[j]: + matchFlag = 1 + break + if matchFlag == 0: + tdLog.exit("topic name: %s is error", topicName) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[0] + tmqCom.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + topicList = topicNameList[1] + keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[1] + tmqCom.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + topicList = topicNameList[2] + keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[2] + tmqCom.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + topicList = topicNameList[3] + keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[3] + tmqCom.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("async insert data") + pThread = tmqCom.asyncInsertData(paraDict) + + 
time.sleep(5) + tdLog.info("check show consumers") + tdSql.query("show consumers") + # tdLog.info(tdSql.queryResult) + rows = tdSql.getRows() + tdLog.info("show consumers rows: %d"%rows) + if rows != len(topicNameList): + tdLog.exit("show consumers rows error") + + tdLog.info("check show subscriptions") + tdSql.query("show subscriptions") + # tdLog.debug(tdSql.queryResult) + rows = tdSql.getRows() + tdLog.info("show subscriptions rows: %d"%rows) + if rows != paraDict['vgroups'] * len(topicNameList): + tdLog.exit("show subscriptions rows error") + + pThread.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = len(consumerIdList) + _ = tmqCom.selectConsumeResult(expectRows) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def run(self): + tdSql.prepare() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index c2fba52e6e..81fa72d15a 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -635,8 +635,9 @@ void loop_consume(SThreadInfo* pInfo) { } } + int32_t consumeDelay = g_stConfInfo.consumeDelay == -1 ? -1 : (g_stConfInfo.consumeDelay * 1000); while (running) { - TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, g_stConfInfo.consumeDelay * 1000); + TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, consumeDelay); if (tmqMsg) { if (0 != g_stConfInfo.showMsgFlag) { totalRows += msg_process(tmqMsg, pInfo, totalMsgs); From 40a21701f9afa399b307d5aa9a5081e0e407fd87 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Sat, 25 Jun 2022 17:44:43 +0800 Subject: [PATCH 21/29] test: add test case into CI --- tests/system-test/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index ef217b828f..81195ca1b1 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -139,3 +139,4 @@ python3 ./test.py -f 7-tmq/tmqCheckData.py python3 ./test.py -f 7-tmq/tmqUdf.py #python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5 python3 ./test.py -f 7-tmq/tmqConsumerGroup.py +python3 ./test.py -f 7-tmq/tmqShow.py From 47ae534c0f23e8a5223f0b904e0d7e8b003bce9c Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sat, 25 Jun 2022 17:44:54 +0800 Subject: [PATCH 22/29] fix(sma): drop stream when drop sma --- source/dnode/mnode/impl/inc/mndStream.h | 5 +++ source/dnode/mnode/impl/src/mndSma.c | 18 +++++++++ source/dnode/mnode/impl/src/mndStream.c | 2 +- source/dnode/vnode/src/tq/tqSink.c | 2 +- source/libs/executor/inc/executorimpl.h | 1 - source/libs/executor/src/executor.c | 11 ++--- source/libs/executor/src/scanoperator.c | 53 +++++++++++-------------- source/libs/wal/src/walRead.c | 2 +- 8 files changed, 53 insertions(+), 41 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index 69385c3a46..5e9089cec9 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -34,6 +34,11 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw); int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); +// for sma +// TODO refactor +int32_t mndDropStreamTasks(SMnode *pMnode, 
STrans *pTrans, SStreamObj *pStream); +int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 05603f8554..3dadcf77f0 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -857,6 +857,24 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); mndTransSetDbName(pTrans, pDb->name, NULL); + SStreamObj *pStream = mndAcquireStream(pMnode, pSma->name); + if (pStream == NULL || pStream->smaId != pSma->uid) { + sdbRelease(pMnode->pSdb, pStream); + goto _OVER; + } else { + if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) { + mError("stream:%s, failed to drop task since %s", pStream->name, terrstr()); + sdbRelease(pMnode->pSdb, pStream); + goto _OVER; + } + + // drop stream + if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) { + sdbRelease(pMnode->pSdb, pStream); + goto _OVER; + } + } + if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER; if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 21158bb0a2..29c6819163 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -494,7 +494,7 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) { return 0; } -static int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { +int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { int32_t lv = taosArrayGetSize(pStream->tasks); for (int32_t i = 0; i < lv; i++) { SArray *pTasks = taosArrayGetP(pStream->tasks, i); diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 9abc2f639b..0bb9918488 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -43,7 +43,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo taosArrayPush(tagArray, &tagVal); tTagNew(tagArray, 1, false, &pTag); if (pTag == NULL) { - taosArrayDestroy(schemaReqs); + terrno = TSDB_CODE_OUT_OF_MEMORY; taosArrayDestroy(tagArray); return NULL; } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index eb7beb19db..0f3251515e 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -347,7 +347,6 @@ typedef struct SStreamBlockScanInfo { SInterval interval; // if the upstream is an interval operator, the interval info is also kept here. SArray* childIds; SessionWindowSupporter sessionSup; - bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. 
int32_t scanWinIndex; } SStreamBlockScanInfo; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 6de364e63a..3fd491885f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -19,8 +19,7 @@ #include "tdatablock.h" #include "vnode.h" -static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid, - char* id) { +static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) { ASSERT(pOperator != NULL); if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { if (pOperator->numOfDownstream == 0) { @@ -33,12 +32,11 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu return TSDB_CODE_QRY_APP_ERROR; } pOperator->status = OP_NOT_OPENED; - return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, assignUid, id); + return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, id); } else { pOperator->status = OP_NOT_OPENED; SStreamBlockScanInfo* pInfo = pOperator->info; - pInfo->assignBlockUid = assignUid; // TODO: if a block was set but not consumed, // prevent setting a different type of block @@ -76,7 +74,7 @@ int32_t qStreamScanSnapshot(qTaskInfo_t tinfo) { return TSDB_CODE_QRY_APP_ERROR; } SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_DATA_TYPE_FROM_SNAPSHOT, 0, NULL); + return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_DATA_TYPE_FROM_SNAPSHOT, NULL); } int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) { @@ -94,8 +92,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - int32_t code = - doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, assignUid, GET_TASKID(pTaskInfo)); + int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo)); } else { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 9c0ed40c30..0c73ec72a5 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -507,20 +507,21 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - if(pInfo->currentGroupId == -1){ + if (pInfo->currentGroupId == -1) { pInfo->currentGroupId++; if (pInfo->currentGroupId >= taosArrayGetSize(pTaskInfo->tableqinfoList.pGroupList)) { setTaskStatus(pTaskInfo, TASK_COMPLETED); return NULL; } - SArray *tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); + SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); tsdbCleanupReadHandle(pInfo->dataReader); - tsdbReaderT* pReader = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, pInfo->queryId, pInfo->taskId); + tsdbReaderT* pReader = + tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, pInfo->queryId, pInfo->taskId); pInfo->dataReader = pReader; } SSDataBlock* result = doTableScanGroup(pOperator); - if(result){ + if (result) { return result; } @@ -530,7 +531,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { return NULL; } - 
SArray *tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); + SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); tsdbSetTableList(pInfo->dataReader, tableList); tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0); @@ -538,7 +539,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { pInfo->scanTimes = 0; result = doTableScanGroup(pOperator); - if(result){ + if (result) { return result; } @@ -822,7 +823,7 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { STableScanInfo* pTableScanInfo = pInfo->pSnapshotReadOp->info; pTableScanInfo->cond.twindows[0] = win; pTableScanInfo->curTWinIdx = 0; -// tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + // tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); pTableScanInfo->scanTimes = 0; pTableScanInfo->currentGroupId = -1; return true; @@ -1030,14 +1031,6 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pInfo->pRes->info.type = STREAM_NORMAL; pInfo->pRes->info.capacity = numOfRows; - // for generating rollup SMA result, each time is an independent time serie. - // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this - if (pInfo->assignBlockUid) { - pInfo->pRes->info.groupId = uid; - } else { - pInfo->pRes->info.groupId = groupId; - } - uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t)); if (groupIdPre) { pInfo->pRes->info.groupId = *groupIdPre; @@ -1132,9 +1125,9 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { return tableIdList; } -SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, - STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, - STimeWindowAggSupp* pTwSup, uint64_t queryId, uint64_t taskId) { +SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, + SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup, uint64_t queryId, + uint64_t taskId) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); @@ -1934,11 +1927,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi goto _error; } - pInfo->pTableList = pTableListInfo; - pInfo->pColMatchInfo = colList; - pInfo->pRes = createResDataBlock(pDescNode); - pInfo->readHandle = *pReadHandle; - pInfo->curPos = 0; + pInfo->pTableList = pTableListInfo; + pInfo->pColMatchInfo = colList; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->readHandle = *pReadHandle; + pInfo->curPos = 0; pOperator->name = "TagScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN; @@ -2216,8 +2209,8 @@ SArray* generateSortByTsInfo(int32_t order) { return pList; } -static int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, SArray* tableList, SArray* arrayReader, uint64_t queryId, - uint64_t taskId) { +static int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, SArray* tableList, + SArray* arrayReader, uint64_t queryId, uint64_t taskId) { for (int32_t i = 0; i < taosArrayGetSize(tableList); ++i) { SArray* tmp = taosArrayInit(1, sizeof(STableKeyInfo)); taosArrayPush(tmp, taosArrayGet(tableList, i)); @@ -2237,13 +2230,13 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { SArray* tableList = taosArrayGetP(pInfo->tableListInfo->pGroupList, 
pInfo->currentGroupId); - createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableList, - pInfo->dataReaders, pInfo->queryId, pInfo->taskId); + createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableList, pInfo->dataReaders, pInfo->queryId, + pInfo->taskId); // todo the total available buffer should be determined by total capacity of buffer of this task. // the additional one is reserved for merge result int32_t tableLen = taosArrayGetSize(tableList); - pInfo->sortBufSize = pInfo->bufPageSize * ((tableLen==0?1:tableLen) + 1); + pInfo->sortBufSize = pInfo->bufPageSize * ((tableLen == 0 ? 1 : tableLen) + 1); int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str); @@ -2342,7 +2335,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { SSDataBlock* pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator); if (pBlock != NULL) { uint64_t* groupId = taosHashGet(pInfo->tableListInfo->map, &(pBlock->info.uid), sizeof(uint64_t)); - if(groupId) pBlock->info.groupId = *groupId; + if (groupId) pBlock->info.groupId = *groupId; pOperator->resultInfo.totalRows += pBlock->info.rows; return pBlock; @@ -2359,7 +2352,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator); if (pBlock != NULL) { uint64_t* groupId = taosHashGet(pInfo->tableListInfo->map, &(pBlock->info.uid), sizeof(uint64_t)); - if(groupId) pBlock->info.groupId = *groupId; + if (groupId) pBlock->info.groupId = *groupId; pOperator->resultInfo.totalRows += pBlock->info.rows; return pBlock; diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 682afbb785..20fa5f1f2b 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -103,6 +103,7 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) { wError("cannot open file %s, since %s", fnameStr, terrstr()); return -1; } + pRead->pReadLogTFile = pLogTFile; walBuildIdxName(pRead->pWal, fileFirstVer, fnameStr); TdFilePtr pIdxTFile = taosOpenFile(fnameStr, TD_FILE_READ); @@ -112,7 +113,6 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) { return -1; } - pRead->pReadLogTFile = pLogTFile; pRead->pReadIdxTFile = pIdxTFile; return 0; } From beae5e3a82d8a2f45a47f67e2e209d493d95b985 Mon Sep 17 00:00:00 2001 From: tomchon Date: Sat, 25 Jun 2022 18:07:05 +0800 Subject: [PATCH 23/29] test: modify testcase of muti-mnodes --- tests/pytest/util/cluster.py | 6 +- .../5dnode3mnodeSep1VnodeStopCreateDb.py | 181 +++++++++++ .../5dnode3mnodeSep1VnodeStopMnodeCreateDb.py | 181 +++++++++++ .../5dnode3mnodeSep1VnodeStopVnodeCreateDb.py | 181 +++++++++++ .../5dnode3mnodeSeperate1VnodeStopInsert.py | 85 +---- .../system-test/6-cluster/5dnode3mnodeStop.py | 82 +++-- .../6-cluster/clusterCommonCheck.py | 211 +++++++++++++ .../6-cluster/clusterCommonCreate.py | 298 ++++++++++++++++++ tests/system-test/fulltest.sh | 4 + 9 files changed, 1130 insertions(+), 99 deletions(-) create mode 100644 tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopCreateDb.py create mode 100644 tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py create mode 100644 tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py create mode 100644 
tests/system-test/6-cluster/clusterCommonCheck.py create mode 100644 tests/system-test/6-cluster/clusterCommonCreate.py diff --git a/tests/pytest/util/cluster.py b/tests/pytest/util/cluster.py index 6eb78f0771..21cad4c5bb 100644 --- a/tests/pytest/util/cluster.py +++ b/tests/pytest/util/cluster.py @@ -24,7 +24,7 @@ class ClusterDnodes(TDDnodes): class ConfigureyCluster: - """This will create defined number of dnodes and create a cluset. + """This will create defined number of dnodes and create a cluster. at the same time, it will return TDDnodes list: dnodes, """ hostname= socket.gethostname() @@ -85,8 +85,8 @@ class ConfigureyCluster: count+=1 time.sleep(1) else: - tdLog.debug("create cluster with %d dnode but check dnode not ready within 5s ! "%self.dnodeNums) - return -1 + tdLog.exit("create cluster with %d dnode but check dnode not ready within 5s ! "%self.dnodeNums) + cluster = ConfigureyCluster() \ No newline at end of file diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopCreateDb.py new file mode 100644 index 0000000000..59fe1c0b16 --- /dev/null +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopCreateDb.py @@ -0,0 +1,181 @@ +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import clusterComCheck + +import time +import socket +import subprocess +from multiprocessing import Process +import threading +import time +import inspect +import ctypes + +class TDTestCase: + + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def _async_raise(self, tid, exctype): + """raises the exception, performs cleanup if needed""" + if not inspect.isclass(exctype): + exctype = type(exctype) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) + if res == 0: + raise ValueError("invalid thread id") + elif res != 1: + # """if it returns a number greater than one, you're in trouble, + # and you should call it again with exc=NULL to revert the effect""" + ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) + raise SystemError("PyThreadState_SetAsyncExc failed") + + def stopThread(self,thread): + self._async_raise(thread.ident, SystemExit) + + + def insertData(self,countstart,countstop): + # fisrt add data : db\stable\childtable\general table + + for couti in range(countstart,countstop): + tdLog.debug("drop database if exists db%d" %couti) + tdSql.execute("drop database if exists db%d" %couti) + print("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("use db%d" %couti) + 
tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + + def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'replica': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + dnodenumbers=int(dnodenumbers) + mnodeNums=int(mnodeNums) + dbNumbers = int(dnodenumbers * restartNumber) + + tdLog.info("first check dnode and mnode") + tdSql.query("show dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkMnodeStatus(1) + + # fisr add three mnodes; + tdLog.info("fisr add three mnodes and check mnode status") + tdSql.execute("create mnode on dnode 2") + clusterComCheck.checkMnodeStatus(2) + tdSql.execute("create mnode on dnode 3") + clusterComCheck.checkMnodeStatus(3) + + # add some error operations and + tdLog.info("Confirm the status of the dnode again") + tdSql.error("create mnode on dnode 2") + tdSql.query("show dnodes;") + print(tdSql.queryResult) + clusterComCheck.checkDnodes(dnodenumbers) + + tdLog.info("Take turns stopping all dnodes ") + # seperate vnode and mnode in different dnodes. 
+ # create database and stable + tdDnodes=cluster.dnodes + stopcount =0 + while stopcount < restartNumber: + for i in range(dnodenumbers): + # threads=[] + # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) + paraDict["dbName"]= 'db%d%d'%(stopcount,i) + threads=threading.Thread(target=clusterComCreate.create_database, args=(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])) + threads.start() + tdDnodes[i].stoptaosd() + # sleep(10) + tdDnodes[i].starttaosd() + # sleep(10) + + if clusterComCheck.checkDnodes(dnodenumbers): + # threads.join() + tdLog.info("first restart loop") + else: + print("456") + threads.join() + self.stopThread(threads) + tdLog.exit("one or more of dnodes failed to start ") + # self.check3mnode() + stopcount+=1 + threads.join() + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkDbRows(dbNumbers) + for i in range(restartNumber): + clusterComCheck.checkDb(dnodenumbers,'db%d'%i) + + + def run(self): + # print(self.master_dnode.cfgDict) + self.fiveDnodeThreeMnode(5,3,1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py new file mode 100644 index 0000000000..cf608f6480 --- /dev/null +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py @@ -0,0 +1,181 @@ +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import clusterComCheck + +import time +import socket +import subprocess +from multiprocessing import Process +import threading +import time +import inspect +import ctypes + +class TDTestCase: + + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def _async_raise(self, tid, exctype): + """raises the exception, performs cleanup if needed""" + if not inspect.isclass(exctype): + exctype = type(exctype) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) + if res == 0: + raise ValueError("invalid thread id") + elif res != 1: + # """if it returns a number greater than one, you're in trouble, + # and you should call it again with exc=NULL to revert the effect""" + ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) + raise SystemError("PyThreadState_SetAsyncExc failed") + + def stopThread(self,thread): + self._async_raise(thread.ident, SystemExit) + + + def insertData(self,countstart,countstop): + # fisrt add data : db\stable\childtable\general table + + for couti in 
range(countstart,countstop): + tdLog.debug("drop database if exists db%d" %couti) + tdSql.execute("drop database if exists db%d" %couti) + print("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("use db%d" %couti) + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + + def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'replica': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + dnodenumbers=int(dnodenumbers) + mnodeNums=int(mnodeNums) + dbNumbers = int(mnodeNums * restartNumber) + tdLog.info("first check dnode and mnode") + tdSql.query("show dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkMnodeStatus(1) + + # fisr add three mnodes; + tdLog.info("fisr add three mnodes and check mnode status") + tdSql.execute("create mnode on dnode 2") + clusterComCheck.checkMnodeStatus(2) + tdSql.execute("create mnode on dnode 3") + clusterComCheck.checkMnodeStatus(3) + + # add some error operations and + tdLog.info("Confirm the status of the dnode again") + tdSql.error("create mnode on dnode 2") + tdSql.query("show dnodes;") + print(tdSql.queryResult) + clusterComCheck.checkDnodes(dnodenumbers) + + tdLog.info("Take turns stopping Mnodes ") + # seperate vnode and mnode in different dnodes. 
+ # create database and stable + tdDnodes=cluster.dnodes + stopcount =0 + while stopcount < restartNumber: + tdLog.info("first restart loop") + for i in range(mnodeNums): + # threads=[] + # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) + paraDict["dbName"]= 'db%d%d'%(stopcount,i) + threads=threading.Thread(target=clusterComCreate.create_database, args=(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])) + threads.start() + tdDnodes[i].stoptaosd() + # sleep(10) + tdDnodes[i].starttaosd() + # sleep(10) + + if clusterComCheck.checkDnodes(dnodenumbers): + # threads.join() + tdLog.info("123") + else: + print("456") + threads.join() + self.stopThread(threads) + tdLog.exit("one or more of dnodes failed to start ") + # self.check3mnode() + stopcount+=1 + threads.join() + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkDbRows(dbNumbers) + for i in range(restartNumber): + clusterComCheck.checkDb(mnodeNums,'db%d'%i) + + + def run(self): + # print(self.master_dnode.cfgDict) + self.fiveDnodeThreeMnode(5,3,1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py new file mode 100644 index 0000000000..2d2322fada --- /dev/null +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py @@ -0,0 +1,181 @@ +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import clusterComCheck + +import time +import socket +import subprocess +from multiprocessing import Process +import threading +import time +import inspect +import ctypes + +class TDTestCase: + + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def _async_raise(self, tid, exctype): + """raises the exception, performs cleanup if needed""" + if not inspect.isclass(exctype): + exctype = type(exctype) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) + if res == 0: + raise ValueError("invalid thread id") + elif res != 1: + # """if it returns a number greater than one, you're in trouble, + # and you should call it again with exc=NULL to revert the effect""" + ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) + raise SystemError("PyThreadState_SetAsyncExc failed") + + def stopThread(self,thread): + self._async_raise(thread.ident, SystemExit) + + + def insertData(self,countstart,countstop): + # fisrt add data : db\stable\childtable\general table + + for 
couti in range(countstart,countstop): + tdLog.debug("drop database if exists db%d" %couti) + tdSql.execute("drop database if exists db%d" %couti) + print("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("use db%d" %couti) + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + + def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'replica': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + dnodenumbers=int(dnodenumbers) + mnodeNums=int(mnodeNums) + vnodeNumbers = int(dnodenumbers-mnodeNums) + dbNumbers = int(vnodeNumbers * restartNumber) + tdLog.info("first check dnode and mnode") + tdSql.query("show dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkMnodeStatus(1) + + # fisr add three mnodes; + tdLog.info("fisr add three mnodes and check mnode status") + tdSql.execute("create mnode on dnode 2") + clusterComCheck.checkMnodeStatus(2) + tdSql.execute("create mnode on dnode 3") + clusterComCheck.checkMnodeStatus(3) + + # add some error operations and + tdLog.info("Confirm the status of the dnode again") + tdSql.error("create mnode on dnode 2") + tdSql.query("show dnodes;") + print(tdSql.queryResult) + clusterComCheck.checkDnodes(dnodenumbers) + + tdLog.info("Take turns stopping Vnodes ") + # seperate vnode and mnode in different dnodes. 
+ # create database and stable + tdDnodes=cluster.dnodes + stopcount =0 + while stopcount < restartNumber: + for i in range(vnodeNumbers): + # threads=[] + # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) + paraDict["dbName"]= 'db%d%d'%(stopcount,i) + threads=threading.Thread(target=clusterComCreate.create_database, args=(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])) + threads.start() + tdDnodes[mnodeNums+i].stoptaosd() + # sleep(10) + tdDnodes[mnodeNums+i].starttaosd() + # sleep(10) + + if clusterComCheck.checkDnodes(vnodeNumbers): + # threads.join() + tdLog.info("first restart loop") + else: + print("456") + threads.join() + self.stopThread(threads) + tdLog.exit("one or more of dnodes failed to start ") + # self.check3mnode() + stopcount+=1 + threads.join() + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkDbRows(dbNumbers) + for i in range(restartNumber): + clusterComCheck.checkDb(vnodeNumbers,'db%d'%i) + + + def run(self): + # print(self.master_dnode.cfgDict) + self.fiveDnodeThreeMnode(5,3,1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py index 1739db09af..aa1d7ecc29 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py @@ -9,6 +9,11 @@ from util.sql import * from util.cases import * from util.dnodes import TDDnodes from util.dnodes import TDDnode +from util.cluster import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + import time import socket import subprocess @@ -17,26 +22,13 @@ import threading import time import inspect import ctypes -class MyDnodes(TDDnodes): - def __init__(self ,dnodes_lists): - super(MyDnodes,self).__init__() - self.dnodes = dnodes_lists # dnode must be TDDnode instance - self.simDeployed = False - class TDTestCase: def init(self,conn ,logSql): tdLog.debug(f"start to excute {__file__}") - self.TDDnodes = None - - def buildcluster(self,dnodenumber): - self.depoly_cluster(dnodenumber) - self.master_dnode = self.TDDnodes.dnodes[0] - self.host=self.master_dnode.cfgDict["fqdn"] - conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) - tdSql.init(conn1.cursor()) - + # tdSql.init(conn.cursor()) + # self.host = socket.gethostname() def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -106,52 +98,6 @@ class TDTestCase: tdSql.checkData(0,0,rowsPerSTable) return - def depoly_cluster(self ,dnodes_nums=5,independent=True): - - testCluster = False - valgrind = 0 - hostname = socket.gethostname() - dnodes = [] - start_port = 6030 - start_port_sec = 6130 - for num in range(1, dnodes_nums+1): - dnode = TDDnode(num) - dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}") - dnode.addExtraCfg("fqdn", f"{hostname}") - dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}") - dnode.addExtraCfg("monitorFqdn", hostname) - dnode.addExtraCfg("monitorPort", 7043) - dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}") - # configure three dnoe don't support vnodes - if independent and (num < 4): - dnode.addExtraCfg("supportVnodes", 0) - - dnodes.append(dnode) - - self.TDDnodes = MyDnodes(dnodes) - 
self.TDDnodes.init("") - self.TDDnodes.setTestCluster(testCluster) - self.TDDnodes.setValgrind(valgrind) - self.TDDnodes.stopAll() - for dnode in self.TDDnodes.dnodes: - self.TDDnodes.deploy(dnode.index,{}) - - for dnode in self.TDDnodes.dnodes: - self.TDDnodes.starttaosd(dnode.index) - - # create cluster - for dnode in self.TDDnodes.dnodes[1:]: - # print(dnode.cfgDict) - dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] - dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] - dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] - cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" - print(cmd) - os.system(cmd) - - time.sleep(2) - tdLog.info(" create cluster with %d dnode done! " %dnodes_nums) - def checkdnodes(self,dnodenumber): count=0 while count < 100: @@ -305,6 +251,14 @@ class TDTestCase: tdSql.checkData(2,2,'offline') tdSql.checkData(2,3,'ready') + + def check5dnode(self): + tdSql.query("show dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + tdSql.checkData(0,4,'ready') + tdSql.checkData(4,4,'ready') + def five_dnode_three_mnode(self,dnodenumber): tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -346,6 +300,7 @@ class TDTestCase: threads.join() else: print("456") + threads.join() self.stop_thread(threads) assert 1 == 2 ,"some dnode started failed" return False @@ -357,16 +312,8 @@ class TDTestCase: self.check3mnode() - def getConnection(self, dnode): - host = dnode.cfgDict["fqdn"] - port = dnode.cfgDict["serverPort"] - config_dir = dnode.cfgDir - return taos.connect(host=host, port=int(port), config=config_dir) - - def run(self): # print(self.master_dnode.cfgDict) - self.buildcluster(5) self.five_dnode_three_mnode(5) def stop(self): diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop.py b/tests/system-test/6-cluster/5dnode3mnodeStop.py index 69b9c3d879..6c32da853a 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop.py @@ -12,7 +12,10 @@ from util.dnodes import TDDnodes from util.dnodes import TDDnode from util.cluster import * from test import tdDnodes +sys.path.append("./6-cluster") +from clusterCommonCreate import * +from clusterCommonCheck import * import time import socket import subprocess @@ -216,59 +219,84 @@ class TDTestCase: else: tdLog.exit("create cluster with %d dnode but check dnode not ready within 5s ! 
"%dnodeNumbers) - def five_dnode_three_mnode(self,dnodenumber): - self.check_dnodes_status(5) - tdSql.query("show mnodes;") - tdLog.debug(self.host) - tdSql.checkRows(1) + def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'replica': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + dnodenumbers=int(dnodenumbers) + mnodeNums=int(mnodeNums) + dbNumbers = int(dnodenumbers * restartNumber) + + tdLog.info("first check dnode and mnode") + tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) - tdSql.checkData(0,2,'leader') - tdSql.checkData(0,3,'ready') + tdSql.checkData(4,1,'%s:6430'%self.host) + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkMnodeStatus(1) # fisr add three mnodes; + tdLog.info("fisr add three mnodes and check mnode status") tdSql.execute("create mnode on dnode 2") + clusterComCheck.checkMnodeStatus(2) tdSql.execute("create mnode on dnode 3") + clusterComCheck.checkMnodeStatus(3) - # fisrt check statut ready - self.check3mnode() - - + # add some error operations and + tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") - tdSql.query("show dnodes;") - # tdLog.debug(tdSql.queryResult) - - tdLog.debug("stop and follower of mnode") + print(tdSql.queryResult) + clusterComCheck.checkDnodes(dnodenumbers) + # restart all taosd tdDnodes=cluster.dnodes - # tdLog.debug(tdDnodes[0]) tdDnodes[1].stoptaosd() - self.check3mnode2off() + clusterComCheck.check3mnodeoff(2,3) tdDnodes[1].starttaosd() - self.check3mnode() + clusterComCheck.checkMnodeStatus(3) tdDnodes[2].stoptaosd() - self.check3mnode3off() + clusterComCheck.check3mnodeoff(3,3) tdDnodes[2].starttaosd() - self.check3mnode() + clusterComCheck.checkMnodeStatus(3) tdDnodes[0].stoptaosd() - self.check3mnode1off() + clusterComCheck.check3mnodeoff(1,3) tdDnodes[0].starttaosd() - self.check3mnode() + clusterComCheck.checkMnodeStatus(3) - self.check3mnode() + tdLog.info("Take turns stopping all dnodes ") + # seperate vnode and mnode in different dnodes. + # create database and stable stopcount =0 while stopcount <= 2: - for i in range(dnodenumber): + tdLog.info("first restart loop") + for i in range(dnodenumbers): tdDnodes[i].stoptaosd() tdDnodes[i].starttaosd() - # self.check3mnode() stopcount+=1 - self.check3mnode() + clusterComCheck.checkDnodes(dnodenumbers) + clusterComCheck.checkMnodeStatus(3) def run(self): - self.five_dnode_three_mnode(5) + # print(self.master_dnode.cfgDict) + self.fiveDnodeThreeMnode(5,3,1) def stop(self): tdSql.close() diff --git a/tests/system-test/6-cluster/clusterCommonCheck.py b/tests/system-test/6-cluster/clusterCommonCheck.py new file mode 100644 index 0000000000..d030008963 --- /dev/null +++ b/tests/system-test/6-cluster/clusterCommonCheck.py @@ -0,0 +1,211 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from collections import defaultdict +import random +import string +import threading +import requests +import time +# import socketfrom + +import taos +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * + +# class actionType(Enum): +# CREATE_DATABASE = 0 +# CREATE_STABLE = 1 +# CREATE_CTABLE = 2 +# INSERT_DATA = 3 + +class ClusterComCheck: + def init(self, conn, logSql): + tdSql.init(conn.cursor()) + # tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def checkDnodes(self,dnodeNumbers): + count=0 + while count < 5: + tdSql.query("show dnodes") + # tdLog.debug(tdSql.queryResult) + status=0 + for i in range(dnodeNumbers): + if tdSql.queryResult[i][4] == "ready": + status+=1 + tdLog.info(status) + + if status == dnodeNumbers: + tdLog.success("it find cluster with %d dnodes and check that all cluster dnodes are ready within 5s! " %dnodeNumbers) + return True + count+=1 + time.sleep(1) + else: + tdLog.debug(tdSql.queryResult) + tdLog.exit("it find cluster with %d dnodes but check that there dnodes are not ready within 5s ! "%dnodeNumbers) + + def checkDbRows(self,dbNumbers): + dbNumbers=int(dbNumbers) + count=0 + while count < 5: + tdSql.query("show databases;") + if tdSql.checkRows(dbNumbers+2): + tdLog.success("we find %d databases and expect %d in clusters! " %(tdSql.queryRows,dbNumbers+2)) + return True + else: + continue + else : + tdLog.debug(tdSql.queryResult) + tdLog.exit("we find %d databases but expect %d in clusters! " %(tdSql.queryRows,dbNumbers)) + + def checkDb(self,dbNumbers,dbindex): + count=0 + while count < 5: + query_status=0 + for i in range(dbNumbers): + for j in range(dbNumbers): + tdSql.query("show databases;") + if "%s%d"%(dbindex,j) == tdSql.queryResult[i+2][0] : + if tdSql.queryResult[i+2][19] == "ready": + query_status+=1 + else: + continue + # print(query_status) + count+=1 + if query_status == dbNumbers: + tdLog.success("we find cluster with %d dnode and check all databases are ready within 5s! 
" %dbNumbers) + return True + else: + tdLog.debug(tdSql.queryResult) + tdLog.exit("database is not ready within 5s") + + def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,): + tdSql.execute("use %s"%dbname) + tdSql.query("show stables") + tdSql.checkRows(stableCount) + tdSql.query("show tables") + tdSql.checkRows(CtableCount) + for i in range(stableCount): + tdSql.query("select count(*) from %s%d"%(stbname,i)) + tdSql.checkData(0,0,rowsPerSTable) + return + + def checkMnodeStatus(self,mnodeNums): + self.mnodeNums=int(mnodeNums) + # self.leaderDnode=int(leaderDnode) + + count=0 + + while count < 10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(self.mnodeNums) : + tdLog.success("cluster has %d mnodes" %self.mnodeNums ) + + if self.mnodeNums == 1: + if tdSql.queryResult[0][2]== 'leader' and tdSql.queryResult[0][3]== 'ready' : + tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) + return True + count+=1 + elif self.mnodeNums == 3 : + if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : + if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' : + tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) + return True + elif tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' : + if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' : + tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) + return True + elif tdSql.queryResult[2][2]=='leader' and tdSql.queryResult[2][3]== 'ready' : + if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : + tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) + return True + count+=1 + elif self.mnodeNums == 2 : + if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : + tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) + return True + elif tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' : + if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' : + tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) + return True + count+=1 + else: + tdLog.debug(tdSql.queryResult) + tdLog.exit("cluster of %d mnodes is not ready in 10s " %self.mnodeNums) + + + + + def check3mnodeoff(self,offlineDnodeNo,mnodeNums=3): + count=0 + while count < 10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(mnodeNums) : + tdLog.success("cluster has %d mnodes" %self.mnodeNums ) + else: + tdLog.exit("mnode number is correct") + if offlineDnodeNo == 1: + if tdSql.queryResult[0][2]=='offline' : + if tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' : + if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' : + tdLog.success("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo) + return True + elif tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : + if tdSql.queryResult[2][2]=='leader' and tdSql.queryResult[2][3]== 'ready' : + tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo) + return True + count+=1 + elif offlineDnodeNo == 2: + if tdSql.queryResult[1][2]=='offline' : + if tdSql.queryResult[0][2]=='leader' and 
tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' : + tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo) + return True + elif tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[2][2]=='leader' and tdSql.queryResult[2][3]== 'ready' : + tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo) + return True + count+=1 + elif offlineDnodeNo == 3: + if tdSql.queryResult[2][2]=='offline' : + if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : + tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo) + return True + elif tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' : + if tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' : + tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo) + return True + count+=1 + else: + tdLog.debug(tdSql.queryResult) + tdLog.exit("stop mnodes on dnode %d failed in 10s ") + + + + + + + def close(self): + self.cursor.close() + +clusterComCheck = ClusterComCheck() diff --git a/tests/system-test/6-cluster/clusterCommonCreate.py b/tests/system-test/6-cluster/clusterCommonCreate.py new file mode 100644 index 0000000000..b3107d8537 --- /dev/null +++ b/tests/system-test/6-cluster/clusterCommonCreate.py @@ -0,0 +1,298 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from collections import defaultdict +import random +import string +import threading +import requests +import time +# import socketfrom + +import taos +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * + +# class actionType(Enum): +# CREATE_DATABASE = 0 +# CREATE_STABLE = 1 +# CREATE_CTABLE = 2 +# INSERT_DATA = 3 + +class ClusterComCreate: + def init(self, conn, logSql): + tdSql.init(conn.cursor()) + # tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create 
table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + buildPath = tdCom.getBuildPath() + cfgPath = tdCom.getClientCfgPath() + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + if (platform.system().lower() == 'windows'): + shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" + else: + shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'): + while 1: + tdSql.query("select * from %s.notifyinfo"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0): + break + else: + time.sleep(0.1) + return + + def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'): + while 1: + tdSql.query("select * from %s.notifyinfo"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 2 : + print(tdSql.getData(0, 1), tdSql.getData(1, 1)) + if tdSql.getData(1, 1) == 1: + break + time.sleep(0.1) + return + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s 
..." %(stbname, count ,dbname)) + for i in range(ctbNum): + tagValue = 'beijing' + if (i % 2 == 0): + tagValue = 'shanghai' + + sql += " %s%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=None): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs is None: + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + if (j % 2 == 0): + sql += "(%d, %d, %d, 'tmqrow_%d') "%(startTs + j, j, j, j) + else: + sql += "(%d, %d, %d, 'tmqrow_%d') "%(startTs + j, j, -j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s%d values " %(ctbPrefix,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + ctbDict = {} + for i in range(ctbNum): + ctbDict[i] = 0 + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfCtb = 0 + while rowsOfCtb < rowsPerTbl: + for i in range(ctbNum): + sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) + for k in range(batchNum): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i]) + ctbDict[i] += 1 + if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl): + tsql.execute(sql) + sql = "insert into " + break + rowsOfCtb = ctbDict[0] + + tdLog.debug("insert data ............ 
[OK]") + return + + def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data wiht auto create child table ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def syncCreateDbStbCtbInsertData(self, tsql, paraDict): + tdCom.create_database(tsql, paraDict["dbName"],paraDict["dropFlag"]) + tdCom.create_stable(tsql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdCom.create_ctable(tsql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + if "event" in paraDict and type(paraDict['event']) == type(threading.Event()): + paraDict["event"].set() + + ctbPrefix = paraDict['ctbPrefix'] + ctbNum = paraDict["ctbNum"] + for i in range(ctbNum): + tbName = '%s%s'%(ctbPrefix,i) + tdCom.insert_rows(tsql,dbname=paraDict["dbName"],tbname=tbName,start_ts_value=paraDict['startTs'],count=paraDict['rowsPerTbl']) + return + + def threadFunction(self, **paraDict): + # create new connector for new tdSql instance in my thread + newTdSql = tdCom.newTdSql() + self.syncCreateDbStbCtbInsertData(self, newTdSql, paraDict) + return + + def asyncCreateDbStbCtbInsertData(self, paraDict): + pThread = threading.Thread(target=self.threadFunction, kwargs=paraDict) + pThread.start() + return pThread + + + def close(self): + self.cursor.close() + +clusterComCreate = ClusterComCreate() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 73e6716cad..825b6ea26b 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -113,6 +113,10 @@ python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3 +# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopCreateDb.py -N 5 -M 3 +# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3 + # python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py From bdd4134fc1d2b368316e8fe6c6063d671c3d2ac5 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sat, 25 Jun 2022 18:11:11 +0800 Subject: [PATCH 24/29] Revert "fix(sma): drop stream when drop sma" --- source/dnode/mnode/impl/inc/mndStream.h | 5 ----- source/dnode/mnode/impl/src/mndSma.c | 18 ------------------ source/dnode/mnode/impl/src/mndStream.c | 2 +- source/dnode/vnode/src/tq/tqSink.c | 
2 +- source/libs/executor/inc/executorimpl.h | 1 + source/libs/executor/src/executor.c | 11 +++++++---- source/libs/executor/src/scanoperator.c | 8 ++++++++ source/libs/wal/src/walRead.c | 2 +- 8 files changed, 19 insertions(+), 30 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index 5e9089cec9..69385c3a46 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -34,11 +34,6 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw); int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); -// for sma -// TODO refactor -int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); -int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); - #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index d880fb28d4..ef24cd0ba4 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -857,24 +857,6 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); mndTransSetDbName(pTrans, pDb->name, NULL); - SStreamObj *pStream = mndAcquireStream(pMnode, pSma->name); - if (pStream == NULL || pStream->smaId != pSma->uid) { - sdbRelease(pMnode->pSdb, pStream); - goto _OVER; - } else { - if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) { - mError("stream:%s, failed to drop task since %s", pStream->name, terrstr()); - sdbRelease(pMnode->pSdb, pStream); - goto _OVER; - } - - // drop stream - if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) { - sdbRelease(pMnode->pSdb, pStream); - goto _OVER; - } - } - if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER; if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 444a8864ef..b78756d8b8 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -490,7 +490,7 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) { return 0; } -int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { +static int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { int32_t lv = taosArrayGetSize(pStream->tasks); for (int32_t i = 0; i < lv; i++) { SArray *pTasks = taosArrayGetP(pStream->tasks, i); diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 0bb9918488..9abc2f639b 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -43,7 +43,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo taosArrayPush(tagArray, &tagVal); tTagNew(tagArray, 1, false, &pTag); if (pTag == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; + taosArrayDestroy(schemaReqs); taosArrayDestroy(tagArray); return NULL; } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index eb2c6f5102..36f81e86ff 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -348,6 +348,7 @@ typedef struct SStreamBlockScanInfo { SInterval interval; // if the upstream is an interval operator, the interval 
info is also kept here. SArray* childIds; SessionWindowSupporter sessionSup; + bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. int32_t scanWinIndex; // for state operator int32_t pullDataResIndex; SSDataBlock* pPullDataRes; // pull data SSDataBlock diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 3fd491885f..6de364e63a 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -19,7 +19,8 @@ #include "tdatablock.h" #include "vnode.h" -static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) { +static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid, + char* id) { ASSERT(pOperator != NULL); if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { if (pOperator->numOfDownstream == 0) { @@ -32,11 +33,12 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu return TSDB_CODE_QRY_APP_ERROR; } pOperator->status = OP_NOT_OPENED; - return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, id); + return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, assignUid, id); } else { pOperator->status = OP_NOT_OPENED; SStreamBlockScanInfo* pInfo = pOperator->info; + pInfo->assignBlockUid = assignUid; // TODO: if a block was set but not consumed, // prevent setting a different type of block @@ -74,7 +76,7 @@ int32_t qStreamScanSnapshot(qTaskInfo_t tinfo) { return TSDB_CODE_QRY_APP_ERROR; } SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_DATA_TYPE_FROM_SNAPSHOT, NULL); + return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_DATA_TYPE_FROM_SNAPSHOT, 0, NULL); } int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) { @@ -92,7 +94,8 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo)); + int32_t code = + doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, assignUid, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo)); } else { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index aadf7b3b7e..15803a4b73 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1051,6 +1051,14 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pInfo->pRes->info.type = STREAM_NORMAL; pInfo->pRes->info.capacity = numOfRows; + // for generating rollup SMA result, each time is an independent time serie. 
+ // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this + if (pInfo->assignBlockUid) { + pInfo->pRes->info.groupId = uid; + } else { + pInfo->pRes->info.groupId = groupId; + } + uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t)); if (groupIdPre) { pInfo->pRes->info.groupId = *groupIdPre; diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 20fa5f1f2b..682afbb785 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -103,7 +103,6 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) { wError("cannot open file %s, since %s", fnameStr, terrstr()); return -1; } - pRead->pReadLogTFile = pLogTFile; walBuildIdxName(pRead->pWal, fileFirstVer, fnameStr); TdFilePtr pIdxTFile = taosOpenFile(fnameStr, TD_FILE_READ); @@ -113,6 +112,7 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) { return -1; } + pRead->pReadLogTFile = pLogTFile; pRead->pReadIdxTFile = pIdxTFile; return 0; } From 996f995644fbf8bc2566b99a3c1cd8d979acee4e Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sat, 25 Jun 2022 18:14:42 +0800 Subject: [PATCH 25/29] fix(sma): drop stream when drop sma --- source/dnode/mnode/impl/inc/mndStream.h | 4 ++++ source/dnode/mnode/impl/src/mndSma.c | 19 ++++++++++++++++++- source/dnode/mnode/impl/src/mndStream.c | 4 ++-- source/dnode/vnode/src/tq/tqSink.c | 2 +- source/libs/wal/src/walRead.c | 3 ++- 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index 69385c3a46..0901e77287 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -33,6 +33,10 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw); int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); +// for sma +// TODO refactor +int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); +int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index ef24cd0ba4..10eeaba982 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -15,11 +15,11 @@ #define _DEFAULT_SOURCE #include "mndSma.h" -#include "mndPrivilege.h" #include "mndDb.h" #include "mndDnode.h" #include "mndInfoSchema.h" #include "mndMnode.h" +#include "mndPrivilege.h" #include "mndScheduler.h" #include "mndShow.h" #include "mndStb.h" @@ -857,6 +857,23 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); mndTransSetDbName(pTrans, pDb->name, NULL); + SStreamObj *pStream = mndAcquireStream(pMnode, pSma->name); + if (pStream == NULL || pStream->smaId != pSma->uid) { + sdbRelease(pMnode->pSdb, pStream); + goto _OVER; + } else { + if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) { + mError("stream:%s, failed to drop task since %s", pStream->name, terrstr()); + sdbRelease(pMnode->pSdb, pStream); + goto _OVER; + } + + // drop stream + if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) { + sdbRelease(pMnode->pSdb, pStream); + goto _OVER; + } + } if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER; if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) 
goto _OVER; if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index b78756d8b8..cabbac14f1 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -14,10 +14,10 @@ */ #include "mndStream.h" -#include "mndPrivilege.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" +#include "mndPrivilege.h" #include "mndScheduler.h" #include "mndShow.h" #include "mndStb.h" @@ -490,7 +490,7 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) { return 0; } -static int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { +int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) { int32_t lv = taosArrayGetSize(pStream->tasks); for (int32_t i = 0; i < lv; i++) { SArray *pTasks = taosArrayGetP(pStream->tasks, i); diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 9abc2f639b..0bb9918488 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -43,7 +43,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo taosArrayPush(tagArray, &tagVal); tTagNew(tagArray, 1, false, &pTag); if (pTag == NULL) { - taosArrayDestroy(schemaReqs); + terrno = TSDB_CODE_OUT_OF_MEMORY; taosArrayDestroy(tagArray); return NULL; } diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 682afbb785..2de0fea9ac 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -104,6 +104,8 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) { return -1; } + pRead->pReadLogTFile = pLogTFile; + walBuildIdxName(pRead->pWal, fileFirstVer, fnameStr); TdFilePtr pIdxTFile = taosOpenFile(fnameStr, TD_FILE_READ); if (pIdxTFile == NULL) { @@ -112,7 +114,6 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) { return -1; } - pRead->pReadLogTFile = pLogTFile; pRead->pReadIdxTFile = pIdxTFile; return 0; } From 565ee08895f427c4701f8f6caedc86d904e979ef Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Sat, 25 Jun 2022 18:32:56 +0800 Subject: [PATCH 26/29] test: add tmq test case --- tests/system-test/99-TDcase/TD-16821.py | 186 ++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 tests/system-test/99-TDcase/TD-16821.py diff --git a/tests/system-test/99-TDcase/TD-16821.py b/tests/system-test/99-TDcase/TD-16821.py new file mode 100644 index 0000000000..ef74515792 --- /dev/null +++ b/tests/system-test/99-TDcase/TD-16821.py @@ -0,0 +1,186 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def checkFileContent(self, consumerId, queryString): + buildPath = tdCom.getBuildPath() + cfgPath = tdCom.getClientCfgPath() + dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) + cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) + tdLog.info(cmdStr) + os.system(cmdStr) + + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) + tdLog.info("rows 
file: %s, %s"%(consumeRowsFile, dstFile)) + + consumeFile = open(consumeRowsFile, mode='r') + queryFile = open(dstFile, mode='r') + + # skip first line for it is schema + queryFile.readline() + + while True: + dst = queryFile.readline() + src = consumeFile.readline() + + if dst: + if dst != src: + tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) + else: + break + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1} + + topicNameList = ['topic1', 'topic2', 'topic3'] + expectRowsList = [] + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + tdLog.info("insert data") + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + tdLog.info("create topics from stb with filter") + # queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + # queryString = "select ts, c1, c2 from %s.%s" %(paraDict['dbName'], paraDict['stbName']) + queryString = "select * from %s.%s" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as stable %s.%s" %(topicNameList[0], paraDict["dbName"],paraDict["stbName"]) + # sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + # queryString = 'select * from %s.%s'%(paraDict["dbName"],paraDict["stbName"]) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("0 tmq consume rows error!") + + self.checkFileContent(consumerId, queryString) + + # reinit consume info, and start tmq_sim, then 
check consume result + tmqCom.initConsumerTable() + + queryString = "select ts, log(c1), cos(c1) from %s.%s where c1 > 3169" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[1], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + consumerId = 1 + topicList = topicNameList[1] + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + if expectRowsList[1] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) + tdLog.exit("1 tmq consume rows error!") + + self.checkFileContent(consumerId, queryString) + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + + queryString = "select ts, log(c1), atan(c1) from %s.%s where ts >= %d" %(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+6137) + sqlString = "create topic %s as %s" %(topicNameList[2], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + consumerId = 2 + topicList = topicNameList[2] + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + # if expectRowsList[2] != resultList[0]: + # tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) + # tdLog.exit("2 tmq consume rows error!") + + # self.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def run(self): + tdSql.prepare() + self.tmqCase1() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From e8563be0501d22e00d39aca58fda4168a1da2679 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Sat, 25 Jun 2022 18:44:46 +0800 Subject: [PATCH 27/29] Update 5dnode3mnodeStop.py --- tests/system-test/6-cluster/5dnode3mnodeStop.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop.py b/tests/system-test/6-cluster/5dnode3mnodeStop.py index c1d5a99af2..5311d29846 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop.py @@ -253,11 +253,7 @@ class TDTestCase: # fisr add three mnodes; tdLog.info("fisr add three mnodes and check mnode status") tdSql.execute("create mnode on dnode 2") -<<<<<<< HEAD clusterComCheck.checkMnodeStatus(2) -======= - time.sleep(10) ->>>>>>> e571567ec2ae0c7668ccf1b8aaaf949fc84bb44c tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) @@ -307,4 +303,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) From 1975c981dbf0675f05a754e6f140c6e3143b0852 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Sat, 25 Jun 2022 18:47:18 +0800 Subject: [PATCH 28/29] test: del test tkinter --- tests/system-test/test.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 35f8ea953c..76b83da348 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -22,9 +22,6 @@ import json import platform import socket import threading -from distutils.log import warn as printf -from tkinter import N -from fabric2 import Connection sys.path.append("../pytest") from util.log import * from util.dnodes import * From 0c1a51bad63e88cd2f6f84dd08ce0295dd8fc88f Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Sat, 25 Jun 2022 20:02:57 +0800 Subject: [PATCH 29/29] enh(stream): refine tqRetrieveDataBlock api --- source/client/src/clientImpl.c | 26 ++++---- source/client/src/clientMain.c | 31 ++++------ source/common/src/tdatablock.c | 9 +-- source/dnode/vnode/inc/vnode.h | 27 ++++----- source/dnode/vnode/src/tq/tqExec.c | 4 +- source/dnode/vnode/src/tq/tqRead.c | 16 ++--- source/libs/executor/src/scanoperator.c | 81 +++++++++++++------------ 7 files changed, 93 insertions(+), 101 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index ac9daa5119..489966b636 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -617,12 +617,12 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod .requestId = pRequest->requestId, .requestObjRefId = pRequest->self}; SSchedulerReq req = {.pConn = &conn, - .pNodeList = pNodeList, - .pDag = pDag, - .sql = pRequest->sqlstr, - .startTs = pRequest->metric.start, - .fp = schdExecCallback, - .cbParam = &res}; + .pNodeList = pNodeList, + .pDag = pDag, + .sql = pRequest->sqlstr, + .startTs = pRequest->metric.start, + .fp = schdExecCallback, + .cbParam = &res}; int32_t code = schedulerAsyncExecJob(&req, &pRequest->body.queryJob); @@ -669,13 +669,13 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList 
.requestId = pRequest->requestId, .requestObjRefId = pRequest->self}; SSchedulerReq req = {.pConn = &conn, - .pNodeList = pNodeList, - .pDag = pDag, - .sql = pRequest->sqlstr, - .startTs = pRequest->metric.start, - .fp = NULL, - .cbParam = NULL, - .reqKilled = &pRequest->killed}; + .pNodeList = pNodeList, + .pDag = pDag, + .sql = pRequest->sqlstr, + .startTs = pRequest->metric.start, + .fp = NULL, + .cbParam = NULL, + .reqKilled = &pRequest->killed}; int32_t code = schedulerExecJob(&req, &pRequest->body.queryJob, &res); pRequest->body.resInfo.execRes = res.res; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index f5dfe2de36..bbd477fa3b 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -199,10 +199,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { return pResInfo->userFields; } - -TAOS_RES *taos_query(TAOS *taos, const char *sql) { - return taosQueryImpl(taos, sql, false); -} +TAOS_RES *taos_query(TAOS *taos, const char *sql) { return taosQueryImpl(taos, sql, false); } TAOS_ROW taos_fetch_row(TAOS_RES *res) { if (res == NULL) { @@ -593,11 +590,11 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) { return pResInfo->pCol[columnIndex].offset; } -int taos_validate_sql(TAOS *taos, const char *sql) { - TAOS_RES* pObj = taosQueryImpl(taos, sql, true); +int taos_validate_sql(TAOS *taos, const char *sql) { + TAOS_RES *pObj = taosQueryImpl(taos, sql, true); int code = taos_errno(pObj); - + taos_free_result(pObj); return code; } @@ -884,10 +881,10 @@ void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) { int taos_load_table_info(TAOS *taos, const char *tableNameList) { const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list - int32_t code = 0; - SRequestObj *pRequest = NULL; - SCatalogReq catalogReq = {0}; - + int32_t code = 0; + SRequestObj *pRequest = NULL; + SCatalogReq catalogReq = {0}; + if (NULL == tableNameList) { return TSDB_CODE_SUCCESS; } @@ -911,26 +908,25 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { goto _return; } - SCatalog* pCtg = NULL; + SCatalog *pCtg = NULL; code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCtg); if (code != TSDB_CODE_SUCCESS) { goto _return; } - char* sql = "taos_load_table_info"; + char *sql = "taos_load_table_info"; code = buildRequest(pTscObj, sql, strlen(sql), &pRequest); if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } - + SSyncQueryParam param = {0}; tsem_init(¶m.sem, 0, 0); param.pRequest = pRequest; - SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self}; + SRequestConnInfo conn = { + .pTrans = pTscObj->pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self}; conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); @@ -951,7 +947,6 @@ _return: return code; } - TAOS_STMT *taos_stmt_init(TAOS *taos) { STscObj *pObj = acquireTscObj(*(int64_t *)taos); if (NULL == pObj) { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index cc995c4d64..9f89d72172 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1164,7 +1164,7 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows) int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows) { int32_t code = 0; - //ASSERT(numOfRows > 0); + // ASSERT(numOfRows > 0); if (numOfRows == 0) { return TSDB_CODE_SUCCESS; @@ -1657,12 +1657,13 @@ void 
blockDebugShowData(const SArray* dataBlocks, const char* flag) { char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) { int32_t size = 2048; *pDataBuf = taosMemoryCalloc(size, 1); - char* dumpBuf = *pDataBuf; - char pBuf[128] = {0}; + char* dumpBuf = *pDataBuf; + char pBuf[128] = {0}; int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; int32_t len = 0; - len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId); + len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|\n", flag, + (int32_t)pDataBlock->info.type, pDataBlock->info.childId); for (int32_t j = 0; j < rows; j++) { len += snprintf(dumpBuf + len, size - len, "%s |", flag); for (int32_t k = 0; k < colNum; k++) { diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index b97d4605e7..858376519d 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -116,18 +116,18 @@ typedef void *tsdbReaderT; #define BLOCK_LOAD_TABLE_SEQ_ORDER 2 #define BLOCK_LOAD_TABLE_RR_ORDER 3 -int32_t tsdbSetTableList(tsdbReaderT reader, SArray* tableList); -tsdbReaderT tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *tableList, uint64_t qId, - uint64_t taskId); -tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *groupList, uint64_t qId, - void *pMemRef); -int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); -bool isTsdbCacheLastRow(tsdbReaderT *pReader); -int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list); -int32_t tsdbGetCtbIdList(SMeta *pMeta, int64_t suid, SArray *list); -void *tsdbGetIdx(SMeta *pMeta); -void *tsdbGetIvtIdx(SMeta *pMeta); -int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); +int32_t tsdbSetTableList(tsdbReaderT reader, SArray *tableList); +tsdbReaderT tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *tableList, uint64_t qId, + uint64_t taskId); +tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *groupList, uint64_t qId, + void *pMemRef); +int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); +bool isTsdbCacheLastRow(tsdbReaderT *pReader); +int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list); +int32_t tsdbGetCtbIdList(SMeta *pMeta, int64_t suid, SArray *list); +void *tsdbGetIdx(SMeta *pMeta); +void *tsdbGetIvtIdx(SMeta *pMeta); +int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); @@ -150,8 +150,7 @@ int32_t tqReadHandleRemoveTbUidList(STqReadHandle *pHandle, const SArray *tbUidL int32_t tqReadHandleSetMsg(STqReadHandle *pHandle, SSubmitReq *pMsg, int64_t ver); bool tqNextDataBlock(STqReadHandle *pHandle); bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids); -int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle *pHandle, uint64_t *pGroupId, uint64_t *pUid, - int32_t *pNumOfRows); +int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReadHandle *pHandle); // sma int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days); diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index be8fef1249..afeeeab500 100644 --- 
a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -112,7 +112,7 @@ int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkR tqReadHandleSetMsg(pReader, pReq, 0); while (tqNextDataBlock(pReader)) { SSDataBlock block = {0}; - if (tqRetrieveDataBlock(&block, pReader, &block.info.groupId, &block.info.uid, &block.info.rows) < 0) { + if (tqRetrieveDataBlock(&block, pReader) < 0) { if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; ASSERT(0); } @@ -129,7 +129,7 @@ int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkR tqReadHandleSetMsg(pReader, pReq, 0); while (tqNextDataBlockFilterOut(pReader, pExec->execDb.pFilterOutTbUid)) { SSDataBlock block = {0}; - if (tqRetrieveDataBlock(&block, pReader, &block.info.groupId, &block.info.uid, &block.info.rows) < 0) { + if (tqRetrieveDataBlock(&block, pReader) < 0) { if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; ASSERT(0); } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 96f4eb3fd9..cbee639911 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -146,10 +146,7 @@ bool tqNextDataBlockFilterOut(STqReadHandle* pHandle, SHashObj* filterOutUids) { return false; } -int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_t* pGroupId, uint64_t* pUid, - int32_t* pNumOfRows) { - *pUid = 0; - +int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle) { // TODO: cache multiple schema int32_t sversion = htonl(pHandle->pBlock->sversion); if (pHandle->cachedSchemaSuid == 0 || pHandle->cachedSchemaVer != sversion || @@ -180,7 +177,6 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_ STSchema* pTschema = pHandle->pSchema; SSchemaWrapper* pSchemaWrapper = pHandle->pSchemaWrapper; - *pNumOfRows = pHandle->msgIter.numOfRows; int32_t colNumNeed = taosArrayGetSize(pHandle->pColIdList); if (colNumNeed == 0) { @@ -221,22 +217,22 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_ } } - if (blockDataEnsureCapacity(pBlock, *pNumOfRows) < 0) { + if (blockDataEnsureCapacity(pBlock, pHandle->msgIter.numOfRows) < 0) { goto FAIL; } int32_t colActual = blockDataGetNumOfCols(pBlock); - // TODO in stream shuffle case, fetch groupId - *pGroupId = 0; - STSRowIter iter = {0}; tdSTSRowIterInit(&iter, pTschema); STSRow* row; int32_t curRow = 0; tInitSubmitBlkIter(&pHandle->msgIter, pHandle->pBlock, &pHandle->blkIter); - *pUid = pHandle->msgIter.uid; // set the uid of table for submit block + + pBlock->info.groupId = 0; + pBlock->info.uid = pHandle->msgIter.uid; // set the uid of table for submit block + pBlock->info.rows = pHandle->msgIter.numOfRows; while ((row = tGetSubmitBlkNext(&pHandle->blkIter)) != NULL) { tdSTSRowIterReset(&iter, row); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 15803a4b73..ac57d3a9da 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -507,20 +507,21 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { STableScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - if(pInfo->currentGroupId == -1){ + if (pInfo->currentGroupId == -1) { pInfo->currentGroupId++; if (pInfo->currentGroupId >= taosArrayGetSize(pTaskInfo->tableqinfoList.pGroupList)) { setTaskStatus(pTaskInfo, TASK_COMPLETED); return NULL; } - SArray *tableList = 
taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); + SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); tsdbCleanupReadHandle(pInfo->dataReader); - tsdbReaderT* pReader = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, pInfo->queryId, pInfo->taskId); + tsdbReaderT* pReader = + tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, pInfo->queryId, pInfo->taskId); pInfo->dataReader = pReader; } SSDataBlock* result = doTableScanGroup(pOperator); - if(result){ + if (result) { return result; } @@ -530,7 +531,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { return NULL; } - SArray *tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); + SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId); tsdbSetTableList(pInfo->dataReader, tableList); tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0); @@ -538,7 +539,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { pInfo->scanTimes = 0; result = doTableScanGroup(pOperator); - if(result){ + if (result) { return result; } @@ -777,9 +778,9 @@ static bool isStateWindow(SStreamBlockScanInfo* pInfo) { } static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) { - STimeWindow win = { - .skey = INT64_MIN, - .ekey = INT64_MAX, + STimeWindow win = { + .skey = INT64_MIN, + .ekey = INT64_MAX, }; bool needRead = false; if (!isStateWindow(pInfo) && (*pRowIndex) < pSDB->info.rows) { @@ -794,13 +795,12 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int3 SResultWindowInfo* pCurWin = getSessionTimeWindow(pAggSup, tsCols[(*pRowIndex)], INT64_MIN, pSDB->info.groupId, gap, &winIndex); win = pCurWin->win; - (*pRowIndex) += - updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, (*pRowIndex), gap, NULL); + (*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, (*pRowIndex), gap, NULL); } else { - win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[(*pRowIndex)], &pInfo->interval, - pInfo->interval.precision, NULL); - (*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, (*pRowIndex), win.ekey, - binarySearchForKey, NULL, TSDB_ORDER_ASC); + win = + getActiveTimeWindow(NULL, &dumyInfo, tsCols[(*pRowIndex)], &pInfo->interval, pInfo->interval.precision, NULL); + (*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, (*pRowIndex), win.ekey, binarySearchForKey, NULL, + TSDB_ORDER_ASC); } needRead = true; } else if (isStateWindow(pInfo)) { @@ -821,7 +821,7 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int3 STableScanInfo* pTableScanInfo = pInfo->pSnapshotReadOp->info; pTableScanInfo->cond.twindows[0] = win; pTableScanInfo->curTWinIdx = 0; -// tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + // tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); // if (!pTableScanInfo->dataReader) { // return false; // } @@ -1033,12 +1033,13 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { while (tqNextDataBlock(pInfo->streamBlockReader)) { SSDataBlock block = {0}; - uint64_t groupId = 0; - uint64_t uid = 0; - int32_t numOfRows = 0; // todo refactor - int32_t code = tqRetrieveDataBlock(&block, pInfo->streamBlockReader, &groupId, &uid, &numOfRows); + int32_t code = tqRetrieveDataBlock(&block, pInfo->streamBlockReader); + + uint64_t groupId = block.info.groupId; + 
uint64_t uid = block.info.uid; + int32_t numOfRows = block.info.rows; if (code != TSDB_CODE_SUCCESS || numOfRows == 0) { pTaskInfo->code = code; @@ -1154,9 +1155,9 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { return tableIdList; } -SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, - STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, - STimeWindowAggSupp* pTwSup, uint64_t queryId, uint64_t taskId) { +SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, + SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup, uint64_t queryId, + uint64_t taskId) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); @@ -1743,14 +1744,14 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan SSDataBlock* pResBlock = createResDataBlock(pDescNode); int32_t num = 0; - SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID); + SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID); - pInfo->accountId = pScanPhyNode->accountId; - pInfo->pUser = taosMemoryStrDup((void*) pUser); + pInfo->accountId = pScanPhyNode->accountId; + pInfo->pUser = taosMemoryStrDup((void*)pUser); pInfo->showRewrite = pScanPhyNode->showRewrite; - pInfo->pRes = pResBlock; - pInfo->pCondition = pScanNode->node.pConditions; - pInfo->scanCols = colList; + pInfo->pRes = pResBlock; + pInfo->pCondition = pScanNode->node.pConditions; + pInfo->scanCols = colList; initResultSizeInfo(pOperator, 4096); @@ -1766,13 +1767,13 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan pInfo->readHandle = *(SReadHandle*)readHandle; } - pOperator->name = "SysTableScanOperator"; + pOperator->name = "SysTableScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; pOperator->exprSupp.numOfExprs = taosArrayGetSize(pResBlock->pDataBlock); - pOperator->pTaskInfo = pTaskInfo; + pOperator->pTaskInfo = pTaskInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL, NULL, NULL); @@ -1959,11 +1960,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi goto _error; } - pInfo->pTableList = pTableListInfo; - pInfo->pColMatchInfo = colList; - pInfo->pRes = createResDataBlock(pDescNode); - pInfo->readHandle = *pReadHandle; - pInfo->curPos = 0; + pInfo->pTableList = pTableListInfo; + pInfo->pColMatchInfo = colList; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->readHandle = *pReadHandle; + pInfo->curPos = 0; pOperator->name = "TagScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN; @@ -2034,7 +2035,7 @@ typedef struct STableMergeScanInfo { int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan int32_t dataBlockLoadFlag; SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time - // window to check if current data block needs to be loaded. + // window to check if current data block needs to be loaded. SSampleExecInfo sample; // sample execution info } STableMergeScanInfo;
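
Note on the tqRetrieveDataBlock refactor in this last patch: the reader now publishes the block metadata (group id, table uid, row count) on SSDataBlock.info instead of returning it through out-parameters, so call sites shrink to a single call plus field reads. A minimal caller sketch, assuming only the post-patch signature int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReadHandle *pHandle) and the tqReadHandleSetMsg/tqNextDataBlock helpers already shown in the diff (not a drop-in replacement for the real tqExec/executor code):

    // Sketch: drain one SSubmitReq through the read handle and consume the
    // decoded blocks; metadata is read from block.info per the new API.
    tqReadHandleSetMsg(pReader, pSubmitReq, 0);
    while (tqNextDataBlock(pReader)) {
      SSDataBlock block = {0};
      if (tqRetrieveDataBlock(&block, pReader) < 0) {
        if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;  // table schema gone, skip this block
        break;  // unexpected decode error
      }
      uint64_t groupId = block.info.groupId;  // was an out-parameter before this patch
      uint64_t uid     = block.info.uid;      // uid of the table that produced the submit block
      int32_t  rows    = block.info.rows;     // number of rows decoded into the block
      // ... hand the block to the sink (tq response / stream scan operator) ...
    }

Carrying the ids and row count on the block itself also matches how the stream scan operator already forwards blocks downstream, which is presumably why the extra out-parameters could be dropped without further changes to the consumers above it.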