From 54a546931c7ba4f86d62df05abfcd270be52e04b Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 24 Feb 2023 19:22:08 +0800 Subject: [PATCH 01/27] fix:optimize version logic in tmq and remove useless code --- include/common/tmsg.h | 2 +- source/dnode/mnode/impl/inc/mndDef.h | 2 +- source/dnode/vnode/src/inc/tq.h | 2 +- source/dnode/vnode/src/tq/tq.c | 5 --- source/dnode/vnode/src/tq/tqRead.c | 14 ++------ source/libs/executor/src/scanoperator.c | 12 ------- source/libs/wal/src/walRead.c | 44 +++++++++++++------------ 7 files changed, 28 insertions(+), 53 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 8dcadf47b6..1cb5695bc6 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2605,7 +2605,7 @@ typedef struct { char subKey[TSDB_SUBSCRIBE_KEY_LEN]; int8_t subType; int8_t withMeta; - char* qmsg; + char* qmsg; //SubPlanToString int64_t suid; } SMqRebVgReq; diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 6f6f801c39..4c42e6b2a1 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -528,7 +528,7 @@ void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer typedef struct { int32_t vgId; - char* qmsg; + char* qmsg; //SubPlanToString SEpSet epSet; } SMqVgEp; diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 828341ddd8..f76347dee2 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -67,7 +67,7 @@ typedef struct { // tqExec typedef struct { - char* qmsg; + char* qmsg; // SubPlanToString } STqExecCol; typedef struct { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 276de64bbd..00d86983f2 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -568,7 +568,6 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { taosWLockLatch(&pTq->pushLock); tqScanData(pTq, pHandle, &dataRsp, &fetchOffsetNew); -#if 1 if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG && dataRsp.reqOffset.version == dataRsp.rspOffset.version) { STqPushEntry* pPushEntry = taosMemoryCalloc(1, sizeof(STqPushEntry)); @@ -588,7 +587,6 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { return 0; } } -#endif taosWUnLockLatch(&pTq->pushLock); if (tqSendDataRsp(pTq, pMsg, &req, &dataRsp) < 0) { @@ -605,10 +603,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { } // for taosx - ASSERT(pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN); - SMqMetaRsp metaRsp = {0}; - STaosxRsp taosxRsp = {0}; tqInitTaosxRsp(&taosxRsp, &req); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 7cad739ffa..1a54614f43 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -309,7 +309,8 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) { if (!fromProcessedMsg) { if (walNextValidMsg(pReader->pWalReader) < 0) { pReader->ver = - pReader->pWalReader->curVersion - (pReader->pWalReader->curInvalid | pReader->pWalReader->curStopped); + pReader->pWalReader->curVersion - pReader->pWalReader->curStopped; +// pReader->pWalReader->curVersion - (pReader->pWalReader->curInvalid | pReader->pWalReader->curStopped); ret->offset.type = TMQ_OFFSET__LOG; ret->offset.version = pReader->ver; ret->fetchType = FETCH_TYPE__NONE; @@ -318,18 +319,7 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) { return -1; } void* body = pReader->pWalReader->pHead->head.body; -#if 0 - if 
(pReader->pWalReader->pHead->head.msgType != TDMT_VND_SUBMIT) { - // TODO do filter - ret->fetchType = FETCH_TYPE__META; - ret->meta = pReader->pWalReader->pHead->head.body; - return 0; - } else { -#endif tqReaderSetDataMsg(pReader, body, pReader->pWalReader->pHead->head.version); -#if 0 - } -#endif } while (tqNextDataBlock(pReader)) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 99e630f45e..99ea62fe3e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1647,8 +1647,6 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { } else if (ret.fetchType == FETCH_TYPE__NONE || (ret.fetchType == FETCH_TYPE__SEP && pOperator->status == OP_EXEC_RECV)) { pTaskInfo->streamInfo.lastStatus = ret.offset; - ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version); - ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion); char formatBuf[80]; tFormatOffset(formatBuf, 80, &ret.offset); qDebug("queue scan log return null, offset %s", formatBuf); @@ -1656,16 +1654,6 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { return NULL; } } -#if 0 - } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { - SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); - if (pResult && pResult->info.rows > 0) { - qDebug("stream scan tsdb return %d rows", pResult->info.rows); - return pResult; - } - qDebug("stream scan tsdb return null"); - return NULL; -#endif } else { qError("unexpected streamInfo prepare type: %d", pTaskInfo->streamInfo.prepareStatus.type); return NULL; diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 526dba0bb5..a0a0a96b3a 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -201,6 +201,7 @@ int32_t walReadSeekVerImpl(SWalReader *pReader, int64_t ver) { pReader->curVersion, pReader->curInvalid, ver); pReader->curVersion = ver; + pReader->curInvalid = 0; return 0; } @@ -211,8 +212,8 @@ int32_t walReadSeekVer(SWalReader *pReader, int64_t ver) { return 0; } - pReader->curInvalid = 1; - pReader->curVersion = ver; +// pReader->curInvalid = 1; +// pReader->curVersion = ver; if (ver > pWal->vers.lastVer || ver < pWal->vers.firstVer) { wDebug("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pReader->pWal->cfg.vgId, @@ -220,8 +221,8 @@ int32_t walReadSeekVer(SWalReader *pReader, int64_t ver) { terrno = TSDB_CODE_WAL_LOG_NOT_EXIST; return -1; } - if (ver < pWal->vers.snapshotVer) { - } +// if (ver < pWal->vers.snapshotVer) { +// } if (walReadSeekVerImpl(pReader, ver) < 0) { return -1; @@ -240,8 +241,8 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { if (pRead->curInvalid || pRead->curVersion != fetchVer) { if (walReadSeekVer(pRead, fetchVer) < 0) { - pRead->curVersion = fetchVer; - pRead->curInvalid = 1; +// pRead->curVersion = fetchVer; +// pRead->curInvalid = 1; return -1; } seeked = true; @@ -260,11 +261,11 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } - pRead->curInvalid = 1; +// pRead->curInvalid = 1; return -1; } } - pRead->curInvalid = 0; +// pRead->curInvalid = 0; return 0; } @@ -295,13 +296,13 @@ static int32_t walFetchBodyNew(SWalReader *pRead) { pRead->pWal->cfg.vgId, pRead->pHead->head.version, ver); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } - pRead->curInvalid = 1; +// 
pRead->curInvalid = 1; return -1; } if (walValidBodyCksum(pRead->pHead) != 0) { wError("vgId:%d, wal fetch body error:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver); - pRead->curInvalid = 1; +// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } @@ -320,7 +321,7 @@ static int32_t walSkipFetchBodyNew(SWalReader *pRead) { code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); - pRead->curInvalid = 1; +// pRead->curInvalid = 1; return -1; } @@ -348,8 +349,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { if (pRead->curInvalid || pRead->curVersion != ver) { code = walReadSeekVer(pRead, ver); if (code < 0) { - pRead->curVersion = ver; - pRead->curInvalid = 1; +// pRead->curVersion = ver; +// pRead->curInvalid = 1; return -1; } seeked = true; @@ -369,7 +370,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } - pRead->curInvalid = 1; +// pRead->curInvalid = 1; return -1; } } @@ -382,7 +383,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { return -1; } - pRead->curInvalid = 0; +// pRead->curInvalid = 0; return 0; } @@ -400,7 +401,7 @@ int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) { code = taosLSeekFile(pRead->pLogFile, pHead->head.bodyLen, SEEK_CUR); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); - pRead->curInvalid = 1; +// pRead->curInvalid = 1; return -1; } @@ -439,14 +440,14 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { pRead->pWal->cfg.vgId, pReadHead->version, ver); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } - pRead->curInvalid = 1; +// pRead->curInvalid = 1; return -1; } if (pReadHead->version != ver) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId, pReadHead->version, ver); - pRead->curInvalid = 1; +// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } @@ -454,7 +455,7 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { if (walValidBodyCksum(*ppHead) != 0) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver); - pRead->curInvalid = 1; +// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } @@ -550,7 +551,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { if (pReader->pHead->head.version != ver) { wError("vgId:%d, unexpected wal log, index:%" PRId64 ", read request index:%" PRId64, pReader->pWal->cfg.vgId, pReader->pHead->head.version, ver); - pReader->curInvalid = 1; +// pReader->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; taosThreadMutexUnlock(&pReader->mutex); return -1; @@ -563,7 +564,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { uint32_t readCkSum = walCalcBodyCksum(pReader->pHead->head.body, pReader->pHead->head.bodyLen); uint32_t logCkSum = pReader->pHead->cksumBody; wError("checksum written into log:%u, checksum calculated:%u", logCkSum, readCkSum); - pReader->curInvalid = 1; +// pReader->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; taosThreadMutexUnlock(&pReader->mutex); return -1; @@ -581,5 +582,6 @@ void walReadReset(SWalReader *pReader) { taosCloseFile(&pReader->pLogFile); pReader->curInvalid = 1; pReader->curFileFirstVer = -1; + pReader->curVersion = -1; taosThreadMutexUnlock(&pReader->mutex); } From b46098793d13e867ae9ff559c107dbe01fce39d0 Mon Sep 
17 00:00:00 2001 From: Haojun Liao Date: Sun, 26 Feb 2023 12:16:45 +0800 Subject: [PATCH 02/27] refactor: do some internal refactor and add some logs for tmq. --- include/os/osMemory.h | 3 + source/dnode/mnode/impl/src/mndConsumer.c | 73 ++++++++++------------ source/dnode/mnode/impl/src/mndSubscribe.c | 8 --- source/dnode/vnode/src/tq/tq.c | 6 +- source/dnode/vnode/src/tq/tqExec.c | 4 +- source/dnode/vnode/src/tq/tqMeta.c | 2 +- source/dnode/vnode/src/tq/tqPush.c | 22 ++++--- source/dnode/vnode/src/tq/tqRead.c | 45 +++++++------ source/dnode/vnode/src/vnd/vnodeSvr.c | 10 +-- source/libs/executor/inc/executorimpl.h | 3 +- source/libs/executor/src/executor.c | 6 +- source/libs/executor/src/scanoperator.c | 3 +- 12 files changed, 94 insertions(+), 91 deletions(-) diff --git a/include/os/osMemory.h b/include/os/osMemory.h index 3f57c72933..cd2d4a3494 100644 --- a/include/os/osMemory.h +++ b/include/os/osMemory.h @@ -29,7 +29,10 @@ extern "C" { #define calloc CALLOC_FUNC_TAOS_FORBID #define realloc REALLOC_FUNC_TAOS_FORBID #define free FREE_FUNC_TAOS_FORBID +#ifdef strdup +#undef strdup #define strdup STRDUP_FUNC_TAOS_FORBID +#endif #endif // ifndef ALLOW_FORBID_FUNC #endif // if !defined(WINDOWS) diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 6621b6a3f9..768fe45e3e 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -15,17 +15,11 @@ #define _DEFAULT_SOURCE #include "mndConsumer.h" -#include "mndDb.h" -#include "mndDnode.h" -#include "mndMnode.h" #include "mndPrivilege.h" #include "mndShow.h" -#include "mndStb.h" #include "mndSubscribe.h" #include "mndTopic.h" #include "mndTrans.h" -#include "mndUser.h" -#include "mndVgroup.h" #include "tcompare.h" #include "tname.h" @@ -209,6 +203,7 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { taosMemoryFree(pConsumerNew); mndTransDrop(pTrans); return 0; + FAIL: tDeleteSMqConsumerObj(pConsumerNew); taosMemoryFree(pConsumerNew); @@ -580,6 +575,10 @@ static int32_t validateTopics(const SArray* pTopicList, SMnode* pMnode, const ch return 0; } +static void* topicNameDup(void* p){ + return taosStrdup((char*) p); +} + int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { SMnode *pMnode = pMsg->info.node; char *msgStr = pMsg->pCont; @@ -616,15 +615,15 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); tstrncpy(pConsumerNew->clientId, subscribe.clientId, 256); - pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY; - taosArrayDestroy(pConsumerNew->rebNewTopics); - pConsumerNew->rebNewTopics = pTopicList; // all subscribe topics should re-balance. - subscribe.topicNames = NULL; - for (int32_t i = 0; i < newTopicNum; i++) { - char *newTopicCopy = taosStrdup(taosArrayGetP(pTopicList, i)); - taosArrayPush(pConsumerNew->assignedTopics, &newTopicCopy); - } + // set the update type + pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY; + pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); + + // all subscribed topics should re-balance. 
+ taosArrayDestroy(pConsumerNew->rebNewTopics); + pConsumerNew->rebNewTopics = pTopicList; + subscribe.topicNames = NULL; if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto _over; if (mndTransPrepare(pMnode, pTrans) != 0) goto _over; @@ -646,17 +645,11 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { goto _over; } + // set the update type pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY; + pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); - for (int32_t i = 0; i < newTopicNum; i++) { - char *newTopicCopy = taosStrdup(taosArrayGetP(pTopicList, i)); - taosArrayPush(pConsumerNew->assignedTopics, &newTopicCopy); - } - - int32_t oldTopicNum = 0; - if (pExistedConsumer->currentTopics) { - oldTopicNum = taosArrayGetSize(pExistedConsumer->currentTopics); - } + int32_t oldTopicNum = (pExistedConsumer->currentTopics)? taosArrayGetSize(pExistedConsumer->currentTopics):0; int32_t i = 0, j = 0; while (i < oldTopicNum || j < newTopicNum) { @@ -692,11 +685,8 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { } } - if (pExistedConsumer && taosArrayGetSize(pConsumerNew->rebNewTopics) == 0 && - taosArrayGetSize(pConsumerNew->rebRemovedTopics) == 0) { - /*if (taosArrayGetSize(pConsumerNew->assignedTopics) == 0) {*/ - /*pConsumerNew->updateType = */ - /*}*/ + // no topics need to be rebalanced + if (taosArrayGetSize(pConsumerNew->rebNewTopics) == 0 && taosArrayGetSize(pConsumerNew->rebRemovedTopics) == 0) { goto _over; } @@ -718,8 +708,9 @@ _over: tDeleteSMqConsumerObj(pConsumerNew); taosMemoryFree(pConsumerNew); } + // TODO: replace with destroy subscribe msg - if (subscribe.topicNames) taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree); + taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree); return code; } @@ -750,12 +741,12 @@ SSdbRaw *mndConsumerActionEncode(SMqConsumerObj *pConsumer) { CM_ENCODE_OVER: taosMemoryFreeClear(buf); if (terrno != 0) { - mError("consumer:%" PRId64 ", failed to encode to raw:%p since %s", pConsumer->consumerId, pRaw, terrstr()); + mError("consumer:0x%" PRIx64 " failed to encode to raw:%p since %s", pConsumer->consumerId, pRaw, terrstr()); sdbFreeRaw(pRaw); return NULL; } - mTrace("consumer:%" PRId64 ", encode to raw:%p, row:%p", pConsumer->consumerId, pRaw, pConsumer); + mTrace("consumer:0x%" PRIx64 ", encode to raw:%p, row:%p", pConsumer->consumerId, pRaw, pConsumer); return pRaw; } @@ -823,8 +814,8 @@ static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) { } static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " perform delete action, status:%s", pConsumer->consumerId, - mndConsumerStatusName(pConsumer->status)); + mDebug("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, + pConsumer->status, mndConsumerStatusName(pConsumer->status)); tDeleteSMqConsumerObj(pConsumer); return 0; } @@ -1075,22 +1066,23 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * // consumer group char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0}; - tstrncpy(varDataVal(cgroup), pConsumer->cgroup, TSDB_CGROUP_LEN); - varDataSetLen(cgroup, strlen(varDataVal(cgroup))); + STR_TO_VARSTR(cgroup, pConsumer->cgroup); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)cgroup, false); // client id char clientId[256 + VARSTR_HEADER_SIZE] = {0}; - tstrncpy(varDataVal(clientId), pConsumer->clientId, 256); - varDataSetLen(clientId, 
strlen(varDataVal(clientId))); + STR_TO_VARSTR(clientId, pConsumer->clientId); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)clientId, false); // status char status[20 + VARSTR_HEADER_SIZE] = {0}; - tstrncpy(varDataVal(status), mndConsumerStatusName(pConsumer->status), 20); - varDataSetLen(status, strlen(varDataVal(status))); + const char* pStatusName = mndConsumerStatusName(pConsumer->status); + STR_TO_VARSTR(status, pStatusName); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)status, false); @@ -1123,8 +1115,11 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * numOfRows++; } + taosRUnLockLatch(&pConsumer->lock); sdbRelease(pSdb, pConsumer); + + pBlock->info.rows = numOfRows; } pShow->numOfRows += numOfRows; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 8d8fd1b9b5..8544994c3e 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -16,15 +16,10 @@ #define _DEFAULT_SOURCE #include "mndSubscribe.h" #include "mndConsumer.h" -#include "mndDb.h" -#include "mndDnode.h" -#include "mndMnode.h" #include "mndScheduler.h" #include "mndShow.h" -#include "mndStb.h" #include "mndTopic.h" #include "mndTrans.h" -#include "mndUser.h" #include "mndVgroup.h" #include "tcompare.h" #include "tname.h" @@ -1041,7 +1036,6 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock } // do not show for cleared subscription -#if 1 int32_t sz = taosArrayGetSize(pSub->unassignedVgs); for (int32_t i = 0; i < sz; i++) { SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i); @@ -1087,8 +1081,6 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock numOfRows++; } -#endif - pBlock->info.rows = numOfRows; taosRUnLockLatch(&pSub->lock); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index c9bbb30380..c465617975 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -217,7 +217,7 @@ int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry) { char buf2[80] = {0}; tFormatOffset(buf1, tListLen(buf1), &pRsp->reqOffset); tFormatOffset(buf2, tListLen(buf2), &pRsp->rspOffset); - tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, reqOffset:%s, rspOffset:%s", + tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s", TD_VID(pTq->pVnode), pRsp->head.consumerId, pRsp->head.epoch, pRsp->blockNum, buf1, buf2); return 0; @@ -275,7 +275,7 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con char buf2[80] = {0}; tFormatOffset(buf1, 80, &pRsp->reqOffset); tFormatOffset(buf2, 80, &pRsp->rspOffset); - tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d), block num:%d, reqOffset:%s, rspOffset:%s", + tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d), block num:%d, req:%s, rsp:%s", TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2); return 0; @@ -604,7 +604,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { code = -1; } - tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp data block:%d, offset type:%d, uid/version:%" PRId64 ", ts:%" PRId64 "", + tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, offset type:%d, uid/version:%" PRId64 ", ts:%" PRId64 "", consumerId, pHandle->subKey, TD_VID(pTq->pVnode), 
dataRsp.blockNum, dataRsp.rspOffset.type, dataRsp.rspOffset.uid, dataRsp.rspOffset.ts); diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index e92da7668a..5638228641 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -113,9 +113,7 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs return -1; } - ASSERT(pRsp->withTbName == false); - ASSERT(pRsp->withSchema == false); - + ASSERT(!(pRsp->withTbName || pRsp->withSchema)); return 0; } diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 12fd15c63a..a85e6e0a70 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -197,7 +197,7 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { return -1; } - tqDebug("tq save %s(%d) handle consumer:0x%" PRIx64 "epoch:%d vgId:%d", pHandle->subKey, + tqDebug("tq save %s(%d) handle consumer:0x%" PRIx64 " epoch:%d vgId:%d", pHandle->subKey, (int32_t)strlen(pHandle->subKey), pHandle->consumerId, pHandle->epoch, TD_VID(pTq->pVnode)); void* buf = taosMemoryCalloc(1, vlen); diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index e747262277..5a25d7e894 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -193,7 +193,7 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_ memset(&pHandle->pushHandle.rpcInfo, 0, sizeof(SRpcHandleInfo)); taosWUnLockLatch(&pHandle->pushHandle.lock); - tqDebug("vgId:%d offset %" PRId64 " from consumer:%" PRId64 ", (epoch %d) send rsp, block num: %d, reqOffset:%" PRId64 ", rspOffset:%" PRId64, + tqDebug("vgId:%d offset %" PRId64 " from consumer:%" PRId64 ", (epoch %d) send rsp, block num: %d, req:%" PRId64 ", rsp:%" PRId64, TD_VID(pTq->pVnode), fetchOffset, pHandle->pushHandle.consumerId, pHandle->pushHandle.epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset); @@ -210,25 +210,30 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) void* pReq = POINTER_SHIFT(msg, sizeof(SSubmitReq2Msg)); int32_t len = msgLen - sizeof(SSubmitReq2Msg); - tqDebug("vgId:%d tq push msg version:%" PRId64 " type: %s, p head %p, p body %p, len %d", pTq->pVnode->config.vgId, ver, - TMSG_INFO(msgType), msg, pReq, len); - if (msgType == TDMT_VND_SUBMIT) { // lock push mgr to avoid potential msg lost taosWLockLatch(&pTq->pushLock); - if (taosHashGetSize(pTq->pPushMgr) != 0) { - tqDebug("vgId:%d, push handle num %d", pTq->pVnode->config.vgId, taosHashGetSize(pTq->pPushMgr)); + int32_t numOfRegisteredPush = taosHashGetSize(pTq->pPushMgr); + if (numOfRegisteredPush > 0) { + tqDebug("vgId:%d tq push msg version:%" PRId64 " type:%s, head:%p, body:%p len:%d, numOfPushed consumers:%d", + pTq->pVnode->config.vgId, ver, TMSG_INFO(msgType), msg, pReq, len, numOfRegisteredPush); + SArray* cachedKeys = taosArrayInit(0, sizeof(void*)); SArray* cachedKeyLens = taosArrayInit(0, sizeof(size_t)); + void* data = taosMemoryMalloc(len); if (data == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; tqError("failed to copy data for stream since out of memory"); taosArrayDestroyP(cachedKeys, (FDelete)taosMemoryFree); taosArrayDestroy(cachedKeyLens); + + // unlock + taosWUnLockLatch(&pTq->pushLock); return -1; } + memcpy(data, pReq, len); void* pIter = NULL; @@ -262,7 +267,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) }; qStreamSetScanMemData(task, submit); - // exec + // 
here start to scan submit block to extract the subscribed data while (1) { SSDataBlock* pDataBlock = NULL; uint64_t ts = 0; @@ -278,7 +283,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) pRsp->blockNum++; } - tqDebug("vgId:%d, tq handle push, subkey: %s, block num: %d", pTq->pVnode->config.vgId, pPushEntry->subKey, + tqDebug("vgId:%d, tq handle push, subkey:%s, block num:%d", pTq->pVnode->config.vgId, pPushEntry->subKey, pRsp->blockNum); if (pRsp->blockNum > 0) { // set offset @@ -295,6 +300,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) tqPushDataRsp(pTq, pPushEntry); } } + // delete entry for (int32_t i = 0; i < taosArrayGetSize(cachedKeys); i++) { void* key = taosArrayGetP(cachedKeys, i); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 2c8f20d8cd..4bf6320c32 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -292,9 +292,12 @@ void tqCloseReader(STqReader* pReader) { int32_t tqSeekVer(STqReader* pReader, int64_t ver) { if (walReadSeekVer(pReader->pWalReader, ver) < 0) { + tqError("tmq poll: wal reader failed to seek to ver:%"PRId64, ver); return -1; + } else { + tqDebug("tmq poll: wal reader seek to ver:%"PRId64, ver); + return 0; } - return 0; } int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) { @@ -302,28 +305,33 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) { while (1) { if (!fromProcessedMsg) { - if (walNextValidMsg(pReader->pWalReader) < 0) { - pReader->ver = - pReader->pWalReader->curVersion - (pReader->pWalReader->curInvalid | pReader->pWalReader->curStopped); + SWalReader* pWalReader = pReader->pWalReader; + + if (walNextValidMsg(pWalReader) < 0) { + pReader->ver = pWalReader->curVersion - (pWalReader->curInvalid | pWalReader->curStopped); ret->offset.type = TMQ_OFFSET__LOG; ret->offset.version = pReader->ver; ret->fetchType = FETCH_TYPE__NONE; tqDebug("return offset %" PRId64 ", no more valid", ret->offset.version); return -1; } - void* body = POINTER_SHIFT(pReader->pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg)); - int32_t bodyLen = pReader->pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg); - int64_t ver = pReader->pWalReader->pHead->head.version; + + void* body = POINTER_SHIFT(pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg)); + int32_t bodyLen = pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg); + int64_t ver = pWalReader->pHead->head.version; + + tqDebug("tmq poll: extract submit msg from wal, version:%"PRId64" len:%d", ver, bodyLen); + #if 0 - if (pReader->pWalReader->pHead->head.msgType != TDMT_VND_SUBMIT) { + if (pWalReader->pHead->head.msgType != TDMT_VND_SUBMIT) { // TODO do filter ret->fetchType = FETCH_TYPE__META; - ret->meta = pReader->pWalReader->pHead->head.body; + ret->meta = pWalReader->pHead->head.body; return 0; } else { #endif tqReaderSetSubmitReq2(pReader, body, bodyLen, ver); - /*tqReaderSetDataMsg(pReader, body, pReader->pWalReader->pHead->head.version);*/ + /*tqReaderSetDataMsg(pReader, body, pWalReader->pHead->head.version);*/ #if 0 } #endif @@ -358,7 +366,7 @@ int32_t tqReaderSetDataMsg(STqReader* pReader, const SSubmitReq* pMsg, int64_t v // if (tInitSubmitMsgIter(pMsg, &pReader->msgIter) < 0) return -1; // while (true) { // if (tGetSubmitMsgNext(&pReader->msgIter, &pReader->pBlock) < 0) return -1; -// tqDebug("submitnext vgId:%d, block:%p, dataLen:%d, len:%d, uid:%"PRId64, pReader->pWalReader->pWal->cfg.vgId, pReader->pBlock, pReader->msgIter.dataLen, 
+// tqDebug("submitnext vgId:%d, block:%p, dataLen:%d, len:%d, uid:%"PRId64, pWalReader->pWal->cfg.vgId, pReader->pBlock, pReader->msgIter.dataLen, // pReader->msgIter.len, pReader->msgIter.uid); // if (pReader->pBlock == NULL) break; // } @@ -371,10 +379,8 @@ int32_t tqReaderSetDataMsg(STqReader* pReader, const SSubmitReq* pMsg, int64_t v #endif int32_t tqReaderSetSubmitReq2(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) { - ASSERT(pReader->msg2.msgStr == NULL); - ASSERT(msgStr); - ASSERT(msgLen); - ASSERT(ver >= 0); + ASSERT(pReader->msg2.msgStr == NULL && msgStr && msgLen && (ver >= 0)); + pReader->msg2.msgStr = msgStr; pReader->msg2.msgLen = msgLen; pReader->msg2.ver = ver; @@ -421,7 +427,10 @@ bool tqNextDataBlock(STqReader* pReader) { #endif bool tqNextDataBlock2(STqReader* pReader) { - if (pReader->msg2.msgStr == NULL) return false; + if (pReader->msg2.msgStr == NULL) { + return false; + } + ASSERT(pReader->setMsg == 1); tqDebug("tq reader next data block %p, %d %" PRId64 " %d", pReader->msg2.msgStr, pReader->msg2.msgLen, @@ -528,7 +537,7 @@ int32_t tqScanSubmitSplit(SArray* pBlocks, SArray* schemas, STqReader* pReader) if (pReader->pSchema == NULL) { tqWarn("vgId:%d, cannot found tsschema for table: uid:%" PRId64 " (suid:%" PRId64 "), version %d, possibly dropped table", - pReader->pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->msgIter.suid, sversion); + pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->msgIter.suid, sversion); pReader->cachedSchemaSuid = 0; terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; return -1; @@ -538,7 +547,7 @@ int32_t tqScanSubmitSplit(SArray* pBlocks, SArray* schemas, STqReader* pReader) pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, pReader->msgIter.uid, sversion, 1); if (pReader->pSchemaWrapper == NULL) { tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table", - pReader->pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->cachedSchemaVer); + pWalReader->pWal->cfg.vgId, pReader->msgIter.uid, pReader->cachedSchemaVer); pReader->cachedSchemaSuid = 0; terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; return -1; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 8651478afa..a3438b611e 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -1264,8 +1264,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq goto _exit; } - for (int32_t i = 1; i < nColData; i++) { - if (aColData[i].nVal != aColData[0].nVal) { + for (int32_t j = 1; j < nColData; j++) { + if (aColData[j].nVal != aColData[0].nVal) { code = TSDB_CODE_INVALID_MSG; goto _exit; } @@ -1299,8 +1299,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq SVCreateTbRsp *pCreateTbRsp = taosArrayReserve(pSubmitRsp->aCreateTbRsp, 1); // create table - if (metaCreateTable(pVnode->pMeta, version, pSubmitTbData->pCreateTbReq, &pCreateTbRsp->pMeta) == - 0) { // create table success + if (metaCreateTable(pVnode->pMeta, version, pSubmitTbData->pCreateTbReq, &pCreateTbRsp->pMeta) == 0) { + // create table success if (newTbUids == NULL && (newTbUids = taosArrayInit(TARRAY_SIZE(pSubmitReq->aSubmitTbData), sizeof(int64_t))) == NULL) { @@ -1330,7 +1330,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq pSubmitRsp->affectedRows += affectedRows; } - // update table uid list + // update the affected table uid list if 
(taosArrayGetSize(newTbUids) > 0) { vDebug("vgId:%d, add %d table into query table list in handling submit", TD_VID(pVnode), (int32_t)taosArrayGetSize(newTbUids)); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 1712cba0f5..5df3b14a5b 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -133,8 +133,7 @@ typedef struct { int64_t snapshotVer; // const SSubmitReq* pReq; - SPackedData submit; - + SPackedData submit; SSchemaWrapper* schema; char tbName[TSDB_TABLE_NAME_LEN]; int8_t recoverStep; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 347ac369d8..b3d3e8aab7 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1035,8 +1035,9 @@ int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq, int64_t sc int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); - ASSERT(pTaskInfo->streamInfo.submit.msgStr == NULL); + ASSERT((pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE )&& (pTaskInfo->streamInfo.submit.msgStr == NULL)); + qDebug("set the submit block for future scan"); + pTaskInfo->streamInfo.submit = submit; return 0; } @@ -1050,6 +1051,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) { return 0; } + if (subType == TOPIC_SUB_TYPE__COLUMN) { uint16_t type = pOperator->operatorType; pOperator->status = OP_OPENED; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index a238b84993..e1594b13a8 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1562,7 +1562,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; - qDebug("queue scan called"); + qDebug("start to exec queue scan"); if (pTaskInfo->streamInfo.submit.msgStr != NULL) { if (pInfo->tqReader->msg2.msgStr == NULL) { @@ -1587,7 +1587,6 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { SSDataBlock block = {0}; int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL); - if (code != TSDB_CODE_SUCCESS || block.info.rows == 0) { continue; } From a28156e832d4298a15806fd7d6019ad0076e42a5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 26 Feb 2023 12:35:50 +0800 Subject: [PATCH 03/27] refactor: do some internal refactor and add some logs for tmq. 
--- source/libs/executor/src/executor.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index b3d3e8aab7..9611cdab67 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1048,15 +1048,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); pTaskInfo->streamInfo.prepareStatus = *pOffset; pTaskInfo->streamInfo.returned = 0; + if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) { return 0; } if (subType == TOPIC_SUB_TYPE__COLUMN) { - uint16_t type = pOperator->operatorType; pOperator->status = OP_OPENED; + // TODO add more check - if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { ASSERT(pOperator->numOfDownstream == 1); pOperator = pOperator->pDownstream[0]; } @@ -1070,7 +1071,8 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT return -1; } } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { - /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/ + // iterate all tables from tableInfoList, and retrieve rows from each table one-by-one + // those data are from the snapshot in tsdb, besides the data in the wal file. int64_t uid = pOffset->uid; int64_t ts = pOffset->ts; @@ -1129,7 +1131,6 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts, pTableScanInfo->currentTable, numOfTables); - /*}*/ } else { ASSERT(0); } @@ -1172,7 +1173,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema); pTaskInfo->streamInfo.schema = mtInfo.schema; - qDebug("tmqsnap qStreamPrepareScan snapshot data uid %" PRId64 " ts %" PRId64, mtInfo.uid, pOffset->ts); + qDebug("tmqsnap qStreamPrepareScan snapshot data uid:%" PRId64 " ts %" PRId64, mtInfo.uid, pOffset->ts); } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) { SStreamRawScanInfo* pInfo = pOperator->info; SSnapContext* sContext = pInfo->sContext; @@ -1180,7 +1181,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT qError("setForSnapShot error. uid:%" PRIu64 " ,version:%" PRId64, pOffset->uid, pOffset->version); return -1; } - qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %" PRId64 " ts %" PRId64, pOffset->uid, pOffset->ts); + qDebug("tmqsnap qStreamPrepareScan snapshot meta uid:%" PRId64 " ts %" PRId64, pOffset->uid, pOffset->ts); } else if (pOffset->type == TMQ_OFFSET__LOG) { SStreamRawScanInfo* pInfo = pOperator->info; tsdbReaderClose(pInfo->dataReader); From e1be7dd39905d692896049a7898147dd7a2331cc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 27 Feb 2023 17:55:30 +0800 Subject: [PATCH 04/27] fix(query): fix memory leak. 
--- source/dnode/mnode/impl/src/mndConsumer.c | 2 ++ source/util/src/thashutil.c | 5 ----- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 768fe45e3e..280f3b0ecc 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -618,6 +618,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { // set the update type pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY; + taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); // all subscribed topics should re-balance. @@ -647,6 +648,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { // set the update type pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY; + taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); int32_t oldTopicNum = (pExistedConsumer->currentTopics)? taosArrayGetSize(pExistedConsumer->currentTopics):0; diff --git a/source/util/src/thashutil.c b/source/util/src/thashutil.c index 21b9359076..f9c7eb1f56 100644 --- a/source/util/src/thashutil.c +++ b/source/util/src/thashutil.c @@ -50,11 +50,6 @@ uint32_t taosDJB2Hash(const char *key, uint32_t len) { return hash; } -uint32_t xxHash(const char *key, uint32_t len) { - int32_t seed = 0xcc9e2d51; - return XXH32(key, len, seed); -} - uint32_t MurmurHash3_32(const char *key, uint32_t len) { const uint8_t *data = (const uint8_t *)key; const int32_t nblocks = len >> 2u; From d18dd3067bfd2c5db185a26479efbba01da4f227 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 28 Feb 2023 00:38:37 +0800 Subject: [PATCH 05/27] other: merge main. --- include/os/osMemory.h | 2 - source/dnode/vnode/src/tq/tqRead.c | 2 + source/libs/executor/src/executor.c | 1 + tests/develop-test/test.py | 10 +- tests/parallel_test/cases.task | 30 +- .../system-test/0-others/tag_index_advance.py | 520 ++++++++++++++++++ tests/system-test/0-others/tag_index_basic.py | 37 +- tests/system-test/pytest.sh | 4 +- tests/system-test/test.py | 10 +- 9 files changed, 585 insertions(+), 31 deletions(-) create mode 100644 tests/system-test/0-others/tag_index_advance.py diff --git a/include/os/osMemory.h b/include/os/osMemory.h index b197dfd8a2..44a97bf055 100644 --- a/include/os/osMemory.h +++ b/include/os/osMemory.h @@ -33,8 +33,6 @@ extern "C" { #undef strdup #define strdup STRDUP_FUNC_TAOS_FORBID #endif -#endif // ifndef ALLOW_FORBID_FUNC - #endif // ifndef ALLOW_FORBID_FUNC #endif // if !defined(WINDOWS) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 4bf6320c32..5352ebe6d4 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -291,6 +291,8 @@ void tqCloseReader(STqReader* pReader) { } int32_t tqSeekVer(STqReader* pReader, int64_t ver) { + // todo set the correct vgId + tqDebug("tmq poll: vgId:%d wal seek to version:%"PRId64, 0, ver); if (walReadSeekVer(pReader->pWalReader, ver) < 0) { tqError("tmq poll: wal reader failed to seek to ver:%"PRId64, ver); return -1; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 9611cdab67..c2bf001c86 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1067,6 +1067,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; tsdbReaderClose(pTSInfo->base.dataReader); 
pTSInfo->base.dataReader = NULL; + // let's seek to the next version in wal file if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) { return -1; } diff --git a/tests/develop-test/test.py b/tests/develop-test/test.py index 1b0f0d0aed..600925174f 100644 --- a/tests/develop-test/test.py +++ b/tests/develop-test/test.py @@ -343,7 +343,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy": if int(res[i][1]) == int(queryPolicy): - tdLog.success( + tdLog.info( f"alter queryPolicy to {queryPolicy} successfully" ) else: @@ -402,7 +402,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy": if int(res[i][1]) == int(queryPolicy): - tdLog.success( + tdLog.info( f"alter queryPolicy to {queryPolicy} successfully" ) else: @@ -471,7 +471,7 @@ if __name__ == "__main__": # for i in range(tdSql.queryRows): # if tdSql.queryResult[i][0] == "queryPolicy" : # if int(tdSql.queryResult[i][1]) == int(queryPolicy): - # tdLog.success('alter queryPolicy to %d successfully'%queryPolicy) + # tdLog.info('alter queryPolicy to %d successfully'%queryPolicy) # else : # tdLog.debug(tdSql.queryResult) # tdLog.exit("alter queryPolicy to %d failed"%queryPolicy) @@ -484,7 +484,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy": if int(res[i][1]) == int(queryPolicy): - tdLog.success( + tdLog.info( f"alter queryPolicy to {queryPolicy} successfully" ) else: @@ -545,7 +545,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy": if int(res[i][1]) == int(queryPolicy): - tdLog.success( + tdLog.info( f"alter queryPolicy to {queryPolicy} successfully" ) else: diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index d708227cba..90b71751f5 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -308,7 +308,7 @@ ,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim ,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim ,,y,script,./test.sh -f tsim/vnode/replica3_many.sim -#,,y,script,./test.sh -f tsim/vnode/replica3_import.sim +,,y,script,./test.sh -f tsim/vnode/replica3_import.sim ,,y,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim ,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim ,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim @@ -680,8 +680,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 6 -M 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 -n 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 @@ -913,13 +913,13 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -Q 3 
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQueryInterval.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity_1.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity_1.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/avg.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/elapsed.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 3 @@ -927,9 +927,9 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tail.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 3 @@ -945,7 +945,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3 @@ -991,7 +991,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -Q 4 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 
2-query/percentile.py -Q 4 @@ -1086,7 +1086,7 @@ ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sample_csv_json.py -,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py +#,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py -R ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R diff --git a/tests/system-test/0-others/tag_index_advance.py b/tests/system-test/0-others/tag_index_advance.py new file mode 100644 index 0000000000..a8d6cde85a --- /dev/null +++ b/tests/system-test/0-others/tag_index_advance.py @@ -0,0 +1,520 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os +import socket +import subprocess +import random +import string +import random + + +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * +from util.sqlset import * + +from util.dnodes import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode + +# +# -------------- util -------------------------- +# +def pathSize(path): + + total_size = 0 + for dirpath, dirnames, filenames in os.walk(path): + for i in filenames: + #use join to concatenate all the components of path + f = os.path.join(dirpath, i) + #use getsize to generate size in bytes and add it to the total size + total_size += os.path.getsize(f) + #print(dirpath) + + print(" %s %.02f MB"%(path, total_size/1024/1024)) + return total_size + + ''' + total = 0 + with os.scandir(path) as it: + for entry in it: + if entry.is_file(): + total += entry.stat().st_size + elif entry.is_dir(): + total += pathSize(entry.path) + + print(" %s %.02f MB"%(path, total/1024/1024)) + return total + ''' + +# +# --------------- cluster ------------------------ +# + +class MyDnodes(TDDnodes): + def __init__(self ,dnodes_lists): + super(MyDnodes,self).__init__() + self.dnodes = dnodes_lists # dnode must be TDDnode instance + self.simDeployed = False + +class TagCluster: + noConn = True + def init(self, conn, logSql, replicaVar=1): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + self.depoly_cluster(5) + self.master_dnode = self.TDDnodes.dnodes[0] + self.host=self.master_dnode.cfgDict["fqdn"] + conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) + tdSql.init(conn1.cursor()) + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" 
not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + def depoly_cluster(self ,dnodes_nums): + + testCluster = False + valgrind = 0 + hostname = socket.gethostname() + dnodes = [] + start_port = 6030 + for num in range(1, dnodes_nums+1): + dnode = TDDnode(num) + dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}") + dnode.addExtraCfg("fqdn", f"{hostname}") + dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}") + dnode.addExtraCfg("monitorFqdn", hostname) + dnode.addExtraCfg("monitorPort", 7043) + dnodes.append(dnode) + + self.TDDnodes = MyDnodes(dnodes) + self.TDDnodes.init("") + self.TDDnodes.setTestCluster(testCluster) + self.TDDnodes.setValgrind(valgrind) + + self.TDDnodes.setAsan(tdDnodes.getAsan()) + self.TDDnodes.stopAll() + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.deploy(dnode.index,{}) + + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.starttaosd(dnode.index) + + # create cluster + dnode_first_host = "" + sql = "" + for dnode in self.TDDnodes.dnodes[1:]: + # print(dnode.cfgDict) + dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] + if dnode_first_host == "": + dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] + dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] + sql += f"create dnode '{dnode_id}'; " + + cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s " + cmd += f'"{sql}"' + print(cmd) + os.system(cmd) + + time.sleep(2) + tdLog.info(" create cluster done! ") + + + def getConnection(self, dnode): + host = dnode.cfgDict["fqdn"] + port = dnode.cfgDict["serverPort"] + config_dir = dnode.cfgDir + return taos.connect(host=host, port=int(port), config=config_dir) + + def run(self): + tdLog.info(" create cluster ok.") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +class PerfDB: + def __init__(self): + self.sqls = [] + self.spends= [] + + # execute + def execute(self, sql): + print(f" perfdb execute {sql}") + stime = time.time() + ret = tdSql.execute(sql, 1) + spend = time.time() - stime + + self.sqls.append(sql) + self.spends.append(spend) + return ret + + # query + def query(self, sql): + print(f" perfdb query {sql}") + start = time.time() + ret = tdSql.query(sql, None, 1) + spend = time.time() - start + self.sqls.append(sql) + self.spends.append(spend) + return ret + + +# +# ----------------- TDTestCase ------------------ +# +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + tdLog.debug("start to execute %s" % __file__) + self.dbs = [PerfDB(), PerfDB()] + self.cur = 0 + self.tagCluster = TagCluster() + self.tagCluster.init(conn, logSql, replicaVar) + self.lenBinary = 64 + self.lenNchar = 32 + + # column + self.column_dict = { + 'ts': 'timestamp', + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': f'varchar({self.lenBinary})', + 'col13': f'nchar({self.lenNchar})' + } + # tag + self.tag_dict = { + 't1': 'tinyint', + 't2': 'smallint', + 't3': 'int', + 't4': 'bigint', + 't5': 'tinyint unsigned', + 't6': 'smallint unsigned', + 't7': 'int unsigned', + 't8': 'bigint unsigned', + 't9': 'float', + 't10': 'double', + 't11': 'bool', + 't12': f'varchar({self.lenBinary})', + 't13': f'nchar({self.lenNchar})', + 't14': 'timestamp' + } + + # generate specail wide random 
string + def random_string(self, count): + letters = string.ascii_letters + return ''.join(random.choice(letters) for i in range(count)) + + # execute + def execute(self, sql): + obj = self.dbs[self.cur] + return obj.execute(sql) + + # query + def query(self, sql): + return self.dbs[self.cur].query(sql) + + def set_stb_sql(self,stbname,column_dict,tag_dict): + column_sql = '' + tag_sql = '' + for k,v in column_dict.items(): + column_sql += f"{k} {v}, " + for k,v in tag_dict.items(): + tag_sql += f"{k} {v}, " + create_stb_sql = f'create stable {stbname} ({column_sql[:-2]}) tags ({tag_sql[:-2]})' + return create_stb_sql + + # create datbase + def create_database(self, dbname, vgroups, replica): + sql = f'create database {dbname} vgroups {vgroups} replica {replica}' + tdSql.execute(sql) + #tdSql.execute(sql) + tdSql.execute(f'use {dbname}') + + # create stable and child tables + def create_table(self, stbname, tbname, count): + # create stable + create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict) + tdSql.execute(create_table_sql) + + # create child table + tdLog.info(f" start create {count} child tables.") + for i in range(count): + ti = i % 128 + binTxt = self.random_string(self.lenBinary) + tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"{binTxt}","nch{i}",now' + sql = f'create table {tbname}{i} using {stbname} tags({tags})' + tdSql.execute(sql) + if i > 0 and i % 1000 == 0: + tdLog.info(f" child table count = {i}") + + tdLog.info(f" end create {count} child tables.") + + + # create stable and child tables + def create_tagidx(self, stbname): + cnt = -1 + for key in self.tag_dict.keys(): + # first tag have default index, so skip + if cnt == -1: + cnt = 0 + continue; + sql = f'create index idx_{key} on {stbname} ({key})' + tdLog.info(f" sql={sql}") + tdSql.execute(sql) + cnt += 1 + tdLog.info(f' create {cnt} tag indexs ok.') + + # insert to child table d1 data + def insert_data(self, tbname): + # d1 insert 3 rows + for i in range(3): + sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});' + tdSql.execute(sql) + # d20 insert 4 + for i in range(4): + sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});' + tdSql.execute(sql) + + # check show indexs + def show_tagidx(self, dbname, stbname): + sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="{dbname}"' + tdSql.query(sql) + rows = len(self.tag_dict.keys())-1 + tdSql.checkRows(rows) + + for i in range(rows): + col_name = tdSql.getData(i, 1) + idx_name = f'idx_{col_name}' + tdSql.checkData(i, 0, idx_name) + + tdLog.info(f' show {rows} tag indexs ok.') + + # query with tag idx + def query_tagidx(self, stbname): + sql = f'select * from meters where t2=1' + self.query(sql) + tdSql.checkRows(3) + + sql = f'select * from meters where t2<10' + self.query(sql) + tdSql.checkRows(3) + + sql = f'select * from meters where t2>10' + self.query(sql) + tdSql.checkRows(4) + + sql = f'select * from meters where t3<30' + self.query(sql) + tdSql.checkRows(7) + + sql = f'select * from meters where t12="11"' + tdSql.query(sql) + tdSql.checkRows(0) + + sql = f'select * from meters where (t4 < 10 or t5 = 20) and t6= 30' + self.query(sql) + tdSql.checkRows(0) + + sql = f'select * from meters where (t7 < 20 and t8 = 20) or t4 = 20' + self.query(sql) + tdSql.checkRows(4) + + sql = f'select * from meters where t12 like "%ab%" ' + self.query(sql) + tdSql.checkRows(0) + + sql = f'select * from meters where t13 = "d20" ' + self.query(sql) + tdSql.checkRows(0) + + sql = 
f'select * from meters where t13 = "nch20" ' + self.query(sql) + tdSql.checkRows(4) + + sql = f'select * from meters where tbname = "d20" ' + self.query(sql) + tdSql.checkRows(4) + + + # drop child table + def drop_tables(self, tbname, count): + # table d1 and d20 have verify data , so can not drop + start = random.randint(21, count/2) + end = random.randint(count/2 + 1, count - 1) + for i in range(start, end): + sql = f'drop table {tbname}{i}' + tdSql.execute(sql) + cnt = end - start + 1 + tdLog.info(f' drop table from {start} to {end} count={cnt}') + + # drop tag index + def drop_tagidx(self, dbname, stbname): + # drop index + cnt = -1 + for key in self.tag_dict.keys(): + # first tag have default index, so skip + if cnt == -1: + cnt = 0 + continue; + sql = f'drop index idx_{key}' + tdSql.execute(sql) + cnt += 1 + + # check idx result is 0 + sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="{dbname}"' + tdSql.query(sql) + tdSql.checkRows(0) + tdLog.info(f' drop {cnt} tag indexs ok.') + + # show performance + def show_performance(self, count) : + db = self.dbs[0] + db1 = self.dbs[1] + cnt = len(db.sqls) + cnt1 = len(db1.sqls) + if cnt != len(db1.sqls): + tdLog.info(f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n") + return False + + tdLog.info(f" database sql cnt ={cnt}") + print(f" ----------------- performance (child tables = {count})--------------------") + print(" No time(index) time(no-index) diff(col3-col2) rate(col2/col3) sql") + for i in range(cnt): + key = db.sqls[i] + value = db.spends[i] + key1 = db1.sqls[i] + value1 = db1.spends[i] + diff = value1 - value + rate = value/value1*100 + print(" %d %.3fs %.3fs %.3fs %d%% %s"%(i+1, value, value1, diff, rate, key)) + print(" --------------------- end ------------------------") + return True + + def show_diskspace(self): + #calc + selfPath = os.path.dirname(os.path.realpath(__file__)) + projPath = "" + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + # total + vnode2_size = pathSize(projPath + "sim/dnode2/data/vnode/vnode2/") + vnode3_size = pathSize(projPath + "sim/dnode3/data/vnode/vnode3/") + vnode4_size = pathSize(projPath + "sim/dnode4/data/vnode/vnode4/") + vnode5_size = pathSize(projPath + "sim/dnode5/data/vnode/vnode5/") + + # show + print(" ----------------- disk space --------------------") + idx_size = vnode2_size + vnode3_size + noidx_size = vnode4_size + vnode5_size + + print(" index = %.02f M"%(idx_size/1024/1024)) + print(" no-index = %.02f M"%(noidx_size/1024/1024)) + print(" index/no-index = %.2f multiple"%(idx_size/noidx_size)) + + print(" -------------------- end ------------------------") + + + + + # main + def testdb(self, dbname, stable, tbname, count, createidx): + # cur + if createidx: + self.cur = 0 + else : + self.cur = 1 + + # do + self.create_database(dbname, 2, 1) + self.create_table(stable, tbname, count) + if(createidx): + self.create_tagidx(stable) + self.insert_data(tbname) + if(createidx): + self.show_tagidx(dbname,stable) + self.query_tagidx(stable) + #self.drop_tables(tbname, count) + #if(createidx): + # self.drop_tagidx(dbname, stable) + # query after delete , expect no crash + #self.query_tagidx(stable) + tdSql.execute(f'flush database {dbname}') + + # run + def run(self): + self.tagCluster.run() + + # var + dbname = "tagindex" + dbname1 = dbname + "1" + stable = "meters" + tbname = "d" + count = 10000 + + # test db + tdLog.info(f" ------------- {dbname} 
----------") + self.testdb(dbname, stable, tbname, count, True) + tdLog.info(f" ------------- {dbname1} ----------") + self.testdb(dbname1, stable, tbname, count, False) + + # show test result + self.show_performance(count) + + self.tagCluster.TDDnodes.stopAll() + sec = 10 + print(f" sleep {sec}s wait taosd stopping ...") + time.sleep(sec) + + self.show_diskspace() + + + def stop(self): + self.tagCluster.stop() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/0-others/tag_index_basic.py b/tests/system-test/0-others/tag_index_basic.py index 195d8910e7..96c3dca016 100644 --- a/tests/system-test/0-others/tag_index_basic.py +++ b/tests/system-test/0-others/tag_index_basic.py @@ -107,11 +107,11 @@ class TDTestCase: def insert_data(self, tbname): # d1 insert 3 rows for i in range(3): - sql = f'insert into {tbname}1(ts,col1) values(now,{i});' + sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});' tdSql.execute(sql) # d20 insert 4 for i in range(4): - sql = f'insert into {tbname}20(ts,col1) values(now,{i});' + sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});' tdSql.execute(sql) # check show indexs @@ -150,6 +150,22 @@ class TDTestCase: tdSql.query(sql) tdSql.checkRows(0) + sql = f'select t12 ,t13,tbname from meters where t13="nch20"' + tdSql.query(sql) + tdSql.checkRows(4) + + sql = f'select * from meters where t12 like "%ab%" ' + tdSql.query(sql) + tdSql.checkRows(0) + + sql = f'select * from meters where t13 = "d20" ' + tdSql.query(sql) + tdSql.checkRows(0) + + sql = f'select * from meters where tbname = "d20" ' + tdSql.query(sql) + tdSql.checkRows(4) + sql = f'select * from meters where (t4 < 10 or t5 = 20) and t6= 30' tdSql.query(sql) tdSql.checkRows(0) @@ -188,6 +204,22 @@ class TDTestCase: tdSql.checkRows(0) tdLog.info(f' drop {cnt} tag indexs ok.') + # create long name idx + def longname_idx(self, stbname): + long_name = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjjkkkkkkkkkkllllllllllmmmmmmmmmm" + key = "t3" + sql = f'create index {long_name} on {stbname} ({key})' + tdSql.error(sql) + + long_name = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff" + key = "t3" + sql = f'create index {long_name} on {stbname} ({key})' + tdLog.info(f"{sql}") + tdSql.execute(sql) + sql = f'drop index {long_name}' + tdLog.info(f"{sql}") + tdSql.execute(sql) + # run def run(self): # var @@ -204,6 +236,7 @@ class TDTestCase: self.drop_tagidx(stable) # query after delete , expect no crash self.query_tagidx(stable) + self.longname_idx(stable) def stop(self): diff --git a/tests/system-test/pytest.sh b/tests/system-test/pytest.sh index 792cab98f7..a76efb62f3 100755 --- a/tests/system-test/pytest.sh +++ b/tests/system-test/pytest.sh @@ -94,7 +94,7 @@ else sleep 1 done - AsanFileSuccessLen=$(grep -w successfully $AsanFile | wc -l) + AsanFileSuccessLen=$(grep -w "successfully executed" $AsanFile | wc -l) echo "AsanFileSuccessLen:" $AsanFileSuccessLen if [ $AsanFileSuccessLen -gt 0 ]; then @@ -104,4 +104,4 @@ else echo "Execute script failure" exit 1 fi -fi +fi \ No newline at end of file diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 2017aad1ca..0c62c182f7 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -318,7 +318,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy" : if int(res[i][1]) 
== int(queryPolicy): - tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") @@ -371,7 +371,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy" : if int(res[i][1]) == int(queryPolicy): - tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") @@ -439,7 +439,7 @@ if __name__ == "__main__": # for i in range(tdSql.queryRows): # if tdSql.queryResult[i][0] == "queryPolicy" : # if int(tdSql.queryResult[i][1]) == int(queryPolicy): - # tdLog.success('alter queryPolicy to %d successfully'%queryPolicy) + # tdLog.info('alter queryPolicy to %d successfully'%queryPolicy) # else : # tdLog.debug(tdSql.queryResult) # tdLog.exit("alter queryPolicy to %d failed"%queryPolicy) @@ -452,7 +452,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy" : if int(res[i][1]) == int(queryPolicy): - tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") @@ -509,7 +509,7 @@ if __name__ == "__main__": for i in range(cursor.rowcount): if res[i][0] == "queryPolicy" : if int(res[i][1]) == int(queryPolicy): - tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + tdLog.info(f'alter queryPolicy to {queryPolicy} successfully') else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") From b92de1e9e6a5d6b32b89cd8356c0b231ca2adb8b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 28 Feb 2023 09:34:16 +0800 Subject: [PATCH 06/27] fix(query): fix invalid read. 
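
The invalid read fixed below is in createTagValBlockForFilter(): the old code
set the result cell to NULL when p1->pTagVal was NULL but then fell through
and still passed that NULL pointer to metaGetTableTagVal(). The patch moves
the tag-value lookup into an else branch so the pointer is only dereferenced
when it is known to be non-NULL. A minimal, self-contained sketch of that
guard pattern follows; it uses stub types and helper names for illustration
only, not the real TDengine API:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct { const char *pTagVal; } STagRowStub;  /* stand-in for the row */

    static void setCellNull(int row)               { printf("row %d: NULL\n", row); }
    static void setCellVal(int row, const char *p) { printf("row %d: %s\n", row, p); }

    /* Only dereference the tag pointer on the non-NULL path. */
    static void fillCell(const STagRowStub *pRow, int row) {
      if (pRow->pTagVal == NULL) {
        setCellNull(row);                /* NULL tag: set the cell and stop here */
      } else {
        setCellVal(row, pRow->pTagVal);  /* safe: pointer checked above */
      }
    }

    int main(void) {
      STagRowStub rows[2] = {{NULL}, {"tag-value"}};
      for (int i = 0; i < 2; ++i) fillCell(&rows[i], i);
      return 0;
    }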
--- source/libs/executor/src/executil.c | 46 +++++++++++++------------- source/libs/nodes/src/nodesUtilFuncs.c | 6 ++-- tools/shell/src/shellAuto.c | 9 +++-- 3 files changed, 33 insertions(+), 28 deletions(-) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 6b6f5cfe93..9537751ff0 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -850,32 +850,32 @@ static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTa tagVal.cid = pColInfo->info.colId; if (p1->pTagVal == NULL) { colDataSetNULL(pColInfo, i); - } - - const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); - - if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { - colDataSetNULL(pColInfo, i); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { - colDataSetVal(pColInfo, i, p, false); - } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char* tmp = taosMemoryMalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1); - varDataSetLen(tmp, tagVal.nData); - memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); - colDataSetVal(pColInfo, i, tmp, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp + 2); -#endif - taosMemoryFree(tmp); } else { - colDataSetVal(pColInfo, i, (const char*)&tagVal.i64, false); + const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataSetNULL(pColInfo, i); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataSetVal(pColInfo, i, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryMalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataSetVal(pColInfo, i, tmp, false); #if TAG_FILTER_DEBUG - if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { - qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { - qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); - } + qDebug("tagfilter varch:%s", tmp + 2); #endif + taosMemoryFree(tmp); + } else { + colDataSetVal(pColInfo, i, (const char*)&tagVal.i64, false); +#if TAG_FILTER_DEBUG + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { + qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); + } +#endif + } } } } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 0419c883e6..024130b5f8 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -2101,9 +2101,9 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) { case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: - pVal->pz = taosMemoryMalloc(pVal->nLen + VARSTR_HEADER_SIZE + 1); - memcpy(pVal->pz, pNode->datum.p, pVal->nLen + VARSTR_HEADER_SIZE); - pVal->pz[pVal->nLen + VARSTR_HEADER_SIZE] = 0; + pVal->pz = taosMemoryMalloc(pVal->nLen + 1); + memcpy(pVal->pz, pNode->datum.p, pVal->nLen); + pVal->pz[pVal->nLen] = 0; break; case TSDB_DATA_TYPE_JSON: pVal->nLen = getJsonValueLen(pNode->datum.p); diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 1d872e3f0d..a8bc93dd0c 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ 
-98,6 +98,7 @@ SWords shellCommands[] = { {"describe ", 0, 0, NULL}, {"delete from where ", 0, 0, NULL}, {"drop database ", 0, 0, NULL}, + {"drop index ", 0, 0, NULL}, {"drop table ", 0, 0, NULL}, {"drop dnode ", 0, 0, NULL}, {"drop mnode on dnode ;", 0, 0, NULL}, @@ -384,7 +385,7 @@ void showHelp() { create table using tags ...\n\ create database ...\n\ create dnode \"fqdn:port\" ...\n\ - create index ...\n\ + create index on (tag_column_name);\n\ create mnode on dnode ;\n\ create qnode on dnode ;\n\ create stream into as select ...\n\ @@ -404,6 +405,7 @@ void showHelp() { drop consumer group ... \n\ drop topic ;\n\ drop stream ;\n\ + drop index ;\n\ ----- E ----- \n\ explain select clause ...\n\ ----- F ----- \n\ @@ -534,7 +536,7 @@ SWord* addWord(const char* p, int32_t len, bool pattern) { word->len = len; // check format - if (pattern) { + if (pattern && len > 0) { word->type = wordType(p, len); } else { word->type = WT_TEXT; @@ -1724,6 +1726,9 @@ bool matchEnd(TAOS* con, SShellCmd* cmd) { if (strlen(last) == 0) { goto _return; } + if (strcmp(last, " ") == 0) { + goto _return; + } // match database if (elast == NULL) { From 5c70ea58f809e449938822d7ef2ff9c6691d99ae Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Tue, 28 Feb 2023 10:31:36 +0800 Subject: [PATCH 07/27] fix: explorer packaging --- packaging/tools/install.sh | 3 +++ packaging/tools/make_install.bat | 3 +++ packaging/tools/makepkg.sh | 2 ++ packaging/tools/remove.sh | 1 + 4 files changed, 9 insertions(+) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 5aeff0e2fa..2495e177e1 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -34,6 +34,7 @@ benchmarkName="taosBenchmark" dumpName="taosdump" demoName="taosdemo" xname="taosx" +explorerName="${clientName}-explorer" clientName2="taos" serverName2="taosd" @@ -214,6 +215,7 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/${benchmarkName} || : ${csudo}rm -f ${bin_link_dir}/${dumpName} || : ${csudo}rm -f ${bin_link_dir}/${xname} || : + ${csudo}rm -f ${bin_link_dir}/${explorerName} || : ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : @@ -228,6 +230,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : [ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${xname} ${bin_link_dir}/${xname} || : + [ -x ${install_main_dir}/bin/${explorerName} ] && ${csudo}ln -s ${install_main_dir}/bin/${explorerName} ${bin_link_dir}/${explorerName} || : [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat index f5ed1cdf66..6dc7c356cd 100644 --- a/packaging/tools/make_install.bat +++ b/packaging/tools/make_install.bat @@ -70,6 +70,9 @@ if %Enterprise% == TRUE ( if exist %binary_dir%\\build\\bin\\taosx.exe ( copy %binary_dir%\\build\\bin\\taosx.exe %target_dir% > nul ) + if exist %binary_dir%\\build\\bin\\taos-explorer.exe ( + 
copy %binary_dir%\\build\\bin\\taos-explorer.exe %target_dir% > nul + ) if exist %binary_dir%\\build\\bin\\tmq_sim.exe ( copy %binary_dir%\\build\\bin\\tmq_sim.exe %target_dir% > nul ) diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 0a34d81b7f..92a20418c5 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -89,11 +89,13 @@ else ${build_dir}/bin/tdengine-datasource.zip \ ${build_dir}/bin/tdengine-datasource.zip.md5sum" [ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx" + explorer_bin_files=$(sh -c "ls ${build_dir}/bin/*-explorer") bin_files="${build_dir}/bin/${serverName} \ ${build_dir}/bin/${clientName} \ ${taostools_bin_files} \ ${taosx_bin} \ + ${explorer_bin_files} \ ${build_dir}/bin/taosadapter \ ${build_dir}/bin/udfd \ ${script_dir}/remove.sh \ diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 9c50c4582d..2479e48670 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -119,6 +119,7 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : ${csudo}rm -f ${bin_link_dir}/taoskeeper || : ${csudo}rm -f ${bin_link_dir}/taosx || : + ${csudo}rm -f ${bin_link_dir}/taos-explorer || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then ${csudo}rm -f ${bin_link_dir}/${clientName2} || : From 5b7ec8ade5b78d059135d525cb56627ae3dbc1a9 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 28 Feb 2023 10:41:13 +0800 Subject: [PATCH 08/27] fix: invalid write memory when query policy is 4 --- source/libs/nodes/src/nodesCloneFuncs.c | 18 +++++++++--------- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- source/libs/nodes/src/nodesMsgFuncs.c | 2 +- source/libs/nodes/src/nodesUtilFuncs.c | 6 +++--- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index b4f7ea866a..d9a4c5178f 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -35,15 +35,15 @@ memcpy(&((pDst)->fldname), &((pSrc)->fldname), size); \ } while (0) -#define COPY_CHAR_POINT_FIELD(fldname) \ - do { \ - if (NULL == (pSrc)->fldname) { \ - break; \ - } \ +#define COPY_CHAR_POINT_FIELD(fldname) \ + do { \ + if (NULL == (pSrc)->fldname) { \ + break; \ + } \ (pDst)->fldname = taosStrdup((pSrc)->fldname); \ - if (NULL == (pDst)->fldname) { \ - return TSDB_CODE_OUT_OF_MEMORY; \ - } \ + if (NULL == (pDst)->fldname) { \ + return TSDB_CODE_OUT_OF_MEMORY; \ + } \ } while (0) #define CLONE_NODE_FIELD(fldname) \ @@ -158,7 +158,7 @@ static int32_t valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) { case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: { - int32_t len = varDataTLen(pSrc->datum.p) + 1; + int32_t len = pSrc->node.resType.bytes + 1; pDst->datum.p = taosMemoryCalloc(1, len); if (NULL == pDst->datum.p) { return TSDB_CODE_OUT_OF_MEMORY; diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 099cd0d3b3..e18de1c1d2 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -3269,7 +3269,7 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) { case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: { - pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + VARSTR_HEADER_SIZE + 1); + pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + 1); if (NULL == pNode->datum.p) { code = 
TSDB_CODE_OUT_OF_MEMORY; break; diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index ad80508c64..6c6b6c0e81 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -928,7 +928,7 @@ static int32_t msgToDatum(STlv* pTlv, void* pObj) { code = TSDB_CODE_FAILED; break; } - pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + VARSTR_HEADER_SIZE + 1); + pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + 1); if (NULL == pNode->datum.p) { code = TSDB_CODE_OUT_OF_MEMORY; break; diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 0419c883e6..024130b5f8 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -2101,9 +2101,9 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) { case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: - pVal->pz = taosMemoryMalloc(pVal->nLen + VARSTR_HEADER_SIZE + 1); - memcpy(pVal->pz, pNode->datum.p, pVal->nLen + VARSTR_HEADER_SIZE); - pVal->pz[pVal->nLen + VARSTR_HEADER_SIZE] = 0; + pVal->pz = taosMemoryMalloc(pVal->nLen + 1); + memcpy(pVal->pz, pNode->datum.p, pVal->nLen); + pVal->pz[pVal->nLen] = 0; break; case TSDB_DATA_TYPE_JSON: pVal->nLen = getJsonValueLen(pNode->datum.p); From c2460da390d2fee0c12c3282ee2f3ed27b6efdf1 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 28 Feb 2023 10:45:20 +0800 Subject: [PATCH 09/27] fix: invalid write memory when query policy is 4 --- tests/parallel_test/cases.task | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 90b71751f5..479d0243b1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -913,13 +913,13 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQueryInterval.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity_1.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity_1.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/avg.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/elapsed.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 3 @@ -927,9 +927,9 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py 
-Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tail.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 3 @@ -945,7 +945,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3 @@ -991,7 +991,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -Q 4 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -Q 4 @@ -1086,7 +1086,7 @@ ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sample_csv_json.py -#,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py +,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py -R ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R From 01ededfbf5e59ce15f8a6c4f0c5c9fe28f659e8c Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 28 Feb 2023 10:45:47 +0800 Subject: [PATCH 10/27] fix: invalid write memory when query policy is 4 --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 479d0243b1..e6384a87e9 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1086,7 +1086,7 @@ ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sample_csv_json.py -,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py +#,,n,develop-test,python3 ./test.py -f 
5-taos-tools/taosbenchmark/sml_json_alltypes.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py -R ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R From ce38d80480ffc45ac25b60aed7950725260867b7 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 28 Feb 2023 11:19:13 +0800 Subject: [PATCH 11/27] fix: ensure capacity and move to next group --- source/libs/executor/src/executorimpl.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 89e0dd363c..3cbeb55f8a 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1197,16 +1197,11 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS } if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) { - // expand the result datablock capacity - if (pRow->numOfRows > pBlock->info.capacity) { - blockDataEnsureCapacity(pBlock, pRow->numOfRows); - qDebug("datablock capacity not sufficient, expand to requried:%d, current capacity:%d, %s", pRow->numOfRows, - pBlock->info.capacity, GET_TASKID(pTaskInfo)); + blockDataEnsureCapacity(pBlock, pBlock->info.rows + pRow->numOfRows); + qDebug("datablock capacity not sufficient, expand to required:%d, current capacity:%d, %s", + (pRow->numOfRows+pBlock->info.rows), + pBlock->info.capacity, GET_TASKID(pTaskInfo)); // todo set the pOperator->resultInfo size - } else { - releaseBufPage(pBuf, page); - break; - } } pGroupResInfo->index += 1; From 586ca254becd02088b737258729a31877411273a Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 28 Feb 2023 13:02:29 +0800 Subject: [PATCH 12/27] fix: invalid write memory when query policy is 4 --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e6384a87e9..c70a50867b 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -945,7 +945,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3 From cc1bc9304a087f97dfba14ab5b223c5a6f2fa556 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 28 Feb 2023 13:23:59 +0800 Subject: [PATCH 13/27] fix: trace id two character missing issue --- include/util/ttrace.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/util/ttrace.h b/include/util/ttrace.h index 5cdb1eecaa..e672882f57 100644 --- a/include/util/ttrace.h +++ b/include/util/ttrace.h @@ -59,11 +59,13 @@ typedef struct STraceId { char* _t = _buf; \ _t[0] = '0'; \ _t[1] = 'x'; \ - _t += titoa(rootId, 16, &_t[2]); \ + _t += 2; \ + _t += titoa(rootId, 16, &_t[0]); \ _t[0] = ':'; \ _t[1] = '0'; \ _t[2] = 'x'; \ - _t += titoa(msgId, 16, &_t[3]); \ + _t += 3; \ + _t += titoa(msgId, 16, &_t[0]); \ } while (0) #ifdef __cplusplus From 
2310dfca22e7e7c76f1b399668653840369efdd1 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Tue, 28 Feb 2023 15:02:57 +0800 Subject: [PATCH 14/27] test: del data from async to sync --- tests/system-test/7-tmq/tmqDelete-1ctb.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py index 4b45b1a834..4c62bb757b 100644 --- a/tests/system-test/7-tmq/tmqDelete-1ctb.py +++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py @@ -80,16 +80,16 @@ class TDTestCase: tdLog.debug("del data ............ [OK]") return - def threadFunctionForDeletaData(self, **paraDict): + def threadFunctionForDeletaData(self, paraDict): # create new connector for new tdSql instance in my thread newTdSql = tdCom.newTdSql() self.delData(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["startTs"],paraDict["endTs"],paraDict["ctbStartIdx"]) return - def asyncDeleteData(self, paraDict): - pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict) - pThread.start() - return pThread + # def asyncDeleteData(self, paraDict): + # pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict) + # pThread.start() + # return pThread def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") @@ -340,7 +340,8 @@ class TDTestCase: # del some data rowsOfDelete = int(self.rowsPerTbl / 4 ) paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1 - pDeleteThread = self.asyncDeleteData(paraDict) + # pDeleteThread = self.asyncDeleteData(paraDict) + self.threadFunctionForDeletaData(paraDict) tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) From e7a19755c66118ae120e1b0e1da03ca298a15028 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 28 Feb 2023 16:02:49 +0800 Subject: [PATCH 15/27] fix: a plan error of set operator subquery --- source/libs/parser/src/parCalcConst.c | 57 ++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index d4f4949df0..5afd61a054 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -320,6 +320,57 @@ static int32_t calcConstInsert(SCalcConstContext* pCxt, SInsertStmt* pInsert) { return code; } +static SNodeList* getChildProjection(SNode* pStmt) { + switch (nodeType(pStmt)) { + case QUERY_NODE_SELECT_STMT: + return ((SSelectStmt*)pStmt)->pProjectionList; + case QUERY_NODE_SET_OPERATOR: + return ((SSetOperator*)pStmt)->pProjectionList; + default: + break; + } + return NULL; +} + +static void eraseSetOpChildProjection(SSetOperator* pSetOp, int32_t index) { + SNodeList* pLeftProjs = getChildProjection(pSetOp->pLeft); + nodesListErase(pLeftProjs, nodesListGetCell(pLeftProjs, index)); + SNodeList* pRightProjs = getChildProjection(pSetOp->pRight); + nodesListErase(pRightProjs, nodesListGetCell(pRightProjs, index)); +} + +static int32_t calcConstSetOpProjections(SCalcConstContext* pCxt, SSetOperator* pSetOp, bool subquery) { + int32_t index = 0; + SNode* pProj = NULL; + WHERE_EACH(pProj, pSetOp->pProjectionList) { + if (subquery && isUselessCol((SExprNode*)pProj)) { + ERASE_NODE(pSetOp->pProjectionList); + eraseSetOpChildProjection(pSetOp, index); + continue; + } + ++index; + WHERE_NEXT; + } + if (0 == 
LIST_LENGTH(pSetOp->pProjectionList)) { + return nodesListStrictAppend(pSetOp->pProjectionList, createConstantValue()); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t calcConstSetOperator(SCalcConstContext* pCxt, SSetOperator* pSetOp, bool subquery) { + int32_t code = calcConstSetOpProjections(pCxt, pSetOp, subquery); + if (TSDB_CODE_SUCCESS == code) { + code = calcConstQuery(pCxt, pSetOp->pLeft, false); + } + if (TSDB_CODE_SUCCESS == code) { + code = calcConstQuery(pCxt, pSetOp->pRight, false); + } + if (TSDB_CODE_SUCCESS == code) { + code = calcConstList(pSetOp->pOrderByList); + } + return code; +} + static int32_t calcConstQuery(SCalcConstContext* pCxt, SNode* pStmt, bool subquery) { int32_t code = TSDB_CODE_SUCCESS; switch (nodeType(pStmt)) { @@ -330,11 +381,7 @@ static int32_t calcConstQuery(SCalcConstContext* pCxt, SNode* pStmt, bool subque code = calcConstQuery(pCxt, ((SExplainStmt*)pStmt)->pQuery, subquery); break; case QUERY_NODE_SET_OPERATOR: { - SSetOperator* pSetOp = (SSetOperator*)pStmt; - code = calcConstQuery(pCxt, pSetOp->pLeft, false); - if (TSDB_CODE_SUCCESS == code) { - code = calcConstQuery(pCxt, pSetOp->pRight, false); - } + code = calcConstSetOperator(pCxt, (SSetOperator*)pStmt, subquery); break; } case QUERY_NODE_DELETE_STMT: From 5ad0d18c2e193e5720254a8b7d2e8984d21a4752 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 28 Feb 2023 16:05:41 +0800 Subject: [PATCH 16/27] fix(query): make exchange operator inherit input order from upstream --- source/libs/executor/src/executorimpl.c | 11 +++++++---- tests/parallel_test/cases.task | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 89e0dd363c..0b8bd2a817 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1452,13 +1452,16 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) { // todo add more information about exchange operation int32_t type = pOperator->operatorType; - if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || - type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN || - type == QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN || - type == QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN) { + if (type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || + type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN || + type == QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN) { *order = TSDB_ORDER_ASC; *scanFlag = MAIN_SCAN; return TSDB_CODE_SUCCESS; + } else if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) { + // for exchange operator inherit order from upstream and do not overwrite here + *scanFlag = MAIN_SCAN; + return TSDB_CODE_SUCCESS; } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) { STableScanInfo* pTableScanInfo = pOperator->info; *order = pTableScanInfo->base.cond.order; diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c70a50867b..e6384a87e9 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -945,7 +945,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3 ,,y,system-test,./pytest.sh python3 
./test.py -f 2-query/count_partition.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3 From ecf166bd0fad0d8df6d713bc13a91497130620aa Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 28 Feb 2023 17:04:48 +0800 Subject: [PATCH 17/27] test: remove cross-threaded operations that use Python connections in test cases --- .../6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py index 66c2fdd14f..04c69ad618 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py @@ -143,7 +143,8 @@ class TDTestCase: stableName= '%s_%d'%(paraDict['stbName'],i) newTdSql=tdCom.newTdSql() threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]))) - threads.append(threading.Thread(target=self.reCreateUser,args=(newTdSql,i,"user","passwd"))) + createTdSql=tdCom.newTdSql() + threads.append(threading.Thread(target=self.reCreateUser,args=(createTdSql,i,"user","passwd"))) for tr in threads: tr.start() From ca624678b139a3d657122d3921b8f4d81a1834d4 Mon Sep 17 00:00:00 2001 From: Xiaoyu Wang Date: Tue, 28 Feb 2023 17:28:23 +0800 Subject: [PATCH 18/27] fix: a plan error of set operator subquery --- source/libs/parser/src/parCalcConst.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 5afd61a054..b2fc88add1 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -339,11 +339,33 @@ static void eraseSetOpChildProjection(SSetOperator* pSetOp, int32_t index) { nodesListErase(pRightProjs, nodesListGetCell(pRightProjs, index)); } +typedef struct SNotRefByOrderByCxt { + SColumnNode* pCol; + bool hasThisCol; +} SNotRefByOrderByCxt; + +static EDealRes notRefByOrderByImpl(SNode* pNode, void* pContext) { + if (QUERY_NODE_COLUMN == nodeType(pNode)) { + SNotRefByOrderByCxt* pCxt = (SNotRefByOrderByCxt*)pContext; + if (nodesEqualNode((SNode*)pCxt->pCol, pNode)) { + pCxt->hasThisCol = true; + return DEAL_RES_END; + } + } + return DEAL_RES_CONTINUE; +} + +static bool notRefByOrderBy(SColumnNode* pCol, SNodeList* pOrderByList) { + SNotRefByOrderByCxt cxt = {.pCol = pCol, .hasThisCol = false}; + nodesWalkExprs(pOrderByList, notRefByOrderByImpl, &cxt); + return !cxt.hasThisCol; +} + static int32_t calcConstSetOpProjections(SCalcConstContext* pCxt, SSetOperator* pSetOp, bool subquery) { int32_t index = 0; SNode* pProj = NULL; WHERE_EACH(pProj, pSetOp->pProjectionList) { - if (subquery && isUselessCol((SExprNode*)pProj)) { + if (subquery && notRefByOrderBy((SColumnNode*)pProj, pSetOp->pOrderByList) && isUselessCol((SExprNode*)pProj)) { ERASE_NODE(pSetOp->pProjectionList); eraseSetOpChildProjection(pSetOp, 
index); continue; From 3249b838905acab2e3cafd70de8e9152ef5704f1 Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Tue, 28 Feb 2023 17:38:35 +0800 Subject: [PATCH 19/27] fix: destroy node with wrong type coversion --- source/libs/nodes/src/nodesUtilFuncs.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 024130b5f8..22afc7ef55 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -963,8 +963,6 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_SHOW_USERS_STMT: case QUERY_NODE_SHOW_LICENCES_STMT: case QUERY_NODE_SHOW_VGROUPS_STMT: - case QUERY_NODE_SHOW_DB_ALIVE_STMT: - case QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT: case QUERY_NODE_SHOW_TOPICS_STMT: case QUERY_NODE_SHOW_CONSUMERS_STMT: case QUERY_NODE_SHOW_CONNECTIONS_STMT: From abbc69a2ed29a465ce464dfba04603c6b60aa1f1 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Mon, 27 Feb 2023 18:59:38 +0800 Subject: [PATCH 20/27] fix:asan error --- source/dnode/mnode/impl/src/mndStream.c | 2 +- tests/script/tsim/stream/basic1.sim | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 2a05511134..b7f80f6b0e 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1223,7 +1223,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock // node id pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - int32_t nodeId = TMAX(pTask->nodeId, 0); + int64_t nodeId = TMAX(pTask->nodeId, 0); colDataSetVal(pColInfo, numOfRows, (const char *)&nodeId, false); // level diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim index c61c7667f8..a8f04aac64 100644 --- a/tests/script/tsim/stream/basic1.sim +++ b/tests/script/tsim/stream/basic1.sim @@ -887,4 +887,20 @@ if $rows != 1 then goto loop19 endi +print select * from information_schema.ins_stream_tasks; +sql select * from information_schema.ins_stream_tasks; + +if $rows == 0 then + print =====rows=$rows + return -1 +endi + +print select * from information_schema.ins_streams; +sql select * from information_schema.ins_streams; + +if $rows == 0 then + print =====rows=$rows + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT From fe4123e814db9f76460df070d84186361b937a6a Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 28 Feb 2023 14:19:21 +0800 Subject: [PATCH 21/27] feat:window close && ignore expired --- include/common/tmsg.h | 2 +- source/libs/parser/src/parAstCreater.c | 2 +- source/libs/parser/test/parInitialCTest.cpp | 10 ++----- tests/script/tsim/stream/basic1.sim | 16 +++++----- tests/script/tsim/stream/deleteInterval.sim | 6 ++-- tests/script/tsim/stream/deleteSession.sim | 10 +++---- tests/script/tsim/stream/deleteState.sim | 6 ++-- .../stream/distributeIntervalRetrive0.sim | 4 +-- .../script/tsim/stream/distributeSession0.sim | 2 +- .../script/tsim/stream/fillHistoryBasic1.sim | 4 +-- .../script/tsim/stream/fillHistoryBasic2.sim | 6 ++-- .../script/tsim/stream/fillHistoryBasic3.sim | 2 +- .../tsim/stream/fillIntervalDelete0.sim | 20 ++++++------- .../tsim/stream/fillIntervalDelete1.sim | 30 +++++++++---------- .../script/tsim/stream/fillIntervalLinear.sim | 6 ++-- .../tsim/stream/fillIntervalPartitionBy.sim | 10 +++---- .../tsim/stream/fillIntervalPrevNext.sim | 8 ++--- 
.../tsim/stream/fillIntervalPrevNext1.sim | 4 +-- .../script/tsim/stream/fillIntervalRange.sim | 6 ++-- .../script/tsim/stream/fillIntervalValue.sim | 12 ++++---- .../script/tsim/stream/ignoreCheckUpdate.sim | 4 +-- tests/script/tsim/stream/partitionby.sim | 6 ++-- tests/script/tsim/stream/partitionby1.sim | 6 ++-- .../tsim/stream/partitionbyColumnInterval.sim | 10 +++---- .../tsim/stream/partitionbyColumnSession.sim | 8 ++--- .../tsim/stream/partitionbyColumnState.sim | 4 +-- tests/script/tsim/stream/schedSnode.sim | 2 +- tests/script/tsim/stream/session0.sim | 18 +++++------ tests/script/tsim/stream/session1.sim | 4 +-- tests/script/tsim/stream/sliding.sim | 20 ++++++------- tests/script/tsim/stream/state0.sim | 20 ++++++------- tests/script/tsim/stream/triggerInterval0.sim | 2 +- tests/script/tsim/stream/triggerSession0.sim | 2 +- tests/script/tsim/stream/udTableAndTag0.sim | 12 ++++---- tests/script/tsim/stream/udTableAndTag1.sim | 10 +++---- tests/script/tsim/stream/udTableAndTag2.sim | 14 ++++----- .../system-test/1-insert/database_pre_suf.py | 2 +- tests/system-test/1-insert/drop.py | 8 ++--- utils/test/c/tmq_taosx_ci.c | 2 +- 39 files changed, 158 insertions(+), 162 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index cd63f7d278..d6a301c38b 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1812,7 +1812,7 @@ typedef struct { #define STREAM_TRIGGER_AT_ONCE 1 #define STREAM_TRIGGER_WINDOW_CLOSE 2 #define STREAM_TRIGGER_MAX_DELAY 3 -#define STREAM_DEFAULT_IGNORE_EXPIRED 0 +#define STREAM_DEFAULT_IGNORE_EXPIRED 1 #define STREAM_FILL_HISTORY_ON 1 #define STREAM_FILL_HISTORY_OFF 0 #define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index f613b1bd3d..634a239399 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1810,7 +1810,7 @@ SNode* createStreamOptions(SAstCreateContext* pCxt) { CHECK_PARSER_STATUS(pCxt); SStreamOptions* pOptions = (SStreamOptions*)nodesMakeNode(QUERY_NODE_STREAM_OPTIONS); CHECK_OUT_OF_MEM(pOptions); - pOptions->triggerType = STREAM_TRIGGER_AT_ONCE; + pOptions->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; pOptions->fillHistory = STREAM_DEFAULT_FILL_HISTORY; pOptions->ignoreExpired = STREAM_DEFAULT_IGNORE_EXPIRED; pOptions->ignoreUpdate = STREAM_DEFAULT_IGNORE_UPDATE; diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 8ba5802ad6..2dac35590e 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -781,16 +781,10 @@ TEST_F(ParserInitialCTest, createStream) { snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb); expect.igExists = igExists; expect.sql = taosStrdup(pSql); - expect.createStb = STREAM_CREATE_STABLE_TRUE; - expect.triggerType = STREAM_TRIGGER_AT_ONCE; - expect.maxDelay = 0; - expect.watermark = 0; - expect.fillHistory = STREAM_DEFAULT_FILL_HISTORY; - expect.igExpired = STREAM_DEFAULT_IGNORE_EXPIRED; }; auto setStreamOptions = - [&](int8_t createStb = STREAM_CREATE_STABLE_TRUE, int8_t triggerType = STREAM_TRIGGER_AT_ONCE, + [&](int8_t createStb = STREAM_CREATE_STABLE_TRUE, int8_t triggerType = STREAM_TRIGGER_WINDOW_CLOSE, int64_t maxDelay = 0, int64_t watermark = 0, int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED, int8_t fillHistory = STREAM_DEFAULT_FILL_HISTORY, int8_t igUpdate = STREAM_DEFAULT_IGNORE_UPDATE) { 
expect.createStb = createStb; @@ -852,6 +846,7 @@ TEST_F(ParserInitialCTest, createStream) { }); setCreateStreamReq("s1", "test", "create stream s1 into st3 as select count(*) from t1 interval(10s)", "st3"); + setStreamOptions(); run("CREATE STREAM s1 INTO st3 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); @@ -872,6 +867,7 @@ TEST_F(ParserInitialCTest, createStream) { "st3"); addTag("tname", TSDB_DATA_TYPE_VARCHAR, 10 + VARSTR_HEADER_SIZE); addTag("id", TSDB_DATA_TYPE_INT); + setStreamOptions(); run("CREATE STREAM s1 INTO st3 TAGS(tname VARCHAR(10), id INT) SUBTABLE(CONCAT('new-', tname)) " "AS SELECT _WSTART wstart, COUNT(*) cnt FROM st1 PARTITION BY TBNAME tname, tag1 id INTERVAL(10S)"); clearCreateStreamReq(); diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim index a8f04aac64..e69875d69f 100644 --- a/tests/script/tsim/stream/basic1.sim +++ b/tests/script/tsim/stream/basic1.sim @@ -17,7 +17,7 @@ sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams1 trigger at_once into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); sql insert into t1 values(1648791213000,1,2,3,1.0); sql insert into t1 values(1648791223001,2,2,3,1.1); sql insert into t1 values(1648791233002,3,2,3,2.1); @@ -545,8 +545,8 @@ sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(2,2,2); sql create table t4 using st tags(2,2,2); sql create table t5 using st tags(2,2,2); -sql create stream streams2 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3,max(b) c4 from st partition by tbname interval(10s); -sql create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(a) c3,max(b) c4, now c5 from st partition by tbname interval(10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, sum(a) c3,max(b) c4 from st partition by tbname interval(10s); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart, count(*) c1, sum(a) c3,max(b) c4, now c5 from st partition by tbname interval(10s); sql insert into t1 values(1648791213000,1,1,1,1.0) t2 values(1648791213000,2,2,2,2.0) t3 values(1648791213000,3,3,3,3.0) t4 values(1648791213000,4,4,4,4.0); @@ -667,7 +667,7 @@ sql create database test3 vgroups 1; sql use test3; sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int); sql create table ts1 using st tags(1,1,1); -sql create stream stream_t3 trigger at_once into streamtST3 as select ts, min(a) c6, a, b, c, ta, tb, tc from ts1 interval(10s) ; +sql create stream stream_t3 trigger at_once IGNORE EXPIRED 0 into streamtST3 as select ts, min(a) c6, a, b, c, ta, tb, tc from ts1 interval(10s) ; sql insert into ts1 values(1648791211000,1,2,3); sleep 50 @@ -701,7 +701,7 @@ endi sql create database test4 vgroups 1; sql use test4; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from t1 where a > 5 interval(10s); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart, count(*) c1 from t1 where a > 5 interval(10s); sql insert into t1 values(1648791213000,1,2,3,1.0); 
$loop_count = 0
@@ -797,8 +797,8 @@ sql create database test5 vgroups 1;
sql use test5;
sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,1,1);
-sql create stream streams5 trigger at_once into streamt5 as select count(*), _wstart, _wend, max(a) from ts1 interval(10s) ;
-sql create stream streams6 trigger at_once into streamt6 as select count(*), _wstart, _wend, max(a), _wstart as ts from ts1 interval(10s) ;
+sql create stream streams5 trigger at_once IGNORE EXPIRED 0 into streamt5 as select count(*), _wstart, _wend, max(a) from ts1 interval(10s) ;
+sql create stream streams6 trigger at_once IGNORE EXPIRED 0 into streamt6 as select count(*), _wstart, _wend, max(a), _wstart as ts from ts1 interval(10s) ;
sql_error create stream streams7 trigger at_once into streamt7 as select _wstart, count(*), _wstart, _wend, max(a) from ts1 interval(10s) ;
sql_error create stream streams8 trigger at_once into streamt8 as select count(*), _wstart, _wstart, _wend, max(a) from ts1 interval(10s) ;
@@ -840,7 +840,7 @@ sql create database test7 vgroups 1;
sql use test7;
sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,1,1);
-sql create stream streams7 trigger at_once into streamt7 as select _wstart, count(*) from ts1 interval(10s) ;
+sql create stream streams7 trigger at_once IGNORE EXPIRED 0 into streamt7 as select _wstart, count(*) from ts1 interval(10s) ;
sql insert into ts1 values(1648791211000,1,2,3);
sql_error insert into ts1 values(-1648791211000,1,2,3);
diff --git a/tests/script/tsim/stream/deleteInterval.sim b/tests/script/tsim/stream/deleteInterval.sim
index 7532b2d5de..9540e448d4 100644
--- a/tests/script/tsim/stream/deleteInterval.sim
+++ b/tests/script/tsim/stream/deleteInterval.sim
@@ -16,7 +16,7 @@ sql drop database if exists test;
sql create database test vgroups 1;
sql use test;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from t1 interval(10s);
+sql create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(a) c3 from t1 interval(10s);
sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
sleep 200
@@ -193,7 +193,7 @@ sql use test2;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st interval(10s);
+sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st interval(10s);
sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
@@ -419,7 +419,7 @@ sql use test3;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-sql create stream streams3 trigger at_once into test.streamt3 as select _wstart c1, count(*) c2, max(a) c3 from st interval(10s);
+sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into test.streamt3 as select _wstart c1, count(*) c2, max(a) c3 from st interval(10s);
sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
diff --git a/tests/script/tsim/stream/deleteSession.sim b/tests/script/tsim/stream/deleteSession.sim
index c3c64a5977..3645939178 100644
--- a/tests/script/tsim/stream/deleteSession.sim
+++ b/tests/script/tsim/stream/deleteSession.sim
@@ -16,7 +16,7 @@ sql drop database if exists test;
sql create database test vgroups 1;
sql use test;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from t1 session(ts, 5s);
+sql create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(a) c3 from t1 session(ts, 5s);
sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
sleep 200
@@ -191,7 +191,7 @@ sql use test2;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st session(ts,5s);
+sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st session(ts,5s);
sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
@@ -422,7 +422,7 @@ sql use test3;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-sql create stream streams3 trigger at_once into test.streamt3 as select _wstart c1, count(*) c2, max(a) c3 from st session(ts,5s);
+sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into test.streamt3 as select _wstart c1, count(*) c2, max(a) c3 from st session(ts,5s);
sql insert into t1 values(1648791210000,1,1,1,NULL);
sql insert into t1 values(1648791210001,2,2,2,NULL);
@@ -532,8 +532,8 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-print create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s);
-sql create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s);
+print create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s);
+sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s);
sql insert into t1 values(1648791210000,1,2,3);
sql insert into t1 values(1648791220000,2,2,3);
diff --git a/tests/script/tsim/stream/deleteState.sim b/tests/script/tsim/stream/deleteState.sim
index 45d9bc3e39..dd74b73dce 100644
--- a/tests/script/tsim/stream/deleteState.sim
+++ b/tests/script/tsim/stream/deleteState.sim
@@ -16,7 +16,7 @@ sql drop database if exists test;
sql create database test vgroups 1;
sql use test;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 state_window(a);
+sql create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 state_window(a);
sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
sleep 200
@@ -197,8 +197,8 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-print create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname state_window(c);
-sql create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname state_window(c);
+print create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart, count(*) c1 from st partition by tbname state_window(c);
+sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart, count(*) c1 from st partition by tbname state_window(c);
sql insert into t1 values(1648791210000,1,2,1);
sql insert into t1 values(1648791220000,2,2,2);
diff --git a/tests/script/tsim/stream/distributeIntervalRetrive0.sim b/tests/script/tsim/stream/distributeIntervalRetrive0.sim
index ae2f9afdb5..529a2a1b30 100644
--- a/tests/script/tsim/stream/distributeIntervalRetrive0.sim
+++ b/tests/script/tsim/stream/distributeIntervalRetrive0.sim
@@ -44,7 +44,7 @@ sql create table ts1 using st tags(1,1,1);
sql create table ts2 using st tags(2,2,2);
sql create table ts3 using st tags(3,2,2);
sql create table ts4 using st tags(4,2,2);
-sql create stream stream_t1 trigger at_once into streamtST1 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s);
+sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 into streamtST1 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s);
sleep 1000
@@ -243,7 +243,7 @@ sql use test1;
sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-sql create stream streams1 trigger at_once into streamt1 as select _wstart as c0, count(*) c1, count(a) c2 from st interval(10s) ;
+sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart as c0, count(*) c1, count(a) c2 from st interval(10s) ;
sql insert into t1 values(1648791211000,1,2,3);
diff --git a/tests/script/tsim/stream/distributeSession0.sim b/tests/script/tsim/stream/distributeSession0.sim
index d752f5c29c..25ac479a38 100644
--- a/tests/script/tsim/stream/distributeSession0.sim
+++ b/tests/script/tsim/stream/distributeSession0.sim
@@ -39,7 +39,7 @@ sql use test;
sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,1,1);
sql create table ts2 using st tags(2,2,2);
-sql create stream stream_t1 trigger at_once into streamtST as select _wstart, count(*) c1, sum(a) c2 , max(b) c3 from st session(ts, 10s) ;
+sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 into streamtST as select _wstart, count(*) c1, sum(a) c2 , max(b) c3 from st session(ts, 10s) ;
sleep 1000
diff --git a/tests/script/tsim/stream/fillHistoryBasic1.sim b/tests/script/tsim/stream/fillHistoryBasic1.sim
index 772a09c017..e7a8da90e2 100644
--- a/tests/script/tsim/stream/fillHistoryBasic1.sim
+++ b/tests/script/tsim/stream/fillHistoryBasic1.sim
@@ -17,7 +17,7 @@ sql use test;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream stream1 trigger at_once fill_history 1 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s);
+sql create stream stream1
trigger at_once fill_history 1 IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); sql insert into t1 values(1648791213000,1,2,3,1.0); sql insert into t1 values(1648791223001,2,2,3,1.1); @@ -479,7 +479,7 @@ sql insert into t1 values(1648791233002,3,2,3,2.1); sql insert into t1 values(1648791243003,4,2,3,3.1); sql insert into t1 values(1648791213004,4,2,3,4.1); -sql create stream stream2 trigger at_once fill_history 1 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); +sql create stream stream2 trigger at_once fill_history 1 IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); sleep 5000 sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt; diff --git a/tests/script/tsim/stream/fillHistoryBasic2.sim b/tests/script/tsim/stream/fillHistoryBasic2.sim index 3af198259d..2f6c3ea92d 100644 --- a/tests/script/tsim/stream/fillHistoryBasic2.sim +++ b/tests/script/tsim/stream/fillHistoryBasic2.sim @@ -79,7 +79,7 @@ sql insert into ts3 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) sql insert into ts4 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) (1648791233004,13,12,13,2.1) ; sql insert into ts3 values(1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ; -sql create stream stream_t1 trigger at_once fill_history 1 watermark 1d into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s); +sql create stream stream_t1 trigger at_once fill_history 1 watermark 1d IGNORE EXPIRED 0 into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s); sleep 1000 @@ -211,7 +211,7 @@ sql insert into ts1 values(1648791222001,2,2,3); sql insert into ts2 values(1648791211000,1,2,3); sql insert into ts2 values(1648791222001,2,2,3); -sql create stream stream_t2 trigger at_once fill_history 1 watermark 20s into streamtST1 as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ; +sql create stream stream_t2 trigger at_once fill_history 1 watermark 20s IGNORE EXPIRED 0 into streamtST1 as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ; $loop_count = 0 loop2: @@ -241,7 +241,7 @@ sql use test3; sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int); sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); -sql create stream stream_t3 trigger at_once into streamtST3 as select ts, min(a) c6, a, b, c, ta, tb, tc from st interval(10s) ; +sql create stream stream_t3 trigger at_once IGNORE EXPIRED 0 into streamtST3 as select ts, min(a) c6, a, b, c, ta, tb, tc from st interval(10s) ; sql insert into ts1 values(1648791211000,1,2,3); sleep 50 diff --git a/tests/script/tsim/stream/fillHistoryBasic3.sim b/tests/script/tsim/stream/fillHistoryBasic3.sim index db8d6bc2d0..44d7ee9d9e 100644 --- a/tests/script/tsim/stream/fillHistoryBasic3.sim +++ b/tests/script/tsim/stream/fillHistoryBasic3.sim @@ -17,7 +17,7 @@ sql create table t2 using st tags(2,2,2); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL); -sql create stream streams2 trigger at_once 
fill_history 1 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); +sql create stream streams2 trigger at_once fill_history 1 IGNORE EXPIRED 0 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); sleep 3000 diff --git a/tests/script/tsim/stream/fillIntervalDelete0.sim b/tests/script/tsim/stream/fillIntervalDelete0.sim index 1c0647d57b..41b018a862 100644 --- a/tests/script/tsim/stream/fillIntervalDelete0.sim +++ b/tests/script/tsim/stream/fillIntervalDelete0.sim @@ -16,11 +16,11 @@ sql drop database if exists test1; sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); -sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); -sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); -sql create stream streams4 trigger at_once into streamt4 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); -sql create stream streams5 trigger at_once into streamt5 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); +sql create stream streams5 trigger at_once IGNORE EXPIRED 0 into streamt5 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); sql insert into t1 values(1648791213000,1,1,1,1.0,'aaa'); sleep 200 @@ -256,11 +256,11 @@ sql use test6; sql create stable st(ts timestamp, a int, b int , c int, d double, s varchar(20)) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(1,1,1); -sql create stream streams6 trigger at_once into streamt6 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); -sql create stream streams7 trigger at_once into streamt7 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); -sql create stream streams8 trigger at_once into streamt8 as select _wstart as ts, max(a), 
sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); -sql create stream streams9 trigger at_once into streamt9 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); -sql create stream streams10 trigger at_once into streamt10 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); +sql create stream streams6 trigger at_once IGNORE EXPIRED 0 into streamt6 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); +sql create stream streams7 trigger at_once IGNORE EXPIRED 0 into streamt7 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); +sql create stream streams8 trigger at_once IGNORE EXPIRED 0 into streamt8 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); +sql create stream streams9 trigger at_once IGNORE EXPIRED 0 into streamt9 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); +sql create stream streams10 trigger at_once IGNORE EXPIRED 0 into streamt10 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); sql insert into t1 values(1648791210000,1,1,1,1.0,'aaa'); sql insert into t1 values(1648791217000,1,1,1,1.0,'aaa'); diff --git a/tests/script/tsim/stream/fillIntervalDelete1.sim b/tests/script/tsim/stream/fillIntervalDelete1.sim index 0206b88fdc..108f5f862d 100644 --- a/tests/script/tsim/stream/fillIntervalDelete1.sim +++ b/tests/script/tsim/stream/fillIntervalDelete1.sim @@ -18,11 +18,11 @@ sql drop database if exists test1; sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); -sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); -sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); -sql create stream streams4 trigger at_once into streamt4 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); -sql create stream streams5 trigger at_once into streamt5 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 
into streamt3 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); +sql create stream streams5 trigger at_once IGNORE EXPIRED 0 into streamt5 as select _wstart as ts, max(a), sum(b), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); sql insert into t1 values(1648791210000,0,0,0,0.0,'aaa'); sql insert into t1 values(1648791213000,1,1,1,1.0,'bbb'); @@ -221,11 +221,11 @@ sql use test6; sql create stable st(ts timestamp, a int, b int , c int, d double, s varchar(20)) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(1,1,1); -sql create stream streams6 trigger at_once into streamt6 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); -sql create stream streams7 trigger at_once into streamt7 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); -sql create stream streams8 trigger at_once into streamt8 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); -sql create stream streams9 trigger at_once into streamt9 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); -sql create stream streams10 trigger at_once into streamt10 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); +sql create stream streams6 trigger at_once IGNORE EXPIRED 0 into streamt6 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(NULL); +sql create stream streams7 trigger at_once IGNORE EXPIRED 0 into streamt7 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value,100,200,300); +sql create stream streams8 trigger at_once IGNORE EXPIRED 0 into streamt8 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); +sql create stream streams9 trigger at_once IGNORE EXPIRED 0 into streamt9 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); +sql create stream streams10 trigger at_once IGNORE EXPIRED 0 into streamt10 as select _wstart as ts, max(a), sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); sql insert into t1 values(1648791210000,1,1,1,1.0,'aaa'); sql insert into t1 values(1648791215000,6,8,8,8.0,'bbb'); @@ -353,11 +353,11 @@ sql drop database if exists test7; sql create database test7 vgroups 1; sql use test7; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams11 trigger at_once into streamt11 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(NULL); -sql create stream streams12 trigger at_once into streamt12 as select _wstart as ts, avg(a), count(*), 
timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(value,100.0,200); -sql create stream streams13 trigger at_once into streamt13 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(next); -sql create stream streams14 trigger at_once into streamt14 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(prev); -sql create stream streams15 trigger at_once into streamt15 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(linear); +sql create stream streams11 trigger at_once IGNORE EXPIRED 0 into streamt11 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(NULL); +sql create stream streams12 trigger at_once IGNORE EXPIRED 0 into streamt12 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(value,100.0,200); +sql create stream streams13 trigger at_once IGNORE EXPIRED 0 into streamt13 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(next); +sql create stream streams14 trigger at_once IGNORE EXPIRED 0 into streamt14 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(prev); +sql create stream streams15 trigger at_once IGNORE EXPIRED 0 into streamt15 as select _wstart as ts, avg(a), count(*), timezone(), to_iso8601(1) from t1 where ts >= 1648791210000 and ts < 1648791240000 interval(1s) fill(linear); sql insert into t1 values(1648791210000,1,1,1,1.0,'aaa'); diff --git a/tests/script/tsim/stream/fillIntervalLinear.sim b/tests/script/tsim/stream/fillIntervalLinear.sim index b9f301d81b..3fa369d8d5 100644 --- a/tests/script/tsim/stream/fillIntervalLinear.sim +++ b/tests/script/tsim/stream/fillIntervalLinear.sim @@ -16,7 +16,7 @@ sql drop database if exists test1; sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a)+sum(c), avg(b), first(s), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart as ts, max(a)+sum(c), avg(b), first(s), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791216000,5,5,5,5.0,'bbb'); sql insert into t1 values(1648791210000,1,1,1,1.0,'ccc') (1648791219000,2,2,2,2.0,'ddd') (1648791222000,3,3,3,3.0,'eee'); @@ -205,7 +205,7 @@ sql drop database if exists test2; sql create database test2 vgroups 1; sql use test2; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a)+sum(c), avg(b), first(s), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart as ts, 
max(a)+sum(c), avg(b), first(s), count(*) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(linear); sql insert into t1 values(1648791210000,1,1,1,1.0,'ccc') (1648791219000,2,2,2,2.0,'ddd') (1648791222000,3,3,3,3.0,'eee'); sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791216000,5,5,5,5.0,'bbb'); @@ -393,7 +393,7 @@ sql drop database if exists test3; sql create database test3 vgroups 1; sql use test3; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a), b+c, s, b+1, 1 from t1 where ts >= 1648791150000 and ts < 1648791261000 interval(1s) fill(linear); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart as ts, max(a), b+c, s, b+1, 1 from t1 where ts >= 1648791150000 and ts < 1648791261000 interval(1s) fill(linear); sql insert into t1 values(1648791215000,1,1,1,1.0,'aaa'); sql insert into t1 values(1648791217000,2,2,2,2.0,'bbb'); sql insert into t1 values(1648791211000,3,3,3,3.0,'ccc'); diff --git a/tests/script/tsim/stream/fillIntervalPartitionBy.sim b/tests/script/tsim/stream/fillIntervalPartitionBy.sim index 168452fdc8..6a11b9952c 100644 --- a/tests/script/tsim/stream/fillIntervalPartitionBy.sim +++ b/tests/script/tsim/stream/fillIntervalPartitionBy.sim @@ -18,11 +18,11 @@ sql use test1; sql create stable st(ts timestamp, a int, b int , c int, d double, s varchar(20)) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(NULL); -sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(value,100,200,300); -sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(next); -sql create stream streams4 trigger at_once into streamt4 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(prev); -sql create stream streams5 trigger at_once into streamt5 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(linear); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(NULL); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(value,100,200,300); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(next); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta 
interval(1s) fill(prev); +sql create stream streams5 trigger at_once IGNORE EXPIRED 0 into streamt5 as select _wstart as ts, max(a) c1, sum(b), count(*) from st where ts >= 1648791210000 and ts < 1648791261000 partition by ta interval(1s) fill(linear); sql insert into t1 values(1648791210000,0,0,0,0.0,'aaa'); sql insert into t1 values(1648791213000,1,1,1,1.0,'bbb'); diff --git a/tests/script/tsim/stream/fillIntervalPrevNext.sim b/tests/script/tsim/stream/fillIntervalPrevNext.sim index 99f7dcb5cb..ec963e1d4a 100644 --- a/tests/script/tsim/stream/fillIntervalPrevNext.sim +++ b/tests/script/tsim/stream/fillIntervalPrevNext.sim @@ -15,8 +15,8 @@ sql drop database if exists test1; sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams1 trigger at_once into streamt1 as select _wstart as ts, count(*) c1, max(b)+sum(a) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); -sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, count(*) c1, max(a)+min(c), avg(b) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart as ts, count(*) c1, max(b)+sum(a) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart as ts, count(*) c1, max(a)+min(c), avg(b) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791215000,5,5,5,5.0,'aaa'); sql insert into t1 values(1648791211000,1,1,1,1.0,'aaa') (1648791217000,2,2,2,2.0,'aaa') (1648791220000,3,3,3,3.0,'aaa'); @@ -263,8 +263,8 @@ sql drop database if exists test5; sql create database test5 vgroups 1; sql use test5; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams5 trigger at_once into streamt5 as select _wstart as ts, count(*) c1, max(b)+sum(a) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); -sql create stream streams6 trigger at_once into streamt6 as select _wstart as ts, count(*) c1, max(a)+min(c), avg(b) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); +sql create stream streams5 trigger at_once IGNORE EXPIRED 0 into streamt5 as select _wstart as ts, count(*) c1, max(b)+sum(a) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(prev); +sql create stream streams6 trigger at_once IGNORE EXPIRED 0 into streamt6 as select _wstart as ts, count(*) c1, max(a)+min(c), avg(b) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(next); sql insert into t1 values(1648791211000,1,1,1,1.0,'aaa') (1648791217000,2,2,2,2.0,'aaa') (1648791220000,3,3,3,3.0,'aaa'); sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791215000,5,5,5,5.0,'aaa'); diff --git a/tests/script/tsim/stream/fillIntervalPrevNext1.sim b/tests/script/tsim/stream/fillIntervalPrevNext1.sim index 8058065bcf..40ef895c5a 100644 --- a/tests/script/tsim/stream/fillIntervalPrevNext1.sim +++ b/tests/script/tsim/stream/fillIntervalPrevNext1.sim @@ -16,8 +16,8 @@ sql drop database if exists test7; sql create database test7 vgroups 1; sql use test7; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams7 
trigger at_once into streamt7 as select _wstart as ts, max(a), b+c, s from t1 where ts >= 1648791150000 and ts < 1648791261000 interval(1s) fill(prev); -sql create stream streams8 trigger at_once into streamt8 as select _wstart as ts, max(a), 1, b+1 from t1 where ts >= 1648791150000 and ts < 1648791261000 interval(1s) fill(next); +sql create stream streams7 trigger at_once IGNORE EXPIRED 0 into streamt7 as select _wstart as ts, max(a), b+c, s from t1 where ts >= 1648791150000 and ts < 1648791261000 interval(1s) fill(prev); +sql create stream streams8 trigger at_once IGNORE EXPIRED 0 into streamt8 as select _wstart as ts, max(a), 1, b+1 from t1 where ts >= 1648791150000 and ts < 1648791261000 interval(1s) fill(next); sql insert into t1 values(1648791215000,1,1,1,1.0,'aaa'); sql insert into t1 values(1648791217000,2,2,2,2.0,'bbb'); sql insert into t1 values(1648791211000,3,3,3,3.0,'ccc'); diff --git a/tests/script/tsim/stream/fillIntervalRange.sim b/tests/script/tsim/stream/fillIntervalRange.sim index a0905141f2..0e0dfb46d8 100644 --- a/tests/script/tsim/stream/fillIntervalRange.sim +++ b/tests/script/tsim/stream/fillIntervalRange.sim @@ -13,7 +13,7 @@ sql create database test vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));; -sql create stream streams1 trigger at_once into streamt as select _wstart ts, count(*) c1 from t1 interval(1s) fill(NULL); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart ts, count(*) c1 from t1 interval(1s) fill(NULL); sql insert into t1 values(1648791211000,1,2,3,1.0,'aaa'); sleep 100 sql insert into t1 values(1648795308000,1,2,3,1.0,'aaa'); @@ -126,10 +126,10 @@ sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); print create stream streams1 trigger at_once into streamt as select _wstart ts, max(a) c1 from t1 interval(1s) fill(linear); -sql create stream streams1 trigger at_once into streamt as select _wstart ts, max(a) c1 from t1 interval(1s) fill(linear); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart ts, max(a) c1 from t1 interval(1s) fill(linear); print create stream streams2 trigger at_once into streamt2 as select _wstart ts, max(a) c1 from t1 interval(1s) fill(prev); -sql create stream streams2 trigger at_once into streamt2 as select _wstart ts, max(a) c1 from t1 interval(1s) fill(prev); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart ts, max(a) c1 from t1 interval(1s) fill(prev); sql insert into t1 values(1648791211000,1,2,3,1.0,'aaa'); sleep 100 diff --git a/tests/script/tsim/stream/fillIntervalValue.sim b/tests/script/tsim/stream/fillIntervalValue.sim index 2cd419397f..b447e9a559 100644 --- a/tests/script/tsim/stream/fillIntervalValue.sim +++ b/tests/script/tsim/stream/fillIntervalValue.sim @@ -13,8 +13,8 @@ sql create database test vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));; -sql create stream streams1 trigger at_once into streamt as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value, 100); -sql create stream streams1a trigger at_once into streamta as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value_f, 100); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart ts, count(*) c1 from t1 where ts > 
1648791210000 and ts < 1648791413000 interval(10s) fill(value, 100); +sql create stream streams1a trigger at_once IGNORE EXPIRED 0 into streamta as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value_f, 100); sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa'); sleep 100 sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa'); @@ -146,7 +146,7 @@ sql drop database if exists test2; sql create database test2 vgroups 1; sql use test2; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams2 trigger at_once into streamt2 as select _wstart as ts, count(*) c1, max(b)+sum(a) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value, 100,200); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart as ts, count(*) c1, max(b)+sum(a) from t1 where ts >= 1648791210000 and ts < 1648791261000 interval(1s) fill(value, 100,200); sql insert into t1 values(1648791211000,1,1,1,1.0,'aaa') (1648791217000,2,2,2,2.0,'aaa') (1648791220000,3,3,3,3.0,'aaa'); sql insert into t1 values(1648791213000,4,4,4,4.0,'aaa') (1648791215000,5,5,5,5.0,'aaa'); @@ -280,7 +280,7 @@ sql drop database if exists test3; sql create database test3 vgroups 1; sql use test3; sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); -sql create stream streams3 trigger at_once into streamt3 as select _wstart as ts, max(b), a+b, c from t1 where ts >= 1648791200000 and ts < 1648791261000 interval(10s) sliding(3s) fill(value, 100,200,300); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart as ts, max(b), a+b, c from t1 where ts >= 1648791200000 and ts < 1648791261000 interval(10s) sliding(3s) fill(value, 100,200,300); sql insert into t1 values(1648791220000,1,1,1,1.0,'aaa'); sleep 100 @@ -471,8 +471,8 @@ sql create stable st(ts timestamp,a int,b int,c int, d double, s varchar(20) ) t sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams4 trigger at_once into streamt4 as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL); -sql create stream streams4a trigger at_once into streamt4a as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL_F); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL); +sql create stream streams4a trigger at_once IGNORE EXPIRED 0 into streamt4a as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL_F); sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa'); sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa'); sql insert into t1 values(1648791273000,1,2,3,1.0,'aaa'); diff --git a/tests/script/tsim/stream/ignoreCheckUpdate.sim b/tests/script/tsim/stream/ignoreCheckUpdate.sim index 7f99c534c8..2cd0117feb 100644 --- a/tests/script/tsim/stream/ignoreCheckUpdate.sim +++ b/tests/script/tsim/stream/ignoreCheckUpdate.sim @@ -12,9 +12,9 @@ sql create database test 
vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int); -print create stream streams0 trigger at_once ignore update 1 into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 interval(10s); +print create stream streams0 trigger at_once IGNORE EXPIRED 0 ignore update 1 into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 interval(10s); -sql create stream streams0 trigger at_once ignore update 1 into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 interval(10s); +sql create stream streams0 trigger at_once IGNORE EXPIRED 0 ignore update 1 into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 interval(10s); sql insert into t1 values(1648791213000,1,1,1); sql insert into t1 values(1648791213000,2,2,2); diff --git a/tests/script/tsim/stream/partitionby.sim b/tests/script/tsim/stream/partitionby.sim index bc2c07b951..e63459e97d 100644 --- a/tests/script/tsim/stream/partitionby.sim +++ b/tests/script/tsim/stream/partitionby.sim @@ -12,7 +12,7 @@ sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); sql create table ts3 using st tags(3,2,2); sql create table ts4 using st tags(4,2,2); -sql create stream stream_t1 trigger at_once into test0.streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by ta,tb,tc interval(10s); +sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 into test0.streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by ta,tb,tc interval(10s); sql insert into ts1 values(1648791213001,1,12,3,1.0); sql insert into ts2 values(1648791213001,1,12,3,1.0); @@ -67,7 +67,7 @@ sql create table ts1 using st tags(1,2,3); sql create table ts2 using st tags(1,3,4); sql create table ts3 using st tags(1,4,5); -sql create stream streams1 trigger at_once into streamt as select _wstart, count(*) c1, count(a) c2 from st partition by ta,tb,tc interval(10s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, count(a) c2 from st partition by ta,tb,tc interval(10s); sql insert into ts1 values(1648791211000,1,2,3); @@ -98,7 +98,7 @@ sql create stable st(ts timestamp,a int,b int,c int,id int) tags(ta int,tb int,t sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); -sql create stream stream_t2 trigger at_once watermark 20s into streamtST as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by ta interval(10s) ; +sql create stream stream_t2 trigger at_once watermark 20s IGNORE EXPIRED 0 into streamtST as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by ta interval(10s) ; sql insert into ts1 values(1648791211000,1,2,3,1); sql insert into ts1 values(1648791222001,2,2,3,2); sql insert into ts2 values(1648791211000,1,2,3,3); diff --git a/tests/script/tsim/stream/partitionby1.sim b/tests/script/tsim/stream/partitionby1.sim index b29666cad7..c8bb25e0dd 100644 --- a/tests/script/tsim/stream/partitionby1.sim +++ b/tests/script/tsim/stream/partitionby1.sim @@ -11,7 +11,7 @@ sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); sql create table ts3 using st tags(3,2,2); sql create table ts4 using st tags(4,2,2); -sql create stream stream_t1 trigger at_once into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition 
by tbname interval(10s); +sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by tbname interval(10s); sql insert into ts1 values(1648791213001,1,12,3,1.0); sql insert into ts2 values(1648791213001,1,12,3,1.0); @@ -43,7 +43,7 @@ sql create table ts1 using st tags(1,2,3); sql create table ts2 using st tags(1,3,4); sql create table ts3 using st tags(1,4,5); -sql create stream streams1 trigger at_once into streamt as select _wstart, count(*) c1, count(a) c2 from st partition by tbname interval(10s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, count(a) c2 from st partition by tbname interval(10s); sql insert into ts1 values(1648791211000,1,2,3); @@ -74,7 +74,7 @@ sql create stable st(ts timestamp,a int,b int,c int,id int) tags(ta int,tb int,t sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); -sql create stream stream_t2 trigger at_once into streamtST as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by tbname interval(10s) ; +sql create stream stream_t2 trigger at_once IGNORE EXPIRED 0 into streamtST as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by tbname interval(10s) ; sql insert into ts1 values(1648791211000,1,2,3,1); sql insert into ts1 values(1648791222001,2,2,3,2); sql insert into ts2 values(1648791211000,1,2,3,3); diff --git a/tests/script/tsim/stream/partitionbyColumnInterval.sim b/tests/script/tsim/stream/partitionbyColumnInterval.sim index 2e57e8d699..94053990e4 100644 --- a/tests/script/tsim/stream/partitionbyColumnInterval.sim +++ b/tests/script/tsim/stream/partitionbyColumnInterval.sim @@ -16,7 +16,7 @@ sql drop database if exists test; sql create database test vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a interval(10s); +sql create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a interval(10s); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -197,7 +197,7 @@ sql drop database if exists test1; sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b interval(10s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b interval(10s); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -284,7 +284,7 @@ sql use test2; sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into 
test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -481,7 +481,7 @@ sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(2,2,2); sql create table t4 using st tags(2,2,2); -sql create stream streams4 trigger at_once into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); sql insert into t1 values(1648791213000,2,2,3,1.0); sql insert into t2 values(1648791213000,2,2,3,1.0); @@ -571,7 +571,7 @@ sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(2,2,2); sql create table t4 using st tags(2,2,2); -sql create stream streams5 trigger at_once into test.streamt5 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); +sql create stream streams5 trigger at_once IGNORE EXPIRED 0 into test.streamt5 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s); sql insert into t1 values(1648791213000,1,2,3,1.0); sql insert into t2 values(1648791213000,2,2,3,1.0); diff --git a/tests/script/tsim/stream/partitionbyColumnSession.sim b/tests/script/tsim/stream/partitionbyColumnSession.sim index 1742d52cf0..bb3f6015c7 100644 --- a/tests/script/tsim/stream/partitionbyColumnSession.sim +++ b/tests/script/tsim/stream/partitionbyColumnSession.sim @@ -16,7 +16,7 @@ sql drop database if exists test; sql create database test vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a session(ts, 5s); +sql create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a session(ts, 5s); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -196,7 +196,7 @@ sql drop database if exists test1; sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b session(ts, 5s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b session(ts, 5s); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -282,7 +282,7 @@ sql use test2; sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s); sql insert into t1 
values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -478,7 +478,7 @@ sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(2,2,2); sql create table t4 using st tags(2,2,2); -sql create stream streams4 trigger at_once into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s); sql insert into t1 values(1648791213000,2,2,3,1.0); sql insert into t2 values(1648791213000,2,2,3,1.0); diff --git a/tests/script/tsim/stream/partitionbyColumnState.sim b/tests/script/tsim/stream/partitionbyColumnState.sim index 75d01b17ec..62262a490c 100644 --- a/tests/script/tsim/stream/partitionbyColumnState.sim +++ b/tests/script/tsim/stream/partitionbyColumnState.sim @@ -11,7 +11,7 @@ sql drop database if exists test; sql create database test vgroups 1; sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a state_window(b); +sql create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a state_window(b); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); @@ -191,7 +191,7 @@ sql drop database if exists test1; sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d int); -sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(d) c3, _group_key(a+b) c4 from t1 partition by a+b state_window(c); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart c1, count(*) c2, max(d) c3, _group_key(a+b) c4 from t1 partition by a+b state_window(c); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL); diff --git a/tests/script/tsim/stream/schedSnode.sim b/tests/script/tsim/stream/schedSnode.sim index 2caecf50a2..6a4d6f79bb 100644 --- a/tests/script/tsim/stream/schedSnode.sim +++ b/tests/script/tsim/stream/schedSnode.sim @@ -20,7 +20,7 @@ sql create table ts1 using st tags(1,1,1); sql create table ts2 using st tags(2,2,2); sql create table ts3 using st tags(3,2,2); sql create table ts4 using st tags(4,2,2); -sql create stream stream_t1 trigger at_once into target.streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s); +sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 into target.streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s); sleep 1000 diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim index 5e95428e0a..622c5f7c6d 100644 --- a/tests/script/tsim/stream/session0.sim +++ b/tests/script/tsim/stream/session0.sim @@ -17,7 +17,7 @@ sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double,id int); -sql create stream streams1 trigger at_once into streamt as select _wstart, count(*) c1, sum(a), max(a), min(d), stddev(a), last(a), first(d), max(id) s from t1 session(ts,10s); +sql create stream 
streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, sum(a), max(a), min(d), stddev(a), last(a), first(d), max(id) s from t1 session(ts,10s); sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL,1); sql insert into t1 values(1648791223001,10,2,3,1.1,2); sql insert into t1 values(1648791233002,3,2,3,2.1,3); @@ -179,7 +179,7 @@ endi sql create database test2 vgroups 1; sql use test2; sql create table t2(ts timestamp, a int, b int , c int, d double, id int); -sql create stream streams2 trigger at_once watermark 1d into streamt2 as select _wstart,apercentile(a,30) c1, apercentile(a,70), apercentile(a,20,"t-digest") c2, apercentile(a,60,"t-digest") c3, max(id) c4 from t2 session(ts,10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 watermark 1d into streamt2 as select _wstart,apercentile(a,30) c1, apercentile(a,70), apercentile(a,20,"t-digest") c2, apercentile(a,60,"t-digest") c3, max(id) c4 from t2 session(ts,10s); sql insert into t2 values(1648791213001,1,1,3,1.0,1); sql insert into t2 values(1648791213002,2,2,6,3.4,2); sql insert into t2 values(1648791213003,4,9,3,4.8,3); @@ -229,13 +229,13 @@ endi sql create database test3 vgroups 1; sql use test3; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams3 trigger at_once watermark 1d into streamt3 as select _wstart, min(b), a,c from t1 session(ts,10s); -sql create stream streams4 trigger at_once watermark 1d into streamt4 as select _wstart, max(b), a,c from t1 session(ts,10s); -# sql create stream streams5 trigger at_once watermark 1d into streamt5 as select _wstart, top(b,3), a,c from t1 session(ts,10s); -# sql create stream streams6 trigger at_once watermark 1d into streamt6 as select _wstart, bottom(b,3), a,c from t1 session(ts,10s); -# sql create stream streams7 trigger at_once watermark 1d into streamt7 as select _wstart, spread(a), elapsed(ts), hyperloglog(a) from t1 session(ts,10s); -sql create stream streams7 trigger at_once watermark 1d into streamt7 as select _wstart, spread(a), hyperloglog(a) from t1 session(ts,10s); -# sql create stream streams8 trigger at_once watermark 1d into streamt8 as select _wstart, histogram(a,"user_input", "[1,3,5,7]", 1), histogram(a,"user_input", "[1,3,5,7]", 0) from t1 session(ts,10s); +sql create stream streams3 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt3 as select _wstart, min(b), a,c from t1 session(ts,10s); +sql create stream streams4 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt4 as select _wstart, max(b), a,c from t1 session(ts,10s); +# sql create stream streams5 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt5 as select _wstart, top(b,3), a,c from t1 session(ts,10s); +# sql create stream streams6 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt6 as select _wstart, bottom(b,3), a,c from t1 session(ts,10s); +# sql create stream streams7 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt7 as select _wstart, spread(a), elapsed(ts), hyperloglog(a) from t1 session(ts,10s); +sql create stream streams7 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt7 as select _wstart, spread(a), hyperloglog(a) from t1 session(ts,10s); +# sql create stream streams8 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt8 as select _wstart, histogram(a,"user_input", "[1,3,5,7]", 1), histogram(a,"user_input", "[1,3,5,7]", 0) from t1 session(ts,10s); sql insert into t1 values(1648791213001,1,1,1,1.0); sql insert into t1 values(1648791213002,2,3,2,3.4); sql 
insert into t1 values(1648791213003,4,9,3,4.8); diff --git a/tests/script/tsim/stream/session1.sim b/tests/script/tsim/stream/session1.sim index f535fd619f..3ad7c6f04e 100644 --- a/tests/script/tsim/stream/session1.sim +++ b/tests/script/tsim/stream/session1.sim @@ -17,7 +17,7 @@ sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double,id int); -sql create stream streams2 trigger at_once into streamt as select _wstart, count(*) c1, sum(a), min(b), max(id) s from t1 session(ts,10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, sum(a), min(b), max(id) s from t1 session(ts,10s); sql insert into t1 values(1648791210000,1,1,1,1.1,1); sql insert into t1 values(1648791220000,2,2,2,2.1,2); sql insert into t1 values(1648791230000,3,3,3,3.1,3); @@ -200,7 +200,7 @@ endi sql create database test1 vgroups 1; sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1 from t1 where a > 5 session(ts, 5s); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart, count(*) c1 from t1 where a > 5 session(ts, 5s); sql insert into t1 values(1648791213000,1,2,3,1.0); $loop_count = 0 diff --git a/tests/script/tsim/stream/sliding.sim b/tests/script/tsim/stream/sliding.sim index 8287274cd2..e12e3c6686 100644 --- a/tests/script/tsim/stream/sliding.sim +++ b/tests/script/tsim/stream/sliding.sim @@ -17,10 +17,10 @@ sql use test; sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams1 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s) sliding (5s); -sql create stream streams2 trigger at_once watermark 1d into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s) sliding (5s); -sql create stream stream_t1 trigger at_once into streamtST as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s) sliding (5s); -sql create stream stream_t2 trigger at_once watermark 1d into streamtST2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s) sliding (5s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s) sliding (5s); +sql create stream streams2 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s) sliding (5s); +sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 into streamtST as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s) sliding (5s); +sql create stream stream_t2 trigger at_once watermark 1d IGNORE EXPIRED 0 into streamtST2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s) sliding (5s); sql insert into t1 values(1648791210000,1,2,3,1.0); sql insert into t1 values(1648791216000,2,2,3,1.1); @@ -309,8 +309,8 @@ sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams11 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 
interval(10s, 5s); -sql create stream streams12 trigger at_once into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s, 5s); +sql create stream streams11 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s, 5s); +sql create stream streams12 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s, 5s); sql insert into t1 values(1648791213000,1,2,3,1.0); sql insert into t1 values(1648791223001,2,2,3,1.1); @@ -442,9 +442,9 @@ sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams21 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s, 5s); -sql create stream streams22 trigger at_once into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s, 5s); -sql create stream streams23 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(20s) sliding(10s); +sql create stream streams21 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s, 5s); +sql create stream streams22 trigger at_once IGNORE EXPIRED 0 into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s, 5s); +sql create stream streams23 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(20s) sliding(10s); sql insert into t1 values(1648791213000,1,1,1,1.0); sql insert into t1 values(1648791223001,2,2,2,1.1); @@ -683,7 +683,7 @@ sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams4 trigger at_once into streamt4 as select _wstart as ts, count(*),min(a) c1 from st interval(10s) sliding(5s); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into streamt4 as select _wstart as ts, count(*),min(a) c1 from st interval(10s) sliding(5s); sql insert into t1 values(1648791213000,1,1,1,1.0); sql insert into t1 values(1648791243000,2,1,1,1.0); diff --git a/tests/script/tsim/stream/state0.sim b/tests/script/tsim/stream/state0.sim index 07abdc0040..d009f742b7 100644 --- a/tests/script/tsim/stream/state0.sim +++ b/tests/script/tsim/stream/state0.sim @@ -17,9 +17,9 @@ sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double, id int); -print create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); +print create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); -sql create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); sql insert into t1 values(1648791213000,1,2,3,1.0,1); sql 
insert into t1 values(1648791213000,1,2,3,1.0,2); @@ -453,9 +453,9 @@ sql use test1; sql create table t1(ts timestamp, a int, b int , c int, d double, id int); -print create stream streams2 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); +print create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); -sql create stream streams2 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); sql insert into t1 values(1648791212000,2,2,3,1.0,1); sql insert into t1 values(1648791213000,1,2,3,1.0,1); @@ -501,9 +501,9 @@ sql use test3; sql create table t1(ts timestamp, a int, b int , c int, d double, id int); -print create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(b) c3 from t1 state_window(a); +print create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart, count(*) c1, sum(b) c3 from t1 state_window(a); -sql create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(b) c3 from t1 state_window(a); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into streamt3 as select _wstart, count(*) c1, sum(b) c3 from t1 state_window(a); sql insert into t1 values(1648791212000,1,2,3,1.0,1); sql insert into t1 values(1648791213000,2,2,3,1.0,1); sql insert into t1 values(1648791214000,3,2,4,1.0,2); @@ -553,9 +553,9 @@ sql create table st (ts timestamp, c1 tinyint, c2 smallint) tags (t1 tinyint) ; sql create table t1 using st tags (-81) ; sql create table t2 using st tags (-81) ; -print create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS startts, min(c1),count(c1) from t1 state_window(c1); +print create stream if not exists streams4 trigger window_close IGNORE EXPIRED 0 into streamt4 as select _wstart AS startts, min(c1),count(c1) from t1 state_window(c1); -sql create stream if not exists streams4 trigger window_close into streamt4 as select _wstart AS startts, min(c1),count(c1) from t1 state_window(c1); +sql create stream if not exists streams4 trigger window_close IGNORE EXPIRED 0 into streamt4 as select _wstart AS startts, min(c1),count(c1) from t1 state_window(c1); sql insert into t1 (ts, c1) values (1668073288209, 11); sql insert into t1 (ts, c1) values (1668073288210, 11); @@ -742,9 +742,9 @@ sql create table tb (ts timestamp, a int); sql insert into tb values (now + 1m , 1 ); sql create table b (c timestamp, d int, e int , f int, g double); -print create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from tb state_window(a); +print create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(a) c3 from tb state_window(a); -sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from tb state_window(a); +sql create stream streams0 trigger at_once IGNORE EXPIRED 0 into streamt as select _wstart c1, count(*) c2, max(a) c3 from tb state_window(a); sql insert into b values(1648791213000,NULL,NULL,NULL,NULL); sql select * from streamt 
order by c1, c2, c3; diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim index b522dcf035..1c62f689ac 100644 --- a/tests/script/tsim/stream/triggerInterval0.sim +++ b/tests/script/tsim/stream/triggerInterval0.sim @@ -15,7 +15,7 @@ print $data00 $data01 $data02 sql use test sql create table t1(ts timestamp, a int, b int , c int, d double); -sql create stream streams1 trigger window_close into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); +sql create stream streams1 trigger window_close IGNORE EXPIRED 0 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); sql insert into t1 values(1648791213001,1,2,3,1.0); sleep 300 diff --git a/tests/script/tsim/stream/triggerSession0.sim b/tests/script/tsim/stream/triggerSession0.sim index 4c664cf7c7..81a016be2b 100644 --- a/tests/script/tsim/stream/triggerSession0.sim +++ b/tests/script/tsim/stream/triggerSession0.sim @@ -15,7 +15,7 @@ print $data00 $data01 $data02 sql use test; sql create table t2(ts timestamp, a int, b int , c int, d double); -sql create stream streams2 trigger window_close into streamt2 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s); +sql create stream streams2 trigger window_close IGNORE EXPIRED 0 into streamt2 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s); sql insert into t2 values(1648791213000,1,2,3,1.0); sql insert into t2 values(1648791222999,1,2,3,1.0); diff --git a/tests/script/tsim/stream/udTableAndTag0.sim b/tests/script/tsim/stream/udTableAndTag0.sim index 8bf34dc54c..e3ab344bbe 100644 --- a/tests/script/tsim/stream/udTableAndTag0.sim +++ b/tests/script/tsim/stream/udTableAndTag0.sim @@ -20,8 +20,8 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -#sql_error create stream streams1 trigger at_once into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); -sql create stream streams1 trigger at_once into result.streamt SUBTABLE(concat("aaa-", tbname)) as select _wstart, count(*) c1 from st partition by tbname interval(10s); +#sql_error create stream streams1 trigger at_once IGNORE EXPIRED 0 into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into result.streamt SUBTABLE(concat("aaa-", tbname)) as select _wstart, count(*) c1 from st partition by tbname interval(10s); sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,1,2,3); @@ -88,7 +88,7 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams2 trigger at_once into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as cc interval(10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as cc interval(10s); sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,1,2,3); @@ -173,7 +173,7 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta 
int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams3 trigger at_once into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", tbname)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as dd, tbname interval(10s); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", tbname)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as dd, tbname interval(10s); sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,1,2,3); @@ -285,7 +285,7 @@ sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(3,3,3); -sql create stream streams4 trigger at_once into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", tbname)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as dd, tbname interval(10s); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", tbname)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as dd, tbname interval(10s); sql insert into t1 values(1648791213000,1,1,1) t2 values(1648791213000,2,2,2) t3 values(1648791213000,3,3,3); @@ -404,7 +404,7 @@ sql create table t1 using st tags("1",1,1); sql create table t2 using st tags("2",2,2); sql create table t3 using st tags("3",3,3); -sql create stream streams6 trigger at_once into result6.streamt6 TAGS(dd int) as select _wstart, count(*) c1 from st partition by concat(ta, "0") as dd, tbname interval(10s); +sql create stream streams6 trigger at_once IGNORE EXPIRED 0 into result6.streamt6 TAGS(dd int) as select _wstart, count(*) c1 from st partition by concat(ta, "0") as dd, tbname interval(10s); sql insert into t1 values(1648791213000,1,1,1) t2 values(1648791213000,2,2,2) t3 values(1648791213000,3,3,3); diff --git a/tests/script/tsim/stream/udTableAndTag1.sim b/tests/script/tsim/stream/udTableAndTag1.sim index 4229de2cf0..fbedaa6e4e 100644 --- a/tests/script/tsim/stream/udTableAndTag1.sim +++ b/tests/script/tsim/stream/udTableAndTag1.sim @@ -20,8 +20,8 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -#sql_error create stream streams1 trigger at_once into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); -sql create stream streams1 trigger at_once into result.streamt SUBTABLE( concat("aaa-", cast(a as varchar(10) ) ) ) as select _wstart, count(*) c1 from st partition by a interval(10s); +#sql_error create stream streams1 trigger at_once IGNORE EXPIRED 0 into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into result.streamt SUBTABLE( concat("aaa-", cast(a as varchar(10) ) ) ) as select _wstart, count(*) c1 from st partition by a interval(10s); print ===== insert into 1 sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); @@ -87,7 +87,7 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams2 trigger at_once into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 
from st partition by concat("col-", cast(a as varchar(10) ) ) as cc interval(10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st partition by concat("col-", cast(a as varchar(10) ) ) as cc interval(10s); print ===== insert into 2 sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); @@ -171,7 +171,7 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams3 trigger at_once into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", cast(a as varchar(10) ) ) ) as select _wstart, count(*) c1 from st partition by concat("col-", cast(a as varchar(10) ) ) as dd, a interval(10s); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", cast(a as varchar(10) ) ) ) as select _wstart, count(*) c1 from st partition by concat("col-", cast(a as varchar(10) ) ) as dd, a interval(10s); print ===== insert into 3 sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); @@ -283,7 +283,7 @@ sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(3,3,3); -sql create stream streams4 trigger at_once into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", dd)) as select _wstart, count(*) c1 from st partition by concat("t", cast(a as varchar(10) ) ) as dd interval(10s); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", dd)) as select _wstart, count(*) c1 from st partition by concat("t", cast(a as varchar(10) ) ) as dd interval(10s); sql insert into t1 values(1648791213000,1,1,1) t2 values(1648791213000,2,2,2) t3 values(1648791213000,3,3,3); diff --git a/tests/script/tsim/stream/udTableAndTag2.sim b/tests/script/tsim/stream/udTableAndTag2.sim index bacc301ad0..c0e72712df 100644 --- a/tests/script/tsim/stream/udTableAndTag2.sim +++ b/tests/script/tsim/stream/udTableAndTag2.sim @@ -20,7 +20,7 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams1 trigger at_once into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); print ===== insert into 1 sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); @@ -93,7 +93,7 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams2 trigger at_once into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st interval(10s); print ===== insert into 2 sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); @@ -195,7 +195,7 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using 
st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams3 trigger at_once into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", "1") ) as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", "1") ) as select _wstart, count(*) c1 from st interval(10s); print ===== insert into 3 sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); @@ -305,7 +305,7 @@ sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); sql create table t3 using st tags(3,3,3); -sql create stream streams4 trigger at_once into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", "1")) as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", "1")) as select _wstart, count(*) c1 from st interval(10s); sql insert into t1 values(1648791213000,1,1,1) t2 values(1648791213000,2,2,2) t3 values(1648791213000,3,3,3); @@ -375,9 +375,9 @@ sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create table t2 using st tags(2,2,2); -sql create stream streams51 trigger at_once into result5.streamt51 SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); -sql create stream streams52 trigger at_once into result5.streamt52 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st interval(10s); -sql create stream streams53 trigger at_once into result5.streamt53 TAGS(dd varchar(100)) SUBTABLE(concat("aaa-", "1") ) as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams51 trigger at_once IGNORE EXPIRED 0 into result5.streamt51 SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams52 trigger at_once IGNORE EXPIRED 0 into result5.streamt52 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams53 trigger at_once IGNORE EXPIRED 0 into result5.streamt53 TAGS(dd varchar(100)) SUBTABLE(concat("aaa-", "1") ) as select _wstart, count(*) c1 from st interval(10s); sql insert into t1 values(1648791213000,1,2,3); sql insert into t2 values(1648791213000,2,2,3); diff --git a/tests/system-test/1-insert/database_pre_suf.py b/tests/system-test/1-insert/database_pre_suf.py index 488dfebff5..a6ff95ab3f 100755 --- a/tests/system-test/1-insert/database_pre_suf.py +++ b/tests/system-test/1-insert/database_pre_suf.py @@ -108,7 +108,7 @@ class TDTestCase: # create stream - tdSql.execute('''create stream current_stream into stream_max_stable_1 as select _wstart as startts, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s);''') + tdSql.execute('''create stream current_stream trigger at_once IGNORE EXPIRED 0 into stream_max_stable_1 as select _wstart as startts, _wend as wend, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s);''') # insert data for i in range(num_random*n): diff --git a/tests/system-test/1-insert/drop.py b/tests/system-test/1-insert/drop.py index c15a9bbc35..a8bfea2741 100644 --- a/tests/system-test/1-insert/drop.py +++ b/tests/system-test/1-insert/drop.py @@ -138,14 +138,14 @@ class TDTestCase: stream_name = tdCom.getLongName(5,"letters") tdSql.execute(f'create table {stbname} 
(ts timestamp,c0 int) tags(t0 int)') tdSql.execute(f'create table tb using {stbname} tags(1)') - tdSql.execute(f'create stream {stream_name} into stb as select * from {self.dbname}.{stbname} partition by tbname') + tdSql.execute(f'create stream {stream_name} trigger at_once ignore expired 0 into stb as select * from {self.dbname}.{stbname} partition by tbname') tdSql.query(f'select * from information_schema.ins_streams where stream_name = "{stream_name}"') print(tdSql.queryResult) - tdSql.checkEqual(tdSql.queryResult[0][2],f'create stream {stream_name} into stb as select * from {self.dbname}.{stbname} partition by tbname') + tdSql.checkEqual(tdSql.queryResult[0][2],f'create stream {stream_name} trigger at_once ignore expired 0 into stb as select * from {self.dbname}.{stbname} partition by tbname') tdSql.execute(f'drop stream {stream_name}') - tdSql.execute(f'create stream {stream_name} into stb1 as select * from tb') + tdSql.execute(f'create stream {stream_name} trigger at_once ignore expired 0 into stb1 as select * from tb') tdSql.query(f'select * from information_schema.ins_streams where stream_name = "{stream_name}"') - tdSql.checkEqual(tdSql.queryResult[0][2],f'create stream {stream_name} into stb1 as select * from tb') + tdSql.checkEqual(tdSql.queryResult[0][2],f'create stream {stream_name} trigger at_once ignore expired 0 into stb1 as select * from tb') tdSql.execute(f'drop database {self.dbname}') def run(self): self.drop_ntb_check() diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index 3d458d90c1..1f25eae366 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -409,7 +409,7 @@ int buildStable(TAOS* pConn, TAOS_RES* pRes) { taos_free_result(pRes); pRes = taos_query(pConn, - "create stream meters_summary_s into meters_summary as select _wstart, max(current) as current, " + "create stream meters_summary_s trigger at_once IGNORE EXPIRED 0 into meters_summary as select _wstart, max(current) as current, " "groupid, location from meters partition by groupid, location interval(10m)"); if (taos_errno(pRes) != 0) { printf("failed to create super table meters_summary, reason:%s\n", taos_errstr(pRes)); From cad32b12a5018c5ccbb9320421052ee81f232681 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 28 Feb 2023 18:30:51 +0800 Subject: [PATCH 22/27] add inherit from upstream order option for exchange --- source/libs/executor/inc/executorimpl.h | 2 +- source/libs/executor/src/executorimpl.c | 10 ++++++---- source/libs/executor/src/filloperator.c | 4 ++-- source/libs/executor/src/groupoperator.c | 2 +- source/libs/executor/src/projectoperator.c | 4 ++-- source/libs/executor/src/timewindowoperator.c | 6 +++--- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 5df3b14a5b..be79054c1b 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -732,7 +732,7 @@ void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int64_t numOfRows, int3 STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order); -int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag); +int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder); int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz); extern void doDestroyExchangeOperatorInfo(void* param); diff --git 
a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 0b8bd2a817..6b7378cb67 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1449,7 +1449,7 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t return TSDB_CODE_SUCCESS; } -int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) { +int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder) { // todo add more information about exchange operation int32_t type = pOperator->operatorType; if (type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || @@ -1459,7 +1459,9 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan *scanFlag = MAIN_SCAN; return TSDB_CODE_SUCCESS; } else if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) { - // for exchange operator inherit order from upstream and do not overwrite here + if (!inheritUsOrder) { + *order = TSDB_ORDER_ASC; + } *scanFlag = MAIN_SCAN; return TSDB_CODE_SUCCESS; } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) { @@ -1476,7 +1478,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) { return TSDB_CODE_INVALID_PARA; } else { - return getTableScanInfo(pOperator->pDownstream[0], order, scanFlag); + return getTableScanInfo(pOperator->pDownstream[0], order, scanFlag, inheritUsOrder); } } } @@ -1592,7 +1594,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { } hasValidBlock = true; - int32_t code = getTableScanInfo(pOperator, &order, &scanFlag); + int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false); if (code != TSDB_CODE_SUCCESS) { destroyDataBlockForEmptyInput(blockAllocated, &pBlock); T_LONG_JMP(pTaskInfo->env, code); diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index adb045a69f..2a33e3527a 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -69,7 +69,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp int32_t order = TSDB_ORDER_ASC; int32_t scanFlag = MAIN_SCAN; - getTableScanInfo(pOperator, &order, &scanFlag); + getTableScanInfo(pOperator, &order, &scanFlag, false); int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey; taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo)); @@ -128,7 +128,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { int32_t order = TSDB_ORDER_ASC; int32_t scanFlag = MAIN_SCAN; - getTableScanInfo(pOperator, &order, &scanFlag); + getTableScanInfo(pOperator, &order, &scanFlag, false); doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, pTaskInfo); if (pResBlock->info.rows > 0) { diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 8f5c3786e0..65146edfac 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -383,7 +383,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { break; } - int32_t code = getTableScanInfo(pOperator, &order, &scanFlag); + int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); } diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c 
index 3ae114c656..49bc5af634 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -289,7 +289,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { } // the pDataBlock are always the same one, no need to call this again - int32_t code = getTableScanInfo(downstream, &order, &scanFlag); + int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); } @@ -441,7 +441,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp SExprSupp* pSup = &pOperator->exprSupp; // the pDataBlock are always the same one, no need to call this again - int32_t code = getTableScanInfo(downstream, &order, &scanFlag); + int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); } diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 20d4f46eaf..6411d862ae 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1072,7 +1072,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { break; } - getTableScanInfo(pOperator, &pInfo->inputOrder, &scanFlag); + getTableScanInfo(pOperator, &pInfo->inputOrder, &scanFlag, true); if (pInfo->scalarSupp.pExprInfo != NULL) { SExprSupp* pExprSup = &pInfo->scalarSupp; @@ -4294,7 +4294,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { } } - getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag); + getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag, false); setInputDataBlock(pSup, pBlock, pIaInfo->inputOrder, scanFlag, true); doMergeAlignedIntervalAggImpl(pOperator, &pIaInfo->binfo.resultRowInfo, pBlock, pRes); @@ -4621,7 +4621,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { break; } - getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag); + getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag, false); setInputDataBlock(pExpSupp, pBlock, iaInfo->inputOrder, scanFlag, true); doMergeIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes); From 10531d401a8ffef341498513f796a1a2ce88b06d Mon Sep 17 00:00:00 2001 From: lafirest Date: Tue, 28 Feb 2023 18:35:39 +0800 Subject: [PATCH 23/27] fix command options name error (#19874) --- docs/zh/14-reference/07-tdinsight/index.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/14-reference/07-tdinsight/index.mdx b/docs/zh/14-reference/07-tdinsight/index.mdx index 8ec11378ee..319fa8df53 100644 --- a/docs/zh/14-reference/07-tdinsight/index.mdx +++ b/docs/zh/14-reference/07-tdinsight/index.mdx @@ -140,10 +140,10 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste | -n | --tdengine-ds-name | TDENGINE_DS_NAME | TDengine 数据源名称,默认为 TDengine。 | | -a | --tdengine-api | TDENGINE_API | TDengine REST API 端点。默认为`http://127.0.0.1:6041`。 | | -u | --tdengine-user | TDENGINE_USER | TDengine 用户名。 [默认值:root] | -| -p | --tdengine-密码 | TDENGINE_PASSWORD | TDengine 密码。 [默认:taosdata] | +| -p | --tdengine-password | TDENGINE_PASSWORD | TDengine 密码。 [默认:taosdata] | | -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight 仪表盘`uid`。 [默认值:tdinsight] | | -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] | -| -e | --tdinsight-可编辑 | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] | +| -e | 
--tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] | | -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 | 假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本: From 60f24a5c3b96efbf87b489f4e9ca016a61392cee Mon Sep 17 00:00:00 2001 From: Xuefeng Tan <1172915550@qq.com> Date: Tue, 28 Feb 2023 19:32:49 +0800 Subject: [PATCH 24/27] enh(taosAdapter): configurable Http status code (#20212) --- cmake/taosadapter_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index a0e50a02d3..0ff1371618 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 9cfe416 + GIT_TAG 7920f98 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From b5efd1ba057201fb4d0e3abe3bcd99a8f8ee8d27 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Tue, 28 Feb 2023 20:31:40 +0800 Subject: [PATCH 25/27] Update cases.task --- tests/parallel_test/cases.task | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c70a50867b..e933bdb693 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -680,8 +680,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 6 -M 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 -n 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 From ba83826ce1031a97c412786a8ecea9eae1c10115 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 28 Feb 2023 21:27:04 +0800 Subject: [PATCH 26/27] fix: taosdump time range (#20215) --- cmake/taostools_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index db2ae92f6e..aecb8a1750 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 1e15545 + GIT_TAG 0111c66 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From ffa1deb07716c1349e33ac607548c1046cb2e2b1 Mon Sep 17 00:00:00 2001 From: Hui Li <52318143+plum-lihui@users.noreply.github.com> Date: Wed, 1 Mar 2023 11:53:00 +0800 Subject: [PATCH 27/27] 'test: add timeout to poll' --- tests/system-test/7-tmq/tmqDelete-1ctb.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py index 4c62bb757b..b09efdd1e6 100644 --- a/tests/system-test/7-tmq/tmqDelete-1ctb.py +++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py @@ -109,7 +109,7 @@ class TDTestCase: 'batchNum': 3000, 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 'endTs': 0, - 'pollDelay': 5, + 'pollDelay': 15, 'showMsg': 1, 'showRow': 1, 'snapshot': 0} @@ -194,7 +194,7 @@ class TDTestCase: 'rowsPerTbl': 10000, 'batchNum': 5000, 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 - 'pollDelay': 5, + 'pollDelay': 15, 'showMsg': 1, 'showRow': 1, 'snapshot': 0} @@ -296,7 +296,7 @@ class TDTestCase: 'rowsPerTbl': 10000, 'batchNum': 5000, 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 - 'pollDelay': 5, + 'pollDelay': 15, 'showMsg': 1, 'showRow': 1, 'snapshot': 0}
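Note on the stream test changes above: the recurring edit across the .sim scripts and Python cases is adding "IGNORE EXPIRED 0" (plus an explicit "trigger at_once" where the statement had none) to the CREATE STREAM definitions, presumably so the stream also computes rows that arrive for already-closed windows instead of dropping them. A minimal sketch of the adjusted statement follows; the stream, source and target names (s1, src, dst) are hypothetical and only the clause layout mirrors the scripts:

    create stream s1 trigger at_once ignore expired 0 into dst as
      select _wstart, count(*) c1, sum(a) c3, max(b) c4, min(c) c5
      from src interval(10s);

In the scripts the same clause is applied uniformly to interval(), session() and state_window() streams, so the option is orthogonal to the window type being tested.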